From 988998503bc6d8c03fbea001a0513e8372fddf28 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Fri, 12 May 2023 18:12:43 +0100 Subject: [PATCH 01/74] tcg/i386: Set P_REXW in tcg_out_addi_ptr The REXW bit must be set to produce a 64-bit pointer result; the bit is disabled in 32-bit mode, so we can do this unconditionally. Fixes: 7d9e1ee424b0 ("tcg/i386: Adjust assert in tcg_out_addi_ptr") Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1592 Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1642 Signed-off-by: Richard Henderson --- tcg/i386/tcg-target.c.inc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index a01bfad773..9fc5592f5d 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -1091,7 +1091,7 @@ static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, { /* This function is only used for passing structs by reference. */ tcg_debug_assert(imm == (int32_t)imm); - tcg_out_modrm_offset(s, OPC_LEA, rd, rs, imm); + tcg_out_modrm_offset(s, OPC_LEA | P_REXW, rd, rs, imm); } static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val) From 37031fefc777a715320f86fc35ee3dd82d9d945e Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Fri, 21 Oct 2022 21:24:40 +1000 Subject: [PATCH 02/74] include/exec/memop: Add MO_ATOM_* This field may be used to describe the precise atomicity requirements of the guest, which may then be used to constrain the methods by which it may be emulated by the host. For instance, the AArch64 LDP (32-bit) instruction changes semantics with ARMv8.4 LSE2, from MO_64 | MO_ATOM_IFALIGN_PAIR (64-bits, single-copy atomic only on 4 byte units, nonatomic if not aligned by 4), to MO_64 | MO_ATOM_WITHIN16 (64-bits, single-copy atomic within a 16 byte block) The former may be implemented with two 4 byte loads, or a single 8 byte load if that happens to be efficient on the host. The latter may not be implemented with two 4 byte loads and may also require a helper when misaligned. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- include/exec/memop.h | 37 +++++++++++++++++++++++++++++++++++++ tcg/tcg.c | 27 +++++++++++++++++++++------ 2 files changed, 58 insertions(+), 6 deletions(-) diff --git a/include/exec/memop.h b/include/exec/memop.h index 07f5f88188..a86dc6743a 100644 --- a/include/exec/memop.h +++ b/include/exec/memop.h @@ -72,6 +72,43 @@ typedef enum MemOp { MO_ALIGN_64 = 6 << MO_ASHIFT, MO_ALIGN = MO_AMASK, + /* + * MO_ATOM_* describes the atomicity requirements of the operation: + * MO_ATOM_IFALIGN: the operation must be single-copy atomic if it + * is aligned; if unaligned there is no atomicity. + * MO_ATOM_IFALIGN_PAIR: the entire operation may be considered to + * be a pair of half-sized operations which are packed together + * for convenience, with single-copy atomicity on each half if + * the half is aligned. + * This is the atomicity e.g. of Arm pre-FEAT_LSE2 LDP. + * MO_ATOM_WITHIN16: the operation is single-copy atomic, even if it + * is unaligned, so long as it does not cross a 16-byte boundary; + * if it crosses a 16-byte boundary there is no atomicity. + * This is the atomicity e.g. of Arm FEAT_LSE2 LDR. + * MO_ATOM_WITHIN16_PAIR: the entire operation is single-copy atomic, + * if it happens to be within a 16-byte boundary, otherwise it + * devolves to a pair of half-sized MO_ATOM_WITHIN16 operations. + * Depending on alignment, one or both will be single-copy atomic. + * This is the atomicity e.g. of Arm FEAT_LSE2 LDP. 
+ * MO_ATOM_SUBALIGN: the operation is single-copy atomic by parts + * by the alignment. E.g. if the address is 0 mod 4, then each + * 4-byte subobject is single-copy atomic. + * This is the atomicity e.g. of IBM Power. + * MO_ATOM_NONE: the operation has no atomicity requirements. + * + * Note the default (i.e. 0) value is single-copy atomic to the + * size of the operation, if aligned. This retains the behaviour + * from before this field was introduced. + */ + MO_ATOM_SHIFT = 8, + MO_ATOM_IFALIGN = 0 << MO_ATOM_SHIFT, + MO_ATOM_IFALIGN_PAIR = 1 << MO_ATOM_SHIFT, + MO_ATOM_WITHIN16 = 2 << MO_ATOM_SHIFT, + MO_ATOM_WITHIN16_PAIR = 3 << MO_ATOM_SHIFT, + MO_ATOM_SUBALIGN = 4 << MO_ATOM_SHIFT, + MO_ATOM_NONE = 5 << MO_ATOM_SHIFT, + MO_ATOM_MASK = 7 << MO_ATOM_SHIFT, + /* Combinations of the above, for ease of use. */ MO_UB = MO_8, MO_UW = MO_16, diff --git a/tcg/tcg.c b/tcg/tcg.c index 1231c8ab4c..f156ca65f5 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -2195,6 +2195,15 @@ static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = { [MO_ALIGN_64 >> MO_ASHIFT] = "al64+", }; +static const char * const atom_name[(MO_ATOM_MASK >> MO_ATOM_SHIFT) + 1] = { + [MO_ATOM_IFALIGN >> MO_ATOM_SHIFT] = "", + [MO_ATOM_IFALIGN_PAIR >> MO_ATOM_SHIFT] = "pair+", + [MO_ATOM_WITHIN16 >> MO_ATOM_SHIFT] = "w16+", + [MO_ATOM_WITHIN16_PAIR >> MO_ATOM_SHIFT] = "w16p+", + [MO_ATOM_SUBALIGN >> MO_ATOM_SHIFT] = "sub+", + [MO_ATOM_NONE >> MO_ATOM_SHIFT] = "noat+", +}; + static const char bswap_flag_name[][6] = { [TCG_BSWAP_IZ] = "iz", [TCG_BSWAP_OZ] = "oz", @@ -2330,17 +2339,23 @@ static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) case INDEX_op_qemu_ld_i64: case INDEX_op_qemu_st_i64: { + const char *s_al, *s_op, *s_at; MemOpIdx oi = op->args[k++]; MemOp op = get_memop(oi); unsigned ix = get_mmuidx(oi); - if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) { - col += ne_fprintf(f, ",$0x%x,%u", op, ix); + s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT]; + s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)]; + s_at = atom_name[(op & MO_ATOM_MASK) >> MO_ATOM_SHIFT]; + op &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK); + + /* If all fields are accounted for, print symbolically. */ + if (!op && s_al && s_op && s_at) { + col += ne_fprintf(f, ",%s%s%s,%u", + s_at, s_al, s_op, ix); } else { - const char *s_al, *s_op; - s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT]; - s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)]; - col += ne_fprintf(f, ",%s%s,%u", s_al, s_op, ix); + op = get_memop(oi); + col += ne_fprintf(f, ",$0x%x,%u", op, ix); } i = 1; } From cdfac37be0d1876832d2bb813d29df3ab885329c Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 29 Oct 2022 16:01:04 +1100 Subject: [PATCH 03/74] accel/tcg: Honor atomicity of loads Create ldst_atomicity.c.inc. Not required for user-only code loads, because we've ensured that the page is read-only before beginning to translate code. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 175 +++++++--- accel/tcg/ldst_atomicity.c.inc | 566 +++++++++++++++++++++++++++++++++ accel/tcg/user-exec.c | 26 +- 3 files changed, 716 insertions(+), 51 deletions(-) create mode 100644 accel/tcg/ldst_atomicity.c.inc diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 617777055a..33e75ae962 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1668,6 +1668,9 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, return qemu_ram_addr_from_host_nofail(p); } +/* Load/store with atomicity primitives. 
*/ +#include "ldst_atomicity.c.inc" + #ifdef CONFIG_PLUGIN /* * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. @@ -2035,35 +2038,7 @@ static void validate_memop(MemOpIdx oi, MemOp expected) * specifically for reading instructions from system memory. It is * called by the translation loop and in some helpers where the code * is disassembled. It shouldn't be called directly by guest code. - */ - -typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); - -static inline uint64_t QEMU_ALWAYS_INLINE -load_memop(const void *haddr, MemOp op) -{ - switch (op) { - case MO_UB: - return ldub_p(haddr); - case MO_BEUW: - return lduw_be_p(haddr); - case MO_LEUW: - return lduw_le_p(haddr); - case MO_BEUL: - return (uint32_t)ldl_be_p(haddr); - case MO_LEUL: - return (uint32_t)ldl_le_p(haddr); - case MO_BEUQ: - return ldq_be_p(haddr); - case MO_LEUQ: - return ldq_le_p(haddr); - default: - qemu_build_not_reached(); - } -} - -/* + * * For the benefit of TCG generated code, we want to avoid the * complication of ABI-specific return type promotion and always * return a value extended to the register size of the host. This is @@ -2119,17 +2094,139 @@ static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be) return ret_be; } +/** + * do_ld_parts_beN + * @p: translation parameters + * @ret_be: accumulated data + * + * As do_ld_bytes_beN, but atomically on each aligned part. + */ +static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be) +{ + void *haddr = p->haddr; + int size = p->size; + + do { + uint64_t x; + int n; + + /* + * Find minimum of alignment and size. + * This is slightly stronger than required by MO_ATOM_SUBALIGN, which + * would have only checked the low bits of addr|size once at the start, + * but is just as easy. + */ + switch (((uintptr_t)haddr | size) & 7) { + case 4: + x = cpu_to_be32(load_atomic4(haddr)); + ret_be = (ret_be << 32) | x; + n = 4; + break; + case 2: + case 6: + x = cpu_to_be16(load_atomic2(haddr)); + ret_be = (ret_be << 16) | x; + n = 2; + break; + default: + x = *(uint8_t *)haddr; + ret_be = (ret_be << 8) | x; + n = 1; + break; + case 0: + g_assert_not_reached(); + } + haddr += n; + size -= n; + } while (size != 0); + return ret_be; +} + +/** + * do_ld_parts_be4 + * @p: translation parameters + * @ret_be: accumulated data + * + * As do_ld_bytes_beN, but with one atomic load. + * Four aligned bytes are guaranteed to cover the load. + */ +static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be) +{ + int o = p->addr & 3; + uint32_t x = load_atomic4(p->haddr - o); + + x = cpu_to_be32(x); + x <<= o * 8; + x >>= (4 - p->size) * 8; + return (ret_be << (p->size * 8)) | x; +} + +/** + * do_ld_parts_be8 + * @p: translation parameters + * @ret_be: accumulated data + * + * As do_ld_bytes_beN, but with one atomic load. + * Eight aligned bytes are guaranteed to cover the load. + */ +static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra, + MMULookupPageData *p, uint64_t ret_be) +{ + int o = p->addr & 7; + uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o); + + x = cpu_to_be64(x); + x <<= o * 8; + x >>= (8 - p->size) * 8; + return (ret_be << (p->size * 8)) | x; +} + /* * Wrapper for the above. 
*/ static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p, - uint64_t ret_be, int mmu_idx, - MMUAccessType type, uintptr_t ra) + uint64_t ret_be, int mmu_idx, MMUAccessType type, + MemOp mop, uintptr_t ra) { + MemOp atom; + unsigned tmp, half_size; + if (unlikely(p->flags & TLB_MMIO)) { return do_ld_mmio_beN(env, p, ret_be, mmu_idx, type, ra); - } else { + } + + /* + * It is a given that we cross a page and therefore there is no + * atomicity for the load as a whole, but subobjects may need attention. + */ + atom = mop & MO_ATOM_MASK; + switch (atom) { + case MO_ATOM_SUBALIGN: + return do_ld_parts_beN(p, ret_be); + + case MO_ATOM_IFALIGN_PAIR: + case MO_ATOM_WITHIN16_PAIR: + tmp = mop & MO_SIZE; + tmp = tmp ? tmp - 1 : 0; + half_size = 1 << tmp; + if (atom == MO_ATOM_IFALIGN_PAIR + ? p->size == half_size + : p->size >= half_size) { + if (!HAVE_al8_fast && p->size < 4) { + return do_ld_whole_be4(p, ret_be); + } else { + return do_ld_whole_be8(env, ra, p, ret_be); + } + } + /* fall through */ + + case MO_ATOM_IFALIGN: + case MO_ATOM_WITHIN16: + case MO_ATOM_NONE: return do_ld_bytes_beN(p, ret_be); + + default: + g_assert_not_reached(); } } @@ -2153,7 +2250,7 @@ static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx, } /* Perform the load host endian, then swap if necessary. */ - ret = load_memop(p->haddr, MO_UW); + ret = load_atom_2(env, ra, p->haddr, memop); if (memop & MO_BSWAP) { ret = bswap16(ret); } @@ -2170,7 +2267,7 @@ static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx, } /* Perform the load host endian. */ - ret = load_memop(p->haddr, MO_UL); + ret = load_atom_4(env, ra, p->haddr, memop); if (memop & MO_BSWAP) { ret = bswap32(ret); } @@ -2187,7 +2284,7 @@ static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx, } /* Perform the load host endian. */ - ret = load_memop(p->haddr, MO_UQ); + ret = load_atom_8(env, ra, p->haddr, memop); if (memop & MO_BSWAP) { ret = bswap64(ret); } @@ -2263,8 +2360,8 @@ static uint32_t do_ld4_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); } - ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, ra); - ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, ra); + ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); + ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); if ((l.memop & MO_BSWAP) == MO_LE) { ret = bswap32(ret); } @@ -2297,8 +2394,8 @@ static uint64_t do_ld8_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); } - ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, ra); - ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, ra); + ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); + ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); if ((l.memop & MO_BSWAP) == MO_LE) { ret = bswap64(ret); } diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc new file mode 100644 index 0000000000..a5bce641f8 --- /dev/null +++ b/accel/tcg/ldst_atomicity.c.inc @@ -0,0 +1,566 @@ +/* + * Routines common to user and system emulation of load/store. + * + * Copyright (c) 2022 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifdef CONFIG_ATOMIC64 +# define HAVE_al8 true +#else +# define HAVE_al8 false +#endif +#define HAVE_al8_fast (ATOMIC_REG_SIZE >= 8) + +#if defined(CONFIG_ATOMIC128) +# define HAVE_al16_fast true +#else +# define HAVE_al16_fast false +#endif + +/** + * required_atomicity: + * + * Return the lg2 bytes of atomicity required by @memop for @p. + * If the operation must be split into two operations to be + * examined separately for atomicity, return -lg2. + */ +static int required_atomicity(CPUArchState *env, uintptr_t p, MemOp memop) +{ + MemOp atom = memop & MO_ATOM_MASK; + MemOp size = memop & MO_SIZE; + MemOp half = size ? size - 1 : 0; + unsigned tmp; + int atmax; + + switch (atom) { + case MO_ATOM_NONE: + atmax = MO_8; + break; + + case MO_ATOM_IFALIGN_PAIR: + size = half; + /* fall through */ + + case MO_ATOM_IFALIGN: + tmp = (1 << size) - 1; + atmax = p & tmp ? MO_8 : size; + break; + + case MO_ATOM_WITHIN16: + tmp = p & 15; + atmax = (tmp + (1 << size) <= 16 ? size : MO_8); + break; + + case MO_ATOM_WITHIN16_PAIR: + tmp = p & 15; + if (tmp + (1 << size) <= 16) { + atmax = size; + } else if (tmp + (1 << half) == 16) { + /* + * The pair exactly straddles the boundary. + * Both halves are naturally aligned and atomic. + */ + atmax = half; + } else { + /* + * One of the pair crosses the boundary, and is non-atomic. + * The other of the pair does not cross, and is atomic. + */ + atmax = -half; + } + break; + + case MO_ATOM_SUBALIGN: + /* + * Examine the alignment of p to determine if there are subobjects + * that must be aligned. Note that we only really need ctz4() -- + * any more sigificant bits are discarded by the immediately + * following comparison. + */ + tmp = ctz32(p); + atmax = MIN(size, tmp); + break; + + default: + g_assert_not_reached(); + } + + /* + * Here we have the architectural atomicity of the operation. + * However, when executing in a serial context, we need no extra + * host atomicity in order to avoid racing. This reduction + * avoids looping with cpu_loop_exit_atomic. + */ + if (cpu_in_serial_context(env_cpu(env))) { + return MO_8; + } + return atmax; +} + +/** + * load_atomic2: + * @pv: host address + * + * Atomically load 2 aligned bytes from @pv. + */ +static inline uint16_t load_atomic2(void *pv) +{ + uint16_t *p = __builtin_assume_aligned(pv, 2); + return qatomic_read(p); +} + +/** + * load_atomic4: + * @pv: host address + * + * Atomically load 4 aligned bytes from @pv. + */ +static inline uint32_t load_atomic4(void *pv) +{ + uint32_t *p = __builtin_assume_aligned(pv, 4); + return qatomic_read(p); +} + +/** + * load_atomic8: + * @pv: host address + * + * Atomically load 8 aligned bytes from @pv. + */ +static inline uint64_t load_atomic8(void *pv) +{ + uint64_t *p = __builtin_assume_aligned(pv, 8); + + qemu_build_assert(HAVE_al8); + return qatomic_read__nocheck(p); +} + +/** + * load_atomic16: + * @pv: host address + * + * Atomically load 16 aligned bytes from @pv. + */ +static inline Int128 load_atomic16(void *pv) +{ +#ifdef CONFIG_ATOMIC128 + __uint128_t *p = __builtin_assume_aligned(pv, 16); + Int128Alias r; + + r.u = qatomic_read__nocheck(p); + return r.s; +#else + qemu_build_not_reached(); +#endif +} + +/** + * load_atomic8_or_exit: + * @env: cpu context + * @ra: host unwind address + * @pv: host address + * + * Atomically load 8 aligned bytes from @pv. + * If this is not possible, longjmp out to restart serially. 
+ */ +static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv) +{ + if (HAVE_al8) { + return load_atomic8(pv); + } + +#ifdef CONFIG_USER_ONLY + /* + * If the page is not writable, then assume the value is immutable + * and requires no locking. This ignores the case of MAP_SHARED with + * another process, because the fallback start_exclusive solution + * provides no protection across processes. + */ + if (!page_check_range(h2g(pv), 8, PAGE_WRITE)) { + uint64_t *p = __builtin_assume_aligned(pv, 8); + return *p; + } +#endif + + /* Ultimate fallback: re-execute in serial context. */ + cpu_loop_exit_atomic(env_cpu(env), ra); +} + +/** + * load_atomic16_or_exit: + * @env: cpu context + * @ra: host unwind address + * @pv: host address + * + * Atomically load 16 aligned bytes from @pv. + * If this is not possible, longjmp out to restart serially. + */ +static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv) +{ + Int128 *p = __builtin_assume_aligned(pv, 16); + + if (HAVE_al16_fast) { + return load_atomic16(p); + } + +#ifdef CONFIG_USER_ONLY + /* + * We can only use cmpxchg to emulate a load if the page is writable. + * If the page is not writable, then assume the value is immutable + * and requires no locking. This ignores the case of MAP_SHARED with + * another process, because the fallback start_exclusive solution + * provides no protection across processes. + */ + if (!page_check_range(h2g(p), 16, PAGE_WRITE)) { + return *p; + } +#endif + + /* + * In system mode all guest pages are writable, and for user-only + * we have just checked writability. Try cmpxchg. + */ +#if defined(CONFIG_CMPXCHG128) + /* Swap 0 with 0, with the side-effect of returning the old value. */ + { + Int128Alias r; + r.u = __sync_val_compare_and_swap_16((__uint128_t *)p, 0, 0); + return r.s; + } +#endif + + /* Ultimate fallback: re-execute in serial context. */ + cpu_loop_exit_atomic(env_cpu(env), ra); +} + +/** + * load_atom_extract_al4x2: + * @pv: host address + * + * Load 4 bytes from @p, from two sequential atomic 4-byte loads. + */ +static uint32_t load_atom_extract_al4x2(void *pv) +{ + uintptr_t pi = (uintptr_t)pv; + int sh = (pi & 3) * 8; + uint32_t a, b; + + pv = (void *)(pi & ~3); + a = load_atomic4(pv); + b = load_atomic4(pv + 4); + + if (HOST_BIG_ENDIAN) { + return (a << sh) | (b >> (-sh & 31)); + } else { + return (a >> sh) | (b << (-sh & 31)); + } +} + +/** + * load_atom_extract_al8x2: + * @pv: host address + * + * Load 8 bytes from @p, from two sequential atomic 8-byte loads. + */ +static uint64_t load_atom_extract_al8x2(void *pv) +{ + uintptr_t pi = (uintptr_t)pv; + int sh = (pi & 7) * 8; + uint64_t a, b; + + pv = (void *)(pi & ~7); + a = load_atomic8(pv); + b = load_atomic8(pv + 8); + + if (HOST_BIG_ENDIAN) { + return (a << sh) | (b >> (-sh & 63)); + } else { + return (a >> sh) | (b << (-sh & 63)); + } +} + +/** + * load_atom_extract_al8_or_exit: + * @env: cpu context + * @ra: host unwind address + * @pv: host address + * @s: object size in bytes, @s <= 4. + * + * Atomically load @s bytes from @p, when p % s != 0, and [p, p+s-1] does + * not cross an 8-byte boundary. This means that we can perform an atomic + * 8-byte load and extract. + * The value is returned in the low bits of a uint32_t. + */ +static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra, + void *pv, int s) +{ + uintptr_t pi = (uintptr_t)pv; + int o = pi & 7; + int shr = (HOST_BIG_ENDIAN ? 
8 - s - o : o) * 8; + + pv = (void *)(pi & ~7); + return load_atomic8_or_exit(env, ra, pv) >> shr; +} + +/** + * load_atom_extract_al16_or_exit: + * @env: cpu context + * @ra: host unwind address + * @p: host address + * @s: object size in bytes, @s <= 8. + * + * Atomically load @s bytes from @p, when p % 16 < 8 + * and p % 16 + s > 8. I.e. does not cross a 16-byte + * boundary, but *does* cross an 8-byte boundary. + * This is the slow version, so we must have eliminated + * any faster load_atom_extract_al8_or_exit case. + * + * If this is not possible, longjmp out to restart serially. + */ +static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra, + void *pv, int s) +{ + uintptr_t pi = (uintptr_t)pv; + int o = pi & 7; + int shr = (HOST_BIG_ENDIAN ? 16 - s - o : o) * 8; + Int128 r; + + /* + * Note constraints above: p & 8 must be clear. + * Provoke SIGBUS if possible otherwise. + */ + pv = (void *)(pi & ~7); + r = load_atomic16_or_exit(env, ra, pv); + + r = int128_urshift(r, shr); + return int128_getlo(r); +} + +/** + * load_atom_extract_al16_or_al8: + * @p: host address + * @s: object size in bytes, @s <= 8. + * + * Load @s bytes from @p, when p % s != 0. If [p, p+s-1] does not + * cross an 16-byte boundary then the access must be 16-byte atomic, + * otherwise the access must be 8-byte atomic. + */ +static inline uint64_t load_atom_extract_al16_or_al8(void *pv, int s) +{ +#if defined(CONFIG_ATOMIC128) + uintptr_t pi = (uintptr_t)pv; + int o = pi & 7; + int shr = (HOST_BIG_ENDIAN ? 16 - s - o : o) * 8; + __uint128_t r; + + pv = (void *)(pi & ~7); + if (pi & 8) { + uint64_t *p8 = __builtin_assume_aligned(pv, 16, 8); + uint64_t a = qatomic_read__nocheck(p8); + uint64_t b = qatomic_read__nocheck(p8 + 1); + + if (HOST_BIG_ENDIAN) { + r = ((__uint128_t)a << 64) | b; + } else { + r = ((__uint128_t)b << 64) | a; + } + } else { + __uint128_t *p16 = __builtin_assume_aligned(pv, 16, 0); + r = qatomic_read__nocheck(p16); + } + return r >> shr; +#else + qemu_build_not_reached(); +#endif +} + +/** + * load_atom_4_by_2: + * @pv: host address + * + * Load 4 bytes from @pv, with two 2-byte atomic loads. + */ +static inline uint32_t load_atom_4_by_2(void *pv) +{ + uint32_t a = load_atomic2(pv); + uint32_t b = load_atomic2(pv + 2); + + if (HOST_BIG_ENDIAN) { + return (a << 16) | b; + } else { + return (b << 16) | a; + } +} + +/** + * load_atom_8_by_2: + * @pv: host address + * + * Load 8 bytes from @pv, with four 2-byte atomic loads. + */ +static inline uint64_t load_atom_8_by_2(void *pv) +{ + uint32_t a = load_atom_4_by_2(pv); + uint32_t b = load_atom_4_by_2(pv + 4); + + if (HOST_BIG_ENDIAN) { + return ((uint64_t)a << 32) | b; + } else { + return ((uint64_t)b << 32) | a; + } +} + +/** + * load_atom_8_by_4: + * @pv: host address + * + * Load 8 bytes from @pv, with two 4-byte atomic loads. + */ +static inline uint64_t load_atom_8_by_4(void *pv) +{ + uint32_t a = load_atomic4(pv); + uint32_t b = load_atomic4(pv + 4); + + if (HOST_BIG_ENDIAN) { + return ((uint64_t)a << 32) | b; + } else { + return ((uint64_t)b << 32) | a; + } +} + +/** + * load_atom_2: + * @p: host address + * @memop: the full memory op + * + * Load 2 bytes from @p, honoring the atomicity of @memop. 
+ */ +static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + + if (likely((pi & 1) == 0)) { + return load_atomic2(pv); + } + if (HAVE_al16_fast) { + return load_atom_extract_al16_or_al8(pv, 2); + } + + atmax = required_atomicity(env, pi, memop); + switch (atmax) { + case MO_8: + return lduw_he_p(pv); + case MO_16: + /* The only case remaining is MO_ATOM_WITHIN16. */ + if (!HAVE_al8_fast && (pi & 3) == 1) { + /* Big or little endian, we want the middle two bytes. */ + return load_atomic4(pv - 1) >> 8; + } + if ((pi & 15) != 7) { + return load_atom_extract_al8_or_exit(env, ra, pv, 2); + } + return load_atom_extract_al16_or_exit(env, ra, pv, 2); + default: + g_assert_not_reached(); + } +} + +/** + * load_atom_4: + * @p: host address + * @memop: the full memory op + * + * Load 4 bytes from @p, honoring the atomicity of @memop. + */ +static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + + if (likely((pi & 3) == 0)) { + return load_atomic4(pv); + } + if (HAVE_al16_fast) { + return load_atom_extract_al16_or_al8(pv, 4); + } + + atmax = required_atomicity(env, pi, memop); + switch (atmax) { + case MO_8: + case MO_16: + case -MO_16: + /* + * For MO_ATOM_IFALIGN, this is more atomicity than required, + * but it's trivially supported on all hosts, better than 4 + * individual byte loads (when the host requires alignment), + * and overlaps with the MO_ATOM_SUBALIGN case of p % 2 == 0. + */ + return load_atom_extract_al4x2(pv); + case MO_32: + if (!(pi & 4)) { + return load_atom_extract_al8_or_exit(env, ra, pv, 4); + } + return load_atom_extract_al16_or_exit(env, ra, pv, 4); + default: + g_assert_not_reached(); + } +} + +/** + * load_atom_8: + * @p: host address + * @memop: the full memory op + * + * Load 8 bytes from @p, honoring the atomicity of @memop. + */ +static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + + /* + * If the host does not support 8-byte atomics, wait until we have + * examined the atomicity parameters below. 
+ */ + if (HAVE_al8 && likely((pi & 7) == 0)) { + return load_atomic8(pv); + } + if (HAVE_al16_fast) { + return load_atom_extract_al16_or_al8(pv, 8); + } + + atmax = required_atomicity(env, pi, memop); + if (atmax == MO_64) { + if (!HAVE_al8 && (pi & 7) == 0) { + load_atomic8_or_exit(env, ra, pv); + } + return load_atom_extract_al16_or_exit(env, ra, pv, 8); + } + if (HAVE_al8_fast) { + return load_atom_extract_al8x2(pv); + } + switch (atmax) { + case MO_8: + return ldq_he_p(pv); + case MO_16: + return load_atom_8_by_2(pv); + case MO_32: + return load_atom_8_by_4(pv); + case -MO_32: + if (HAVE_al8) { + return load_atom_extract_al8x2(pv); + } + cpu_loop_exit_atomic(env_cpu(env), ra); + default: + g_assert_not_reached(); + } +} diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index fc597a010d..fefc83cc8c 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -931,6 +931,8 @@ static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr, return ret; } +#include "ldst_atomicity.c.inc" + uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) { @@ -953,10 +955,10 @@ uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, validate_memop(oi, MO_BEUW); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = lduw_be_p(haddr); + ret = load_atom_2(env, ra, haddr, get_memop(oi)); clear_helper_retaddr(); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return ret; + return cpu_to_be16(ret); } uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, @@ -967,10 +969,10 @@ uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, validate_memop(oi, MO_BEUL); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = ldl_be_p(haddr); + ret = load_atom_4(env, ra, haddr, get_memop(oi)); clear_helper_retaddr(); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return ret; + return cpu_to_be32(ret); } uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, @@ -981,10 +983,10 @@ uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, validate_memop(oi, MO_BEUQ); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = ldq_be_p(haddr); + ret = load_atom_8(env, ra, haddr, get_memop(oi)); clear_helper_retaddr(); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return ret; + return cpu_to_be64(ret); } uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, @@ -995,10 +997,10 @@ uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, validate_memop(oi, MO_LEUW); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = lduw_le_p(haddr); + ret = load_atom_2(env, ra, haddr, get_memop(oi)); clear_helper_retaddr(); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return ret; + return cpu_to_le16(ret); } uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, @@ -1009,10 +1011,10 @@ uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, validate_memop(oi, MO_LEUL); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = ldl_le_p(haddr); + ret = load_atom_4(env, ra, haddr, get_memop(oi)); clear_helper_retaddr(); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return ret; + return cpu_to_le32(ret); } uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, @@ -1023,10 +1025,10 @@ uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, validate_memop(oi, MO_LEUQ); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = ldq_le_p(haddr); + ret = load_atom_8(env, ra, haddr, get_memop(oi)); clear_helper_retaddr(); 
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return ret; + return cpu_to_le64(ret); } Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, From 5b36f2684cbb07fcd75cc29b618651c43e7a80ed Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 30 Oct 2022 10:46:12 +1100 Subject: [PATCH 04/74] accel/tcg: Honor atomicity of stores Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 108 ++++---- accel/tcg/ldst_atomicity.c.inc | 491 +++++++++++++++++++++++++++++++++ accel/tcg/user-exec.c | 12 +- 3 files changed, 545 insertions(+), 66 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 33e75ae962..d910464c36 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -2599,36 +2599,6 @@ Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr, * Store Helpers */ -static inline void QEMU_ALWAYS_INLINE -store_memop(void *haddr, uint64_t val, MemOp op) -{ - switch (op) { - case MO_UB: - stb_p(haddr, val); - break; - case MO_BEUW: - stw_be_p(haddr, val); - break; - case MO_LEUW: - stw_le_p(haddr, val); - break; - case MO_BEUL: - stl_be_p(haddr, val); - break; - case MO_LEUL: - stl_le_p(haddr, val); - break; - case MO_BEUQ: - stq_be_p(haddr, val); - break; - case MO_LEUQ: - stq_le_p(haddr, val); - break; - default: - qemu_build_not_reached(); - } -} - /** * do_st_mmio_leN: * @env: cpu context @@ -2655,38 +2625,56 @@ static uint64_t do_st_mmio_leN(CPUArchState *env, MMULookupPageData *p, return val_le; } -/** - * do_st_bytes_leN: - * @p: translation parameters - * @val_le: data to store - * - * Store @p->size bytes at @p->haddr, which is RAM. - * The bytes to store are extracted in little-endian order from @val_le; - * return the bytes of @val_le beyond @p->size that have not been stored. - */ -static uint64_t do_st_bytes_leN(MMULookupPageData *p, uint64_t val_le) -{ - uint8_t *haddr = p->haddr; - int i, size = p->size; - - for (i = 0; i < size; i++, val_le >>= 8) { - haddr[i] = val_le; - } - return val_le; -} - /* * Wrapper for the above. */ static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p, - uint64_t val_le, int mmu_idx, uintptr_t ra) + uint64_t val_le, int mmu_idx, + MemOp mop, uintptr_t ra) { + MemOp atom; + unsigned tmp, half_size; + if (unlikely(p->flags & TLB_MMIO)) { return do_st_mmio_leN(env, p, val_le, mmu_idx, ra); } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { return val_le >> (p->size * 8); - } else { - return do_st_bytes_leN(p, val_le); + } + + /* + * It is a given that we cross a page and therefore there is no atomicity + * for the store as a whole, but subobjects may need attention. + */ + atom = mop & MO_ATOM_MASK; + switch (atom) { + case MO_ATOM_SUBALIGN: + return store_parts_leN(p->haddr, p->size, val_le); + + case MO_ATOM_IFALIGN_PAIR: + case MO_ATOM_WITHIN16_PAIR: + tmp = mop & MO_SIZE; + tmp = tmp ? tmp - 1 : 0; + half_size = 1 << tmp; + if (atom == MO_ATOM_IFALIGN_PAIR + ? 
p->size == half_size + : p->size >= half_size) { + if (!HAVE_al8_fast && p->size <= 4) { + return store_whole_le4(p->haddr, p->size, val_le); + } else if (HAVE_al8) { + return store_whole_le8(p->haddr, p->size, val_le); + } else { + cpu_loop_exit_atomic(env_cpu(env), ra); + } + } + /* fall through */ + + case MO_ATOM_IFALIGN: + case MO_ATOM_WITHIN16: + case MO_ATOM_NONE: + return store_bytes_leN(p->haddr, p->size, val_le); + + default: + g_assert_not_reached(); } } @@ -2714,7 +2702,7 @@ static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val, if (memop & MO_BSWAP) { val = bswap16(val); } - store_memop(p->haddr, val, MO_UW); + store_atom_2(env, ra, p->haddr, memop, val); } } @@ -2730,7 +2718,7 @@ static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val, if (memop & MO_BSWAP) { val = bswap32(val); } - store_memop(p->haddr, val, MO_UL); + store_atom_4(env, ra, p->haddr, memop, val); } } @@ -2746,7 +2734,7 @@ static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val, if (memop & MO_BSWAP) { val = bswap64(val); } - store_memop(p->haddr, val, MO_UQ); + store_atom_8(env, ra, p->haddr, memop, val); } } @@ -2815,8 +2803,8 @@ static void do_st4_mmu(CPUArchState *env, target_ulong addr, uint32_t val, if ((l.memop & MO_BSWAP) != MO_LE) { val = bswap32(val); } - val = do_st_leN(env, &l.page[0], val, l.mmu_idx, ra); - (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, ra); + val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra); + (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); } void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, @@ -2849,8 +2837,8 @@ static void do_st8_mmu(CPUArchState *env, target_ulong addr, uint64_t val, if ((l.memop & MO_BSWAP) != MO_LE) { val = bswap64(val); } - val = do_st_leN(env, &l.page[0], val, l.mmu_idx, ra); - (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, ra); + val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra); + (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); } void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc index a5bce641f8..1f39e43896 100644 --- a/accel/tcg/ldst_atomicity.c.inc +++ b/accel/tcg/ldst_atomicity.c.inc @@ -21,6 +21,12 @@ #else # define HAVE_al16_fast false #endif +#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128) +# define HAVE_al16 true +#else +# define HAVE_al16 false +#endif + /** * required_atomicity: @@ -564,3 +570,488 @@ static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra, g_assert_not_reached(); } } + +/** + * store_atomic2: + * @pv: host address + * @val: value to store + * + * Atomically store 2 aligned bytes to @pv. + */ +static inline void store_atomic2(void *pv, uint16_t val) +{ + uint16_t *p = __builtin_assume_aligned(pv, 2); + qatomic_set(p, val); +} + +/** + * store_atomic4: + * @pv: host address + * @val: value to store + * + * Atomically store 4 aligned bytes to @pv. + */ +static inline void store_atomic4(void *pv, uint32_t val) +{ + uint32_t *p = __builtin_assume_aligned(pv, 4); + qatomic_set(p, val); +} + +/** + * store_atomic8: + * @pv: host address + * @val: value to store + * + * Atomically store 8 aligned bytes to @pv. 
+ */ +static inline void store_atomic8(void *pv, uint64_t val) +{ + uint64_t *p = __builtin_assume_aligned(pv, 8); + + qemu_build_assert(HAVE_al8); + qatomic_set__nocheck(p, val); +} + +/** + * store_atom_4x2 + */ +static inline void store_atom_4_by_2(void *pv, uint32_t val) +{ + store_atomic2(pv, val >> (HOST_BIG_ENDIAN ? 16 : 0)); + store_atomic2(pv + 2, val >> (HOST_BIG_ENDIAN ? 0 : 16)); +} + +/** + * store_atom_8_by_2 + */ +static inline void store_atom_8_by_2(void *pv, uint64_t val) +{ + store_atom_4_by_2(pv, val >> (HOST_BIG_ENDIAN ? 32 : 0)); + store_atom_4_by_2(pv + 4, val >> (HOST_BIG_ENDIAN ? 0 : 32)); +} + +/** + * store_atom_8_by_4 + */ +static inline void store_atom_8_by_4(void *pv, uint64_t val) +{ + store_atomic4(pv, val >> (HOST_BIG_ENDIAN ? 32 : 0)); + store_atomic4(pv + 4, val >> (HOST_BIG_ENDIAN ? 0 : 32)); +} + +/** + * store_atom_insert_al4: + * @p: host address + * @val: shifted value to store + * @msk: mask for value to store + * + * Atomically store @val to @p, masked by @msk. + */ +static void store_atom_insert_al4(uint32_t *p, uint32_t val, uint32_t msk) +{ + uint32_t old, new; + + p = __builtin_assume_aligned(p, 4); + old = qatomic_read(p); + do { + new = (old & ~msk) | val; + } while (!__atomic_compare_exchange_n(p, &old, new, true, + __ATOMIC_RELAXED, __ATOMIC_RELAXED)); +} + +/** + * store_atom_insert_al8: + * @p: host address + * @val: shifted value to store + * @msk: mask for value to store + * + * Atomically store @val to @p masked by @msk. + */ +static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk) +{ + uint64_t old, new; + + qemu_build_assert(HAVE_al8); + p = __builtin_assume_aligned(p, 8); + old = qatomic_read__nocheck(p); + do { + new = (old & ~msk) | val; + } while (!__atomic_compare_exchange_n(p, &old, new, true, + __ATOMIC_RELAXED, __ATOMIC_RELAXED)); +} + +/** + * store_atom_insert_al16: + * @p: host address + * @val: shifted value to store + * @msk: mask for value to store + * + * Atomically store @val to @p masked by @msk. + */ +static void store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk) +{ +#if defined(CONFIG_ATOMIC128) + __uint128_t *pu, old, new; + + /* With CONFIG_ATOMIC128, we can avoid the memory barriers. */ + pu = __builtin_assume_aligned(ps, 16); + old = *pu; + do { + new = (old & ~msk.u) | val.u; + } while (!__atomic_compare_exchange_n(pu, &old, new, true, + __ATOMIC_RELAXED, __ATOMIC_RELAXED)); +#elif defined(CONFIG_CMPXCHG128) + __uint128_t *pu, old, new; + + /* + * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always + * defer to libatomic, so we must use __sync_*_compare_and_swap_16 + * and accept the sequential consistency that comes with it. + */ + pu = __builtin_assume_aligned(ps, 16); + do { + old = *pu; + new = (old & ~msk.u) | val.u; + } while (!__sync_bool_compare_and_swap_16(pu, old, new)); +#else + qemu_build_not_reached(); +#endif +} + +/** + * store_bytes_leN: + * @pv: host address + * @size: number of bytes to store + * @val_le: data to store + * + * Store @size bytes at @p. The bytes to store are extracted in little-endian order + * from @val_le; return the bytes of @val_le beyond @size that have not been stored. 
+ */ +static uint64_t store_bytes_leN(void *pv, int size, uint64_t val_le) +{ + uint8_t *p = pv; + for (int i = 0; i < size; i++, val_le >>= 8) { + p[i] = val_le; + } + return val_le; +} + +/** + * store_parts_leN + * @pv: host address + * @size: number of bytes to store + * @val_le: data to store + * + * As store_bytes_leN, but atomically on each aligned part. + */ +G_GNUC_UNUSED +static uint64_t store_parts_leN(void *pv, int size, uint64_t val_le) +{ + do { + int n; + + /* Find minimum of alignment and size */ + switch (((uintptr_t)pv | size) & 7) { + case 4: + store_atomic4(pv, le32_to_cpu(val_le)); + val_le >>= 32; + n = 4; + break; + case 2: + case 6: + store_atomic2(pv, le16_to_cpu(val_le)); + val_le >>= 16; + n = 2; + break; + default: + *(uint8_t *)pv = val_le; + val_le >>= 8; + n = 1; + break; + case 0: + g_assert_not_reached(); + } + pv += n; + size -= n; + } while (size != 0); + + return val_le; +} + +/** + * store_whole_le4 + * @pv: host address + * @size: number of bytes to store + * @val_le: data to store + * + * As store_bytes_leN, but atomically as a whole. + * Four aligned bytes are guaranteed to cover the store. + */ +static uint64_t store_whole_le4(void *pv, int size, uint64_t val_le) +{ + int sz = size * 8; + int o = (uintptr_t)pv & 3; + int sh = o * 8; + uint32_t m = MAKE_64BIT_MASK(0, sz); + uint32_t v; + + if (HOST_BIG_ENDIAN) { + v = bswap32(val_le) >> sh; + m = bswap32(m) >> sh; + } else { + v = val_le << sh; + m <<= sh; + } + store_atom_insert_al4(pv - o, v, m); + return val_le >> sz; +} + +/** + * store_whole_le8 + * @pv: host address + * @size: number of bytes to store + * @val_le: data to store + * + * As store_bytes_leN, but atomically as a whole. + * Eight aligned bytes are guaranteed to cover the store. + */ +static uint64_t store_whole_le8(void *pv, int size, uint64_t val_le) +{ + int sz = size * 8; + int o = (uintptr_t)pv & 7; + int sh = o * 8; + uint64_t m = MAKE_64BIT_MASK(0, sz); + uint64_t v; + + qemu_build_assert(HAVE_al8); + if (HOST_BIG_ENDIAN) { + v = bswap64(val_le) >> sh; + m = bswap64(m) >> sh; + } else { + v = val_le << sh; + m <<= sh; + } + store_atom_insert_al8(pv - o, v, m); + return val_le >> sz; +} + +/** + * store_whole_le16 + * @pv: host address + * @size: number of bytes to store + * @val_le: data to store + * + * As store_bytes_leN, but atomically as a whole. + * 16 aligned bytes are guaranteed to cover the store. + */ +static uint64_t store_whole_le16(void *pv, int size, Int128 val_le) +{ + int sz = size * 8; + int o = (uintptr_t)pv & 15; + int sh = o * 8; + Int128 m, v; + + qemu_build_assert(HAVE_al16); + + /* Like MAKE_64BIT_MASK(0, sz), but larger. */ + if (sz <= 64) { + m = int128_make64(MAKE_64BIT_MASK(0, sz)); + } else { + m = int128_make128(-1, MAKE_64BIT_MASK(0, sz - 64)); + } + + if (HOST_BIG_ENDIAN) { + v = int128_urshift(bswap128(val_le), sh); + m = int128_urshift(bswap128(m), sh); + } else { + v = int128_lshift(val_le, sh); + m = int128_lshift(m, sh); + } + store_atom_insert_al16(pv - o, v, m); + + /* Unused if sz <= 64. */ + return int128_gethi(val_le) >> (sz - 64); +} + +/** + * store_atom_2: + * @p: host address + * @val: the value to store + * @memop: the full memory op + * + * Store 2 bytes to @p, honoring the atomicity of @memop. 
+ */ +static void store_atom_2(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop, uint16_t val) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + + if (likely((pi & 1) == 0)) { + store_atomic2(pv, val); + return; + } + + atmax = required_atomicity(env, pi, memop); + if (atmax == MO_8) { + stw_he_p(pv, val); + return; + } + + /* + * The only case remaining is MO_ATOM_WITHIN16. + * Big or little endian, we want the middle two bytes in each test. + */ + if ((pi & 3) == 1) { + store_atom_insert_al4(pv - 1, (uint32_t)val << 8, MAKE_64BIT_MASK(8, 16)); + return; + } else if ((pi & 7) == 3) { + if (HAVE_al8) { + store_atom_insert_al8(pv - 3, (uint64_t)val << 24, MAKE_64BIT_MASK(24, 16)); + return; + } + } else if ((pi & 15) == 7) { + if (HAVE_al16) { + Int128 v = int128_lshift(int128_make64(val), 56); + Int128 m = int128_lshift(int128_make64(0xffff), 56); + store_atom_insert_al16(pv - 7, v, m); + return; + } + } else { + g_assert_not_reached(); + } + + cpu_loop_exit_atomic(env_cpu(env), ra); +} + +/** + * store_atom_4: + * @p: host address + * @val: the value to store + * @memop: the full memory op + * + * Store 4 bytes to @p, honoring the atomicity of @memop. + */ +static void store_atom_4(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop, uint32_t val) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + + if (likely((pi & 3) == 0)) { + store_atomic4(pv, val); + return; + } + + atmax = required_atomicity(env, pi, memop); + switch (atmax) { + case MO_8: + stl_he_p(pv, val); + return; + case MO_16: + store_atom_4_by_2(pv, val); + return; + case -MO_16: + { + uint32_t val_le = cpu_to_le32(val); + int s2 = pi & 3; + int s1 = 4 - s2; + + switch (s2) { + case 1: + val_le = store_whole_le4(pv, s1, val_le); + *(uint8_t *)(pv + 3) = val_le; + break; + case 3: + *(uint8_t *)pv = val_le; + store_whole_le4(pv + 1, s2, val_le >> 8); + break; + case 0: /* aligned */ + case 2: /* atmax MO_16 */ + default: + g_assert_not_reached(); + } + } + return; + case MO_32: + if ((pi & 7) < 4) { + if (HAVE_al8) { + store_whole_le8(pv, 4, cpu_to_le32(val)); + return; + } + } else { + if (HAVE_al16) { + store_whole_le16(pv, 4, int128_make64(cpu_to_le32(val))); + return; + } + } + cpu_loop_exit_atomic(env_cpu(env), ra); + default: + g_assert_not_reached(); + } +} + +/** + * store_atom_8: + * @p: host address + * @val: the value to store + * @memop: the full memory op + * + * Store 8 bytes to @p, honoring the atomicity of @memop. + */ +static void store_atom_8(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop, uint64_t val) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + + if (HAVE_al8 && likely((pi & 7) == 0)) { + store_atomic8(pv, val); + return; + } + + atmax = required_atomicity(env, pi, memop); + switch (atmax) { + case MO_8: + stq_he_p(pv, val); + return; + case MO_16: + store_atom_8_by_2(pv, val); + return; + case MO_32: + store_atom_8_by_4(pv, val); + return; + case -MO_32: + if (HAVE_al8) { + uint64_t val_le = cpu_to_le64(val); + int s2 = pi & 7; + int s1 = 8 - s2; + + switch (s2) { + case 1 ... 3: + val_le = store_whole_le8(pv, s1, val_le); + store_bytes_leN(pv + s1, s2, val_le); + break; + case 5 ... 
7: + val_le = store_bytes_leN(pv, s1, val_le); + store_whole_le8(pv + s1, s2, val_le); + break; + case 0: /* aligned */ + case 4: /* atmax MO_32 */ + default: + g_assert_not_reached(); + } + return; + } + break; + case MO_64: + if (HAVE_al16) { + store_whole_le16(pv, 8, int128_make64(cpu_to_le64(val))); + return; + } + break; + default: + g_assert_not_reached(); + } + cpu_loop_exit_atomic(env_cpu(env), ra); +} diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index fefc83cc8c..b89fa35a83 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -1086,7 +1086,7 @@ void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, validate_memop(oi, MO_BEUW); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - stw_be_p(haddr, val); + store_atom_2(env, ra, haddr, get_memop(oi), be16_to_cpu(val)); clear_helper_retaddr(); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } @@ -1098,7 +1098,7 @@ void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, validate_memop(oi, MO_BEUL); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - stl_be_p(haddr, val); + store_atom_4(env, ra, haddr, get_memop(oi), be32_to_cpu(val)); clear_helper_retaddr(); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } @@ -1110,7 +1110,7 @@ void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, validate_memop(oi, MO_BEUQ); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - stq_be_p(haddr, val); + store_atom_8(env, ra, haddr, get_memop(oi), be64_to_cpu(val)); clear_helper_retaddr(); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } @@ -1122,7 +1122,7 @@ void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, validate_memop(oi, MO_LEUW); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - stw_le_p(haddr, val); + store_atom_2(env, ra, haddr, get_memop(oi), le16_to_cpu(val)); clear_helper_retaddr(); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } @@ -1134,7 +1134,7 @@ void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, validate_memop(oi, MO_LEUL); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - stl_le_p(haddr, val); + store_atom_4(env, ra, haddr, get_memop(oi), le32_to_cpu(val)); clear_helper_retaddr(); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } @@ -1146,7 +1146,7 @@ void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, validate_memop(oi, MO_LEUQ); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - stq_le_p(haddr, val); + store_atom_8(env, ra, haddr, get_memop(oi), le64_to_cpu(val)); clear_helper_retaddr(); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } From 0cadc1eda1a3120c37c713ab6d6b7a02da0d2e6f Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 1 Nov 2022 12:51:04 +1100 Subject: [PATCH 05/74] tcg: Unify helper_{be,le}_{ld,st}* MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the current structure of cputlb.c, there is no difference between the little-endian and big-endian entry points, aside from the assert. Unify the pairs of functions. Hoist the qemu_{ld,st}_helpers arrays to tcg.c. 
Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 190 ++++++++++--------------------- docs/devel/loads-stores.rst | 36 ++---- include/tcg/tcg-ldst.h | 60 ++++------ tcg/aarch64/tcg-target.c.inc | 33 ------ tcg/arm/tcg-target.c.inc | 37 ------ tcg/i386/tcg-target.c.inc | 30 +---- tcg/loongarch64/tcg-target.c.inc | 23 ---- tcg/mips/tcg-target.c.inc | 31 ----- tcg/ppc/tcg-target.c.inc | 30 +---- tcg/riscv/tcg-target.c.inc | 42 ------- tcg/s390x/tcg-target.c.inc | 31 +---- tcg/sparc64/tcg-target.c.inc | 32 +----- tcg/tcg.c | 21 ++++ tcg/tci.c | 61 ++++------ 14 files changed, 146 insertions(+), 511 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index d910464c36..34796ef568 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -2012,25 +2012,6 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, cpu_loop_exit_atomic(env_cpu(env), retaddr); } -/* - * Verify that we have passed the correct MemOp to the correct function. - * - * In the case of the helper_*_mmu functions, we will have done this by - * using the MemOp to look up the helper during code generation. - * - * In the case of the cpu_*_mmu functions, this is up to the caller. - * We could present one function to target code, and dispatch based on - * the MemOp, but so far we have worked hard to avoid an indirect function - * call along the memory path. - */ -static void validate_memop(MemOpIdx oi, MemOp expected) -{ -#ifdef CONFIG_DEBUG_TCG - MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); - assert(have == expected); -#endif -} - /* * Load Helpers * @@ -2303,10 +2284,10 @@ static uint8_t do_ld1_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra); } -tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +tcg_target_ulong helper_ldub_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) { - validate_memop(oi, MO_UB); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); } @@ -2334,17 +2315,10 @@ static uint16_t do_ld2_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, return ret; } -tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +tcg_target_ulong helper_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) { - validate_memop(oi, MO_LEUW); - return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); -} - -tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_BEUW); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); } @@ -2368,17 +2342,10 @@ static uint32_t do_ld4_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, return ret; } -tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +tcg_target_ulong helper_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) { - validate_memop(oi, MO_LEUL); - return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); -} - -tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_BEUL); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); } @@ 
-2402,17 +2369,10 @@ static uint64_t do_ld8_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, return ret; } -uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) { - validate_memop(oi, MO_LEUQ); - return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); -} - -uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_BEUQ); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); } @@ -2421,35 +2381,22 @@ uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, * avoid this for 64-bit data, or for 32-bit data on 32-bit host. */ - -tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) { - return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); + return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr); } -tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) { - return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); + return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr); } -tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) +tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr) { - return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); -} - -tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); -} - -tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr) -{ - return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); + return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr); } /* @@ -2465,7 +2412,7 @@ uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) { uint8_t ret; - validate_memop(oi, MO_UB); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB); ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD); plugin_load_cb(env, addr, oi); return ret; @@ -2476,7 +2423,7 @@ uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, { uint16_t ret; - validate_memop(oi, MO_BEUW); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUW); ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD); plugin_load_cb(env, addr, oi); return ret; @@ -2487,7 +2434,7 @@ uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, { uint32_t ret; - validate_memop(oi, MO_BEUL); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUL); ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD); plugin_load_cb(env, addr, oi); return ret; @@ -2498,7 +2445,7 @@ uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, { uint64_t ret; - validate_memop(oi, MO_BEUQ); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUQ); ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD); plugin_load_cb(env, addr, oi); return ret; @@ -2509,7 +2456,7 @@ uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, { uint16_t ret; - validate_memop(oi, MO_LEUW); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) 
== MO_LEUW); ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD); plugin_load_cb(env, addr, oi); return ret; @@ -2520,7 +2467,7 @@ uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, { uint32_t ret; - validate_memop(oi, MO_LEUL); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUL); ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD); plugin_load_cb(env, addr, oi); return ret; @@ -2531,7 +2478,7 @@ uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, { uint64_t ret; - validate_memop(oi, MO_LEUQ); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUQ); ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD); plugin_load_cb(env, addr, oi); return ret; @@ -2559,8 +2506,8 @@ Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; new_oi = make_memop_idx(mop, mmu_idx); - h = helper_be_ldq_mmu(env, addr, new_oi, ra); - l = helper_be_ldq_mmu(env, addr + 8, new_oi, ra); + h = helper_ldq_mmu(env, addr, new_oi, ra); + l = helper_ldq_mmu(env, addr + 8, new_oi, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return int128_make128(l, h); @@ -2588,8 +2535,8 @@ Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr, mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; new_oi = make_memop_idx(mop, mmu_idx); - l = helper_le_ldq_mmu(env, addr, new_oi, ra); - h = helper_le_ldq_mmu(env, addr + 8, new_oi, ra); + l = helper_ldq_mmu(env, addr, new_oi, ra); + h = helper_ldq_mmu(env, addr + 8, new_oi, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return int128_make128(l, h); @@ -2738,13 +2685,13 @@ static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val, } } -void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t ra) +void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) { MMULookupLocals l; bool crosspage; - validate_memop(oi, MO_UB); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); tcg_debug_assert(!crosspage); @@ -2773,17 +2720,10 @@ static void do_st2_mmu(CPUArchState *env, target_ulong addr, uint16_t val, do_st_1(env, &l.page[1], b, l.mmu_idx, ra); } -void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr) +void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) { - validate_memop(oi, MO_LEUW); - do_st2_mmu(env, addr, val, oi, retaddr); -} - -void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_BEUW); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); do_st2_mmu(env, addr, val, oi, retaddr); } @@ -2807,17 +2747,10 @@ static void do_st4_mmu(CPUArchState *env, target_ulong addr, uint32_t val, (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); } -void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr) +void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) { - validate_memop(oi, MO_LEUL); - do_st4_mmu(env, addr, val, oi, retaddr); -} - -void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_BEUL); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); do_st4_mmu(env, addr, val, oi, retaddr); } @@ -2841,17 
+2774,10 @@ static void do_st8_mmu(CPUArchState *env, target_ulong addr, uint64_t val, (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); } -void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr) +void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) { - validate_memop(oi, MO_LEUQ); - do_st8_mmu(env, addr, val, oi, retaddr); -} - -void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr) -{ - validate_memop(oi, MO_BEUQ); + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); do_st8_mmu(env, addr, val, oi, retaddr); } @@ -2867,49 +2793,55 @@ static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, MemOpIdx oi, uintptr_t retaddr) { - helper_ret_stb_mmu(env, addr, val, oi, retaddr); + helper_stb_mmu(env, addr, val, oi, retaddr); plugin_store_cb(env, addr, oi); } void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val, MemOpIdx oi, uintptr_t retaddr) { - helper_be_stw_mmu(env, addr, val, oi, retaddr); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUW); + do_st2_mmu(env, addr, val, oi, retaddr); plugin_store_cb(env, addr, oi); } void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val, MemOpIdx oi, uintptr_t retaddr) { - helper_be_stl_mmu(env, addr, val, oi, retaddr); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUL); + do_st4_mmu(env, addr, val, oi, retaddr); plugin_store_cb(env, addr, oi); } void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val, MemOpIdx oi, uintptr_t retaddr) { - helper_be_stq_mmu(env, addr, val, oi, retaddr); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUQ); + do_st8_mmu(env, addr, val, oi, retaddr); plugin_store_cb(env, addr, oi); } void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val, MemOpIdx oi, uintptr_t retaddr) { - helper_le_stw_mmu(env, addr, val, oi, retaddr); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUW); + do_st2_mmu(env, addr, val, oi, retaddr); plugin_store_cb(env, addr, oi); } void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val, MemOpIdx oi, uintptr_t retaddr) { - helper_le_stl_mmu(env, addr, val, oi, retaddr); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUL); + do_st4_mmu(env, addr, val, oi, retaddr); plugin_store_cb(env, addr, oi); } void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val, MemOpIdx oi, uintptr_t retaddr) { - helper_le_stq_mmu(env, addr, val, oi, retaddr); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUQ); + do_st8_mmu(env, addr, val, oi, retaddr); plugin_store_cb(env, addr, oi); } @@ -2934,8 +2866,8 @@ void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 val, mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; new_oi = make_memop_idx(mop, mmu_idx); - helper_be_stq_mmu(env, addr, int128_gethi(val), new_oi, ra); - helper_be_stq_mmu(env, addr + 8, int128_getlo(val), new_oi, ra); + helper_stq_mmu(env, addr, int128_gethi(val), new_oi, ra); + helper_stq_mmu(env, addr + 8, int128_getlo(val), new_oi, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } @@ -2961,8 +2893,8 @@ void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val, mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; new_oi = make_memop_idx(mop, mmu_idx); - 
helper_le_stq_mmu(env, addr, int128_getlo(val), new_oi, ra); - helper_le_stq_mmu(env, addr + 8, int128_gethi(val), new_oi, ra); + helper_stq_mmu(env, addr, int128_getlo(val), new_oi, ra); + helper_stq_mmu(env, addr + 8, int128_gethi(val), new_oi, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst index ad5dfe133e..d2cefc77a2 100644 --- a/docs/devel/loads-stores.rst +++ b/docs/devel/loads-stores.rst @@ -297,31 +297,20 @@ swap: ``translator_ld{sign}{size}_swap(env, ptr, swap)`` Regexes for git grep - ``\`` -``helper_*_{ld,st}*_mmu`` +``helper_{ld,st}*_mmu`` ~~~~~~~~~~~~~~~~~~~~~~~~~ These functions are intended primarily to be called by the code -generated by the TCG backend. They may also be called by target -CPU helper function code. Like the ``cpu_{ld,st}_mmuidx_ra`` functions -they perform accesses by guest virtual address, with a given ``mmuidx``. +generated by the TCG backend. Like the ``cpu_{ld,st}_mmu`` functions +they perform accesses by guest virtual address, with a given ``MemOpIdx``. -These functions specify an ``opindex`` parameter which encodes -(among other things) the mmu index to use for the access. This parameter -should be created by calling ``make_memop_idx()``. +They differ from ``cpu_{ld,st}_mmu`` in that they take the endianness +of the operation only from the MemOpIdx, and loads extend the return +value to the size of a host general register (``tcg_target_ulong``). -The ``retaddr`` parameter should be the result of GETPC() called directly -from the top level HELPER(foo) function (or 0 if no guest CPU state -unwinding is required). +load: ``helper_ld{sign}{size}_mmu(env, addr, opindex, retaddr)`` -**TODO** The names of these functions are a bit odd for historical -reasons because they were originally expected to be called only from -within generated code. We should rename them to bring them more in -line with the other memory access functions. The explicit endianness -is the only feature they have beyond ``*_mmuidx_ra``. - -load: ``helper_{endian}_ld{sign}{size}_mmu(env, addr, opindex, retaddr)`` - -store: ``helper_{endian}_st{size}_mmu(env, addr, val, opindex, retaddr)`` +store: ``helper_{size}_mmu(env, addr, val, opindex, retaddr)`` ``sign`` - (empty) : for 32 or 64 bit sizes @@ -334,14 +323,9 @@ store: ``helper_{endian}_st{size}_mmu(env, addr, val, opindex, retaddr)`` - ``l`` : 32 bits - ``q`` : 64 bits -``endian`` - - ``le`` : little endian - - ``be`` : big endian - - ``ret`` : target endianness - Regexes for git grep - - ``\`` - - ``\`` + - ``\`` + - ``\`` ``address_space_*`` ~~~~~~~~~~~~~~~~~~~ diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h index 684e394b06..3d897ca942 100644 --- a/include/tcg/tcg-ldst.h +++ b/include/tcg/tcg-ldst.h @@ -28,51 +28,35 @@ #ifdef CONFIG_SOFTMMU /* Value zero-extended to tcg register size. 
*/ -tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_ldub_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); /* Value sign-extended to tcg register size. */ -tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); /* * Value extended to at least uint32_t, so that some ABIs do not require * zero-extension from uint8_t or uint16_t. 
*/ -void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr); -void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr); -void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr); -void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr); -void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr); -void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - MemOpIdx oi, uintptr_t retaddr); -void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - MemOpIdx oi, uintptr_t retaddr); +void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr); #else diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index 62dd22d73c..e6636c1f8b 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -1587,39 +1587,6 @@ typedef struct { } HostAddress; #ifdef CONFIG_SOFTMMU -/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * MemOpIdx oi, uintptr_t ra) - */ -static void * const qemu_ld_helpers[MO_SIZE + 1] = { - [MO_8] = helper_ret_ldub_mmu, -#if HOST_BIG_ENDIAN - [MO_16] = helper_be_lduw_mmu, - [MO_32] = helper_be_ldul_mmu, - [MO_64] = helper_be_ldq_mmu, -#else - [MO_16] = helper_le_lduw_mmu, - [MO_32] = helper_le_ldul_mmu, - [MO_64] = helper_le_ldq_mmu, -#endif -}; - -/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, MemOpIdx oi, - * uintptr_t ra) - */ -static void * const qemu_st_helpers[MO_SIZE + 1] = { - [MO_8] = helper_ret_stb_mmu, -#if HOST_BIG_ENDIAN - [MO_16] = helper_be_stw_mmu, - [MO_32] = helper_be_stl_mmu, - [MO_64] = helper_be_stq_mmu, -#else - [MO_16] = helper_le_stw_mmu, - [MO_32] = helper_le_stl_mmu, - [MO_64] = helper_le_stq_mmu, -#endif -}; - static const TCGLdstHelperParam ldst_helper_param = { .ntmp = 1, .tmp = { TCG_REG_TMP } }; diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index df514e56fc..8b0d526659 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -1333,43 +1333,6 @@ typedef struct { } HostAddress; #ifdef CONFIG_SOFTMMU -/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * int mmu_idx, uintptr_t ra) - */ -static void * const qemu_ld_helpers[MO_SSIZE + 1] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_SB] = helper_ret_ldsb_mmu, -#if HOST_BIG_ENDIAN - [MO_UW] = helper_be_lduw_mmu, - [MO_UL] = helper_be_ldul_mmu, - [MO_UQ] = helper_be_ldq_mmu, - [MO_SW] = helper_be_ldsw_mmu, - [MO_SL] = helper_be_ldul_mmu, -#else - [MO_UW] = helper_le_lduw_mmu, - [MO_UL] = helper_le_ldul_mmu, - [MO_UQ] = helper_le_ldq_mmu, - [MO_SW] = helper_le_ldsw_mmu, - [MO_SL] = helper_le_ldul_mmu, -#endif -}; - -/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, int mmu_idx, uintptr_t ra) - */ -static void * const qemu_st_helpers[MO_SIZE + 1] = { - [MO_8] = helper_ret_stb_mmu, -#if HOST_BIG_ENDIAN - [MO_16] = helper_be_stw_mmu, - [MO_32] = 
helper_be_stl_mmu, - [MO_64] = helper_be_stq_mmu, -#else - [MO_16] = helper_le_stw_mmu, - [MO_32] = helper_le_stl_mmu, - [MO_64] = helper_le_stq_mmu, -#endif -}; - static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg) { /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */ diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index 9fc5592f5d..826f7764c9 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -1776,32 +1776,6 @@ typedef struct { } HostAddress; #if defined(CONFIG_SOFTMMU) -/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * int mmu_idx, uintptr_t ra) - */ -static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_LEUW] = helper_le_lduw_mmu, - [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEUQ] = helper_le_ldq_mmu, - [MO_BEUW] = helper_be_lduw_mmu, - [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEUQ] = helper_be_ldq_mmu, -}; - -/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, int mmu_idx, uintptr_t ra) - */ -static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { - [MO_UB] = helper_ret_stb_mmu, - [MO_LEUW] = helper_le_stw_mmu, - [MO_LEUL] = helper_le_stl_mmu, - [MO_LEUQ] = helper_le_stq_mmu, - [MO_BEUW] = helper_be_stw_mmu, - [MO_BEUL] = helper_be_stl_mmu, - [MO_BEUQ] = helper_be_stq_mmu, -}; - /* * Because i686 has no register parameters and because x86_64 has xchg * to handle addr/data register overlap, we have placed all input arguments @@ -1842,7 +1816,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) } tcg_out_ld_helper_args(s, l, &ldst_helper_param); - tcg_out_branch(s, 1, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]); + tcg_out_branch(s, 1, qemu_ld_helpers[opc & MO_SIZE]); tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param); tcg_out_jmp(s, l->raddr); @@ -1864,7 +1838,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) } tcg_out_st_helper_args(s, l, &ldst_helper_param); - tcg_out_branch(s, 1, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); + tcg_out_branch(s, 1, qemu_st_helpers[opc & MO_SIZE]); tcg_out_jmp(s, l->raddr); return true; diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc index 83fa45c802..d1bc29826f 100644 --- a/tcg/loongarch64/tcg-target.c.inc +++ b/tcg/loongarch64/tcg-target.c.inc @@ -784,29 +784,6 @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, */ #if defined(CONFIG_SOFTMMU) -/* - * helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * MemOpIdx oi, uintptr_t ra) - */ -static void * const qemu_ld_helpers[4] = { - [MO_8] = helper_ret_ldub_mmu, - [MO_16] = helper_le_lduw_mmu, - [MO_32] = helper_le_ldul_mmu, - [MO_64] = helper_le_ldq_mmu, -}; - -/* - * helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, MemOpIdx oi, - * uintptr_t ra) - */ -static void * const qemu_st_helpers[4] = { - [MO_8] = helper_ret_stb_mmu, - [MO_16] = helper_le_stw_mmu, - [MO_32] = helper_le_stl_mmu, - [MO_64] = helper_le_stq_mmu, -}; - static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) { tcg_out_opc_b(s, 0); diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index 5ad9867882..7770ef46bd 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -1076,37 +1076,6 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg, } #if defined(CONFIG_SOFTMMU) -static void * const 
qemu_ld_helpers[MO_SSIZE + 1] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_SB] = helper_ret_ldsb_mmu, -#if HOST_BIG_ENDIAN - [MO_UW] = helper_be_lduw_mmu, - [MO_SW] = helper_be_ldsw_mmu, - [MO_UL] = helper_be_ldul_mmu, - [MO_SL] = helper_be_ldsl_mmu, - [MO_UQ] = helper_be_ldq_mmu, -#else - [MO_UW] = helper_le_lduw_mmu, - [MO_SW] = helper_le_ldsw_mmu, - [MO_UL] = helper_le_ldul_mmu, - [MO_UQ] = helper_le_ldq_mmu, - [MO_SL] = helper_le_ldsl_mmu, -#endif -}; - -static void * const qemu_st_helpers[MO_SIZE + 1] = { - [MO_UB] = helper_ret_stb_mmu, -#if HOST_BIG_ENDIAN - [MO_UW] = helper_be_stw_mmu, - [MO_UL] = helper_be_stl_mmu, - [MO_UQ] = helper_be_stq_mmu, -#else - [MO_UW] = helper_le_stw_mmu, - [MO_UL] = helper_le_stl_mmu, - [MO_UQ] = helper_le_stq_mmu, -#endif -}; - /* We have four temps, we might as well expose three of them. */ static const TCGLdstHelperParam ldst_helper_param = { .ntmp = 3, .tmp = { TCG_TMP0, TCG_TMP1, TCG_TMP2 } diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc index 29bfbfcc61..6a81916e64 100644 --- a/tcg/ppc/tcg-target.c.inc +++ b/tcg/ppc/tcg-target.c.inc @@ -1963,32 +1963,6 @@ static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = { }; #if defined (CONFIG_SOFTMMU) -/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr, - * int mmu_idx, uintptr_t ra) - */ -static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_LEUW] = helper_le_lduw_mmu, - [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEUQ] = helper_le_ldq_mmu, - [MO_BEUW] = helper_be_lduw_mmu, - [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEUQ] = helper_be_ldq_mmu, -}; - -/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, int mmu_idx, uintptr_t ra) - */ -static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { - [MO_UB] = helper_ret_stb_mmu, - [MO_LEUW] = helper_le_stw_mmu, - [MO_LEUL] = helper_le_stl_mmu, - [MO_LEUQ] = helper_le_stq_mmu, - [MO_BEUW] = helper_be_stw_mmu, - [MO_BEUL] = helper_be_stl_mmu, - [MO_BEUQ] = helper_be_stq_mmu, -}; - static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg) { if (arg < 0) { @@ -2017,7 +1991,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } tcg_out_ld_helper_args(s, lb, &ldst_helper_param); - tcg_out_call_int(s, LK, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]); + tcg_out_call_int(s, LK, qemu_ld_helpers[opc & MO_SIZE]); tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param); tcg_out_b(s, 0, lb->raddr); @@ -2033,7 +2007,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } tcg_out_st_helper_args(s, lb, &ldst_helper_param); - tcg_out_call_int(s, LK, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); + tcg_out_call_int(s, LK, qemu_st_helpers[opc & MO_SIZE]); tcg_out_b(s, 0, lb->raddr); return true; diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index d12b824d8c..8ed0e2f210 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -847,48 +847,6 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) */ #if defined(CONFIG_SOFTMMU) -/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * MemOpIdx oi, uintptr_t ra) - */ -static void * const qemu_ld_helpers[MO_SSIZE + 1] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_SB] = helper_ret_ldsb_mmu, -#if HOST_BIG_ENDIAN - [MO_UW] = helper_be_lduw_mmu, - [MO_SW] = helper_be_ldsw_mmu, - [MO_UL] = helper_be_ldul_mmu, -#if TCG_TARGET_REG_BITS == 64 - [MO_SL] = helper_be_ldsl_mmu, -#endif - [MO_UQ] = 
helper_be_ldq_mmu, -#else - [MO_UW] = helper_le_lduw_mmu, - [MO_SW] = helper_le_ldsw_mmu, - [MO_UL] = helper_le_ldul_mmu, -#if TCG_TARGET_REG_BITS == 64 - [MO_SL] = helper_le_ldsl_mmu, -#endif - [MO_UQ] = helper_le_ldq_mmu, -#endif -}; - -/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, MemOpIdx oi, - * uintptr_t ra) - */ -static void * const qemu_st_helpers[MO_SIZE + 1] = { - [MO_8] = helper_ret_stb_mmu, -#if HOST_BIG_ENDIAN - [MO_16] = helper_be_stw_mmu, - [MO_32] = helper_be_stl_mmu, - [MO_64] = helper_be_stq_mmu, -#else - [MO_16] = helper_le_stw_mmu, - [MO_32] = helper_le_stl_mmu, - [MO_64] = helper_le_stq_mmu, -#endif -}; - static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) { tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0); diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc index aacbaf21d5..968977be98 100644 --- a/tcg/s390x/tcg-target.c.inc +++ b/tcg/s390x/tcg-target.c.inc @@ -438,33 +438,6 @@ static const uint8_t tcg_cond_to_ltr_cond[] = { [TCG_COND_GEU] = S390_CC_ALWAYS, }; -#ifdef CONFIG_SOFTMMU -static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_SB] = helper_ret_ldsb_mmu, - [MO_LEUW] = helper_le_lduw_mmu, - [MO_LESW] = helper_le_ldsw_mmu, - [MO_LEUL] = helper_le_ldul_mmu, - [MO_LESL] = helper_le_ldsl_mmu, - [MO_LEUQ] = helper_le_ldq_mmu, - [MO_BEUW] = helper_be_lduw_mmu, - [MO_BESW] = helper_be_ldsw_mmu, - [MO_BEUL] = helper_be_ldul_mmu, - [MO_BESL] = helper_be_ldsl_mmu, - [MO_BEUQ] = helper_be_ldq_mmu, -}; - -static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { - [MO_UB] = helper_ret_stb_mmu, - [MO_LEUW] = helper_le_stw_mmu, - [MO_LEUL] = helper_le_stl_mmu, - [MO_LEUQ] = helper_le_stq_mmu, - [MO_BEUW] = helper_be_stw_mmu, - [MO_BEUL] = helper_be_stl_mmu, - [MO_BEUQ] = helper_be_stq_mmu, -}; -#endif - static const tcg_insn_unit *tb_ret_addr; uint64_t s390_facilities[3]; @@ -1721,7 +1694,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } tcg_out_ld_helper_args(s, lb, &ldst_helper_param); - tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]); + tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]); tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param); tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); @@ -1738,7 +1711,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } tcg_out_st_helper_args(s, lb, &ldst_helper_param); - tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); + tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE]); tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); return true; diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index 7e6466d3b6..e997db2645 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -919,33 +919,11 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) } #ifdef CONFIG_SOFTMMU -static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1]; -static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1]; +static const tcg_insn_unit *qemu_ld_trampoline[MO_SSIZE + 1]; +static const tcg_insn_unit *qemu_st_trampoline[MO_SIZE + 1]; static void build_trampolines(TCGContext *s) { - static void * const qemu_ld_helpers[] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_SB] = helper_ret_ldsb_mmu, - [MO_LEUW] = helper_le_lduw_mmu, - [MO_LESW] = helper_le_ldsw_mmu, - [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEUQ] = helper_le_ldq_mmu, - [MO_BEUW] = helper_be_lduw_mmu, - [MO_BESW] = 
helper_be_ldsw_mmu, - [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEUQ] = helper_be_ldq_mmu, - }; - static void * const qemu_st_helpers[] = { - [MO_UB] = helper_ret_stb_mmu, - [MO_LEUW] = helper_le_stw_mmu, - [MO_LEUL] = helper_le_stl_mmu, - [MO_LEUQ] = helper_le_stq_mmu, - [MO_BEUW] = helper_be_stw_mmu, - [MO_BEUL] = helper_be_stl_mmu, - [MO_BEUQ] = helper_be_stq_mmu, - }; - int i; for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) { @@ -1210,9 +1188,9 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, /* We use the helpers to extend SB and SW data, leaving the case of SL needing explicit extending below. */ if ((memop & MO_SSIZE) == MO_SL) { - func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)]; + func = qemu_ld_trampoline[MO_UL]; } else { - func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)]; + func = qemu_ld_trampoline[memop & MO_SSIZE]; } tcg_debug_assert(func != NULL); tcg_out_call_nodelay(s, func, false); @@ -1353,7 +1331,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, tcg_out_movext(s, (memop & MO_SIZE) == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, TCG_REG_O2, data_type, memop & MO_SIZE, data); - func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)]; + func = qemu_st_trampoline[memop & MO_SIZE]; tcg_debug_assert(func != NULL); tcg_out_call_nodelay(s, func, false); /* delay slot */ diff --git a/tcg/tcg.c b/tcg/tcg.c index f156ca65f5..f5f9d8f7e8 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -197,6 +197,27 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *l, const TCGLdstHelperParam *p) __attribute__((unused)); +#ifdef CONFIG_SOFTMMU +static void * const qemu_ld_helpers[MO_SSIZE + 1] = { + [MO_UB] = helper_ldub_mmu, + [MO_SB] = helper_ldsb_mmu, + [MO_UW] = helper_lduw_mmu, + [MO_SW] = helper_ldsw_mmu, + [MO_UL] = helper_ldul_mmu, + [MO_UQ] = helper_ldq_mmu, +#if TCG_TARGET_REG_BITS == 64 + [MO_SL] = helper_ldsl_mmu, +#endif +}; + +static void * const qemu_st_helpers[MO_SIZE + 1] = { + [MO_8] = helper_stb_mmu, + [MO_16] = helper_stw_mmu, + [MO_32] = helper_stl_mmu, + [MO_64] = helper_stq_mmu, +}; +#endif + TCGContext tcg_init_ctx; __thread TCGContext *tcg_ctx; diff --git a/tcg/tci.c b/tcg/tci.c index fc67e7e767..5bde2e1f2e 100644 --- a/tcg/tci.c +++ b/tcg/tci.c @@ -293,31 +293,21 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, uintptr_t ra = (uintptr_t)tb_ptr; #ifdef CONFIG_SOFTMMU - switch (mop & (MO_BSWAP | MO_SSIZE)) { + switch (mop & MO_SSIZE) { case MO_UB: - return helper_ret_ldub_mmu(env, taddr, oi, ra); + return helper_ldub_mmu(env, taddr, oi, ra); case MO_SB: - return helper_ret_ldsb_mmu(env, taddr, oi, ra); - case MO_LEUW: - return helper_le_lduw_mmu(env, taddr, oi, ra); - case MO_LESW: - return helper_le_ldsw_mmu(env, taddr, oi, ra); - case MO_LEUL: - return helper_le_ldul_mmu(env, taddr, oi, ra); - case MO_LESL: - return helper_le_ldsl_mmu(env, taddr, oi, ra); - case MO_LEUQ: - return helper_le_ldq_mmu(env, taddr, oi, ra); - case MO_BEUW: - return helper_be_lduw_mmu(env, taddr, oi, ra); - case MO_BESW: - return helper_be_ldsw_mmu(env, taddr, oi, ra); - case MO_BEUL: - return helper_be_ldul_mmu(env, taddr, oi, ra); - case MO_BESL: - return helper_be_ldsl_mmu(env, taddr, oi, ra); - case MO_BEUQ: - return helper_be_ldq_mmu(env, taddr, oi, ra); + return helper_ldsb_mmu(env, taddr, oi, ra); + case MO_UW: + return helper_lduw_mmu(env, taddr, oi, ra); + case MO_SW: + return helper_ldsw_mmu(env, taddr, oi, ra); + case MO_UL: + return helper_ldul_mmu(env, taddr, oi, ra); + case MO_SL: + return 
helper_ldsl_mmu(env, taddr, oi, ra); + case MO_UQ: + return helper_ldq_mmu(env, taddr, oi, ra); default: g_assert_not_reached(); } @@ -382,27 +372,18 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, uintptr_t ra = (uintptr_t)tb_ptr; #ifdef CONFIG_SOFTMMU - switch (mop & (MO_BSWAP | MO_SIZE)) { + switch (mop & MO_SIZE) { case MO_UB: - helper_ret_stb_mmu(env, taddr, val, oi, ra); + helper_stb_mmu(env, taddr, val, oi, ra); break; - case MO_LEUW: - helper_le_stw_mmu(env, taddr, val, oi, ra); + case MO_UW: + helper_stw_mmu(env, taddr, val, oi, ra); break; - case MO_LEUL: - helper_le_stl_mmu(env, taddr, val, oi, ra); + case MO_UL: + helper_stl_mmu(env, taddr, val, oi, ra); break; - case MO_LEUQ: - helper_le_stq_mmu(env, taddr, val, oi, ra); - break; - case MO_BEUW: - helper_be_stw_mmu(env, taddr, val, oi, ra); - break; - case MO_BEUL: - helper_be_stl_mmu(env, taddr, val, oi, ra); - break; - case MO_BEUQ: - helper_be_stq_mmu(env, taddr, val, oi, ra); + case MO_UQ: + helper_stq_mmu(env, taddr, val, oi, ra); break; default: g_assert_not_reached(); From de95016dfbf774b34ef510ad43bfce29d627cb62 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 7 Nov 2022 19:08:33 +1100 Subject: [PATCH 06/74] accel/tcg: Implement helper_{ld,st}*_mmu for user-only TCG backends may need to defer to a helper to implement the atomicity required by a given operation. Mirror the interface used in system mode. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- accel/tcg/user-exec.c | 393 ++++++++++++++++++++++++++++------------- include/tcg/tcg-ldst.h | 6 +- tcg/tcg.c | 6 +- 3 files changed, 278 insertions(+), 127 deletions(-) diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index b89fa35a83..d9f9766b7f 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -889,21 +889,6 @@ void page_reset_target_data(target_ulong start, target_ulong last) { } /* The softmmu versions of these helpers are in cputlb.c. */ -/* - * Verify that we have passed the correct MemOp to the correct function. - * - * We could present one function to target code, and dispatch based on - * the MemOp, but so far we have worked hard to avoid an indirect function - * call along the memory path. 
- */ -static void validate_memop(MemOpIdx oi, MemOp expected) -{ -#ifdef CONFIG_DEBUG_TCG - MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); - assert(have == expected); -#endif -} - void helper_unaligned_ld(CPUArchState *env, target_ulong addr) { cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC()); @@ -914,10 +899,9 @@ void helper_unaligned_st(CPUArchState *env, target_ulong addr) cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC()); } -static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr, - MemOpIdx oi, uintptr_t ra, MMUAccessType type) +static void *cpu_mmu_lookup(CPUArchState *env, abi_ptr addr, + MemOp mop, uintptr_t ra, MMUAccessType type) { - MemOp mop = get_memop(oi); int a_bits = get_alignment_bits(mop); void *ret; @@ -933,100 +917,206 @@ static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr, #include "ldst_atomicity.c.inc" -uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) +static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr, + MemOp mop, uintptr_t ra) { void *haddr; uint8_t ret; - validate_memop(oi, MO_UB); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + tcg_debug_assert((mop & MO_SIZE) == MO_8); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); ret = ldub_p(haddr); clear_helper_retaddr(); + return ret; +} + +tcg_target_ulong helper_ldub_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra) +{ + return do_ld1_mmu(env, addr, get_memop(oi), ra); +} + +tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra) +{ + return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra); +} + +uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } +static uint16_t do_ld2_he_mmu(CPUArchState *env, abi_ptr addr, + MemOp mop, uintptr_t ra) +{ + void *haddr; + uint16_t ret; + + tcg_debug_assert((mop & MO_SIZE) == MO_16); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); + ret = load_atom_2(env, ra, haddr, mop); + clear_helper_retaddr(); + return ret; +} + +tcg_target_ulong helper_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + uint16_t ret = do_ld2_he_mmu(env, addr, mop, ra); + + if (mop & MO_BSWAP) { + ret = bswap16(ret); + } + return ret; +} + +tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + int16_t ret = do_ld2_he_mmu(env, addr, mop, ra); + + if (mop & MO_BSWAP) { + ret = bswap16(ret); + } + return ret; +} + uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) { - void *haddr; + MemOp mop = get_memop(oi); uint16_t ret; - validate_memop(oi, MO_BEUW); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = load_atom_2(env, ra, haddr, get_memop(oi)); - clear_helper_retaddr(); + tcg_debug_assert((mop & MO_BSWAP) == MO_BE); + ret = do_ld2_he_mmu(env, addr, mop, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return cpu_to_be16(ret); } -uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) -{ - void *haddr; - uint32_t ret; - - validate_memop(oi, MO_BEUL); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = load_atom_4(env, ra, haddr, get_memop(oi)); - clear_helper_retaddr(); - 
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return cpu_to_be32(ret); -} - -uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) -{ - void *haddr; - uint64_t ret; - - validate_memop(oi, MO_BEUQ); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = load_atom_8(env, ra, haddr, get_memop(oi)); - clear_helper_retaddr(); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return cpu_to_be64(ret); -} - uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) { - void *haddr; + MemOp mop = get_memop(oi); uint16_t ret; - validate_memop(oi, MO_LEUW); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = load_atom_2(env, ra, haddr, get_memop(oi)); - clear_helper_retaddr(); + tcg_debug_assert((mop & MO_BSWAP) == MO_LE); + ret = do_ld2_he_mmu(env, addr, mop, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return cpu_to_le16(ret); } +static uint32_t do_ld4_he_mmu(CPUArchState *env, abi_ptr addr, + MemOp mop, uintptr_t ra) +{ + void *haddr; + uint32_t ret; + + tcg_debug_assert((mop & MO_SIZE) == MO_32); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); + ret = load_atom_4(env, ra, haddr, mop); + clear_helper_retaddr(); + return ret; +} + +tcg_target_ulong helper_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + uint32_t ret = do_ld4_he_mmu(env, addr, mop, ra); + + if (mop & MO_BSWAP) { + ret = bswap32(ret); + } + return ret; +} + +tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + int32_t ret = do_ld4_he_mmu(env, addr, mop, ra); + + if (mop & MO_BSWAP) { + ret = bswap32(ret); + } + return ret; +} + +uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + uint32_t ret; + + tcg_debug_assert((mop & MO_BSWAP) == MO_BE); + ret = do_ld4_he_mmu(env, addr, mop, ra); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return cpu_to_be32(ret); +} + uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) { - void *haddr; + MemOp mop = get_memop(oi); uint32_t ret; - validate_memop(oi, MO_LEUL); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = load_atom_4(env, ra, haddr, get_memop(oi)); - clear_helper_retaddr(); + tcg_debug_assert((mop & MO_BSWAP) == MO_LE); + ret = do_ld4_he_mmu(env, addr, mop, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return cpu_to_le32(ret); } +static uint64_t do_ld8_he_mmu(CPUArchState *env, abi_ptr addr, + MemOp mop, uintptr_t ra) +{ + void *haddr; + uint64_t ret; + + tcg_debug_assert((mop & MO_SIZE) == MO_64); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); + ret = load_atom_8(env, ra, haddr, mop); + clear_helper_retaddr(); + return ret; +} + +uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + uint64_t ret = do_ld8_he_mmu(env, addr, mop, ra); + + if (mop & MO_BSWAP) { + ret = bswap64(ret); + } + return ret; +} + +uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + uint64_t ret; + + tcg_debug_assert((mop & MO_BSWAP) == MO_BE); + ret = do_ld8_he_mmu(env, addr, mop, ra); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return cpu_to_be64(ret); +} + 
uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) { - void *haddr; + MemOp mop = get_memop(oi); uint64_t ret; - validate_memop(oi, MO_LEUQ); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - ret = load_atom_8(env, ra, haddr, get_memop(oi)); - clear_helper_retaddr(); + tcg_debug_assert((mop & MO_BSWAP) == MO_LE); + ret = do_ld8_he_mmu(env, addr, mop, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return cpu_to_le64(ret); } @@ -1037,7 +1127,7 @@ Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, void *haddr; Int128 ret; - validate_memop(oi, MO_128 | MO_BE); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == (MO_128 | MO_BE)); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); memcpy(&ret, haddr, 16); clear_helper_retaddr(); @@ -1055,7 +1145,7 @@ Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr, void *haddr; Int128 ret; - validate_memop(oi, MO_128 | MO_LE); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == (MO_128 | MO_LE)); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); memcpy(&ret, haddr, 16); clear_helper_retaddr(); @@ -1067,87 +1157,153 @@ Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr, return ret; } -void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, - MemOpIdx oi, uintptr_t ra) +static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, + MemOp mop, uintptr_t ra) { void *haddr; - validate_memop(oi, MO_UB); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + tcg_debug_assert((mop & MO_SIZE) == MO_8); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); stb_p(haddr, val); clear_helper_retaddr(); +} + +void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) +{ + do_st1_mmu(env, addr, val, get_memop(oi), ra); +} + +void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, + MemOpIdx oi, uintptr_t ra) +{ + do_st1_mmu(env, addr, val, get_memop(oi), ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } +static void do_st2_he_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, + MemOp mop, uintptr_t ra) +{ + void *haddr; + + tcg_debug_assert((mop & MO_SIZE) == MO_16); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); + store_atom_2(env, ra, haddr, mop, val); + clear_helper_retaddr(); +} + +void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + + if (mop & MO_BSWAP) { + val = bswap16(val); + } + do_st2_he_mmu(env, addr, val, mop, ra); +} + void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, MemOpIdx oi, uintptr_t ra) { - void *haddr; + MemOp mop = get_memop(oi); - validate_memop(oi, MO_BEUW); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - store_atom_2(env, ra, haddr, get_memop(oi), be16_to_cpu(val)); - clear_helper_retaddr(); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); -} - -void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, - MemOpIdx oi, uintptr_t ra) -{ - void *haddr; - - validate_memop(oi, MO_BEUL); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - store_atom_4(env, ra, haddr, get_memop(oi), be32_to_cpu(val)); - clear_helper_retaddr(); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); -} - -void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, - MemOpIdx oi, uintptr_t ra) -{ - void *haddr; - - validate_memop(oi, MO_BEUQ); - haddr = 
cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - store_atom_8(env, ra, haddr, get_memop(oi), be64_to_cpu(val)); - clear_helper_retaddr(); + tcg_debug_assert((mop & MO_BSWAP) == MO_BE); + do_st2_he_mmu(env, addr, be16_to_cpu(val), mop, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + + tcg_debug_assert((mop & MO_BSWAP) == MO_LE); + do_st2_he_mmu(env, addr, le16_to_cpu(val), mop, ra); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} + +static void do_st4_he_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, + MemOp mop, uintptr_t ra) { void *haddr; - validate_memop(oi, MO_LEUW); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - store_atom_2(env, ra, haddr, get_memop(oi), le16_to_cpu(val)); + tcg_debug_assert((mop & MO_SIZE) == MO_32); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); + store_atom_4(env, ra, haddr, mop, val); clear_helper_retaddr(); +} + +void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + + if (mop & MO_BSWAP) { + val = bswap32(val); + } + do_st4_he_mmu(env, addr, val, mop, ra); +} + +void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + + tcg_debug_assert((mop & MO_BSWAP) == MO_BE); + do_st4_he_mmu(env, addr, be32_to_cpu(val), mop, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + + tcg_debug_assert((mop & MO_BSWAP) == MO_LE); + do_st4_he_mmu(env, addr, le32_to_cpu(val), mop, ra); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); +} + +static void do_st8_he_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, + MemOp mop, uintptr_t ra) { void *haddr; - validate_memop(oi, MO_LEUL); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - store_atom_4(env, ra, haddr, get_memop(oi), le32_to_cpu(val)); + tcg_debug_assert((mop & MO_SIZE) == MO_64); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); + store_atom_8(env, ra, haddr, mop, val); clear_helper_retaddr(); +} + +void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + + if (mop & MO_BSWAP) { + val = bswap64(val); + } + do_st8_he_mmu(env, addr, val, mop, ra); +} + +void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + + tcg_debug_assert((mop & MO_BSWAP) == MO_BE); + do_st8_he_mmu(env, addr, cpu_to_be64(val), mop, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, MemOpIdx oi, uintptr_t ra) { - void *haddr; + MemOp mop = get_memop(oi); - validate_memop(oi, MO_LEUQ); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); - store_atom_8(env, ra, haddr, get_memop(oi), le64_to_cpu(val)); - clear_helper_retaddr(); + tcg_debug_assert((mop & MO_BSWAP) == MO_LE); + do_st8_he_mmu(env, addr, cpu_to_le64(val), mop, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } @@ -1156,7 +1312,7 @@ void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, { void *haddr; - validate_memop(oi, MO_128 | MO_BE); + 
tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == (MO_128 | MO_BE)); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); if (!HOST_BIG_ENDIAN) { val = bswap128(val); @@ -1171,7 +1327,7 @@ void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, { void *haddr; - validate_memop(oi, MO_128 | MO_LE); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == (MO_128 | MO_LE)); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); if (HOST_BIG_ENDIAN) { val = bswap128(val); @@ -1269,7 +1425,6 @@ uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr, void *haddr; uint64_t ret; - validate_memop(oi, MO_BEUQ); haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); ret = ldq_p(haddr); clear_helper_retaddr(); diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h index 3d897ca942..57fafa14b1 100644 --- a/include/tcg/tcg-ldst.h +++ b/include/tcg/tcg-ldst.h @@ -25,8 +25,6 @@ #ifndef TCG_LDST_H #define TCG_LDST_H -#ifdef CONFIG_SOFTMMU - /* Value zero-extended to tcg register size. */ tcg_target_ulong helper_ldub_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, uintptr_t retaddr); @@ -58,10 +56,10 @@ void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, MemOpIdx oi, uintptr_t retaddr); -#else +#ifdef CONFIG_USER_ONLY G_NORETURN void helper_unaligned_ld(CPUArchState *env, target_ulong addr); G_NORETURN void helper_unaligned_st(CPUArchState *env, target_ulong addr); -#endif /* CONFIG_SOFTMMU */ +#endif /* CONFIG_USER_ONLY */ #endif /* TCG_LDST_H */ diff --git a/tcg/tcg.c b/tcg/tcg.c index f5f9d8f7e8..a864ff1f4b 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -197,8 +197,7 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *l, const TCGLdstHelperParam *p) __attribute__((unused)); -#ifdef CONFIG_SOFTMMU -static void * const qemu_ld_helpers[MO_SSIZE + 1] = { +static void * const qemu_ld_helpers[MO_SSIZE + 1] __attribute__((unused)) = { [MO_UB] = helper_ldub_mmu, [MO_SB] = helper_ldsb_mmu, [MO_UW] = helper_lduw_mmu, @@ -210,13 +209,12 @@ static void * const qemu_ld_helpers[MO_SSIZE + 1] = { #endif }; -static void * const qemu_st_helpers[MO_SIZE + 1] = { +static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = { [MO_8] = helper_stb_mmu, [MO_16] = helper_stw_mmu, [MO_32] = helper_stl_mmu, [MO_64] = helper_stq_mmu, }; -#endif TCGContext tcg_init_ctx; __thread TCGContext *tcg_ctx; From 0bbf501570801a101a741a7f79e1865c4ec411e2 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 8 Mar 2023 15:43:41 -0800 Subject: [PATCH 07/74] tcg/tci: Use helper_{ld,st}*_mmu for user-only We can now fold these two pieces of code. 
Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/tci.c | 89 ------------------------------------------------------- 1 file changed, 89 deletions(-) diff --git a/tcg/tci.c b/tcg/tci.c index 5bde2e1f2e..15f2f8c463 100644 --- a/tcg/tci.c +++ b/tcg/tci.c @@ -292,7 +292,6 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, MemOp mop = get_memop(oi); uintptr_t ra = (uintptr_t)tb_ptr; -#ifdef CONFIG_SOFTMMU switch (mop & MO_SSIZE) { case MO_UB: return helper_ldub_mmu(env, taddr, oi, ra); @@ -311,58 +310,6 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, default: g_assert_not_reached(); } -#else - void *haddr = g2h(env_cpu(env), taddr); - unsigned a_mask = (1u << get_alignment_bits(mop)) - 1; - uint64_t ret; - - set_helper_retaddr(ra); - if (taddr & a_mask) { - helper_unaligned_ld(env, taddr); - } - switch (mop & (MO_BSWAP | MO_SSIZE)) { - case MO_UB: - ret = ldub_p(haddr); - break; - case MO_SB: - ret = ldsb_p(haddr); - break; - case MO_LEUW: - ret = lduw_le_p(haddr); - break; - case MO_LESW: - ret = ldsw_le_p(haddr); - break; - case MO_LEUL: - ret = (uint32_t)ldl_le_p(haddr); - break; - case MO_LESL: - ret = (int32_t)ldl_le_p(haddr); - break; - case MO_LEUQ: - ret = ldq_le_p(haddr); - break; - case MO_BEUW: - ret = lduw_be_p(haddr); - break; - case MO_BESW: - ret = ldsw_be_p(haddr); - break; - case MO_BEUL: - ret = (uint32_t)ldl_be_p(haddr); - break; - case MO_BESL: - ret = (int32_t)ldl_be_p(haddr); - break; - case MO_BEUQ: - ret = ldq_be_p(haddr); - break; - default: - g_assert_not_reached(); - } - clear_helper_retaddr(); - return ret; -#endif } static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, @@ -371,7 +318,6 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, MemOp mop = get_memop(oi); uintptr_t ra = (uintptr_t)tb_ptr; -#ifdef CONFIG_SOFTMMU switch (mop & MO_SIZE) { case MO_UB: helper_stb_mmu(env, taddr, val, oi, ra); @@ -388,41 +334,6 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, default: g_assert_not_reached(); } -#else - void *haddr = g2h(env_cpu(env), taddr); - unsigned a_mask = (1u << get_alignment_bits(mop)) - 1; - - set_helper_retaddr(ra); - if (taddr & a_mask) { - helper_unaligned_st(env, taddr); - } - switch (mop & (MO_BSWAP | MO_SIZE)) { - case MO_UB: - stb_p(haddr, val); - break; - case MO_LEUW: - stw_le_p(haddr, val); - break; - case MO_LEUL: - stl_le_p(haddr, val); - break; - case MO_LEUQ: - stq_le_p(haddr, val); - break; - case MO_BEUW: - stw_be_p(haddr, val); - break; - case MO_BEUL: - stl_be_p(haddr, val); - break; - case MO_BEUQ: - stq_be_p(haddr, val); - break; - default: - g_assert_not_reached(); - } - clear_helper_retaddr(); -#endif } #if TCG_TARGET_REG_BITS == 64 From 35c653c4029794f67a523191941104fe12f2b22d Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 14 Feb 2023 22:16:17 -1000 Subject: [PATCH 08/74] tcg: Add 128-bit guest memory primitives Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 399 +++++++++++++++++++++++++-------- accel/tcg/ldst_atomicity.c.inc | 184 +++++++++++++++ accel/tcg/tcg-runtime.h | 3 + accel/tcg/user-exec.c | 94 ++++++-- include/tcg/tcg-ldst.h | 4 + tcg/tcg-op.c | 173 +++++++++----- 6 files changed, 679 insertions(+), 178 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 34796ef568..49e49f75a4 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -40,6 +40,7 @@ #include "qemu/plugin-memory.h" #endif #include "tcg/tcg-ldst.h" 
+#include "exec/helper-proto.h" /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ /* #define DEBUG_TLB */ @@ -2162,6 +2163,31 @@ static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra, return (ret_be << (p->size * 8)) | x; } +/** + * do_ld_parts_be16 + * @p: translation parameters + * @ret_be: accumulated data + * + * As do_ld_bytes_beN, but with one atomic load. + * 16 aligned bytes are guaranteed to cover the load. + */ +static Int128 do_ld_whole_be16(CPUArchState *env, uintptr_t ra, + MMULookupPageData *p, uint64_t ret_be) +{ + int o = p->addr & 15; + Int128 x, y = load_atomic16_or_exit(env, ra, p->haddr - o); + int size = p->size; + + if (!HOST_BIG_ENDIAN) { + y = bswap128(y); + } + y = int128_lshift(y, o * 8); + y = int128_urshift(y, (16 - size) * 8); + x = int128_make64(ret_be); + x = int128_lshift(x, size * 8); + return int128_or(x, y); +} + /* * Wrapper for the above. */ @@ -2211,6 +2237,63 @@ static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p, } } +/* + * Wrapper for the above, for 8 < size < 16. + */ +static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p, + uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra) +{ + int size = p->size; + uint64_t b; + MemOp atom; + + if (unlikely(p->flags & TLB_MMIO)) { + p->size = size - 8; + a = do_ld_mmio_beN(env, p, a, mmu_idx, MMU_DATA_LOAD, ra); + p->addr += p->size; + p->size = 8; + b = do_ld_mmio_beN(env, p, 0, mmu_idx, MMU_DATA_LOAD, ra); + return int128_make128(b, a); + } + + /* + * It is a given that we cross a page and therefore there is no + * atomicity for the load as a whole, but subobjects may need attention. + */ + atom = mop & MO_ATOM_MASK; + switch (atom) { + case MO_ATOM_SUBALIGN: + p->size = size - 8; + a = do_ld_parts_beN(p, a); + p->haddr += size - 8; + p->size = 8; + b = do_ld_parts_beN(p, 0); + break; + + case MO_ATOM_WITHIN16_PAIR: + /* Since size > 8, this is the half that must be atomic. */ + return do_ld_whole_be16(env, ra, p, a); + + case MO_ATOM_IFALIGN_PAIR: + /* + * Since size > 8, both halves are misaligned, + * and so neither is atomic. + */ + case MO_ATOM_IFALIGN: + case MO_ATOM_WITHIN16: + case MO_ATOM_NONE: + p->size = size - 8; + a = do_ld_bytes_beN(p, a); + b = ldq_be_p(p->haddr + size - 8); + break; + + default: + g_assert_not_reached(); + } + + return int128_make128(b, a); +} + static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx, MMUAccessType type, uintptr_t ra) { @@ -2399,6 +2482,80 @@ tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, target_ulong addr, return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr); } +static Int128 do_ld16_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra) +{ + MMULookupLocals l; + bool crosspage; + uint64_t a, b; + Int128 ret; + int first; + + crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l); + if (likely(!crosspage)) { + /* Perform the load host endian. */ + if (unlikely(l.page[0].flags & TLB_MMIO)) { + QEMU_IOTHREAD_LOCK_GUARD(); + a = io_readx(env, l.page[0].full, l.mmu_idx, addr, + ra, MMU_DATA_LOAD, MO_64); + b = io_readx(env, l.page[0].full, l.mmu_idx, addr + 8, + ra, MMU_DATA_LOAD, MO_64); + ret = int128_make128(HOST_BIG_ENDIAN ? b : a, + HOST_BIG_ENDIAN ? 
a : b); + } else { + ret = load_atom_16(env, ra, l.page[0].haddr, l.memop); + } + if (l.memop & MO_BSWAP) { + ret = bswap128(ret); + } + return ret; + } + + first = l.page[0].size; + if (first == 8) { + MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64; + + a = do_ld_8(env, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); + b = do_ld_8(env, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); + if ((mop8 & MO_BSWAP) == MO_LE) { + ret = int128_make128(a, b); + } else { + ret = int128_make128(b, a); + } + return ret; + } + + if (first < 8) { + a = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, + MMU_DATA_LOAD, l.memop, ra); + ret = do_ld16_beN(env, &l.page[1], a, l.mmu_idx, l.memop, ra); + } else { + ret = do_ld16_beN(env, &l.page[0], 0, l.mmu_idx, l.memop, ra); + b = int128_getlo(ret); + ret = int128_lshift(ret, l.page[1].size * 8); + a = int128_gethi(ret); + b = do_ld_beN(env, &l.page[1], b, l.mmu_idx, + MMU_DATA_LOAD, l.memop, ra); + ret = int128_make128(b, a); + } + if ((l.memop & MO_BSWAP) == MO_LE) { + ret = bswap128(ret); + } + return ret; +} + +Int128 helper_ld16_mmu(CPUArchState *env, target_ulong addr, + uint32_t oi, uintptr_t retaddr) +{ + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); + return do_ld16_mmu(env, addr, oi, retaddr); +} + +Int128 helper_ld_i128(CPUArchState *env, target_ulong addr, uint32_t oi) +{ + return helper_ld16_mmu(env, addr, oi, GETPC()); +} + /* * Load helpers for cpu_ldst.h. */ @@ -2487,59 +2644,23 @@ uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) { - MemOp mop = get_memop(oi); - int mmu_idx = get_mmuidx(oi); - MemOpIdx new_oi; - unsigned a_bits; - uint64_t h, l; + Int128 ret; - tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128)); - a_bits = get_alignment_bits(mop); - - /* Handle CPU specific unaligned behaviour */ - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD, - mmu_idx, ra); - } - - /* Construct an unaligned 64-bit replacement MemOpIdx. */ - mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; - new_oi = make_memop_idx(mop, mmu_idx); - - h = helper_ldq_mmu(env, addr, new_oi, ra); - l = helper_ldq_mmu(env, addr + 8, new_oi, ra); - - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return int128_make128(l, h); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_BE|MO_128)); + ret = do_ld16_mmu(env, addr, oi, ra); + plugin_load_cb(env, addr, oi); + return ret; } Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) { - MemOp mop = get_memop(oi); - int mmu_idx = get_mmuidx(oi); - MemOpIdx new_oi; - unsigned a_bits; - uint64_t h, l; + Int128 ret; - tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128)); - a_bits = get_alignment_bits(mop); - - /* Handle CPU specific unaligned behaviour */ - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD, - mmu_idx, ra); - } - - /* Construct an unaligned 64-bit replacement MemOpIdx. 
*/ - mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; - new_oi = make_memop_idx(mop, mmu_idx); - - l = helper_ldq_mmu(env, addr, new_oi, ra); - h = helper_ldq_mmu(env, addr + 8, new_oi, ra); - - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - return int128_make128(l, h); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_LE|MO_128)); + ret = do_ld16_mmu(env, addr, oi, ra); + plugin_load_cb(env, addr, oi); + return ret; } /* @@ -2625,6 +2746,60 @@ static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p, } } +/* + * Wrapper for the above, for 8 < size < 16. + */ +static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p, + Int128 val_le, int mmu_idx, + MemOp mop, uintptr_t ra) +{ + int size = p->size; + MemOp atom; + + if (unlikely(p->flags & TLB_MMIO)) { + p->size = 8; + do_st_mmio_leN(env, p, int128_getlo(val_le), mmu_idx, ra); + p->size = size - 8; + p->addr += 8; + return do_st_mmio_leN(env, p, int128_gethi(val_le), mmu_idx, ra); + } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { + return int128_gethi(val_le) >> ((size - 8) * 8); + } + + /* + * It is a given that we cross a page and therefore there is no atomicity + * for the store as a whole, but subobjects may need attention. + */ + atom = mop & MO_ATOM_MASK; + switch (atom) { + case MO_ATOM_SUBALIGN: + store_parts_leN(p->haddr, 8, int128_getlo(val_le)); + return store_parts_leN(p->haddr + 8, p->size - 8, + int128_gethi(val_le)); + + case MO_ATOM_WITHIN16_PAIR: + /* Since size > 8, this is the half that must be atomic. */ + if (!HAVE_al16) { + cpu_loop_exit_atomic(env_cpu(env), ra); + } + return store_whole_le16(p->haddr, p->size, val_le); + + case MO_ATOM_IFALIGN_PAIR: + /* + * Since size > 8, both halves are misaligned, + * and so neither is atomic. + */ + case MO_ATOM_IFALIGN: + case MO_ATOM_NONE: + stq_le_p(p->haddr, int128_getlo(val_le)); + return store_bytes_leN(p->haddr + 8, p->size - 8, + int128_gethi(val_le)); + + default: + g_assert_not_reached(); + } +} + static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val, int mmu_idx, uintptr_t ra) { @@ -2781,6 +2956,80 @@ void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, do_st8_mmu(env, addr, val, oi, retaddr); } +static void do_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, + MemOpIdx oi, uintptr_t ra) +{ + MMULookupLocals l; + bool crosspage; + uint64_t a, b; + int first; + + crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); + if (likely(!crosspage)) { + /* Swap to host endian if necessary, then store. 
*/ + if (l.memop & MO_BSWAP) { + val = bswap128(val); + } + if (unlikely(l.page[0].flags & TLB_MMIO)) { + QEMU_IOTHREAD_LOCK_GUARD(); + if (HOST_BIG_ENDIAN) { + b = int128_getlo(val), a = int128_gethi(val); + } else { + a = int128_getlo(val), b = int128_gethi(val); + } + io_writex(env, l.page[0].full, l.mmu_idx, a, addr, ra, MO_64); + io_writex(env, l.page[0].full, l.mmu_idx, b, addr + 8, ra, MO_64); + } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) { + /* nothing */ + } else { + store_atom_16(env, ra, l.page[0].haddr, l.memop, val); + } + return; + } + + first = l.page[0].size; + if (first == 8) { + MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64; + + if (l.memop & MO_BSWAP) { + val = bswap128(val); + } + if (HOST_BIG_ENDIAN) { + b = int128_getlo(val), a = int128_gethi(val); + } else { + a = int128_getlo(val), b = int128_gethi(val); + } + do_st_8(env, &l.page[0], a, l.mmu_idx, mop8, ra); + do_st_8(env, &l.page[1], b, l.mmu_idx, mop8, ra); + return; + } + + if ((l.memop & MO_BSWAP) != MO_LE) { + val = bswap128(val); + } + if (first < 8) { + do_st_leN(env, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra); + val = int128_urshift(val, first * 8); + do_st16_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); + } else { + b = do_st16_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra); + do_st_leN(env, &l.page[1], b, l.mmu_idx, l.memop, ra); + } +} + +void helper_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, + MemOpIdx oi, uintptr_t retaddr) +{ + tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); + do_st16_mmu(env, addr, val, oi, retaddr); +} + +void helper_st_i128(CPUArchState *env, target_ulong addr, Int128 val, + MemOpIdx oi) +{ + helper_st16_mmu(env, addr, val, oi, GETPC()); +} + /* * Store Helpers for cpu_ldst.h */ @@ -2845,58 +3094,20 @@ void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val, plugin_store_cb(env, addr, oi); } -void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 val, - MemOpIdx oi, uintptr_t ra) +void cpu_st16_be_mmu(CPUArchState *env, target_ulong addr, Int128 val, + MemOpIdx oi, uintptr_t retaddr) { - MemOp mop = get_memop(oi); - int mmu_idx = get_mmuidx(oi); - MemOpIdx new_oi; - unsigned a_bits; - - tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128)); - a_bits = get_alignment_bits(mop); - - /* Handle CPU specific unaligned behaviour */ - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, - mmu_idx, ra); - } - - /* Construct an unaligned 64-bit replacement MemOpIdx. 
*/ - mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; - new_oi = make_memop_idx(mop, mmu_idx); - - helper_stq_mmu(env, addr, int128_gethi(val), new_oi, ra); - helper_stq_mmu(env, addr + 8, int128_getlo(val), new_oi, ra); - - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_BE|MO_128)); + do_st16_mmu(env, addr, val, oi, retaddr); + plugin_store_cb(env, addr, oi); } -void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val, - MemOpIdx oi, uintptr_t ra) +void cpu_st16_le_mmu(CPUArchState *env, target_ulong addr, Int128 val, + MemOpIdx oi, uintptr_t retaddr) { - MemOp mop = get_memop(oi); - int mmu_idx = get_mmuidx(oi); - MemOpIdx new_oi; - unsigned a_bits; - - tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128)); - a_bits = get_alignment_bits(mop); - - /* Handle CPU specific unaligned behaviour */ - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, - mmu_idx, ra); - } - - /* Construct an unaligned 64-bit replacement MemOpIdx. */ - mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; - new_oi = make_memop_idx(mop, mmu_idx); - - helper_stq_mmu(env, addr, int128_getlo(val), new_oi, ra); - helper_stq_mmu(env, addr + 8, int128_gethi(val), new_oi, ra); - - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); + tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_LE|MO_128)); + do_st16_mmu(env, addr, val, oi, retaddr); + plugin_store_cb(env, addr, oi); } #include "ldst_common.c.inc" diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc index 1f39e43896..ce73b32def 100644 --- a/accel/tcg/ldst_atomicity.c.inc +++ b/accel/tcg/ldst_atomicity.c.inc @@ -439,6 +439,21 @@ static inline uint64_t load_atom_8_by_4(void *pv) } } +/** + * load_atom_8_by_8_or_4: + * @pv: host address + * + * Load 8 bytes from aligned @pv, with at least 4-byte atomicity. + */ +static inline uint64_t load_atom_8_by_8_or_4(void *pv) +{ + if (HAVE_al8_fast) { + return load_atomic8(pv); + } else { + return load_atom_8_by_4(pv); + } +} + /** * load_atom_2: * @p: host address @@ -571,6 +586,64 @@ static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra, } } +/** + * load_atom_16: + * @p: host address + * @memop: the full memory op + * + * Load 16 bytes from @p, honoring the atomicity of @memop. + */ +static Int128 load_atom_16(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop) +{ + uintptr_t pi = (uintptr_t)pv; + int atmax; + Int128 r; + uint64_t a, b; + + /* + * If the host does not support 16-byte atomics, wait until we have + * examined the atomicity parameters below. + */ + if (HAVE_al16_fast && likely((pi & 15) == 0)) { + return load_atomic16(pv); + } + + atmax = required_atomicity(env, pi, memop); + switch (atmax) { + case MO_8: + memcpy(&r, pv, 16); + return r; + case MO_16: + a = load_atom_8_by_2(pv); + b = load_atom_8_by_2(pv + 8); + break; + case MO_32: + a = load_atom_8_by_4(pv); + b = load_atom_8_by_4(pv + 8); + break; + case MO_64: + if (!HAVE_al8) { + cpu_loop_exit_atomic(env_cpu(env), ra); + } + a = load_atomic8(pv); + b = load_atomic8(pv + 8); + break; + case -MO_64: + if (!HAVE_al8) { + cpu_loop_exit_atomic(env_cpu(env), ra); + } + a = load_atom_extract_al8x2(pv); + b = load_atom_extract_al8x2(pv + 8); + break; + case MO_128: + return load_atomic16_or_exit(env, ra, pv); + default: + g_assert_not_reached(); + } + return int128_make128(HOST_BIG_ENDIAN ? b : a, HOST_BIG_ENDIAN ? 
a : b); +} + /** * store_atomic2: * @pv: host address @@ -612,6 +685,35 @@ static inline void store_atomic8(void *pv, uint64_t val) qatomic_set__nocheck(p, val); } +/** + * store_atomic16: + * @pv: host address + * @val: value to store + * + * Atomically store 16 aligned bytes to @pv. + */ +static inline void store_atomic16(void *pv, Int128Alias val) +{ +#if defined(CONFIG_ATOMIC128) + __uint128_t *pu = __builtin_assume_aligned(pv, 16); + qatomic_set__nocheck(pu, val.u); +#elif defined(CONFIG_CMPXCHG128) + __uint128_t *pu = __builtin_assume_aligned(pv, 16); + __uint128_t o; + + /* + * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always + * defer to libatomic, so we must use __sync_*_compare_and_swap_16 + * and accept the sequential consistency that comes with it. + */ + do { + o = *pu; + } while (!__sync_bool_compare_and_swap_16(pu, o, val.u)); +#else + qemu_build_not_reached(); +#endif +} + /** * store_atom_4x2 */ @@ -1055,3 +1157,85 @@ static void store_atom_8(CPUArchState *env, uintptr_t ra, } cpu_loop_exit_atomic(env_cpu(env), ra); } + +/** + * store_atom_16: + * @p: host address + * @val: the value to store + * @memop: the full memory op + * + * Store 16 bytes to @p, honoring the atomicity of @memop. + */ +static void store_atom_16(CPUArchState *env, uintptr_t ra, + void *pv, MemOp memop, Int128 val) +{ + uintptr_t pi = (uintptr_t)pv; + uint64_t a, b; + int atmax; + + if (HAVE_al16_fast && likely((pi & 15) == 0)) { + store_atomic16(pv, val); + return; + } + + atmax = required_atomicity(env, pi, memop); + + a = HOST_BIG_ENDIAN ? int128_gethi(val) : int128_getlo(val); + b = HOST_BIG_ENDIAN ? int128_getlo(val) : int128_gethi(val); + switch (atmax) { + case MO_8: + memcpy(pv, &val, 16); + return; + case MO_16: + store_atom_8_by_2(pv, a); + store_atom_8_by_2(pv + 8, b); + return; + case MO_32: + store_atom_8_by_4(pv, a); + store_atom_8_by_4(pv + 8, b); + return; + case MO_64: + if (HAVE_al8) { + store_atomic8(pv, a); + store_atomic8(pv + 8, b); + return; + } + break; + case -MO_64: + if (HAVE_al16) { + uint64_t val_le; + int s2 = pi & 15; + int s1 = 16 - s2; + + if (HOST_BIG_ENDIAN) { + val = bswap128(val); + } + switch (s2) { + case 1 ... 7: + val_le = store_whole_le16(pv, s1, val); + store_bytes_leN(pv + s1, s2, val_le); + break; + case 9 ... 
15: + store_bytes_leN(pv, s1, int128_getlo(val)); + val = int128_urshift(val, s1 * 8); + store_whole_le16(pv + s1, s2, val); + break; + case 0: /* aligned */ + case 8: /* atmax MO_64 */ + default: + g_assert_not_reached(); + } + return; + } + break; + case MO_128: + if (HAVE_al16) { + store_atomic16(pv, val); + return; + } + break; + default: + g_assert_not_reached(); + } + cpu_loop_exit_atomic(env_cpu(env), ra); +} diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h index b8e6421c8a..d9adc646c1 100644 --- a/accel/tcg/tcg-runtime.h +++ b/accel/tcg/tcg-runtime.h @@ -39,6 +39,9 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env) DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr) #endif /* IN_HELPER_PROTO */ +DEF_HELPER_FLAGS_3(ld_i128, TCG_CALL_NO_WG, i128, env, tl, i32) +DEF_HELPER_FLAGS_4(st_i128, TCG_CALL_NO_WG, void, env, tl, i128, i32) + DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG, i32, env, tl, i32, i32, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG, diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index d9f9766b7f..8f86254eb4 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -1121,18 +1121,45 @@ uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, return cpu_to_le64(ret); } -Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra) +static Int128 do_ld16_he_mmu(CPUArchState *env, abi_ptr addr, + MemOp mop, uintptr_t ra) { void *haddr; Int128 ret; - tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == (MO_128 | MO_BE)); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - memcpy(&ret, haddr, 16); + tcg_debug_assert((mop & MO_SIZE) == MO_128); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); + ret = load_atom_16(env, ra, haddr, mop); clear_helper_retaddr(); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} +Int128 helper_ld16_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + Int128 ret = do_ld16_he_mmu(env, addr, mop, ra); + + if (mop & MO_BSWAP) { + ret = bswap128(ret); + } + return ret; +} + +Int128 helper_ld_i128(CPUArchState *env, target_ulong addr, MemOpIdx oi) +{ + return helper_ld16_mmu(env, addr, oi, GETPC()); +} + +Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + Int128 ret; + + tcg_debug_assert((mop & MO_BSWAP) == MO_BE); + ret = do_ld16_he_mmu(env, addr, mop, ra); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); if (!HOST_BIG_ENDIAN) { ret = bswap128(ret); } @@ -1142,15 +1169,12 @@ Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) { - void *haddr; + MemOp mop = get_memop(oi); Int128 ret; - tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == (MO_128 | MO_LE)); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); - memcpy(&ret, haddr, 16); - clear_helper_retaddr(); + tcg_debug_assert((mop & MO_BSWAP) == MO_LE); + ret = do_ld16_he_mmu(env, addr, mop, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); - if (HOST_BIG_ENDIAN) { ret = bswap128(ret); } @@ -1307,33 +1331,57 @@ void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, - Int128 val, MemOpIdx oi, uintptr_t ra) +static void 
do_st16_he_mmu(CPUArchState *env, abi_ptr addr, Int128 val, + MemOp mop, uintptr_t ra) { void *haddr; - tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == (MO_128 | MO_BE)); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + tcg_debug_assert((mop & MO_SIZE) == MO_128); + haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); + store_atom_16(env, ra, haddr, mop, val); + clear_helper_retaddr(); +} + +void helper_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, + MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + + if (mop & MO_BSWAP) { + val = bswap128(val); + } + do_st16_he_mmu(env, addr, val, mop, ra); +} + +void helper_st_i128(CPUArchState *env, target_ulong addr, + Int128 val, MemOpIdx oi) +{ + helper_st16_mmu(env, addr, val, oi, GETPC()); +} + +void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, + Int128 val, MemOpIdx oi, uintptr_t ra) +{ + MemOp mop = get_memop(oi); + + tcg_debug_assert((mop & MO_BSWAP) == MO_BE); if (!HOST_BIG_ENDIAN) { val = bswap128(val); } - memcpy(haddr, &val, 16); - clear_helper_retaddr(); + do_st16_he_mmu(env, addr, val, mop, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val, MemOpIdx oi, uintptr_t ra) { - void *haddr; + MemOp mop = get_memop(oi); - tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == (MO_128 | MO_LE)); - haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + tcg_debug_assert((mop & MO_BSWAP) == MO_LE); if (HOST_BIG_ENDIAN) { val = bswap128(val); } - memcpy(haddr, &val, 16); - clear_helper_retaddr(); + do_st16_he_mmu(env, addr, val, mop, ra); qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h index 57fafa14b1..64f48e6990 100644 --- a/include/tcg/tcg-ldst.h +++ b/include/tcg/tcg-ldst.h @@ -34,6 +34,8 @@ tcg_target_ulong helper_ldul_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, uintptr_t retaddr); uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, uintptr_t retaddr); +Int128 helper_ld16_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); /* Value sign-extended to tcg register size. */ tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, target_ulong addr, @@ -55,6 +57,8 @@ void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, MemOpIdx oi, uintptr_t retaddr); void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, MemOpIdx oi, uintptr_t retaddr); +void helper_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, + MemOpIdx oi, uintptr_t retaddr); #ifdef CONFIG_USER_ONLY diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index 3136cef81a..22481a344c 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -3119,6 +3119,37 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) } } +/* + * Return true if @mop, without knowledge of the pointer alignment, + * does not require 16-byte atomicity, and it would be adventagous + * to avoid a call to a helper function. + */ +static bool use_two_i64_for_i128(MemOp mop) +{ +#ifdef CONFIG_SOFTMMU + /* Two softmmu tlb lookups is larger than one function call. */ + return false; +#else + /* + * For user-only, two 64-bit operations may well be smaller than a call. + * Determine if that would be legal for the requested atomicity. 
+ */ + switch (mop & MO_ATOM_MASK) { + case MO_ATOM_NONE: + case MO_ATOM_IFALIGN_PAIR: + return true; + case MO_ATOM_IFALIGN: + case MO_ATOM_SUBALIGN: + case MO_ATOM_WITHIN16: + case MO_ATOM_WITHIN16_PAIR: + /* In a serialized context, no atomicity is required. */ + return !(tcg_ctx->gen_tb->cflags & CF_PARALLEL); + default: + g_assert_not_reached(); + } +#endif +} + static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) { MemOp mop_1 = orig, mop_2; @@ -3164,93 +3195,113 @@ static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) ret[1] = mop_2; } +#if TARGET_LONG_BITS == 64 +#define tcg_temp_ebb_new tcg_temp_ebb_new_i64 +#else +#define tcg_temp_ebb_new tcg_temp_ebb_new_i32 +#endif + void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) { - MemOp mop[2]; - TCGv addr_p8; - TCGv_i64 x, y; + MemOpIdx oi = make_memop_idx(memop, idx); - canonicalize_memop_i128_as_i64(mop, memop); + tcg_debug_assert((memop & MO_SIZE) == MO_128); + tcg_debug_assert((memop & MO_SIGN) == 0); tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); addr = plugin_prep_mem_callbacks(addr); - /* TODO: respect atomicity of the operation. */ /* TODO: allow the tcg backend to see the whole operation. */ - /* - * Since there are no global TCGv_i128, there is no visible state - * changed if the second load faults. Load directly into the two - * subwords. - */ - if ((memop & MO_BSWAP) == MO_LE) { - x = TCGV128_LOW(val); - y = TCGV128_HIGH(val); + if (use_two_i64_for_i128(memop)) { + MemOp mop[2]; + TCGv addr_p8; + TCGv_i64 x, y; + + canonicalize_memop_i128_as_i64(mop, memop); + + /* + * Since there are no global TCGv_i128, there is no visible state + * changed if the second load faults. Load directly into the two + * subwords. + */ + if ((memop & MO_BSWAP) == MO_LE) { + x = TCGV128_LOW(val); + y = TCGV128_HIGH(val); + } else { + x = TCGV128_HIGH(val); + y = TCGV128_LOW(val); + } + + gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr, mop[0], idx); + + if ((mop[0] ^ memop) & MO_BSWAP) { + tcg_gen_bswap64_i64(x, x); + } + + addr_p8 = tcg_temp_ebb_new(); + tcg_gen_addi_tl(addr_p8, addr, 8); + gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8, mop[1], idx); + tcg_temp_free(addr_p8); + + if ((mop[0] ^ memop) & MO_BSWAP) { + tcg_gen_bswap64_i64(y, y); + } } else { - x = TCGV128_HIGH(val); - y = TCGV128_LOW(val); + gen_helper_ld_i128(val, cpu_env, addr, tcg_constant_i32(oi)); } - gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr, mop[0], idx); - - if ((mop[0] ^ memop) & MO_BSWAP) { - tcg_gen_bswap64_i64(x, x); - } - - addr_p8 = tcg_temp_new(); - tcg_gen_addi_tl(addr_p8, addr, 8); - gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8, mop[1], idx); - tcg_temp_free(addr_p8); - - if ((mop[0] ^ memop) & MO_BSWAP) { - tcg_gen_bswap64_i64(y, y); - } - - plugin_gen_mem_callbacks(addr, make_memop_idx(memop, idx), - QEMU_PLUGIN_MEM_R); + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); } void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) { - MemOp mop[2]; - TCGv addr_p8; - TCGv_i64 x, y; + MemOpIdx oi = make_memop_idx(memop, idx); - canonicalize_memop_i128_as_i64(mop, memop); + tcg_debug_assert((memop & MO_SIZE) == MO_128); + tcg_debug_assert((memop & MO_SIGN) == 0); tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST); addr = plugin_prep_mem_callbacks(addr); - /* TODO: respect atomicity of the operation. */ /* TODO: allow the tcg backend to see the whole operation. 
*/ - if ((memop & MO_BSWAP) == MO_LE) { - x = TCGV128_LOW(val); - y = TCGV128_HIGH(val); + if (use_two_i64_for_i128(memop)) { + MemOp mop[2]; + TCGv addr_p8; + TCGv_i64 x, y; + + canonicalize_memop_i128_as_i64(mop, memop); + + if ((memop & MO_BSWAP) == MO_LE) { + x = TCGV128_LOW(val); + y = TCGV128_HIGH(val); + } else { + x = TCGV128_HIGH(val); + y = TCGV128_LOW(val); + } + + addr_p8 = tcg_temp_ebb_new(); + if ((mop[0] ^ memop) & MO_BSWAP) { + TCGv_i64 t = tcg_temp_ebb_new_i64(); + + tcg_gen_bswap64_i64(t, x); + gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr, mop[0], idx); + tcg_gen_bswap64_i64(t, y); + tcg_gen_addi_tl(addr_p8, addr, 8); + gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr_p8, mop[1], idx); + tcg_temp_free_i64(t); + } else { + gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr, mop[0], idx); + tcg_gen_addi_tl(addr_p8, addr, 8); + gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8, mop[1], idx); + } + tcg_temp_free(addr_p8); } else { - x = TCGV128_HIGH(val); - y = TCGV128_LOW(val); + gen_helper_st_i128(cpu_env, addr, val, tcg_constant_i32(oi)); } - addr_p8 = tcg_temp_new(); - if ((mop[0] ^ memop) & MO_BSWAP) { - TCGv_i64 t = tcg_temp_ebb_new_i64(); - - tcg_gen_bswap64_i64(t, x); - gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr, mop[0], idx); - tcg_gen_bswap64_i64(t, y); - tcg_gen_addi_tl(addr_p8, addr, 8); - gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr_p8, mop[1], idx); - tcg_temp_free_i64(t); - } else { - gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr, mop[0], idx); - tcg_gen_addi_tl(addr_p8, addr, 8); - gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8, mop[1], idx); - } - tcg_temp_free(addr_p8); - - plugin_gen_mem_callbacks(addr, make_memop_idx(memop, idx), - QEMU_PLUGIN_MEM_W); + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); } static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) From e61f1efeb730fd64441131ea721086065904ff67 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 5 Nov 2022 11:34:58 +0000 Subject: [PATCH 09/74] meson: Detect atomic128 support with optimization There is an edge condition prior to gcc13 for which optimization is required to generate 16-byte atomic sequences. Detect this. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- accel/tcg/ldst_atomicity.c.inc | 29 ++++++++++++++++--- meson.build | 52 ++++++++++++++++++++++------------ 2 files changed, 59 insertions(+), 22 deletions(-) diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc index ce73b32def..ba5db7c366 100644 --- a/accel/tcg/ldst_atomicity.c.inc +++ b/accel/tcg/ldst_atomicity.c.inc @@ -16,6 +16,23 @@ #endif #define HAVE_al8_fast (ATOMIC_REG_SIZE >= 8) +/* + * If __alignof(unsigned __int128) < 16, GCC may refuse to inline atomics + * that are supported by the host, e.g. s390x. We can force the pointer to + * have our known alignment with __builtin_assume_aligned, however prior to + * GCC 13 that was only reliable with optimization enabled. See + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389 + */ +#if defined(CONFIG_ATOMIC128_OPT) +# if !defined(__OPTIMIZE__) +# define ATTRIBUTE_ATOMIC128_OPT __attribute__((optimize("O1"))) +# endif +# define CONFIG_ATOMIC128 +#endif +#ifndef ATTRIBUTE_ATOMIC128_OPT +# define ATTRIBUTE_ATOMIC128_OPT +#endif + #if defined(CONFIG_ATOMIC128) # define HAVE_al16_fast true #else @@ -152,7 +169,8 @@ static inline uint64_t load_atomic8(void *pv) * * Atomically load 16 aligned bytes from @pv. 
*/ -static inline Int128 load_atomic16(void *pv) +static inline Int128 ATTRIBUTE_ATOMIC128_OPT +load_atomic16(void *pv) { #ifdef CONFIG_ATOMIC128 __uint128_t *p = __builtin_assume_aligned(pv, 16); @@ -356,7 +374,8 @@ static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra, * cross an 16-byte boundary then the access must be 16-byte atomic, * otherwise the access must be 8-byte atomic. */ -static inline uint64_t load_atom_extract_al16_or_al8(void *pv, int s) +static inline uint64_t ATTRIBUTE_ATOMIC128_OPT +load_atom_extract_al16_or_al8(void *pv, int s) { #if defined(CONFIG_ATOMIC128) uintptr_t pi = (uintptr_t)pv; @@ -692,7 +711,8 @@ static inline void store_atomic8(void *pv, uint64_t val) * * Atomically store 16 aligned bytes to @pv. */ -static inline void store_atomic16(void *pv, Int128Alias val) +static inline void ATTRIBUTE_ATOMIC128_OPT +store_atomic16(void *pv, Int128Alias val) { #if defined(CONFIG_ATOMIC128) __uint128_t *pu = __builtin_assume_aligned(pv, 16); @@ -790,7 +810,8 @@ static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk) * * Atomically store @val to @p masked by @msk. */ -static void store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk) +static void ATTRIBUTE_ATOMIC128_OPT +store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk) { #if defined(CONFIG_ATOMIC128) __uint128_t *pu, old, new; diff --git a/meson.build b/meson.build index 5e2807ea7c..4dddccb890 100644 --- a/meson.build +++ b/meson.build @@ -2259,23 +2259,21 @@ config_host_data.set('HAVE_BROKEN_SIZE_MAX', not cc.compiles(''' return printf("%zu", SIZE_MAX); }''', args: ['-Werror'])) -atomic_test = ''' +# See if 64-bit atomic operations are supported. +# Note that without __atomic builtins, we can only +# assume atomic loads/stores max at pointer size. +config_host_data.set('CONFIG_ATOMIC64', cc.links(''' #include int main(void) { - @0@ x = 0, y = 0; + uint64_t x = 0, y = 0; y = __atomic_load_n(&x, __ATOMIC_RELAXED); __atomic_store_n(&x, y, __ATOMIC_RELAXED); __atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); __atomic_exchange_n(&x, y, __ATOMIC_RELAXED); __atomic_fetch_add(&x, y, __ATOMIC_RELAXED); return 0; - }''' - -# See if 64-bit atomic operations are supported. -# Note that without __atomic builtins, we can only -# assume atomic loads/stores max at pointer size. -config_host_data.set('CONFIG_ATOMIC64', cc.links(atomic_test.format('uint64_t'))) + }''')) has_int128 = cc.links(''' __int128_t a; @@ -2293,21 +2291,39 @@ if has_int128 # "do we have 128-bit atomics which are handled inline and specifically not # via libatomic". The reason we can't use libatomic is documented in the # comment starting "GCC is a house divided" in include/qemu/atomic128.h. - has_atomic128 = cc.links(atomic_test.format('unsigned __int128')) + # We only care about these operations on 16-byte aligned pointers, so + # force 16-byte alignment of the pointer, which may be greater than + # __alignof(unsigned __int128) for the host. 
+ atomic_test_128 = ''' + int main(int ac, char **av) { + unsigned __int128 *p = __builtin_assume_aligned(av[ac - 1], sizeof(16)); + p[1] = __atomic_load_n(&p[0], __ATOMIC_RELAXED); + __atomic_store_n(&p[2], p[3], __ATOMIC_RELAXED); + __atomic_compare_exchange_n(&p[4], &p[5], p[6], 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); + return 0; + }''' + has_atomic128 = cc.links(atomic_test_128) config_host_data.set('CONFIG_ATOMIC128', has_atomic128) if not has_atomic128 - has_cmpxchg128 = cc.links(''' - int main(void) - { - unsigned __int128 x = 0, y = 0; - __sync_val_compare_and_swap_16(&x, y, x); - return 0; - } - ''') + # Even with __builtin_assume_aligned, the above test may have failed + # without optimization enabled. Try again with optimizations locally + # enabled for the function. See + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389 + has_atomic128_opt = cc.links('__attribute__((optimize("O1")))' + atomic_test_128) + config_host_data.set('CONFIG_ATOMIC128_OPT', has_atomic128_opt) - config_host_data.set('CONFIG_CMPXCHG128', has_cmpxchg128) + if not has_atomic128_opt + config_host_data.set('CONFIG_CMPXCHG128', cc.links(''' + int main(void) + { + unsigned __int128 x = 0, y = 0; + __sync_val_compare_and_swap_16(&x, y, x); + return 0; + } + ''')) + endif endif endif From 6d3f2e3c64ac93ff6f7e286068091d5559df255c Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 6 Nov 2022 16:43:21 +1100 Subject: [PATCH 10/74] tcg/i386: Add have_atomic16 Notice when Intel or AMD have guaranteed that vmovdqa is atomic. The new variable will also be used in generated code. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- include/qemu/cpuid.h | 18 ++++++++++++++++++ tcg/i386/tcg-target.c.inc | 27 +++++++++++++++++++++++++++ tcg/i386/tcg-target.h | 1 + 3 files changed, 46 insertions(+) diff --git a/include/qemu/cpuid.h b/include/qemu/cpuid.h index 1451e8ef2f..35325f1995 100644 --- a/include/qemu/cpuid.h +++ b/include/qemu/cpuid.h @@ -71,6 +71,24 @@ #define bit_LZCNT (1 << 5) #endif +/* + * Signatures for different CPU implementations as returned from Leaf 0. + */ + +#ifndef signature_INTEL_ecx +/* "Genu" "ineI" "ntel" */ +#define signature_INTEL_ebx 0x756e6547 +#define signature_INTEL_edx 0x49656e69 +#define signature_INTEL_ecx 0x6c65746e +#endif + +#ifndef signature_AMD_ecx +/* "Auth" "enti" "cAMD" */ +#define signature_AMD_ebx 0x68747541 +#define signature_AMD_edx 0x69746e65 +#define signature_AMD_ecx 0x444d4163 +#endif + static inline unsigned xgetbv_low(unsigned c) { unsigned a, d; diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index 826f7764c9..911123cfa8 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -185,6 +185,7 @@ bool have_avx512dq; bool have_avx512vbmi2; bool have_avx512vl; bool have_movbe; +bool have_atomic16; #ifdef CONFIG_CPUID_H static bool have_bmi2; @@ -4026,6 +4027,32 @@ static void tcg_target_init(TCGContext *s) have_avx512dq = (b7 & bit_AVX512DQ) != 0; have_avx512vbmi2 = (c7 & bit_AVX512VBMI2) != 0; } + + /* + * The Intel SDM has added: + * Processors that enumerate support for Intel® AVX + * (by setting the feature flag CPUID.01H:ECX.AVX[bit 28]) + * guarantee that the 16-byte memory operations performed + * by the following instructions will always be carried + * out atomically: + * - MOVAPD, MOVAPS, and MOVDQA. + * - VMOVAPD, VMOVAPS, and VMOVDQA when encoded with VEX.128. + * - VMOVAPD, VMOVAPS, VMOVDQA32, and VMOVDQA64 when encoded + * with EVEX.128 and k0 (masking disabled). 
+ * Note that these instructions require the linear addresses + * of their memory operands to be 16-byte aligned. + * + * AMD has provided an even stronger guarantee that processors + * with AVX provide 16-byte atomicity for all cachable, + * naturally aligned single loads and stores, e.g. MOVDQU. + * + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104688 + */ + if (have_avx1) { + __cpuid(0, a, b, c, d); + have_atomic16 = (c == signature_INTEL_ecx || + c == signature_AMD_ecx); + } } } } diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h index d4f2a6f8c2..0421776cb8 100644 --- a/tcg/i386/tcg-target.h +++ b/tcg/i386/tcg-target.h @@ -120,6 +120,7 @@ extern bool have_avx512dq; extern bool have_avx512vbmi2; extern bool have_avx512vl; extern bool have_movbe; +extern bool have_atomic16; /* optional instructions */ #define TCG_TARGET_HAS_div2_i32 1 From b764941955d5ff13dd2760b59ce37d98941fc46b Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 6 Nov 2022 15:31:22 +1100 Subject: [PATCH 11/74] tcg/aarch64: Detect have_lse, have_lse2 for linux MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Notice when the host has additional atomic instructions. The new variables will also be used in generated code. Reviewed-by: Peter Maydell Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- tcg/aarch64/tcg-target.c.inc | 12 ++++++++++++ tcg/aarch64/tcg-target.h | 3 +++ 2 files changed, 15 insertions(+) diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index e6636c1f8b..fc551a3d10 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -13,6 +13,9 @@ #include "../tcg-ldst.c.inc" #include "../tcg-pool.c.inc" #include "qemu/bitops.h" +#ifdef __linux__ +#include +#endif /* We're going to re-use TCGType in setting of the SF bit, which controls the size of the operation performed. If we know the values match, it @@ -71,6 +74,9 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) return TCG_REG_X0 + slot; } +bool have_lse; +bool have_lse2; + #define TCG_REG_TMP TCG_REG_X30 #define TCG_VEC_TMP TCG_REG_V31 @@ -2899,6 +2905,12 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) static void tcg_target_init(TCGContext *s) { +#ifdef __linux__ + unsigned long hwcap = qemu_getauxval(AT_HWCAP); + have_lse = hwcap & HWCAP_ATOMICS; + have_lse2 = hwcap & HWCAP_USCAT; +#endif + tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu; tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu; tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull; diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h index c0b0f614ba..3c0b0d312d 100644 --- a/tcg/aarch64/tcg-target.h +++ b/tcg/aarch64/tcg-target.h @@ -57,6 +57,9 @@ typedef enum { #define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN #define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL +extern bool have_lse; +extern bool have_lse2; + /* optional instructions */ #define TCG_TARGET_HAS_div_i32 1 #define TCG_TARGET_HAS_rem_i32 1 From 1ce12a8c83f2dcd10aa324916dcf6f67cc58a882 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 15 Feb 2023 20:11:03 -0600 Subject: [PATCH 12/74] tcg/aarch64: Detect have_lse, have_lse2 for darwin MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These features are present for Apple M1. 
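For reference, the same keys the patch reads via sysctlbyname() can be
checked with a minimal standalone program; this is an illustration only,
not part of the patch, and the probe() helper name is made up for the
example:

    #include <stdio.h>
    #include <sys/sysctl.h>

    /* Return 1 if the named boolean sysctl exists and is nonzero. */
    static int probe(const char *name)
    {
        int val = 0;
        size_t len = sizeof(val);
        return sysctlbyname(name, &val, &len, NULL, 0) == 0 && val != 0;
    }

    int main(void)
    {
        printf("FEAT_LSE:  %d\n", probe("hw.optional.arm.FEAT_LSE"));
        printf("FEAT_LSE2: %d\n", probe("hw.optional.arm.FEAT_LSE2"));
        return 0;
    }
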
Tested-by: Philippe Mathieu-Daudé Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/aarch64/tcg-target.c.inc | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index fc551a3d10..c64606af5b 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -16,6 +16,9 @@ #ifdef __linux__ #include #endif +#ifdef CONFIG_DARWIN +#include +#endif /* We're going to re-use TCGType in setting of the SF bit, which controls the size of the operation performed. If we know the values match, it @@ -2903,6 +2906,27 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) } } +#ifdef CONFIG_DARWIN +static bool sysctl_for_bool(const char *name) +{ + int val = 0; + size_t len = sizeof(val); + + if (sysctlbyname(name, &val, &len, NULL, 0) == 0) { + return val != 0; + } + + /* + * We might in the future ask for properties not present in older kernels, + * but we're only asking about static properties, all of which should be + * 'int'. So we shouln't see ENOMEM (val too small), or any of the other + * more exotic errors. + */ + assert(errno == ENOENT); + return false; +} +#endif + static void tcg_target_init(TCGContext *s) { #ifdef __linux__ @@ -2910,6 +2934,10 @@ static void tcg_target_init(TCGContext *s) have_lse = hwcap & HWCAP_ATOMICS; have_lse2 = hwcap & HWCAP_USCAT; #endif +#ifdef CONFIG_DARWIN + have_lse = sysctl_for_bool("hw.optional.arm.FEAT_LSE"); + have_lse2 = sysctl_for_bool("hw.optional.arm.FEAT_LSE2"); +#endif tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu; tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu; From 30cc7a7e912481b5ba03df370d638f7ff4d7d6e1 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 7 Nov 2022 20:51:56 +1100 Subject: [PATCH 13/74] tcg/i386: Use full load/store helpers in user-only mode Instead of using helper_unaligned_{ld,st}, use the full load/store helpers. This will allow the fast path to increase alignment to implement atomicity while not immediately raising an alignment exception. 
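The principle, reduced to a standalone sketch (illustration only, not
QEMU code; load16() and its exact behaviour are assumptions made for
the example): the fast path may check for more alignment than the guest
demands so that an aligned access is atomic, and anything failing that
check drops to a slow path which still completes the access instead of
trapping.

    #include <stdint.h>
    #include <string.h>

    typedef unsigned __int128 u128;

    static u128 load16(const void *p)
    {
        if (((uintptr_t)p & 15) == 0) {
            /* Fast path: 16-byte aligned, atomic on capable hosts. */
            return __atomic_load_n((const u128 *)p, __ATOMIC_RELAXED);
        } else {
            /* Slow path: complete the access anyway, without atomicity. */
            u128 v;
            memcpy(&v, p, 16);
            return v;
        }
    }
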
Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/i386/tcg-target.c.inc | 52 +++------------------------------------ 1 file changed, 4 insertions(+), 48 deletions(-) diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index 911123cfa8..21553f3c39 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -1776,7 +1776,6 @@ typedef struct { int seg; } HostAddress; -#if defined(CONFIG_SOFTMMU) /* * Because i686 has no register parameters and because x86_64 has xchg * to handle addr/data register overlap, we have placed all input arguments @@ -1812,7 +1811,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) /* resolve label address */ tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4); - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { + if (label_ptr[1]) { tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4); } @@ -1834,7 +1833,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) /* resolve label address */ tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4); - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { + if (label_ptr[1]) { tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4); } @@ -1844,51 +1843,8 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) tcg_out_jmp(s, l->raddr); return true; } -#else -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - /* resolve label address */ - tcg_patch32(l->label_ptr[0], s->code_ptr - l->label_ptr[0] - 4); - - if (TCG_TARGET_REG_BITS == 32) { - int ofs = 0; - - tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); - ofs += 4; - - tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); - ofs += 4; - if (TARGET_LONG_BITS == 64) { - tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); - ofs += 4; - } - - tcg_out_pushi(s, (uintptr_t)l->raddr); - } else { - tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1], - l->addrlo_reg); - tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); - - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, (uintptr_t)l->raddr); - tcg_out_push(s, TCG_REG_RAX); - } - - /* "Tail call" to the helper, with the return address back inline. */ - tcg_out_jmp(s, (const void *)(l->is_ld ? helper_unaligned_ld - : helper_unaligned_st)); - return true; -} - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} +#ifndef CONFIG_SOFTMMU static HostAddress x86_guest_base = { .index = -1 }; @@ -1920,7 +1876,7 @@ static inline int setup_guest_base_seg(void) return 0; } #endif /* setup_guest_base_seg */ -#endif /* SOFTMMU */ +#endif /* !SOFTMMU */ /* * For softmmu, perform the TLB load and compare. From e9266ecd8f4aeef164e53da8f1731f4e01ba3dca Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 2 Apr 2023 21:01:53 +0000 Subject: [PATCH 14/74] tcg/aarch64: Use full load/store helpers in user-only mode Instead of using helper_unaligned_{ld,st}, use the full load/store helpers. This will allow the fast path to increase alignment to implement atomicity while not immediately raising an alignment exception. 
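As background for the 16-byte-boundary checks performed by the shared
atomicity helpers these slow paths call into, a minimal predicate for
"does this access stay within one 16-byte block" can be written as
below; this is a sketch only and the function name is not taken from
QEMU:

    #include <stdbool.h>
    #include <stdint.h>

    /* True if [addr, addr + size) lies in one aligned 16-byte block. */
    static inline bool within_16byte_block(uintptr_t addr, unsigned size)
    {
        return (addr >> 4) == ((addr + size - 1) >> 4);
    }
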
Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/aarch64/tcg-target.c.inc | 35 ----------------------------------- 1 file changed, 35 deletions(-) diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index c64606af5b..36d8798bca 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -1595,7 +1595,6 @@ typedef struct { TCGType index_ext; } HostAddress; -#ifdef CONFIG_SOFTMMU static const TCGLdstHelperParam ldst_helper_param = { .ntmp = 1, .tmp = { TCG_REG_TMP } }; @@ -1628,40 +1627,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) tcg_out_goto(s, lb->raddr); return true; } -#else -static void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target) -{ - ptrdiff_t offset = tcg_pcrel_diff(s, target); - tcg_debug_assert(offset == sextract64(offset, 0, 21)); - tcg_out_insn(s, 3406, ADR, rd, offset); -} - -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - if (!reloc_pc19(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; - } - - tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_X1, l->addrlo_reg); - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0); - - /* "Tail call" to the helper, with the return address back inline. */ - tcg_out_adr(s, TCG_REG_LR, l->raddr); - tcg_out_goto_long(s, (const void *)(l->is_ld ? helper_unaligned_ld - : helper_unaligned_st)); - return true; -} - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} -#endif /* CONFIG_SOFTMMU */ /* * For softmmu, perform the TLB load and compare. From 64d51dc31a5bdf5a0571ce48717a7daf1f7dc4aa Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 8 Apr 2023 08:36:25 -0700 Subject: [PATCH 15/74] tcg/ppc: Use full load/store helpers in user-only mode Instead of using helper_unaligned_{ld,st}, use the full load/store helpers. This will allow the fast path to increase alignment to implement atomicity while not immediately raising an alignment exception. 
Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.c.inc | 44 ---------------------------------------- 1 file changed, 44 deletions(-) diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc index 6a81916e64..218602c10c 100644 --- a/tcg/ppc/tcg-target.c.inc +++ b/tcg/ppc/tcg-target.c.inc @@ -1962,7 +1962,6 @@ static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = { [MO_BSWAP | MO_UQ] = STDBRX, }; -#if defined (CONFIG_SOFTMMU) static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg) { if (arg < 0) { @@ -2012,49 +2011,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) tcg_out_b(s, 0, lb->raddr); return true; } -#else -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - if (!reloc_pc14(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; - } - - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { - TCGReg arg = TCG_REG_R4; - - arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN); - if (l->addrlo_reg != arg) { - tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg); - tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg); - } else if (l->addrhi_reg != arg + 1) { - tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg); - tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg); - } else { - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R0, arg); - tcg_out_mov(s, TCG_TYPE_I32, arg, arg + 1); - tcg_out_mov(s, TCG_TYPE_I32, arg + 1, TCG_REG_R0); - } - } else { - tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R4, l->addrlo_reg); - } - tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, TCG_AREG0); - - /* "Tail call" to the helper, with the return address back inline. */ - tcg_out_call_int(s, 0, (const void *)(l->is_ld ? helper_unaligned_ld - : helper_unaligned_st)); - return true; -} - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} -#endif /* SOFTMMU */ typedef struct { TCGReg base; From 3fb4934d3446940866fdbbba370d84a22fd3a7e1 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 2 Apr 2023 22:05:39 +0000 Subject: [PATCH 16/74] tcg/loongarch64: Use full load/store helpers in user-only mode Instead of using helper_unaligned_{ld,st}, use the full load/store helpers. This will allow the fast path to increase alignment to implement atomicity while not immediately raising an alignment exception. 
Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/loongarch64/tcg-target.c.inc | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc index d1bc29826f..e651ec5c71 100644 --- a/tcg/loongarch64/tcg-target.c.inc +++ b/tcg/loongarch64/tcg-target.c.inc @@ -783,7 +783,6 @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, * Load/store helpers for SoftMMU, and qemu_ld/st implementations */ -#if defined(CONFIG_SOFTMMU) static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) { tcg_out_opc_b(s, 0); @@ -822,35 +821,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false); return tcg_out_goto(s, l->raddr); } -#else -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - /* resolve label address */ - if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; - } - - tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); - - /* tail call, with the return address back inline. */ - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr); - tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld - : helper_unaligned_st), true); - return true; -} - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -#endif /* CONFIG_SOFTMMU */ typedef struct { TCGReg base; From 9161e9ae2eb62655ce1e9c4ec67cd1ff69639e7f Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 2 Apr 2023 22:24:37 +0000 Subject: [PATCH 17/74] tcg/riscv: Use full load/store helpers in user-only mode Instead of using helper_unaligned_{ld,st}, use the full load/store helpers. This will allow the fast path to increase alignment to implement atomicity while not immediately raising an alignment exception. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/riscv/tcg-target.c.inc | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index 8ed0e2f210..19cd4507fb 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -846,7 +846,6 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) * Load/store and TLB */ -#if defined(CONFIG_SOFTMMU) static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) { tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0); @@ -893,34 +892,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) tcg_out_goto(s, l->raddr); return true; } -#else -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - /* resolve label address */ - if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; - } - - tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); - - /* tail call, with the return address back inline. */ - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr); - tcg_out_call_int(s, (const void *)(l->is_ld ? 
helper_unaligned_ld - : helper_unaligned_st), true); - return true; -} - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} -#endif /* CONFIG_SOFTMMU */ /* * For softmmu, perform the TLB load and compare. From 7212812263402605abb147e5b2468f523a1471ab Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 24 Apr 2023 12:31:46 +0100 Subject: [PATCH 18/74] tcg/arm: Adjust constraints on qemu_ld/st Always reserve r3 for tlb softmmu lookup. Fix a bug in user-only ALL_QLDST_REGS, in that r14 is clobbered by the BLNE that leads to the misaligned trap. Remove r0+r1 from user-only ALL_QLDST_REGS; I believe these had been reserved for bswap, which we no longer perform during qemu_st. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/arm/tcg-target-con-set.h | 16 ++++++++-------- tcg/arm/tcg-target-con-str.h | 5 ++--- tcg/arm/tcg-target.c.inc | 23 ++++++++--------------- 3 files changed, 18 insertions(+), 26 deletions(-) diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h index b8849b2478..229ae258ac 100644 --- a/tcg/arm/tcg-target-con-set.h +++ b/tcg/arm/tcg-target-con-set.h @@ -12,19 +12,19 @@ C_O0_I1(r) C_O0_I2(r, r) C_O0_I2(r, rIN) -C_O0_I2(s, s) +C_O0_I2(q, q) C_O0_I2(w, r) -C_O0_I3(s, s, s) -C_O0_I3(S, p, s) +C_O0_I3(q, q, q) +C_O0_I3(Q, p, q) C_O0_I4(r, r, rI, rI) -C_O0_I4(S, p, s, s) -C_O1_I1(r, l) +C_O0_I4(Q, p, q, q) +C_O1_I1(r, q) C_O1_I1(r, r) C_O1_I1(w, r) C_O1_I1(w, w) C_O1_I1(w, wr) C_O1_I2(r, 0, rZ) -C_O1_I2(r, l, l) +C_O1_I2(r, q, q) C_O1_I2(r, r, r) C_O1_I2(r, r, rI) C_O1_I2(r, r, rIK) @@ -39,8 +39,8 @@ C_O1_I2(w, w, wZ) C_O1_I3(w, w, w, w) C_O1_I4(r, r, r, rI, rI) C_O1_I4(r, r, rIN, rIK, 0) -C_O2_I1(e, p, l) -C_O2_I2(e, p, l, l) +C_O2_I1(e, p, q) +C_O2_I2(e, p, q, q) C_O2_I2(r, r, r, r) C_O2_I4(r, r, r, r, rIN, rIK) C_O2_I4(r, r, rI, rI, rIN, rIK) diff --git a/tcg/arm/tcg-target-con-str.h b/tcg/arm/tcg-target-con-str.h index 24b4b59feb..f83f1d3919 100644 --- a/tcg/arm/tcg-target-con-str.h +++ b/tcg/arm/tcg-target-con-str.h @@ -10,9 +10,8 @@ */ REGS('e', ALL_GENERAL_REGS & 0x5555) /* even regs */ REGS('r', ALL_GENERAL_REGS) -REGS('l', ALL_QLOAD_REGS) -REGS('s', ALL_QSTORE_REGS) -REGS('S', ALL_QSTORE_REGS & 0x5555) /* even qstore */ +REGS('q', ALL_QLDST_REGS) +REGS('Q', ALL_QLDST_REGS & 0x5555) /* even qldst */ REGS('w', ALL_VECTOR_REGS) /* diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index 8b0d526659..a02804dd69 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -353,23 +353,16 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, #define ALL_VECTOR_REGS 0xffff0000u /* - * r0-r2 will be overwritten when reading the tlb entry (softmmu only) - * and r0-r1 doing the byte swapping, so don't use these. - * r3 is removed for softmmu to avoid clashes with helper arguments. + * r0-r3 will be overwritten when reading the tlb entry (softmmu only); + * r14 will be overwritten by the BLNE branching to the slow path. 
*/ #ifdef CONFIG_SOFTMMU -#define ALL_QLOAD_REGS \ +#define ALL_QLDST_REGS \ (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \ (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \ (1 << TCG_REG_R14))) -#define ALL_QSTORE_REGS \ - (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \ - (1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \ - ((TARGET_LONG_BITS == 64) << TCG_REG_R3))) #else -#define ALL_QLOAD_REGS ALL_GENERAL_REGS -#define ALL_QSTORE_REGS \ - (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1))) +#define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_R14)) #endif /* @@ -2203,13 +2196,13 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) return C_O1_I4(r, r, r, rI, rI); case INDEX_op_qemu_ld_i32: - return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l); + return TARGET_LONG_BITS == 32 ? C_O1_I1(r, q) : C_O1_I2(r, q, q); case INDEX_op_qemu_ld_i64: - return TARGET_LONG_BITS == 32 ? C_O2_I1(e, p, l) : C_O2_I2(e, p, l, l); + return TARGET_LONG_BITS == 32 ? C_O2_I1(e, p, q) : C_O2_I2(e, p, q, q); case INDEX_op_qemu_st_i32: - return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s); + return TARGET_LONG_BITS == 32 ? C_O0_I2(q, q) : C_O0_I3(q, q, q); case INDEX_op_qemu_st_i64: - return TARGET_LONG_BITS == 32 ? C_O0_I3(S, p, s) : C_O0_I4(S, p, s, s); + return TARGET_LONG_BITS == 32 ? C_O0_I3(Q, p, q) : C_O0_I4(Q, p, q, q); case INDEX_op_st_vec: return C_O0_I2(w, r); From b6ee2453f63bf1b9fe5d3e20e00b128820b4902e Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 2 Apr 2023 20:48:47 -0700 Subject: [PATCH 19/74] tcg/arm: Use full load/store helpers in user-only mode Instead of using helper_unaligned_{ld,st}, use the full load/store helpers. This will allow the fast path to increase alignment to implement atomicity while not immediately raising an alignment exception. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/arm/tcg-target.c.inc | 45 ---------------------------------------- 1 file changed, 45 deletions(-) diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index a02804dd69..eb0542f32e 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -1325,7 +1325,6 @@ typedef struct { bool index_scratch; } HostAddress; -#ifdef CONFIG_SOFTMMU static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg) { /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */ @@ -1368,50 +1367,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]); return true; } -#else -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - if (!reloc_pc24(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; - } - - if (TARGET_LONG_BITS == 64) { - /* 64-bit target address is aligned into R2:R3. */ - TCGMovExtend ext[2] = { - { .dst = TCG_REG_R2, .dst_type = TCG_TYPE_I32, - .src = l->addrlo_reg, - .src_type = TCG_TYPE_I32, .src_ext = MO_UL }, - { .dst = TCG_REG_R3, .dst_type = TCG_TYPE_I32, - .src = l->addrhi_reg, - .src_type = TCG_TYPE_I32, .src_ext = MO_UL }, - }; - tcg_out_movext2(s, &ext[0], &ext[1], TCG_REG_TMP); - } else { - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, l->addrlo_reg); - } - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_AREG0); - - /* - * Tail call to the helper, with the return address back inline, - * just for the clarity of the debugging traceback -- the helper - * cannot return. We have used BLNE to arrive here, so LR is - * already set. 
- */ - tcg_out_goto(s, COND_AL, (const void *) - (l->is_ld ? helper_unaligned_ld : helper_unaligned_st)); - return true; -} - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} -#endif /* SOFTMMU */ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, TCGReg addrlo, TCGReg addrhi, From 30feb7ee4387f40e73a1e938388b60d76c90fc4f Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 2 Apr 2023 22:37:29 -0700 Subject: [PATCH 20/74] tcg/mips: Use full load/store helpers in user-only mode Instead of using helper_unaligned_{ld,st}, use the full load/store helpers. This will allow the fast path to increase alignment to implement atomicity while not immediately raising an alignment exception. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/mips/tcg-target.c.inc | 57 ++------------------------------------- 1 file changed, 2 insertions(+), 55 deletions(-) diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index 7770ef46bd..fa0f334e8d 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -1075,7 +1075,6 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg, tcg_out_nop(s); } -#if defined(CONFIG_SOFTMMU) /* We have four temps, we might as well expose three of them. */ static const TCGLdstHelperParam ldst_helper_param = { .ntmp = 3, .tmp = { TCG_TMP0, TCG_TMP1, TCG_TMP2 } @@ -1088,8 +1087,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) /* resolve label address */ if (!reloc_pc16(l->label_ptr[0], tgt_rx) - || (TCG_TARGET_REG_BITS < TARGET_LONG_BITS - && !reloc_pc16(l->label_ptr[1], tgt_rx))) { + || (l->label_ptr[1] && !reloc_pc16(l->label_ptr[1], tgt_rx))) { return false; } @@ -1118,8 +1116,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) /* resolve label address */ if (!reloc_pc16(l->label_ptr[0], tgt_rx) - || (TCG_TARGET_REG_BITS < TARGET_LONG_BITS - && !reloc_pc16(l->label_ptr[1], tgt_rx))) { + || (l->label_ptr[1] && !reloc_pc16(l->label_ptr[1], tgt_rx))) { return false; } @@ -1139,56 +1136,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) return true; } -#else -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - void *target; - - if (!reloc_pc16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { - return false; - } - - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { - /* A0 is env, A1 is skipped, A2:A3 is the uint64_t address. */ - TCGReg a2 = MIPS_BE ? l->addrhi_reg : l->addrlo_reg; - TCGReg a3 = MIPS_BE ? l->addrlo_reg : l->addrhi_reg; - - if (a3 != TCG_REG_A2) { - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2); - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3); - } else if (a2 != TCG_REG_A3) { - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3); - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2); - } else { - tcg_out_mov(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A2); - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, TCG_REG_A3); - tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, TCG_TMP0); - } - } else { - tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); - } - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); - - /* - * Tail call to the helper, with the return address back inline. - * We have arrived here via BNEL, so $31 is already set. - */ - target = (l->is_ld ? 
helper_unaligned_ld : helper_unaligned_st); - tcg_out_call_int(s, target, true); - return true; -} - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} -#endif /* SOFTMMU */ - typedef struct { TCGReg base; MemOp align; From 3df73c7e393b8190372f2d276b7ebc523199cfa1 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 24 Apr 2023 14:11:35 +0100 Subject: [PATCH 21/74] tcg/s390x: Use full load/store helpers in user-only mode Instead of using helper_unaligned_{ld,st}, use the full load/store helpers. This will allow the fast path to increase alignment to implement atomicity while not immediately raising an alignment exception. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/s390x/tcg-target.c.inc | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc index 968977be98..de8aed5f77 100644 --- a/tcg/s390x/tcg-target.c.inc +++ b/tcg/s390x/tcg-target.c.inc @@ -1679,7 +1679,6 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, } } -#if defined(CONFIG_SOFTMMU) static const TCGLdstHelperParam ldst_helper_param = { .ntmp = 1, .tmp = { TCG_TMP0 } }; @@ -1716,34 +1715,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); return true; } -#else -static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) -{ - if (!patch_reloc(l->label_ptr[0], R_390_PC16DBL, - (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) { - return false; - } - - tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, l->addrlo_reg); - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); - - /* "Tail call" to the helper, with the return address back inline. */ - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R14, (uintptr_t)l->raddr); - tgen_gotoi(s, S390_CC_ALWAYS, (const void *)(l->is_ld ? helper_unaligned_ld - : helper_unaligned_st)); - return true; -} - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} - -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - return tcg_out_fail_alignment(s, l); -} -#endif /* CONFIG_SOFTMMU */ /* * For softmmu, perform the TLB load and compare. From 33982b890b057319daf5a9a0678dbffc80ed8880 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 24 Apr 2023 02:38:03 -0500 Subject: [PATCH 22/74] tcg/sparc64: Allocate %g2 as a third temporary Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/sparc64/tcg-target.c.inc | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index e997db2645..64464ab363 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -83,9 +83,10 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) #define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) -/* Define some temporary registers. T2 is used for constant generation. */ +/* Define some temporary registers. T3 is used for constant generation. 
*/ #define TCG_REG_T1 TCG_REG_G1 -#define TCG_REG_T2 TCG_REG_O7 +#define TCG_REG_T2 TCG_REG_G2 +#define TCG_REG_T3 TCG_REG_O7 #ifndef CONFIG_SOFTMMU # define TCG_GUEST_BASE_REG TCG_REG_I5 @@ -110,7 +111,6 @@ static const int tcg_target_reg_alloc_order[] = { TCG_REG_I4, TCG_REG_I5, - TCG_REG_G2, TCG_REG_G3, TCG_REG_G4, TCG_REG_G5, @@ -492,8 +492,8 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg) { - tcg_debug_assert(ret != TCG_REG_T2); - tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2); + tcg_debug_assert(ret != TCG_REG_T3); + tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T3); } static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) @@ -885,10 +885,8 @@ static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest, { uintptr_t desti = (uintptr_t)dest; - /* Be careful not to clobber %o7 for a tail call. */ tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1, - desti & ~0xfff, in_prologue, - tail_call ? TCG_REG_G2 : TCG_REG_O7); + desti & ~0xfff, in_prologue, TCG_REG_T2); tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL); } @@ -1856,6 +1854,7 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_T3); /* for internal use */ } #define ELF_HOST_MACHINE EM_SPARCV9 From 8b14f8627cd0ea5cd038e80a8dccde0c3d57458f Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 24 Apr 2023 02:51:42 -0500 Subject: [PATCH 23/74] tcg/sparc64: Rename tcg_out_movi_imm13 to tcg_out_movi_s13 Emphasize that the constant is signed. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/sparc64/tcg-target.c.inc | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index 64464ab363..15d6a9fd73 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -399,7 +399,8 @@ static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg) tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10)); } -static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg) +/* A 13-bit constant sign-extended to 64 bits. */ +static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg) { tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR); } @@ -408,7 +409,7 @@ static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg) { if (check_fit_i32(arg, 13)) { /* A 13-bit constant sign-extended to 64-bits. */ - tcg_out_movi_imm13(s, ret, arg); + tcg_out_movi_s13(s, ret, arg); } else { /* A 32-bit constant zero-extended to 64 bits. */ tcg_out_sethi(s, ret, arg); @@ -433,7 +434,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, /* A 13-bit constant sign-extended to 64-bits. 
*/ if (check_fit_tl(arg, 13)) { - tcg_out_movi_imm13(s, ret, arg); + tcg_out_movi_s13(s, ret, arg); return; } @@ -767,7 +768,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, default: tcg_out_cmp(s, c1, c2, c2const); - tcg_out_movi_imm13(s, ret, 0); + tcg_out_movi_s13(s, ret, 0); tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1); return; } @@ -803,11 +804,11 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, /* For 64-bit signed comparisons vs zero, we can avoid the compare if the input does not overlap the output. */ if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) { - tcg_out_movi_imm13(s, ret, 0); + tcg_out_movi_s13(s, ret, 0); tcg_out_movr(s, cond, ret, c1, 1, 1); } else { tcg_out_cmp(s, c1, c2, c2const); - tcg_out_movi_imm13(s, ret, 0); + tcg_out_movi_s13(s, ret, 0); tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1); } } @@ -844,7 +845,7 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, if (use_vis3_instructions && !is_sub) { /* Note that ADDXC doesn't accept immediates. */ if (bhconst && bh != 0) { - tcg_out_movi_imm13(s, TCG_REG_T2, bh); + tcg_out_movi_s13(s, TCG_REG_T2, bh); bh = TCG_REG_T2; } tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC); @@ -866,7 +867,7 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, * so the adjustment fits 12 bits. */ if (bhconst) { - tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1)); + tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1)); } else { tcg_out_arithi(s, TCG_REG_T2, bh, 1, is_sub ? ARITH_SUB : ARITH_ADD); @@ -1036,7 +1037,7 @@ static void tcg_target_qemu_prologue(TCGContext *s) tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); /* delay slot */ - tcg_out_movi_imm13(s, TCG_REG_O0, 0); + tcg_out_movi_s13(s, TCG_REG_O0, 0); build_trampolines(s); } @@ -1430,7 +1431,7 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) { if (check_fit_ptr(a0, 13)) { tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); - tcg_out_movi_imm13(s, TCG_REG_O0, a0); + tcg_out_movi_s13(s, TCG_REG_O0, a0); return; } else { intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0); From ca0681c9419cde2fa4e77950ce1025af8f280adb Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 8 May 2023 16:23:00 +0100 Subject: [PATCH 24/74] target/sparc64: Remove tcg_out_movi_s13 case from tcg_out_movi_imm32 Shuffle the order in tcg_out_movi_int to check s13 first, and drop this check from tcg_out_movi_imm32. This might make the sequence for in_prologue larger, but not worth worrying about. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/sparc64/tcg-target.c.inc | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index 15d6a9fd73..2689599fd6 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -407,15 +407,10 @@ static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg) static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg) { - if (check_fit_i32(arg, 13)) { - /* A 13-bit constant sign-extended to 64-bits. */ - tcg_out_movi_s13(s, ret, arg); - } else { - /* A 32-bit constant zero-extended to 64 bits. */ - tcg_out_sethi(s, ret, arg); - if (arg & 0x3ff) { - tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR); - } + /* A 32-bit constant zero-extended to 64 bits. 
*/ + tcg_out_sethi(s, ret, arg); + if (arg & 0x3ff) { + tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR); } } @@ -426,18 +421,18 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long hi, lo = (int32_t)arg; tcg_target_long test, lsb; - /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */ - if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) { - tcg_out_movi_imm32(s, ret, arg); - return; - } - /* A 13-bit constant sign-extended to 64-bits. */ if (check_fit_tl(arg, 13)) { tcg_out_movi_s13(s, ret, arg); return; } + /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */ + if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) { + tcg_out_movi_imm32(s, ret, arg); + return; + } + /* A 13-bit constant relative to the TB. */ if (!in_prologue) { test = tcg_tbrel_diff(s, (void *)arg); From 2cb3f794b6d903f18f84bc0c223ec5c40ffd7064 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 24 Apr 2023 03:00:55 -0500 Subject: [PATCH 25/74] tcg/sparc64: Rename tcg_out_movi_imm32 to tcg_out_movi_u32 Emphasize that the constant is unsigned. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/sparc64/tcg-target.c.inc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index 2689599fd6..e244209890 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -405,9 +405,9 @@ static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg) tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR); } -static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg) +/* A 32-bit constant zero-extended to 64 bits. */ +static void tcg_out_movi_u32(TCGContext *s, TCGReg ret, uint32_t arg) { - /* A 32-bit constant zero-extended to 64 bits. */ tcg_out_sethi(s, ret, arg); if (arg & 0x3ff) { tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR); @@ -429,7 +429,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */ if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) { - tcg_out_movi_imm32(s, ret, arg); + tcg_out_movi_u32(s, ret, arg); return; } @@ -473,13 +473,13 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, /* A 64-bit constant decomposed into 2 32-bit pieces. */ if (check_fit_i32(lo, 13)) { hi = (arg - lo) >> 32; - tcg_out_movi_imm32(s, ret, hi); + tcg_out_movi_u32(s, ret, hi); tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); tcg_out_arithi(s, ret, ret, lo, ARITH_ADD); } else { hi = arg >> 32; - tcg_out_movi_imm32(s, ret, hi); - tcg_out_movi_imm32(s, scratch, lo); + tcg_out_movi_u32(s, ret, hi); + tcg_out_movi_u32(s, scratch, lo); tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); tcg_out_arith(s, ret, ret, scratch, ARITH_OR); } From 1a42d9d472b61e4db2fb16800495d402cb9b94af Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 24 Apr 2023 03:11:38 -0500 Subject: [PATCH 26/74] tcg/sparc64: Split out tcg_out_movi_s32 Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/sparc64/tcg-target.c.inc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index e244209890..4375a06377 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -405,6 +405,13 @@ static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg) tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR); } +/* A 32-bit constant sign-extended to 64 bits. 
*/ +static void tcg_out_movi_s32(TCGContext *s, TCGReg ret, int32_t arg) +{ + tcg_out_sethi(s, ret, ~arg); + tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR); +} + /* A 32-bit constant zero-extended to 64 bits. */ static void tcg_out_movi_u32(TCGContext *s, TCGReg ret, uint32_t arg) { @@ -444,8 +451,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, /* A 32-bit constant sign-extended to 64-bits. */ if (arg == lo) { - tcg_out_sethi(s, ret, ~arg); - tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR); + tcg_out_movi_s32(s, ret, arg); return; } From 2908650333ec463031632baa6ab2c9609732056d Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Fri, 7 Apr 2023 19:45:31 -0500 Subject: [PATCH 27/74] tcg/sparc64: Use standard slow path for softmmu Drop the target-specific trampolines for the standard slow path. This lets us use tcg_out_helper_{ld,st}_args, and handles the new atomicity bits within MemOp. At the same time, use the full load/store helpers for user-only mode. Drop inline unaligned access support for user-only mode, as it does not handle atomicity. Use TCG_REG_T[1-3] in the tlb lookup, instead of TCG_REG_O[0-2]. This allows the constraints to be simplified. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/sparc64/tcg-target-con-set.h | 2 - tcg/sparc64/tcg-target-con-str.h | 1 - tcg/sparc64/tcg-target.c.inc | 610 +++++++++---------------------- tcg/sparc64/tcg-target.h | 1 + 4 files changed, 182 insertions(+), 432 deletions(-) diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h index 31e6fea1fc..434bf25072 100644 --- a/tcg/sparc64/tcg-target-con-set.h +++ b/tcg/sparc64/tcg-target-con-set.h @@ -12,8 +12,6 @@ C_O0_I1(r) C_O0_I2(rZ, r) C_O0_I2(rZ, rJ) -C_O0_I2(sZ, s) -C_O1_I1(r, s) C_O1_I1(r, r) C_O1_I2(r, r, r) C_O1_I2(r, rZ, rJ) diff --git a/tcg/sparc64/tcg-target-con-str.h b/tcg/sparc64/tcg-target-con-str.h index 8f5c7aef97..0577ec4942 100644 --- a/tcg/sparc64/tcg-target-con-str.h +++ b/tcg/sparc64/tcg-target-con-str.h @@ -9,7 +9,6 @@ * REGS(letter, register_mask) */ REGS('r', ALL_GENERAL_REGS) -REGS('s', ALL_QLDST_REGS) /* * Define constraint letters for constants: diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index 4375a06377..0237188d65 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -27,6 +27,7 @@ #error "unsupported code generation mode" #endif +#include "../tcg-ldst.c.inc" #include "../tcg-pool.c.inc" #ifdef CONFIG_DEBUG_TCG @@ -70,18 +71,7 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { #define TCG_CT_CONST_S13 0x200 #define TCG_CT_CONST_ZERO 0x400 -/* - * For softmmu, we need to avoid conflicts with the first 3 - * argument registers to perform the tlb lookup, and to call - * the helper function. - */ -#ifdef CONFIG_SOFTMMU -#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3) -#else -#define SOFTMMU_RESERVE_REGS 0 -#endif -#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) -#define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) +#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) /* Define some temporary registers. T3 is used for constant generation. 
*/ #define TCG_REG_T1 TCG_REG_G1 @@ -918,82 +908,6 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL)); } -#ifdef CONFIG_SOFTMMU -static const tcg_insn_unit *qemu_ld_trampoline[MO_SSIZE + 1]; -static const tcg_insn_unit *qemu_st_trampoline[MO_SIZE + 1]; - -static void build_trampolines(TCGContext *s) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) { - if (qemu_ld_helpers[i] == NULL) { - continue; - } - - /* May as well align the trampoline. */ - while ((uintptr_t)s->code_ptr & 15) { - tcg_out_nop(s); - } - qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr); - - /* Set the retaddr operand. */ - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7); - /* Tail call. */ - tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true); - /* delay slot -- set the env argument */ - tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); - } - - for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) { - if (qemu_st_helpers[i] == NULL) { - continue; - } - - /* May as well align the trampoline. */ - while ((uintptr_t)s->code_ptr & 15) { - tcg_out_nop(s); - } - qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr); - - /* Set the retaddr operand. */ - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7); - - /* Tail call. */ - tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true); - /* delay slot -- set the env argument */ - tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); - } -} -#else -static const tcg_insn_unit *qemu_unalign_ld_trampoline; -static const tcg_insn_unit *qemu_unalign_st_trampoline; - -static void build_trampolines(TCGContext *s) -{ - for (int ld = 0; ld < 2; ++ld) { - void *helper; - - while ((uintptr_t)s->code_ptr & 15) { - tcg_out_nop(s); - } - - if (ld) { - helper = helper_unaligned_ld; - qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr); - } else { - helper = helper_unaligned_st; - qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr); - } - - /* Tail call. */ - tcg_out_jmpl_const(s, helper, true, true); - /* delay slot -- set the env argument */ - tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); - } -} -#endif - /* Generate global QEMU prologue and epilogue code */ static void tcg_target_qemu_prologue(TCGContext *s) { @@ -1039,8 +953,6 @@ static void tcg_target_qemu_prologue(TCGContext *s) tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); /* delay slot */ tcg_out_movi_s13(s, TCG_REG_O0, 0); - - build_trampolines(s); } static void tcg_out_nop_fill(tcg_insn_unit *p, int count) @@ -1051,381 +963,224 @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count) } } -#if defined(CONFIG_SOFTMMU) +static const TCGLdstHelperParam ldst_helper_param = { + .ntmp = 1, .tmp = { TCG_REG_T1 } +}; -/* We expect to use a 13-bit negative offset from ENV. */ -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); -QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12)); - -/* Perform the TLB load and compare. - - Inputs: - ADDRLO and ADDRHI contain the possible two parts of the address. - - MEM_INDEX and S_BITS are the memory context and log2 size of the load. - - WHICH is the offset into the CPUTLBEntry structure of the slot to read. - This should be offsetof addr_read or addr_write. - - The result of the TLB comparison is in %[ix]cc. The sanitized address - is in the returned register, maybe %o0. The TLB addend is in %o1. 
*/ - -static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, - MemOp opc, int which) +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { + MemOp opc = get_memop(lb->oi); + MemOp sgn; + + if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19, + (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) { + return false; + } + + /* Use inline tcg_out_ext32s; otherwise let the helper sign-extend. */ + sgn = (opc & MO_SIZE) < MO_32 ? MO_SIGN : 0; + + tcg_out_ld_helper_args(s, lb, &ldst_helper_param); + tcg_out_call(s, qemu_ld_helpers[opc & (MO_SIZE | sgn)], NULL); + tcg_out_ld_helper_ret(s, lb, sgn, &ldst_helper_param); + + tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0); + return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19, + (intptr_t)lb->raddr, 0); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + MemOp opc = get_memop(lb->oi); + + if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19, + (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) { + return false; + } + + tcg_out_st_helper_args(s, lb, &ldst_helper_param); + tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE], NULL); + + tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0); + return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19, + (intptr_t)lb->raddr, 0); +} + +typedef struct { + TCGReg base; + TCGReg index; +} HostAddress; + +/* + * For softmmu, perform the TLB load and compare. + * For useronly, perform any required alignment tests. + * In both cases, return a TCGLabelQemuLdst structure if the slow path + * is required and fill in @h with the host address for the fast path. + */ +static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, + TCGReg addr_reg, MemOpIdx oi, + bool is_ld) +{ + TCGLabelQemuLdst *ldst = NULL; + MemOp opc = get_memop(oi); + unsigned a_bits = get_alignment_bits(opc); + unsigned s_bits = opc & MO_SIZE; + unsigned a_mask; + + /* We don't support unaligned accesses. */ + a_bits = MAX(a_bits, s_bits); + a_mask = (1u << a_bits) - 1; + +#ifdef CONFIG_SOFTMMU + int mem_index = get_mmuidx(oi); int fast_off = TLB_MASK_TABLE_OFS(mem_index); int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); int table_off = fast_off + offsetof(CPUTLBDescFast, table); - const TCGReg r0 = TCG_REG_O0; - const TCGReg r1 = TCG_REG_O1; - const TCGReg r2 = TCG_REG_O2; - unsigned s_bits = opc & MO_SIZE; - unsigned a_bits = get_alignment_bits(opc); - tcg_target_long compare_mask; + int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write); + int add_off = offsetof(CPUTLBEntry, addend); + int compare_mask; + int cc; /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ - tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off); - tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off); + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12)); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T2, TCG_AREG0, mask_off); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T3, TCG_AREG0, table_off); /* Extract the page index, shifted into place for tlb index. */ - tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, - SHIFT_SRL); - tcg_out_arith(s, r2, r2, r0, ARITH_AND); + tcg_out_arithi(s, TCG_REG_T1, addr_reg, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL); + tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND); /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. 
*/ - tcg_out_arith(s, r2, r2, r1, ARITH_ADD); + tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T3, ARITH_ADD); - /* Load the tlb comparator and the addend. */ - tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which); - tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend)); + /* Load the tlb comparator and the addend. */ + tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_T2, TCG_REG_T1, cmp_off); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T1, TCG_REG_T1, add_off); + h->base = TCG_REG_T1; - /* Mask out the page offset, except for the required alignment. - We don't support unaligned accesses. */ - if (a_bits < s_bits) { - a_bits = s_bits; - } - compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1); + /* Mask out the page offset, except for the required alignment. */ + compare_mask = TARGET_PAGE_MASK | a_mask; if (check_fit_tl(compare_mask, 13)) { - tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND); + tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND); } else { - tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask); - tcg_out_arith(s, r2, addr, r2, ARITH_AND); + tcg_out_movi_s32(s, TCG_REG_T3, compare_mask); + tcg_out_arith(s, TCG_REG_T3, addr_reg, TCG_REG_T3, ARITH_AND); } - tcg_out_cmp(s, r0, r2, 0); + tcg_out_cmp(s, TCG_REG_T2, TCG_REG_T3, 0); - /* If the guest address must be zero-extended, do so now. */ + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addr_reg; + ldst->label_ptr[0] = s->code_ptr; + + /* bne,pn %[xi]cc, label0 */ + cc = TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC; + tcg_out_bpcc0(s, COND_NE, BPCC_PN | cc, 0); +#else + if (a_bits != s_bits) { + /* + * Test for at least natural alignment, and defer + * everything else to the helper functions. + */ + tcg_debug_assert(check_fit_tl(a_mask, 13)); + tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC); + + ldst = new_ldst_label(s); + ldst->is_ld = is_ld; + ldst->oi = oi; + ldst->addrlo_reg = addr_reg; + ldst->label_ptr[0] = s->code_ptr; + + /* bne,pn %icc, label0 */ + tcg_out_bpcc0(s, COND_NE, BPCC_PN | BPCC_ICC, 0); + } + h->base = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0; +#endif + + /* If the guest address must be zero-extended, do in the delay slot. 
*/ if (TARGET_LONG_BITS == 32) { - tcg_out_ext32u(s, r0, addr); - return r0; + tcg_out_ext32u(s, TCG_REG_T2, addr_reg); + h->index = TCG_REG_T2; + } else { + if (ldst) { + tcg_out_nop(s); + } + h->index = addr_reg; } - return addr; + return ldst; } -#endif /* CONFIG_SOFTMMU */ - -static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = { - [MO_UB] = LDUB, - [MO_SB] = LDSB, - [MO_UB | MO_LE] = LDUB, - [MO_SB | MO_LE] = LDSB, - - [MO_BEUW] = LDUH, - [MO_BESW] = LDSH, - [MO_BEUL] = LDUW, - [MO_BESL] = LDSW, - [MO_BEUQ] = LDX, - [MO_BESQ] = LDX, - - [MO_LEUW] = LDUH_LE, - [MO_LESW] = LDSH_LE, - [MO_LEUL] = LDUW_LE, - [MO_LESL] = LDSW_LE, - [MO_LEUQ] = LDX_LE, - [MO_LESQ] = LDX_LE, -}; - -static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = { - [MO_UB] = STB, - - [MO_BEUW] = STH, - [MO_BEUL] = STW, - [MO_BEUQ] = STX, - - [MO_LEUW] = STH_LE, - [MO_LEUL] = STW_LE, - [MO_LEUQ] = STX_LE, -}; static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, MemOpIdx oi, TCGType data_type) { - MemOp memop = get_memop(oi); - tcg_insn_unit *label_ptr; + static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = { + [MO_UB] = LDUB, + [MO_SB] = LDSB, + [MO_UB | MO_LE] = LDUB, + [MO_SB | MO_LE] = LDSB, -#ifdef CONFIG_SOFTMMU - unsigned memi = get_mmuidx(oi); - TCGReg addrz; - const tcg_insn_unit *func; + [MO_BEUW] = LDUH, + [MO_BESW] = LDSH, + [MO_BEUL] = LDUW, + [MO_BESL] = LDSW, + [MO_BEUQ] = LDX, + [MO_BESQ] = LDX, - addrz = tcg_out_tlb_load(s, addr, memi, memop, - offsetof(CPUTLBEntry, addr_read)); + [MO_LEUW] = LDUH_LE, + [MO_LESW] = LDSH_LE, + [MO_LEUL] = LDUW_LE, + [MO_LESL] = LDSW_LE, + [MO_LEUQ] = LDX_LE, + [MO_LESQ] = LDX_LE, + }; - /* The fast path is exactly one insn. Thus we can perform the - entire TLB Hit in the (annulled) delay slot of the branch - over the TLB Miss case. */ + TCGLabelQemuLdst *ldst; + HostAddress h; - /* beq,a,pt %[xi]cc, label0 */ - label_ptr = s->code_ptr; - tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT - | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); - /* delay slot */ - tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, - qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); + ldst = prepare_host_addr(s, &h, addr, oi, true); - /* TLB Miss. */ + tcg_out_ldst_rr(s, data, h.base, h.index, + ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]); - tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz); - - /* We use the helpers to extend SB and SW data, leaving the case - of SL needing explicit extending below. */ - if ((memop & MO_SSIZE) == MO_SL) { - func = qemu_ld_trampoline[MO_UL]; - } else { - func = qemu_ld_trampoline[memop & MO_SSIZE]; + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = data; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - tcg_debug_assert(func != NULL); - tcg_out_call_nodelay(s, func, false); - /* delay slot */ - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi); - - /* We let the helper sign-extend SB and SW, but leave SL for here. */ - if ((memop & MO_SSIZE) == MO_SL) { - tcg_out_ext32s(s, data, TCG_REG_O0); - } else { - tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0); - } - - *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); -#else - TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0); - unsigned a_bits = get_alignment_bits(memop); - unsigned s_bits = memop & MO_SIZE; - unsigned t_bits; - - if (TARGET_LONG_BITS == 32) { - tcg_out_ext32u(s, TCG_REG_T1, addr); - addr = TCG_REG_T1; - } - - /* - * Normal case: alignment equal to access size. 
- */ - if (a_bits == s_bits) { - tcg_out_ldst_rr(s, data, addr, index, - qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); - return; - } - - /* - * Test for at least natural alignment, and assume most accesses - * will be aligned -- perform a straight load in the delay slot. - * This is required to preserve atomicity for aligned accesses. - */ - t_bits = MAX(a_bits, s_bits); - tcg_debug_assert(t_bits < 13); - tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC); - - /* beq,a,pt %icc, label */ - label_ptr = s->code_ptr; - tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0); - /* delay slot */ - tcg_out_ldst_rr(s, data, addr, index, - qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); - - if (a_bits >= s_bits) { - /* - * Overalignment: A successful alignment test will perform the memory - * operation in the delay slot, and failure need only invoke the - * handler for SIGBUS. - */ - tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false); - /* delay slot -- move to low part of argument reg */ - tcg_out_mov_delay(s, TCG_REG_O1, addr); - } else { - /* Underalignment: load by pieces of minimum alignment. */ - int ld_opc, a_size, s_size, i; - - /* - * Force full address into T1 early; avoids problems with - * overlap between @addr and @data. - */ - tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD); - - a_size = 1 << a_bits; - s_size = 1 << s_bits; - if ((memop & MO_BSWAP) == MO_BE) { - ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)]; - tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc); - ld_opc = qemu_ld_opc[a_bits | MO_BE]; - for (i = a_size; i < s_size; i += a_size) { - tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc); - tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX); - tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); - } - } else if (a_bits == 0) { - ld_opc = LDUB; - tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc); - for (i = a_size; i < s_size; i += a_size) { - if ((memop & MO_SIGN) && i == s_size - a_size) { - ld_opc = LDSB; - } - tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc); - tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX); - tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); - } - } else { - ld_opc = qemu_ld_opc[a_bits | MO_LE]; - tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc); - for (i = a_size; i < s_size; i += a_size) { - tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD); - if ((memop & MO_SIGN) && i == s_size - a_size) { - ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN]; - } - tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc); - tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX); - tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); - } - } - } - - *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); -#endif /* CONFIG_SOFTMMU */ } static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, MemOpIdx oi, TCGType data_type) { - MemOp memop = get_memop(oi); - tcg_insn_unit *label_ptr; + static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = { + [MO_UB] = STB, -#ifdef CONFIG_SOFTMMU - unsigned memi = get_mmuidx(oi); - TCGReg addrz; - const tcg_insn_unit *func; + [MO_BEUW] = STH, + [MO_BEUL] = STW, + [MO_BEUQ] = STX, - addrz = tcg_out_tlb_load(s, addr, memi, memop, - offsetof(CPUTLBEntry, addr_write)); + [MO_LEUW] = STH_LE, + [MO_LEUL] = STW_LE, + [MO_LEUQ] = STX_LE, + }; - /* The fast path is exactly one insn. Thus we can perform the entire - TLB Hit in the (annulled) delay slot of the branch over TLB Miss. 
*/ - /* beq,a,pt %[xi]cc, label0 */ - label_ptr = s->code_ptr; - tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT - | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); - /* delay slot */ - tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, - qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); + TCGLabelQemuLdst *ldst; + HostAddress h; - /* TLB Miss. */ + ldst = prepare_host_addr(s, &h, addr, oi, false); - tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz); - tcg_out_movext(s, (memop & MO_SIZE) == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, - TCG_REG_O2, data_type, memop & MO_SIZE, data); + tcg_out_ldst_rr(s, data, h.base, h.index, + st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]); - func = qemu_st_trampoline[memop & MO_SIZE]; - tcg_debug_assert(func != NULL); - tcg_out_call_nodelay(s, func, false); - /* delay slot */ - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi); - - *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); -#else - TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0); - unsigned a_bits = get_alignment_bits(memop); - unsigned s_bits = memop & MO_SIZE; - unsigned t_bits; - - if (TARGET_LONG_BITS == 32) { - tcg_out_ext32u(s, TCG_REG_T1, addr); - addr = TCG_REG_T1; + if (ldst) { + ldst->type = data_type; + ldst->datalo_reg = data; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } - - /* - * Normal case: alignment equal to access size. - */ - if (a_bits == s_bits) { - tcg_out_ldst_rr(s, data, addr, index, - qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); - return; - } - - /* - * Test for at least natural alignment, and assume most accesses - * will be aligned -- perform a straight store in the delay slot. - * This is required to preserve atomicity for aligned accesses. - */ - t_bits = MAX(a_bits, s_bits); - tcg_debug_assert(t_bits < 13); - tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC); - - /* beq,a,pt %icc, label */ - label_ptr = s->code_ptr; - tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0); - /* delay slot */ - tcg_out_ldst_rr(s, data, addr, index, - qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); - - if (a_bits >= s_bits) { - /* - * Overalignment: A successful alignment test will perform the memory - * operation in the delay slot, and failure need only invoke the - * handler for SIGBUS. - */ - tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false); - /* delay slot -- move to low part of argument reg */ - tcg_out_mov_delay(s, TCG_REG_O1, addr); - } else { - /* Underalignment: store by pieces of minimum alignment. */ - int st_opc, a_size, s_size, i; - - /* - * Force full address into T1 early; avoids problems with - * overlap between @addr and @data. - */ - tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD); - - a_size = 1 << a_bits; - s_size = 1 << s_bits; - if ((memop & MO_BSWAP) == MO_BE) { - st_opc = qemu_st_opc[a_bits | MO_BE]; - for (i = 0; i < s_size; i += a_size) { - TCGReg d = data; - int shift = (s_size - a_size - i) * 8; - if (shift) { - d = TCG_REG_T2; - tcg_out_arithi(s, d, data, shift, SHIFT_SRLX); - } - tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc); - } - } else if (a_bits == 0) { - tcg_out_ldst(s, data, TCG_REG_T1, 0, STB); - for (i = 1; i < s_size; i++) { - tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX); - tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB); - } - } else { - /* Note that ST*A with immediate asi must use indexed address. 
*/ - st_opc = qemu_st_opc[a_bits + MO_LE]; - tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc); - for (i = a_size; i < s_size; i += a_size) { - tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX); - tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD); - tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc); - } - } - } - - *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); -#endif /* CONFIG_SOFTMMU */ } static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) @@ -1744,6 +1499,8 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_extu_i32_i64: case INDEX_op_extrl_i64_i32: case INDEX_op_extrh_i64_i32: + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: return C_O1_I1(r, r); case INDEX_op_st8_i32: @@ -1753,6 +1510,8 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_st_i32: case INDEX_op_st32_i64: case INDEX_op_st_i64: + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_i64: return C_O0_I2(rZ, r); case INDEX_op_add_i32: @@ -1802,13 +1561,6 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_muluh_i64: return C_O1_I2(r, r, r); - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: - return C_O1_I1(r, s); - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: - return C_O0_I2(sZ, s); - default: g_assert_not_reached(); } diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h index ffe22b1d21..7434cc99d4 100644 --- a/tcg/sparc64/tcg-target.h +++ b/tcg/sparc64/tcg-target.h @@ -155,6 +155,7 @@ extern bool use_vis3_instructions; #define TCG_TARGET_DEFAULT_MO (0) #define TCG_TARGET_HAS_MEMORY_BSWAP 1 +#define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS #endif From a0d99b3f47a4fa0ccd07a9a8280b272167d18c15 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 3 Apr 2023 07:08:48 +0000 Subject: [PATCH 28/74] accel/tcg: Remove helper_unaligned_{ld,st} These functions are now unused. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- accel/tcg/user-exec.c | 10 ---------- include/tcg/tcg-ldst.h | 6 ------ 2 files changed, 16 deletions(-) diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 8f86254eb4..7b824dcde8 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -889,16 +889,6 @@ void page_reset_target_data(target_ulong start, target_ulong last) { } /* The softmmu versions of these helpers are in cputlb.c. 
*/ -void helper_unaligned_ld(CPUArchState *env, target_ulong addr) -{ - cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC()); -} - -void helper_unaligned_st(CPUArchState *env, target_ulong addr) -{ - cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC()); -} - static void *cpu_mmu_lookup(CPUArchState *env, abi_ptr addr, MemOp mop, uintptr_t ra, MMUAccessType type) { diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h index 64f48e6990..7dd57013e9 100644 --- a/include/tcg/tcg-ldst.h +++ b/include/tcg/tcg-ldst.h @@ -60,10 +60,4 @@ void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, void helper_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, MemOpIdx oi, uintptr_t retaddr); -#ifdef CONFIG_USER_ONLY - -G_NORETURN void helper_unaligned_ld(CPUArchState *env, target_ulong addr); -G_NORETURN void helper_unaligned_st(CPUArchState *env, target_ulong addr); - -#endif /* CONFIG_USER_ONLY */ #endif /* TCG_LDST_H */ From 81f004b22354857a9cad9619a466a9d7eaffa975 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 18 Apr 2023 18:34:31 +0200 Subject: [PATCH 29/74] tcg/loongarch64: Check the host supports unaligned accesses This should be true of all loongarch64 running Linux. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/loongarch64/tcg-target.c.inc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc index e651ec5c71..33d8e67513 100644 --- a/tcg/loongarch64/tcg-target.c.inc +++ b/tcg/loongarch64/tcg-target.c.inc @@ -30,6 +30,7 @@ */ #include "../tcg-ldst.c.inc" +#include #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { @@ -1674,6 +1675,14 @@ static void tcg_target_qemu_prologue(TCGContext *s) static void tcg_target_init(TCGContext *s) { + unsigned long hwcap = qemu_getauxval(AT_HWCAP); + + /* Server and desktop class cpus have UAL; embedded cpus do not. */ + if (!(hwcap & HWCAP_LOONGARCH_UAL)) { + error_report("TCG: unaligned access support required; exiting"); + exit(EXIT_FAILURE); + } + tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; From 12d7fead7c6232b3ddae963b113dcaf7e1cb1f28 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 18 Apr 2023 19:09:29 +0200 Subject: [PATCH 30/74] tcg/loongarch64: Support softmmu unaligned accesses Test the final byte of an unaligned access. Use BSTRINS.D to clear the range of bits, rather than AND. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/loongarch64/tcg-target.c.inc | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc index 33d8e67513..7d0165349d 100644 --- a/tcg/loongarch64/tcg-target.c.inc +++ b/tcg/loongarch64/tcg-target.c.inc @@ -848,7 +848,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, int fast_ofs = TLB_MASK_TABLE_OFS(mem_index); int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); - tcg_target_long compare_mask; ldst = new_ldst_label(s); ldst->is_ld = is_ld; @@ -872,14 +871,20 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, offsetof(CPUTLBEntry, addend)); - /* We don't support unaligned accesses. 
*/ + /* + * For aligned accesses, we check the first byte and include the alignment + * bits within the address. For unaligned access, we check that we don't + * cross pages using the address of the last byte of the access. + */ if (a_bits < s_bits) { - a_bits = s_bits; + unsigned a_mask = (1u << a_bits) - 1; + unsigned s_mask = (1u << s_bits) - 1; + tcg_out_addi(s, TCG_TYPE_TL, TCG_REG_TMP1, addr_reg, s_mask - a_mask); + } else { + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_TMP1, addr_reg); } - /* Clear the non-page, non-alignment bits from the address. */ - compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1); - tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); - tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addr_reg); + tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO, + a_bits, TARGET_PAGE_BITS - 1); /* Compare masked address with the TLB entry. */ ldst->label_ptr[0] = s->code_ptr; From 933b331b306cd530a441d25245577f30ee0b938e Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 25 Apr 2023 12:06:48 +0100 Subject: [PATCH 31/74] tcg/riscv: Support softmmu unaligned accesses The system is required to emulate unaligned accesses, even if the hardware does not support it. The resulting trap may or may not be more efficient than the qemu slow path. There are linux kernel patches in flight to allow userspace to query hardware support; we can re-evaluate whether to enable this by default after that. In the meantime, softmmu now matches useronly, where we already assumed that unaligned accesses are supported. Reviewed-by: LIU Zhiwei Signed-off-by: Richard Henderson --- tcg/riscv/tcg-target.c.inc | 48 ++++++++++++++++++++++---------------- 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index 19cd4507fb..415e6c6e15 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -910,12 +910,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, #ifdef CONFIG_SOFTMMU unsigned s_bits = opc & MO_SIZE; + unsigned s_mask = (1u << s_bits) - 1; int mem_index = get_mmuidx(oi); int fast_ofs = TLB_MASK_TABLE_OFS(mem_index); int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); - TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0; - tcg_target_long compare_mask; + int compare_mask; + TCGReg addr_adj; ldst = new_ldst_label(s); ldst->is_ld = is_ld; @@ -924,14 +925,33 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs); - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); + /* + * For aligned accesses, we check the first byte and include the alignment + * bits within the address. For unaligned access, we check that we don't + * cross pages using the address of the last byte of the access. + */ + addr_adj = addr_reg; + if (a_bits < s_bits) { + addr_adj = TCG_REG_TMP0; + tcg_out_opc_imm(s, TARGET_LONG_BITS == 32 ? 
OPC_ADDIW : OPC_ADDI, + addr_adj, addr_reg, s_mask - a_mask); + } + compare_mask = TARGET_PAGE_MASK | a_mask; + if (compare_mask == sextreg(compare_mask, 0, 12)) { + tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask); + } else { + tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); + tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj); + } + /* Load the tlb comparator and the addend. */ tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2, is_ld ? offsetof(CPUTLBEntry, addr_read) @@ -939,29 +959,17 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, offsetof(CPUTLBEntry, addend)); - /* We don't support unaligned accesses. */ - if (a_bits < s_bits) { - a_bits = s_bits; - } - /* Clear the non-page, non-alignment bits from the address. */ - compare_mask = (tcg_target_long)TARGET_PAGE_MASK | a_mask; - if (compare_mask == sextreg(compare_mask, 0, 12)) { - tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, compare_mask); - } else { - tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); - tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_reg); - } - /* Compare masked address with the TLB entry. */ ldst->label_ptr[0] = s->code_ptr; tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0); /* TLB Hit - translate address using addend. */ + addr_adj = addr_reg; if (TARGET_LONG_BITS == 32) { - tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg); - addr_reg = TCG_REG_TMP0; + addr_adj = TCG_REG_TMP0; + tcg_out_ext32u(s, addr_adj, addr_reg); } - tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr_reg); + tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr_adj); *pbase = TCG_REG_TMP0; #else if (a_mask) { From 7b8801071951c55dc506c1fca8b40ba292a28d6e Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 19 Apr 2023 12:43:17 +0200 Subject: [PATCH 32/74] tcg: Introduce tcg_target_has_memory_bswap Replace the unparameterized TCG_TARGET_HAS_MEMORY_BSWAP macro with a function with a memop argument. 
Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/aarch64/tcg-target.c.inc | 5 +++++ tcg/aarch64/tcg-target.h | 1 - tcg/arm/tcg-target.c.inc | 5 +++++ tcg/arm/tcg-target.h | 1 - tcg/i386/tcg-target.c.inc | 5 +++++ tcg/i386/tcg-target.h | 3 --- tcg/loongarch64/tcg-target.c.inc | 5 +++++ tcg/loongarch64/tcg-target.h | 2 -- tcg/mips/tcg-target.c.inc | 5 +++++ tcg/mips/tcg-target.h | 2 -- tcg/ppc/tcg-target.c.inc | 5 +++++ tcg/ppc/tcg-target.h | 1 - tcg/riscv/tcg-target.c.inc | 5 +++++ tcg/riscv/tcg-target.h | 2 -- tcg/s390x/tcg-target.c.inc | 5 +++++ tcg/s390x/tcg-target.h | 2 -- tcg/sparc64/tcg-target.c.inc | 5 +++++ tcg/sparc64/tcg-target.h | 1 - tcg/tcg-internal.h | 2 ++ tcg/tcg-op.c | 20 +++++++++++--------- tcg/tci/tcg-target.c.inc | 5 +++++ tcg/tci/tcg-target.h | 2 -- 22 files changed, 63 insertions(+), 26 deletions(-) diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index 36d8798bca..0cc719d799 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -1595,6 +1595,11 @@ typedef struct { TCGType index_ext; } HostAddress; +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return false; +} + static const TCGLdstHelperParam ldst_helper_param = { .ntmp = 1, .tmp = { TCG_REG_TMP } }; diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h index 3c0b0d312d..378e01d9d8 100644 --- a/tcg/aarch64/tcg-target.h +++ b/tcg/aarch64/tcg-target.h @@ -154,7 +154,6 @@ extern bool have_lse2; #define TCG_TARGET_HAS_cmpsel_vec 0 #define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_HAS_MEMORY_BSWAP 0 #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index eb0542f32e..e5aed03247 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -1325,6 +1325,11 @@ typedef struct { bool index_scratch; } HostAddress; +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return false; +} + static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg) { /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. 
*/ diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h index def2a189e6..4c2d3332d5 100644 --- a/tcg/arm/tcg-target.h +++ b/tcg/arm/tcg-target.h @@ -150,7 +150,6 @@ extern bool use_neon_instructions; #define TCG_TARGET_HAS_cmpsel_vec 0 #define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_HAS_MEMORY_BSWAP 0 #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index 21553f3c39..6d55ba5a1c 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -1776,6 +1776,11 @@ typedef struct { int seg; } HostAddress; +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return have_movbe; +} + /* * Because i686 has no register parameters and because x86_64 has xchg * to handle addr/data register overlap, we have placed all input arguments diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h index 0421776cb8..8fe6958abd 100644 --- a/tcg/i386/tcg-target.h +++ b/tcg/i386/tcg-target.h @@ -240,9 +240,6 @@ extern bool have_atomic16; #include "tcg/tcg-mo.h" #define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) - -#define TCG_TARGET_HAS_MEMORY_BSWAP have_movbe - #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc index 7d0165349d..d26174dde5 100644 --- a/tcg/loongarch64/tcg-target.c.inc +++ b/tcg/loongarch64/tcg-target.c.inc @@ -828,6 +828,11 @@ typedef struct { TCGReg index; } HostAddress; +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return false; +} + /* * For softmmu, perform the TLB load and compare. * For useronly, perform any required alignment tests. diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h index 17b8193aa5..75c3d80ed2 100644 --- a/tcg/loongarch64/tcg-target.h +++ b/tcg/loongarch64/tcg-target.h @@ -173,6 +173,4 @@ typedef enum { #define TCG_TARGET_NEED_LDST_LABELS -#define TCG_TARGET_HAS_MEMORY_BSWAP 0 - #endif /* LOONGARCH_TCG_TARGET_H */ diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index fa0f334e8d..cd0254a0d7 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -1141,6 +1141,11 @@ typedef struct { MemOp align; } HostAddress; +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return false; +} + /* * For softmmu, perform the TLB load and compare. * For useronly, perform any required alignment tests. diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h index 42bd7fff01..47088af9cb 100644 --- a/tcg/mips/tcg-target.h +++ b/tcg/mips/tcg-target.h @@ -205,8 +205,6 @@ extern bool use_mips32r2_instructions; #endif #define TCG_TARGET_DEFAULT_MO 0 -#define TCG_TARGET_HAS_MEMORY_BSWAP 0 - #define TCG_TARGET_NEED_LDST_LABELS #endif diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc index 218602c10c..b62a163014 100644 --- a/tcg/ppc/tcg-target.c.inc +++ b/tcg/ppc/tcg-target.c.inc @@ -2017,6 +2017,11 @@ typedef struct { TCGReg index; } HostAddress; +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return true; +} + /* * For softmmu, perform the TLB load and compare. * For useronly, perform any required alignment tests. 
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index af81c5a57f..d55f0266bb 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -179,7 +179,6 @@ extern bool have_vsx; #define TCG_TARGET_HAS_cmpsel_vec 0 #define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_HAS_MEMORY_BSWAP 1 #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index 415e6c6e15..37870c89fc 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -853,6 +853,11 @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) tcg_debug_assert(ok); } +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return false; +} + /* We have three temps, we might as well expose them. */ static const TCGLdstHelperParam ldst_helper_param = { .ntmp = 3, .tmp = { TCG_REG_TMP0, TCG_REG_TMP1, TCG_REG_TMP2 } diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index dddf2486c1..dece3b3c27 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -168,6 +168,4 @@ typedef enum { #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS -#define TCG_TARGET_HAS_MEMORY_BSWAP 0 - #endif diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc index de8aed5f77..22f0206b5a 100644 --- a/tcg/s390x/tcg-target.c.inc +++ b/tcg/s390x/tcg-target.c.inc @@ -1574,6 +1574,11 @@ typedef struct { int disp; } HostAddress; +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return true; +} + static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data, HostAddress h) { diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h index a05b473117..fe05680124 100644 --- a/tcg/s390x/tcg-target.h +++ b/tcg/s390x/tcg-target.h @@ -172,8 +172,6 @@ extern uint64_t s390_facilities[3]; #define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF #define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF -#define TCG_TARGET_HAS_MEMORY_BSWAP 1 - #define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index 0237188d65..bb23038529 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -1011,6 +1011,11 @@ typedef struct { TCGReg index; } HostAddress; +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return true; +} + /* * For softmmu, perform the TLB load and compare. * For useronly, perform any required alignment tests. 
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h index 7434cc99d4..f6cd86975a 100644 --- a/tcg/sparc64/tcg-target.h +++ b/tcg/sparc64/tcg-target.h @@ -154,7 +154,6 @@ extern bool use_vis3_instructions; #define TCG_AREG0 TCG_REG_I0 #define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_HAS_MEMORY_BSWAP 1 #define TCG_TARGET_NEED_LDST_LABELS #define TCG_TARGET_NEED_POOL_LABELS diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h index 0f1ba01a9a..67b698bd5c 100644 --- a/tcg/tcg-internal.h +++ b/tcg/tcg-internal.h @@ -126,4 +126,6 @@ static inline TCGv_i64 TCGV128_HIGH(TCGv_i128 t) return temp_tcgv_i64(tcgv_i128_temp(t) + o); } +bool tcg_target_has_memory_bswap(MemOp memop); + #endif /* TCG_INTERNAL_H */ diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index 22481a344c..b13ded10df 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -2959,7 +2959,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) oi = make_memop_idx(memop, idx); orig_memop = memop; - if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { memop &= ~MO_BSWAP; /* The bswap primitive benefits from zero-extended input. */ if ((memop & MO_SSIZE) == MO_SW) { @@ -2996,7 +2996,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) memop = tcg_canonicalize_memop(memop, 0, 1); oi = make_memop_idx(memop, idx); - if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { swap = tcg_temp_ebb_new_i32(); switch (memop & MO_SIZE) { case MO_16: @@ -3045,7 +3045,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) oi = make_memop_idx(memop, idx); orig_memop = memop; - if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { memop &= ~MO_BSWAP; /* The bswap primitive benefits from zero-extended input. */ if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) { @@ -3091,7 +3091,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) memop = tcg_canonicalize_memop(memop, 1, 1); oi = make_memop_idx(memop, idx); - if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { swap = tcg_temp_ebb_new_i64(); switch (memop & MO_SIZE) { case MO_16: @@ -3157,11 +3157,6 @@ static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) tcg_debug_assert((orig & MO_SIZE) == MO_128); tcg_debug_assert((orig & MO_SIGN) == 0); - /* Use a memory ordering implemented by the host. */ - if (!TCG_TARGET_HAS_MEMORY_BSWAP && (orig & MO_BSWAP)) { - mop_1 &= ~MO_BSWAP; - } - /* Reduce the size to 64-bit. */ mop_1 = (mop_1 & ~MO_SIZE) | MO_64; @@ -3191,6 +3186,13 @@ static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) default: g_assert_not_reached(); } + + /* Use a memory ordering implemented by the host. 
*/ + if ((orig & MO_BSWAP) && !tcg_target_has_memory_bswap(mop_1)) { + mop_1 &= ~MO_BSWAP; + mop_2 &= ~MO_BSWAP; + } + ret[0] = mop_1; ret[1] = mop_2; } diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc index 4cf03a579c..41fbf042da 100644 --- a/tcg/tci/tcg-target.c.inc +++ b/tcg/tci/tcg-target.c.inc @@ -963,3 +963,8 @@ static void tcg_target_init(TCGContext *s) static inline void tcg_target_qemu_prologue(TCGContext *s) { } + +bool tcg_target_has_memory_bswap(MemOp memop) +{ + return true; +} diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h index 7140a76a73..364012e4d2 100644 --- a/tcg/tci/tcg-target.h +++ b/tcg/tci/tcg-target.h @@ -176,6 +176,4 @@ typedef enum { We prefer consistency across hosts on this. */ #define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_HAS_MEMORY_BSWAP 1 - #endif /* TCG_TARGET_H */ From 12fde9bcdb52118495d10c32ed375679f23e323c Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 7 Nov 2022 10:42:56 +1100 Subject: [PATCH 33/74] tcg: Add INDEX_op_qemu_{ld,st}_i128 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add opcodes for backend support for 128-bit memory operations. Reviewed-by: Peter Maydell Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- docs/devel/tcg-ops.rst | 11 +++--- include/tcg/tcg-opc.h | 8 +++++ tcg/aarch64/tcg-target.h | 2 ++ tcg/arm/tcg-target.h | 2 ++ tcg/i386/tcg-target.h | 2 ++ tcg/loongarch64/tcg-target.h | 1 + tcg/mips/tcg-target.h | 2 ++ tcg/optimize.c | 2 ++ tcg/ppc/tcg-target.h | 2 ++ tcg/riscv/tcg-target.h | 2 ++ tcg/s390x/tcg-target.h | 2 ++ tcg/sparc64/tcg-target.h | 2 ++ tcg/tcg-op.c | 69 ++++++++++++++++++++++++++++++++---- tcg/tcg.c | 10 +++++- tcg/tci/tcg-target.h | 2 ++ 15 files changed, 108 insertions(+), 11 deletions(-) diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst index f3f451b77f..6a166c5665 100644 --- a/docs/devel/tcg-ops.rst +++ b/docs/devel/tcg-ops.rst @@ -672,19 +672,20 @@ QEMU specific operations | This operation is optional. If the TCG backend does not implement the goto_ptr opcode, emitting this op is equivalent to emitting exit_tb(0). - * - qemu_ld_i32/i64 *t0*, *t1*, *flags*, *memidx* + * - qemu_ld_i32/i64/i128 *t0*, *t1*, *flags*, *memidx* - qemu_st_i32/i64 *t0*, *t1*, *flags*, *memidx* + qemu_st_i32/i64/i128 *t0*, *t1*, *flags*, *memidx* qemu_st8_i32 *t0*, *t1*, *flags*, *memidx* - | Load data at the guest address *t1* into *t0*, or store data in *t0* at guest - address *t1*. The _i32/_i64 size applies to the size of the input/output + address *t1*. The _i32/_i64/_i128 size applies to the size of the input/output register *t0* only. The address *t1* is always sized according to the guest, and the width of the memory operation is controlled by *flags*. | | Both *t0* and *t1* may be split into little-endian ordered pairs of registers - if dealing with 64-bit quantities on a 32-bit host. + if dealing with 64-bit quantities on a 32-bit host, or 128-bit quantities on + a 64-bit host. | | The *memidx* selects the qemu tlb index to use (e.g. user or kernel access). The flags are the MemOp bits, selecting the sign, width, and endianness @@ -693,6 +694,8 @@ QEMU specific operations | For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a 64-bit memory access specified in *flags*. | + | For qemu_ld/st_i128, these are only supported for a 64-bit host. + | | For i386, qemu_st8_i32 is exactly like qemu_st_i32, except the size of the memory operation is known to be 8-bit. 
This allows the backend to provide a different set of register constraints. diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h index dd444734d9..94cf7c5d6a 100644 --- a/include/tcg/tcg-opc.h +++ b/include/tcg/tcg-opc.h @@ -213,6 +213,14 @@ DEF(qemu_st8_i32, 0, TLADDR_ARGS + 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | IMPL(TCG_TARGET_HAS_qemu_st8_i32)) +/* Only for 64-bit hosts at the moment. */ +DEF(qemu_ld_i128, 2, 1, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | + IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) +DEF(qemu_st_i128, 0, 3, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | + IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) + /* Host vector support. */ #define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec) diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h index 378e01d9d8..74ee2ed255 100644 --- a/tcg/aarch64/tcg-target.h +++ b/tcg/aarch64/tcg-target.h @@ -129,6 +129,8 @@ extern bool have_lse2; #define TCG_TARGET_HAS_muluh_i64 1 #define TCG_TARGET_HAS_mulsh_i64 1 +#define TCG_TARGET_HAS_qemu_ldst_i128 0 + #define TCG_TARGET_HAS_v64 1 #define TCG_TARGET_HAS_v128 1 #define TCG_TARGET_HAS_v256 0 diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h index 4c2d3332d5..65efc538f4 100644 --- a/tcg/arm/tcg-target.h +++ b/tcg/arm/tcg-target.h @@ -125,6 +125,8 @@ extern bool use_neon_instructions; #define TCG_TARGET_HAS_rem_i32 0 #define TCG_TARGET_HAS_qemu_st8_i32 0 +#define TCG_TARGET_HAS_qemu_ldst_i128 0 + #define TCG_TARGET_HAS_v64 use_neon_instructions #define TCG_TARGET_HAS_v128 use_neon_instructions #define TCG_TARGET_HAS_v256 0 diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h index 8fe6958abd..943af6775e 100644 --- a/tcg/i386/tcg-target.h +++ b/tcg/i386/tcg-target.h @@ -194,6 +194,8 @@ extern bool have_atomic16; #define TCG_TARGET_HAS_qemu_st8_i32 1 #endif +#define TCG_TARGET_HAS_qemu_ldst_i128 0 + /* We do not support older SSE systems, only beginning with AVX1. 
*/ #define TCG_TARGET_HAS_v64 have_avx1 #define TCG_TARGET_HAS_v128 have_avx1 diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h index 75c3d80ed2..482901ac15 100644 --- a/tcg/loongarch64/tcg-target.h +++ b/tcg/loongarch64/tcg-target.h @@ -168,6 +168,7 @@ typedef enum { #define TCG_TARGET_HAS_muls2_i64 0 #define TCG_TARGET_HAS_muluh_i64 1 #define TCG_TARGET_HAS_mulsh_i64 1 +#define TCG_TARGET_HAS_qemu_ldst_i128 0 #define TCG_TARGET_DEFAULT_MO (0) diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h index 47088af9cb..7277a117ef 100644 --- a/tcg/mips/tcg-target.h +++ b/tcg/mips/tcg-target.h @@ -204,6 +204,8 @@ extern bool use_mips32r2_instructions; #define TCG_TARGET_HAS_ext16u_i64 0 /* andi rt, rs, 0xffff */ #endif +#define TCG_TARGET_HAS_qemu_ldst_i128 0 + #define TCG_TARGET_DEFAULT_MO 0 #define TCG_TARGET_NEED_LDST_LABELS diff --git a/tcg/optimize.c b/tcg/optimize.c index 9614fa3638..da400b9668 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2186,11 +2186,13 @@ void tcg_optimize(TCGContext *s) break; case INDEX_op_qemu_ld_i32: case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_i128: done = fold_qemu_ld(&ctx, op); break; case INDEX_op_qemu_st_i32: case INDEX_op_qemu_st8_i32: case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_i128: done = fold_qemu_st(&ctx, op); break; CASE_OP_32_64(rem): diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index d55f0266bb..0914380bd7 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -149,6 +149,8 @@ extern bool have_vsx; #define TCG_TARGET_HAS_mulsh_i64 1 #endif +#define TCG_TARGET_HAS_qemu_ldst_i128 0 + /* * While technically Altivec could support V64, it has no 64-bit store * instruction and substituting two 32-bit stores makes the generated diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index dece3b3c27..494c986b49 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -163,6 +163,8 @@ typedef enum { #define TCG_TARGET_HAS_muluh_i64 1 #define TCG_TARGET_HAS_mulsh_i64 1 +#define TCG_TARGET_HAS_qemu_ldst_i128 0 + #define TCG_TARGET_DEFAULT_MO (0) #define TCG_TARGET_NEED_LDST_LABELS diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h index fe05680124..170007bea5 100644 --- a/tcg/s390x/tcg-target.h +++ b/tcg/s390x/tcg-target.h @@ -140,6 +140,8 @@ extern uint64_t s390_facilities[3]; #define TCG_TARGET_HAS_muluh_i64 0 #define TCG_TARGET_HAS_mulsh_i64 0 +#define TCG_TARGET_HAS_qemu_ldst_i128 0 + #define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR) #define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR) #define TCG_TARGET_HAS_v256 0 diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h index f6cd86975a..31c5537379 100644 --- a/tcg/sparc64/tcg-target.h +++ b/tcg/sparc64/tcg-target.h @@ -151,6 +151,8 @@ extern bool use_vis3_instructions; #define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions #define TCG_TARGET_HAS_mulsh_i64 0 +#define TCG_TARGET_HAS_qemu_ldst_i128 0 + #define TCG_AREG0 TCG_REG_I0 #define TCG_TARGET_DEFAULT_MO (0) diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index b13ded10df..c419228cc4 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -3205,7 +3205,7 @@ static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) { - MemOpIdx oi = make_memop_idx(memop, idx); + const MemOpIdx oi = make_memop_idx(memop, idx); tcg_debug_assert((memop & MO_SIZE) == MO_128); tcg_debug_assert((memop & MO_SIGN) == 0); @@ -3213,9 +3213,36 @@ void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, 
MemOp memop) tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); addr = plugin_prep_mem_callbacks(addr); - /* TODO: allow the tcg backend to see the whole operation. */ + /* TODO: For now, force 32-bit hosts to use the helper. */ + if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) { + TCGv_i64 lo, hi; + TCGArg addr_arg; + MemOpIdx adj_oi; + bool need_bswap = false; - if (use_two_i64_for_i128(memop)) { + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { + lo = TCGV128_HIGH(val); + hi = TCGV128_LOW(val); + adj_oi = make_memop_idx(memop & ~MO_BSWAP, idx); + need_bswap = true; + } else { + lo = TCGV128_LOW(val); + hi = TCGV128_HIGH(val); + adj_oi = oi; + } + +#if TARGET_LONG_BITS == 32 + addr_arg = tcgv_i32_arg(addr); +#else + addr_arg = tcgv_i64_arg(addr); +#endif + tcg_gen_op4ii_i64(INDEX_op_qemu_ld_i128, lo, hi, addr_arg, adj_oi); + + if (need_bswap) { + tcg_gen_bswap64_i64(lo, lo); + tcg_gen_bswap64_i64(hi, hi); + } + } else if (use_two_i64_for_i128(memop)) { MemOp mop[2]; TCGv addr_p8; TCGv_i64 x, y; @@ -3258,7 +3285,7 @@ void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) { - MemOpIdx oi = make_memop_idx(memop, idx); + const MemOpIdx oi = make_memop_idx(memop, idx); tcg_debug_assert((memop & MO_SIZE) == MO_128); tcg_debug_assert((memop & MO_SIGN) == 0); @@ -3266,9 +3293,39 @@ void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST); addr = plugin_prep_mem_callbacks(addr); - /* TODO: allow the tcg backend to see the whole operation. */ + /* TODO: For now, force 32-bit hosts to use the helper. */ - if (use_two_i64_for_i128(memop)) { + if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) { + TCGv_i64 lo, hi; + TCGArg addr_arg; + MemOpIdx adj_oi; + bool need_bswap = false; + + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { + lo = tcg_temp_new_i64(); + hi = tcg_temp_new_i64(); + tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val)); + tcg_gen_bswap64_i64(hi, TCGV128_LOW(val)); + adj_oi = make_memop_idx(memop & ~MO_BSWAP, idx); + need_bswap = true; + } else { + lo = TCGV128_LOW(val); + hi = TCGV128_HIGH(val); + adj_oi = oi; + } + +#if TARGET_LONG_BITS == 32 + addr_arg = tcgv_i32_arg(addr); +#else + addr_arg = tcgv_i64_arg(addr); +#endif + tcg_gen_op4ii_i64(INDEX_op_qemu_st_i128, lo, hi, addr_arg, adj_oi); + + if (need_bswap) { + tcg_temp_free_i64(lo); + tcg_temp_free_i64(hi); + } + } else if (use_two_i64_for_i128(memop)) { MemOp mop[2]; TCGv addr_p8; TCGv_i64 x, y; diff --git a/tcg/tcg.c b/tcg/tcg.c index a864ff1f4b..2f5bb5af41 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -1735,6 +1735,10 @@ bool tcg_op_supported(TCGOpcode op) case INDEX_op_qemu_st8_i32: return TCG_TARGET_HAS_qemu_st8_i32; + case INDEX_op_qemu_ld_i128: + case INDEX_op_qemu_st_i128: + return TCG_TARGET_HAS_qemu_ldst_i128; + case INDEX_op_mov_i32: case INDEX_op_setcond_i32: case INDEX_op_brcond_i32: @@ -2187,7 +2191,7 @@ static const char * const cond_name[] = [TCG_COND_GTU] = "gtu" }; -static const char * const ldst_name[] = +static const char * const ldst_name[(MO_BSWAP | MO_SSIZE) + 1] = { [MO_UB] = "ub", [MO_SB] = "sb", @@ -2201,6 +2205,8 @@ static const char * const ldst_name[] = [MO_BEUL] = "beul", [MO_BESL] = "besl", [MO_BEUQ] = "beq", + [MO_128 + MO_BE] = "beo", + [MO_128 + MO_LE] = "leo", }; static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = { @@ -2357,6 +2363,8 @@ static void tcg_dump_ops(TCGContext 
*s, FILE *f, bool have_prefs) case INDEX_op_qemu_st8_i32: case INDEX_op_qemu_ld_i64: case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_ld_i128: + case INDEX_op_qemu_st_i128: { const char *s_al, *s_op, *s_at; MemOpIdx oi = op->args[k++]; diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h index 364012e4d2..28dc6d5cfc 100644 --- a/tcg/tci/tcg-target.h +++ b/tcg/tci/tcg-target.h @@ -127,6 +127,8 @@ #define TCG_TARGET_HAS_mulu2_i32 1 #endif /* TCG_TARGET_REG_BITS == 64 */ +#define TCG_TARGET_HAS_qemu_ldst_i128 0 + /* Number of registers available. */ #define TCG_TARGET_NB_REGS 16 From 2462e30e99676c710624806febe5ce67a45f0521 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 14 May 2023 09:58:39 -0700 Subject: [PATCH 34/74] tcg: Introduce tcg_out_movext3 With x86_64 as host, we do not have any temporaries with which to resolve cycles, but we do have xchg. As a side bonus, the set of graphs that can be made with 3 nodes and all nodes conflicting is small: two. We can solve the cycle with a single temp. This is required for x86_64 to handle stores of i128: 1 address register and 2 data registers. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/tcg.c | 138 ++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 108 insertions(+), 30 deletions(-) diff --git a/tcg/tcg.c b/tcg/tcg.c index 2f5bb5af41..16808e0dd5 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -532,6 +532,82 @@ static void tcg_out_movext2(TCGContext *s, const TCGMovExtend *i1, tcg_out_movext1_new_src(s, i1, src1); } +/** + * tcg_out_movext3 -- move and extend three pair + * @s: tcg context + * @i1: first move description + * @i2: second move description + * @i3: third move description + * @scratch: temporary register, or -1 for none + * + * As tcg_out_movext, for all of @i1, @i2 and @i3, caring for overlap + * between the sources and destinations. + */ + +static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1, + const TCGMovExtend *i2, const TCGMovExtend *i3, + int scratch) +{ + TCGReg src1 = i1->src; + TCGReg src2 = i2->src; + TCGReg src3 = i3->src; + + if (i1->dst != src2 && i1->dst != src3) { + tcg_out_movext1(s, i1); + tcg_out_movext2(s, i2, i3, scratch); + return; + } + if (i2->dst != src1 && i2->dst != src3) { + tcg_out_movext1(s, i2); + tcg_out_movext2(s, i1, i3, scratch); + return; + } + if (i3->dst != src1 && i3->dst != src2) { + tcg_out_movext1(s, i3); + tcg_out_movext2(s, i1, i2, scratch); + return; + } + + /* + * There is a cycle. Since there are only 3 nodes, the cycle is + * either "clockwise" or "anti-clockwise", and can be solved with + * a single scratch or two xchg. + */ + if (i1->dst == src2 && i2->dst == src3 && i3->dst == src1) { + /* "Clockwise" */ + if (tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2)) { + tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3); + /* The data is now in the correct registers, now extend. */ + tcg_out_movext1_new_src(s, i1, i1->dst); + tcg_out_movext1_new_src(s, i2, i2->dst); + tcg_out_movext1_new_src(s, i3, i3->dst); + } else { + tcg_debug_assert(scratch >= 0); + tcg_out_mov(s, i1->src_type, scratch, src1); + tcg_out_movext1(s, i3); + tcg_out_movext1(s, i2); + tcg_out_movext1_new_src(s, i1, scratch); + } + } else if (i1->dst == src3 && i2->dst == src1 && i3->dst == src2) { + /* "Anti-clockwise" */ + if (tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3)) { + tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2); + /* The data is now in the correct registers, now extend. 
*/ + tcg_out_movext1_new_src(s, i1, i1->dst); + tcg_out_movext1_new_src(s, i2, i2->dst); + tcg_out_movext1_new_src(s, i3, i3->dst); + } else { + tcg_debug_assert(scratch >= 0); + tcg_out_mov(s, i1->src_type, scratch, src1); + tcg_out_movext1(s, i2); + tcg_out_movext1(s, i3); + tcg_out_movext1_new_src(s, i1, scratch); + } + } else { + g_assert_not_reached(); + } +} + #define C_PFX1(P, A) P##A #define C_PFX2(P, A, B) P##A##_##B #define C_PFX3(P, A, B, C) P##A##_##B##_##C @@ -5153,46 +5229,48 @@ static int tcg_out_helper_stk_ofs(TCGType type, unsigned slot) static void tcg_out_helper_load_regs(TCGContext *s, unsigned nmov, TCGMovExtend *mov, - unsigned ntmp, const int *tmp) + const TCGLdstHelperParam *parm) { + TCGReg dst3; + switch (nmov) { - default: + case 4: /* The backend must have provided enough temps for the worst case. */ - tcg_debug_assert(ntmp + 1 >= nmov); + tcg_debug_assert(parm->ntmp >= 2); - for (unsigned i = nmov - 1; i >= 2; --i) { - TCGReg dst = mov[i].dst; + dst3 = mov[3].dst; + for (unsigned j = 0; j < 3; ++j) { + if (dst3 == mov[j].src) { + /* + * Conflict. Copy the source to a temporary, perform the + * remaining moves, then the extension from our scratch + * on the way out. + */ + TCGReg scratch = parm->tmp[1]; - for (unsigned j = 0; j < i; ++j) { - if (dst == mov[j].src) { - /* - * Conflict. - * Copy the source to a temporary, recurse for the - * remaining moves, perform the extension from our - * scratch on the way out. - */ - TCGReg scratch = tmp[--ntmp]; - tcg_out_mov(s, mov[i].src_type, scratch, mov[i].src); - mov[i].src = scratch; - - tcg_out_helper_load_regs(s, i, mov, ntmp, tmp); - tcg_out_movext1(s, &mov[i]); - return; - } + tcg_out_mov(s, mov[3].src_type, scratch, mov[3].src); + tcg_out_movext3(s, mov, mov + 1, mov + 2, parm->tmp[0]); + tcg_out_movext1_new_src(s, &mov[3], scratch); + break; } - - /* No conflicts: perform this move and continue. */ - tcg_out_movext1(s, &mov[i]); } - /* fall through for the final two moves */ + /* No conflicts: perform this move and continue. */ + tcg_out_movext1(s, &mov[3]); + /* fall through */ + + case 3: + tcg_out_movext3(s, mov, mov + 1, mov + 2, + parm->ntmp ? parm->tmp[0] : -1); + break; case 2: - tcg_out_movext2(s, mov, mov + 1, ntmp ? tmp[0] : -1); - return; + tcg_out_movext2(s, mov, mov + 1, + parm->ntmp ? parm->tmp[0] : -1); + break; case 1: tcg_out_movext1(s, mov); - return; - case 0: + break; + default: g_assert_not_reached(); } } @@ -5239,7 +5317,7 @@ static void tcg_out_helper_load_slots(TCGContext *s, for (i = 0; i < nmov; ++i) { mov[i].dst = tcg_target_call_iarg_regs[mov[i].dst]; } - tcg_out_helper_load_regs(s, nmov, mov, parm->ntmp, parm->tmp); + tcg_out_helper_load_regs(s, nmov, mov, parm); } static void tcg_out_helper_load_imm(TCGContext *s, unsigned slot, From 8d314041008478ea29ca462123f2ab21c3b363f6 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 14 May 2023 10:07:22 -0700 Subject: [PATCH 35/74] tcg: Merge tcg_out_helper_load_regs into caller Now that tcg_out_helper_load_regs is not recursive, we can merge it into its only caller, tcg_out_helper_load_slots. 
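In outline, the merged function stores stack-bound arguments first and only then performs the register moves that previously lived in tcg_out_helper_load_regs. An illustrative skeleton only (comments stand in for the code; the full body is in this patch):

    static void tcg_out_helper_load_slots(TCGContext *s, unsigned nmov,
                                          TCGMovExtend *mov,
                                          const TCGLdstHelperParam *parm)
    {
        /* 1. Walk the moves from the end, storing any argument bound to a
         *    stack slot; this frees registers, so overlap is not an issue. */
        /* 2. found_reg: the remaining arguments are register-bound, so
         *    convert their slot numbers to argument registers. */
        /* 3. Perform the register moves: nmov == 4 handles mov[3] with the
         *    help of parm->tmp[1], then 3/2/1 dispatch to tcg_out_movext3,
         *    tcg_out_movext2 and tcg_out_movext1 with parm->tmp[0] as the
         *    scratch register. */
    }
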
Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/tcg.c | 89 +++++++++++++++++++++++++------------------------------ 1 file changed, 41 insertions(+), 48 deletions(-) diff --git a/tcg/tcg.c b/tcg/tcg.c index 16808e0dd5..1c2b17f615 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -5227,12 +5227,50 @@ static int tcg_out_helper_stk_ofs(TCGType type, unsigned slot) return ofs; } -static void tcg_out_helper_load_regs(TCGContext *s, - unsigned nmov, TCGMovExtend *mov, - const TCGLdstHelperParam *parm) +static void tcg_out_helper_load_slots(TCGContext *s, + unsigned nmov, TCGMovExtend *mov, + const TCGLdstHelperParam *parm) { + unsigned i; TCGReg dst3; + /* + * Start from the end, storing to the stack first. + * This frees those registers, so we need not consider overlap. + */ + for (i = nmov; i-- > 0; ) { + unsigned slot = mov[i].dst; + + if (arg_slot_reg_p(slot)) { + goto found_reg; + } + + TCGReg src = mov[i].src; + TCGType dst_type = mov[i].dst_type; + MemOp dst_mo = dst_type == TCG_TYPE_I32 ? MO_32 : MO_64; + + /* The argument is going onto the stack; extend into scratch. */ + if ((mov[i].src_ext & MO_SIZE) != dst_mo) { + tcg_debug_assert(parm->ntmp != 0); + mov[i].dst = src = parm->tmp[0]; + tcg_out_movext1(s, &mov[i]); + } + + tcg_out_st(s, dst_type, src, TCG_REG_CALL_STACK, + tcg_out_helper_stk_ofs(dst_type, slot)); + } + return; + + found_reg: + /* + * The remaining arguments are in registers. + * Convert slot numbers to argument registers. + */ + nmov = i + 1; + for (i = 0; i < nmov; ++i) { + mov[i].dst = tcg_target_call_iarg_regs[mov[i].dst]; + } + switch (nmov) { case 4: /* The backend must have provided enough temps for the worst case. */ @@ -5275,51 +5313,6 @@ static void tcg_out_helper_load_regs(TCGContext *s, } } -static void tcg_out_helper_load_slots(TCGContext *s, - unsigned nmov, TCGMovExtend *mov, - const TCGLdstHelperParam *parm) -{ - unsigned i; - - /* - * Start from the end, storing to the stack first. - * This frees those registers, so we need not consider overlap. - */ - for (i = nmov; i-- > 0; ) { - unsigned slot = mov[i].dst; - - if (arg_slot_reg_p(slot)) { - goto found_reg; - } - - TCGReg src = mov[i].src; - TCGType dst_type = mov[i].dst_type; - MemOp dst_mo = dst_type == TCG_TYPE_I32 ? MO_32 : MO_64; - - /* The argument is going onto the stack; extend into scratch. */ - if ((mov[i].src_ext & MO_SIZE) != dst_mo) { - tcg_debug_assert(parm->ntmp != 0); - mov[i].dst = src = parm->tmp[0]; - tcg_out_movext1(s, &mov[i]); - } - - tcg_out_st(s, dst_type, src, TCG_REG_CALL_STACK, - tcg_out_helper_stk_ofs(dst_type, slot)); - } - return; - - found_reg: - /* - * The remaining arguments are in registers. - * Convert slot numbers to argument registers. 
- */ - nmov = i + 1; - for (i = 0; i < nmov; ++i) { - mov[i].dst = tcg_target_call_iarg_regs[mov[i].dst]; - } - tcg_out_helper_load_regs(s, nmov, mov, parm); -} - static void tcg_out_helper_load_imm(TCGContext *s, unsigned slot, TCGType type, tcg_target_long imm, const TCGLdstHelperParam *parm) From ebebea53ef8b08deb1ff970eaae706504a857bf6 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 17 Apr 2023 10:20:51 +0200 Subject: [PATCH 36/74] tcg: Support TCG_TYPE_I128 in tcg_out_{ld,st}_helper_{args,ret} Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/tcg.c | 196 +++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 163 insertions(+), 33 deletions(-) diff --git a/tcg/tcg.c b/tcg/tcg.c index 1c2b17f615..6328611543 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -206,6 +206,7 @@ static void * const qemu_ld_helpers[MO_SSIZE + 1] __attribute__((unused)) = { [MO_UQ] = helper_ldq_mmu, #if TCG_TARGET_REG_BITS == 64 [MO_SL] = helper_ldsl_mmu, + [MO_128] = helper_ld16_mmu, #endif }; @@ -214,6 +215,9 @@ static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = { [MO_16] = helper_stw_mmu, [MO_32] = helper_stl_mmu, [MO_64] = helper_stq_mmu, +#if TCG_TARGET_REG_BITS == 64 + [MO_128] = helper_st16_mmu, +#endif }; TCGContext tcg_init_ctx; @@ -866,6 +870,15 @@ static TCGHelperInfo info_helper_ld64_mmu = { | dh_typemask(ptr, 4) /* uintptr_t ra */ }; +static TCGHelperInfo info_helper_ld128_mmu = { + .flags = TCG_CALL_NO_WG, + .typemask = dh_typemask(i128, 0) /* return Int128 */ + | dh_typemask(env, 1) + | dh_typemask(tl, 2) /* target_ulong addr */ + | dh_typemask(i32, 3) /* unsigned oi */ + | dh_typemask(ptr, 4) /* uintptr_t ra */ +}; + static TCGHelperInfo info_helper_st32_mmu = { .flags = TCG_CALL_NO_WG, .typemask = dh_typemask(void, 0) @@ -886,6 +899,16 @@ static TCGHelperInfo info_helper_st64_mmu = { | dh_typemask(ptr, 5) /* uintptr_t ra */ }; +static TCGHelperInfo info_helper_st128_mmu = { + .flags = TCG_CALL_NO_WG, + .typemask = dh_typemask(void, 0) + | dh_typemask(env, 1) + | dh_typemask(tl, 2) /* target_ulong addr */ + | dh_typemask(i128, 3) /* Int128 data */ + | dh_typemask(i32, 4) /* unsigned oi */ + | dh_typemask(ptr, 5) /* uintptr_t ra */ +}; + #ifdef CONFIG_TCG_INTERPRETER static ffi_type *typecode_to_ffi(int argmask) { @@ -1299,8 +1322,10 @@ static void tcg_context_init(unsigned max_cpus) init_call_layout(&info_helper_ld32_mmu); init_call_layout(&info_helper_ld64_mmu); + init_call_layout(&info_helper_ld128_mmu); init_call_layout(&info_helper_st32_mmu); init_call_layout(&info_helper_st64_mmu); + init_call_layout(&info_helper_st128_mmu); #ifdef CONFIG_TCG_INTERPRETER init_ffi_layouts(); @@ -5401,6 +5426,8 @@ static unsigned tcg_out_helper_add_mov(TCGMovExtend *mov, TCGType dst_type, TCGType src_type, TCGReg lo, TCGReg hi) { + MemOp reg_mo; + if (dst_type <= TCG_TYPE_REG) { MemOp src_ext; @@ -5428,19 +5455,25 @@ static unsigned tcg_out_helper_add_mov(TCGMovExtend *mov, return 1; } - assert(TCG_TARGET_REG_BITS == 32); + if (TCG_TARGET_REG_BITS == 32) { + assert(dst_type == TCG_TYPE_I64); + reg_mo = MO_32; + } else { + assert(dst_type == TCG_TYPE_I128); + reg_mo = MO_64; + } mov[0].dst = loc[HOST_BIG_ENDIAN].arg_slot; mov[0].src = lo; - mov[0].dst_type = TCG_TYPE_I32; - mov[0].src_type = TCG_TYPE_I32; - mov[0].src_ext = MO_32; + mov[0].dst_type = TCG_TYPE_REG; + mov[0].src_type = TCG_TYPE_REG; + mov[0].src_ext = reg_mo; mov[1].dst = loc[!HOST_BIG_ENDIAN].arg_slot; mov[1].src = hi; - mov[1].dst_type = TCG_TYPE_I32; - mov[1].src_type = 
TCG_TYPE_I32; - mov[1].src_ext = MO_32; + mov[1].dst_type = TCG_TYPE_REG; + mov[1].src_type = TCG_TYPE_REG; + mov[1].src_ext = reg_mo; return 2; } @@ -5463,6 +5496,9 @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, case MO_64: info = &info_helper_ld64_mmu; break; + case MO_128: + info = &info_helper_ld128_mmu; + break; default: g_assert_not_reached(); } @@ -5477,8 +5513,33 @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, tcg_out_helper_load_slots(s, nmov, mov, parm); - /* No special attention for 32 and 64-bit return values. */ - tcg_debug_assert(info->out_kind == TCG_CALL_RET_NORMAL); + switch (info->out_kind) { + case TCG_CALL_RET_NORMAL: + case TCG_CALL_RET_BY_VEC: + break; + case TCG_CALL_RET_BY_REF: + /* + * The return reference is in the first argument slot. + * We need memory in which to return: re-use the top of stack. + */ + { + int ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET; + + if (arg_slot_reg_p(0)) { + tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[0], + TCG_REG_CALL_STACK, ofs_slot0); + } else { + tcg_debug_assert(parm->ntmp != 0); + tcg_out_addi_ptr(s, parm->tmp[0], + TCG_REG_CALL_STACK, ofs_slot0); + tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0], + TCG_REG_CALL_STACK, ofs_slot0); + } + } + break; + default: + g_assert_not_reached(); + } tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg); } @@ -5487,11 +5548,18 @@ static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *ldst, bool load_sign, const TCGLdstHelperParam *parm) { + MemOp mop = get_memop(ldst->oi); TCGMovExtend mov[2]; + int ofs_slot0; - if (ldst->type <= TCG_TYPE_REG) { - MemOp mop = get_memop(ldst->oi); + switch (ldst->type) { + case TCG_TYPE_I64: + if (TCG_TARGET_REG_BITS == 32) { + break; + } + /* fall through */ + case TCG_TYPE_I32: mov[0].dst = ldst->datalo_reg; mov[0].src = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, 0); mov[0].dst_type = ldst->type; @@ -5517,25 +5585,49 @@ static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *ldst, mov[0].src_ext = mop & MO_SSIZE; } tcg_out_movext1(s, mov); - } else { - assert(TCG_TARGET_REG_BITS == 32); + return; - mov[0].dst = ldst->datalo_reg; - mov[0].src = - tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, HOST_BIG_ENDIAN); - mov[0].dst_type = TCG_TYPE_I32; - mov[0].src_type = TCG_TYPE_I32; - mov[0].src_ext = MO_32; + case TCG_TYPE_I128: + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET; + switch (TCG_TARGET_CALL_RET_I128) { + case TCG_CALL_RET_NORMAL: + break; + case TCG_CALL_RET_BY_VEC: + tcg_out_st(s, TCG_TYPE_V128, + tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0), + TCG_REG_CALL_STACK, ofs_slot0); + /* fall through */ + case TCG_CALL_RET_BY_REF: + tcg_out_ld(s, TCG_TYPE_I64, ldst->datalo_reg, + TCG_REG_CALL_STACK, ofs_slot0 + 8 * HOST_BIG_ENDIAN); + tcg_out_ld(s, TCG_TYPE_I64, ldst->datahi_reg, + TCG_REG_CALL_STACK, ofs_slot0 + 8 * !HOST_BIG_ENDIAN); + return; + default: + g_assert_not_reached(); + } + break; - mov[1].dst = ldst->datahi_reg; - mov[1].src = - tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, !HOST_BIG_ENDIAN); - mov[1].dst_type = TCG_TYPE_REG; - mov[1].src_type = TCG_TYPE_REG; - mov[1].src_ext = MO_32; - - tcg_out_movext2(s, mov, mov + 1, parm->ntmp ? 
parm->tmp[0] : -1); + default: + g_assert_not_reached(); } + + mov[0].dst = ldst->datalo_reg; + mov[0].src = + tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, HOST_BIG_ENDIAN); + mov[0].dst_type = TCG_TYPE_I32; + mov[0].src_type = TCG_TYPE_I32; + mov[0].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64; + + mov[1].dst = ldst->datahi_reg; + mov[1].src = + tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, !HOST_BIG_ENDIAN); + mov[1].dst_type = TCG_TYPE_REG; + mov[1].src_type = TCG_TYPE_REG; + mov[1].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64; + + tcg_out_movext2(s, mov, mov + 1, parm->ntmp ? parm->tmp[0] : -1); } static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, @@ -5559,6 +5651,10 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, info = &info_helper_st64_mmu; data_type = TCG_TYPE_I64; break; + case MO_128: + info = &info_helper_st128_mmu; + data_type = TCG_TYPE_I128; + break; default: g_assert_not_reached(); } @@ -5576,13 +5672,47 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, /* Handle data argument. */ loc = &info->in[next_arg]; - n = tcg_out_helper_add_mov(mov + nmov, loc, data_type, ldst->type, - ldst->datalo_reg, ldst->datahi_reg); - next_arg += n; - nmov += n; - tcg_debug_assert(nmov <= ARRAY_SIZE(mov)); + switch (loc->kind) { + case TCG_CALL_ARG_NORMAL: + case TCG_CALL_ARG_EXTEND_U: + case TCG_CALL_ARG_EXTEND_S: + n = tcg_out_helper_add_mov(mov + nmov, loc, data_type, ldst->type, + ldst->datalo_reg, ldst->datahi_reg); + next_arg += n; + nmov += n; + tcg_out_helper_load_slots(s, nmov, mov, parm); + break; + + case TCG_CALL_ARG_BY_REF: + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + tcg_debug_assert(data_type == TCG_TYPE_I128); + tcg_out_st(s, TCG_TYPE_I64, + HOST_BIG_ENDIAN ? ldst->datahi_reg : ldst->datalo_reg, + TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[0].ref_slot)); + tcg_out_st(s, TCG_TYPE_I64, + HOST_BIG_ENDIAN ? ldst->datalo_reg : ldst->datahi_reg, + TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[1].ref_slot)); + + tcg_out_helper_load_slots(s, nmov, mov, parm); + + if (arg_slot_reg_p(loc->arg_slot)) { + tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[loc->arg_slot], + TCG_REG_CALL_STACK, + arg_slot_stk_ofs(loc->ref_slot)); + } else { + tcg_debug_assert(parm->ntmp != 0); + tcg_out_addi_ptr(s, parm->tmp[0], TCG_REG_CALL_STACK, + arg_slot_stk_ofs(loc->ref_slot)); + tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0], + TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc->arg_slot)); + } + next_arg += 2; + break; + + default: + g_assert_not_reached(); + } - tcg_out_helper_load_slots(s, nmov, mov, parm); tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg); } From e63b8a29834d7fcc5574288852f78a5c70f19015 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 8 Nov 2022 09:23:54 +1100 Subject: [PATCH 37/74] tcg: Introduce atom_and_align_for_opc Examine MemOp for atomicity and alignment, adjusting alignment as required to implement atomicity on the host. 
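For illustration, a backend's prepare_host_addr() is expected to consume the result along these lines (a sketch only; MO_ATOM_IFALIGN stands in for the host's native atomicity, and the real calls appear in the per-backend patches that follow):

    TCGAtomAlign aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    unsigned a_mask = (1u << aa.align) - 1;  /* alignment test for the fast path */
    /* aa.atom is the atomicity the emitted load or store must still provide. */
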
Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/tcg.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/tcg/tcg.c b/tcg/tcg.c index 6328611543..c4a4efc8b8 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -220,6 +220,15 @@ static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = { #endif }; +typedef struct { + MemOp atom; /* lg2 bits of atomicity required */ + MemOp align; /* lg2 bits of alignment to use */ +} TCGAtomAlign; + +static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc, + MemOp host_atom, bool allow_two_ops) + __attribute__((unused)); + TCGContext tcg_init_ctx; __thread TCGContext *tcg_ctx; @@ -5230,6 +5239,92 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) } } +/** + * atom_and_align_for_opc: + * @s: tcg context + * @opc: memory operation code + * @host_atom: MO_ATOM_{IFALIGN,WITHIN16,SUBALIGN} for host operations + * @allow_two_ops: true if we are prepared to issue two operations + * + * Return the alignment and atomicity to use for the inline fast path + * for the given memory operation. The alignment may be larger than + * that specified in @opc, and the correct alignment will be diagnosed + * by the slow path helper. + * + * If @allow_two_ops, the host is prepared to test for 2x alignment, + * and issue two loads or stores for subalignment. + */ +static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc, + MemOp host_atom, bool allow_two_ops) +{ + MemOp align = get_alignment_bits(opc); + MemOp size = opc & MO_SIZE; + MemOp half = size ? size - 1 : 0; + MemOp atmax; + MemOp atom; + + /* When serialized, no further atomicity required. */ + if (s->gen_tb->cflags & CF_PARALLEL) { + atom = opc & MO_ATOM_MASK; + } else { + atom = MO_ATOM_NONE; + } + + switch (atom) { + case MO_ATOM_NONE: + /* The operation requires no specific atomicity. */ + atmax = MO_8; + break; + + case MO_ATOM_IFALIGN: + atmax = size; + break; + + case MO_ATOM_IFALIGN_PAIR: + atmax = half; + break; + + case MO_ATOM_WITHIN16: + atmax = size; + if (size == MO_128) { + /* Misalignment implies !within16, and therefore no atomicity. */ + } else if (host_atom != MO_ATOM_WITHIN16) { + /* The host does not implement within16, so require alignment. */ + align = MAX(align, size); + } + break; + + case MO_ATOM_WITHIN16_PAIR: + atmax = size; + /* + * Misalignment implies !within16, and therefore half atomicity. + * Any host prepared for two operations can implement this with + * half alignment. + */ + if (host_atom != MO_ATOM_WITHIN16 && allow_two_ops) { + align = MAX(align, half); + } + break; + + case MO_ATOM_SUBALIGN: + atmax = size; + if (host_atom != MO_ATOM_SUBALIGN) { + /* If unaligned but not odd, there are subobjects up to half. */ + if (allow_two_ops) { + align = MAX(align, half); + } else { + align = MAX(align, size); + } + } + break; + + default: + g_assert_not_reached(); + } + + return (TCGAtomAlign){ .atom = atmax, .align = align }; +} + /* * Similarly for qemu_ld/st slow path helpers. * We must re-implement tcg_gen_callN and tcg_reg_alloc_call simultaneously, From 1c5322d90cb0c5a3cf4723992f4baa21691542ca Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 17 Apr 2023 09:33:08 +0200 Subject: [PATCH 38/74] tcg/i386: Use atom_and_align_for_opc No change to the ultimate load/store routines yet, so some atomicity conditions not yet honored, but plumbs the change to alignment through the relevant functions. 
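Concretely, HostAddress gains a TCGAtomAlign member and the alignment mask is now derived from it rather than directly from the MemOp, roughly (a sketch of the hunks in this patch):

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_mask = (1 << h->aa.align) - 1;   /* was: (1 << get_alignment_bits(opc)) - 1 */
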
Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/i386/tcg-target.c.inc | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index 6d55ba5a1c..3b8528e332 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -1774,6 +1774,7 @@ typedef struct { int index; int ofs; int seg; + TCGAtomAlign aa; } HostAddress; bool tcg_target_has_memory_bswap(MemOp memop) @@ -1895,8 +1896,18 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, { TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); - unsigned a_bits = get_alignment_bits(opc); - unsigned a_mask = (1 << a_bits) - 1; + unsigned a_mask; + +#ifdef CONFIG_SOFTMMU + h->index = TCG_REG_L0; + h->ofs = 0; + h->seg = 0; +#else + *h = x86_guest_base; +#endif + h->base = addrlo; + h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); + a_mask = (1 << h->aa.align) - 1; #ifdef CONFIG_SOFTMMU int cmp_ofs = is_ld ? offsetof(CPUTLBEntry, addr_read) @@ -1946,7 +1957,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, * copy the address and mask. For lesser alignments, check that we don't * cross pages for the complete access. */ - if (a_bits >= s_bits) { + if (a_mask >= s_mask) { tcg_out_mov(s, ttype, TCG_REG_L1, addrlo); } else { tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1, @@ -1977,13 +1988,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, /* TLB Hit. */ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0, offsetof(CPUTLBEntry, addend)); - - *h = (HostAddress) { - .base = addrlo, - .index = TCG_REG_L0, - }; #else - if (a_bits) { + if (a_mask) { ldst = new_ldst_label(s); ldst->is_ld = is_ld; @@ -1997,9 +2003,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->label_ptr[0] = s->code_ptr; s->code_ptr += 4; } - - *h = x86_guest_base; - h->base = addrlo; #endif return ldst; From 64741d9902ca1c0c111c53baa6bc11a55c2474c9 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Fri, 21 Apr 2023 17:12:15 +0100 Subject: [PATCH 39/74] tcg/aarch64: Use atom_and_align_for_opc Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/aarch64/tcg-target.c.inc | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index 0cc719d799..ea4108d59c 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -1593,6 +1593,7 @@ typedef struct { TCGReg base; TCGReg index; TCGType index_ext; + TCGAtomAlign aa; } HostAddress; bool tcg_target_has_memory_bswap(MemOp memop) @@ -1646,8 +1647,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); - unsigned a_bits = get_alignment_bits(opc); - unsigned a_mask = (1u << a_bits) - 1; + unsigned a_mask; + + h->aa = atom_and_align_for_opc(s, opc, + have_lse2 ? MO_ATOM_WITHIN16 + : MO_ATOM_IFALIGN, + false); + a_mask = (1 << h->aa.align) - 1; #ifdef CONFIG_SOFTMMU unsigned s_bits = opc & MO_SIZE; @@ -1693,7 +1699,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, * bits within the address. For unaligned access, we check that we don't * cross pages using the address of the last byte of the access. 
*/ - if (a_bits >= s_bits) { + if (a_mask >= s_mask) { x3 = addr_reg; } else { tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64, @@ -1713,11 +1719,9 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->label_ptr[0] = s->code_ptr; tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); - *h = (HostAddress){ - .base = TCG_REG_X1, - .index = addr_reg, - .index_ext = addr_type - }; + h->base = TCG_REG_X1, + h->index = addr_reg; + h->index_ext = addr_type; #else if (a_mask) { ldst = new_ldst_label(s); @@ -1735,17 +1739,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, } if (USE_GUEST_BASE) { - *h = (HostAddress){ - .base = TCG_REG_GUEST_BASE, - .index = addr_reg, - .index_ext = addr_type - }; + h->base = TCG_REG_GUEST_BASE; + h->index = addr_reg; + h->index_ext = addr_type; } else { - *h = (HostAddress){ - .base = addr_reg, - .index = TCG_REG_XZR, - .index_ext = TCG_TYPE_I64 - }; + h->base = addr_reg; + h->index = TCG_REG_XZR; + h->index_ext = TCG_TYPE_I64; } #endif From 3e3d994276058db45dae3998f7fc21608c65fcc4 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 22 Apr 2023 06:48:58 +0100 Subject: [PATCH 40/74] tcg/arm: Use atom_and_align_for_opc No change to the ultimate load/store routines yet, so some atomicity conditions not yet honored, but plumbs the change to alignment through the relevant functions. Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/arm/tcg-target.c.inc | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index e5aed03247..add8cc1fd5 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -1323,6 +1323,7 @@ typedef struct { TCGReg base; int index; bool index_scratch; + TCGAtomAlign aa; } HostAddress; bool tcg_target_has_memory_bswap(MemOp memop) @@ -1379,8 +1380,26 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, { TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); - MemOp a_bits = get_alignment_bits(opc); - unsigned a_mask = (1 << a_bits) - 1; + unsigned a_mask; + +#ifdef CONFIG_SOFTMMU + *h = (HostAddress){ + .cond = COND_AL, + .base = addrlo, + .index = TCG_REG_R1, + .index_scratch = true, + }; +#else + *h = (HostAddress){ + .cond = COND_AL, + .base = addrlo, + .index = guest_base ? TCG_REG_GUEST_BASE : -1, + .index_scratch = false, + }; +#endif + + h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); + a_mask = (1 << h->aa.align) - 1; #ifdef CONFIG_SOFTMMU int mem_index = get_mmuidx(oi); @@ -1469,13 +1488,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, if (TARGET_LONG_BITS == 64) { tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); } - - *h = (HostAddress){ - .cond = COND_AL, - .base = addrlo, - .index = TCG_REG_R1, - .index_scratch = true, - }; #else if (a_mask) { ldst = new_ldst_label(s); @@ -1484,18 +1496,11 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->addrlo_reg = addrlo; ldst->addrhi_reg = addrhi; - /* We are expecting a_bits to max out at 7 */ + /* We are expecting alignment to max out at 7 */ tcg_debug_assert(a_mask <= 0xff); /* tst addr, #mask */ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); } - - *h = (HostAddress){ - .cond = COND_AL, - .base = addrlo, - .index = guest_base ? 
TCG_REG_GUEST_BASE : -1, - .index_scratch = false, - }; #endif return ldst; From 7658a6cf567b66b1c118292da2c81789dc0643e7 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 18 Apr 2023 19:12:13 +0200 Subject: [PATCH 41/74] tcg/loongarch64: Use atom_and_align_for_opc Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/loongarch64/tcg-target.c.inc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc index d26174dde5..07d35f92fa 100644 --- a/tcg/loongarch64/tcg-target.c.inc +++ b/tcg/loongarch64/tcg-target.c.inc @@ -826,6 +826,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) typedef struct { TCGReg base; TCGReg index; + TCGAtomAlign aa; } HostAddress; bool tcg_target_has_memory_bswap(MemOp memop) @@ -845,7 +846,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, { TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); - unsigned a_bits = get_alignment_bits(opc); + MemOp a_bits; + + h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); + a_bits = h->aa.align; #ifdef CONFIG_SOFTMMU unsigned s_bits = opc & MO_SIZE; From c0cafab5a52ea3830b2e3605116170106c5d044c Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 25 Apr 2023 13:36:39 +0100 Subject: [PATCH 42/74] tcg/mips: Use atom_and_align_for_opc Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/mips/tcg-target.c.inc | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index cd0254a0d7..3f3fe5b991 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -1138,7 +1138,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) typedef struct { TCGReg base; - MemOp align; + TCGAtomAlign aa; } HostAddress; bool tcg_target_has_memory_bswap(MemOp memop) @@ -1158,11 +1158,15 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, { TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); - unsigned a_bits = get_alignment_bits(opc); + MemOp a_bits; unsigned s_bits = opc & MO_SIZE; - unsigned a_mask = (1 << a_bits) - 1; + unsigned a_mask; TCGReg base; + h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); + a_bits = h->aa.align; + a_mask = (1 << a_bits) - 1; + #ifdef CONFIG_SOFTMMU unsigned s_mask = (1 << s_bits) - 1; int mem_index = get_mmuidx(oi); @@ -1281,7 +1285,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, #endif h->base = base; - h->align = a_bits; return ldst; } @@ -1394,7 +1397,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true); - if (use_mips32r6_instructions || h.align >= (opc & MO_SIZE)) { + if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) { tcg_out_qemu_ld_direct(s, datalo, datahi, h.base, opc, data_type); } else { tcg_out_qemu_ld_unalign(s, datalo, datahi, h.base, opc, data_type); @@ -1481,7 +1484,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false); - if (use_mips32r6_instructions || h.align >= (opc & MO_SIZE)) { + if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) { tcg_out_qemu_st_direct(s, datalo, datahi, h.base, opc); } else { tcg_out_qemu_st_unalign(s, datalo, datahi, h.base, opc); From 1bac4697199dd9aa5d8d0e3c56d0ea52b2bd0fa1 Mon Sep 17 00:00:00 2001 
From: Richard Henderson Date: Wed, 19 Apr 2023 10:45:00 +0200 Subject: [PATCH 43/74] tcg/ppc: Use atom_and_align_for_opc Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.c.inc | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc index b62a163014..b5c49895f3 100644 --- a/tcg/ppc/tcg-target.c.inc +++ b/tcg/ppc/tcg-target.c.inc @@ -2015,6 +2015,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) typedef struct { TCGReg base; TCGReg index; + TCGAtomAlign aa; } HostAddress; bool tcg_target_has_memory_bswap(MemOp memop) @@ -2034,7 +2035,23 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, { TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); - unsigned a_bits = get_alignment_bits(opc); + MemOp a_bits; + + /* + * Book II, Section 1.4, Single-Copy Atomicity, specifies: + * + * Before 3.0, "An access that is not atomic is performed as a set of + * smaller disjoint atomic accesses. In general, the number and alignment + * of these accesses are implementation-dependent." Thus MO_ATOM_IFALIGN. + * + * As of 3.0, "the non-atomic access is performed as described in + * the corresponding list", which matches MO_ATOM_SUBALIGN. + */ + h->aa = atom_and_align_for_opc(s, opc, + have_isa_3_00 ? MO_ATOM_SUBALIGN + : MO_ATOM_IFALIGN, + false); + a_bits = h->aa.align; #ifdef CONFIG_SOFTMMU int mem_index = get_mmuidx(oi); From 37e523f04b09f2e653629ac16206512d8a5ebc85 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 19 Apr 2023 11:50:36 +0200 Subject: [PATCH 44/74] tcg/riscv: Use atom_and_align_for_opc Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/riscv/tcg-target.c.inc | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index 37870c89fc..1fc1a9199b 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -910,8 +910,11 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, { TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); - unsigned a_bits = get_alignment_bits(opc); - unsigned a_mask = (1u << a_bits) - 1; + TCGAtomAlign aa; + unsigned a_mask; + + aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); + a_mask = (1u << aa.align) - 1; #ifdef CONFIG_SOFTMMU unsigned s_bits = opc & MO_SIZE; @@ -944,7 +947,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, * cross pages using the address of the last byte of the access. */ addr_adj = addr_reg; - if (a_bits < s_bits) { + if (a_mask < s_mask) { addr_adj = TCG_REG_TMP0; tcg_out_opc_imm(s, TARGET_LONG_BITS == 32 ? OPC_ADDIW : OPC_ADDI, addr_adj, addr_reg, s_mask - a_mask); @@ -983,8 +986,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, ldst->oi = oi; ldst->addrlo_reg = addr_reg; - /* We are expecting a_bits max 7, so we can always use andi. */ - tcg_debug_assert(a_bits < 12); + /* We are expecting alignment max 7, so we can always use andi. 
*/ + tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12)); tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask); ldst->label_ptr[0] = s->code_ptr; From 00406e6d3f3e4d3f852af9991a0e03df66362de6 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 19 Apr 2023 16:21:55 +0200 Subject: [PATCH 45/74] tcg/s390x: Use atom_and_align_for_opc Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/s390x/tcg-target.c.inc | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc index 22f0206b5a..8e34b214fc 100644 --- a/tcg/s390x/tcg-target.c.inc +++ b/tcg/s390x/tcg-target.c.inc @@ -1572,6 +1572,7 @@ typedef struct { TCGReg base; TCGReg index; int disp; + TCGAtomAlign aa; } HostAddress; bool tcg_target_has_memory_bswap(MemOp memop) @@ -1733,8 +1734,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, { TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); - unsigned a_bits = get_alignment_bits(opc); - unsigned a_mask = (1u << a_bits) - 1; + unsigned a_mask; + + h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); + a_mask = (1 << h->aa.align) - 1; #ifdef CONFIG_SOFTMMU unsigned s_bits = opc & MO_SIZE; @@ -1764,7 +1767,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, * bits within the address. For unaligned access, we check that we don't * cross pages using the address of the last byte of the access. */ - a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask); + a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask); tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; if (a_off == 0) { tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask); @@ -1806,7 +1809,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->addrlo_reg = addr_reg; /* We are expecting a_bits to max out at 7, much lower than TMLL. */ - tcg_debug_assert(a_bits < 16); + tcg_debug_assert(a_mask <= 0xffff); tcg_out_insn(s, RI, TMLL, addr_reg, a_mask); tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */ From 9ca6343103e13968b44aee56e34523bda13be78a Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 25 Apr 2023 13:46:09 +0100 Subject: [PATCH 46/74] tcg/sparc64: Use atom_and_align_for_opc Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson --- tcg/sparc64/tcg-target.c.inc | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index bb23038529..9676b745a2 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -1009,6 +1009,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) typedef struct { TCGReg base; TCGReg index; + TCGAtomAlign aa; } HostAddress; bool tcg_target_has_memory_bswap(MemOp memop) @@ -1028,13 +1029,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, { TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); - unsigned a_bits = get_alignment_bits(opc); - unsigned s_bits = opc & MO_SIZE; + MemOp s_bits = opc & MO_SIZE; unsigned a_mask; /* We don't support unaligned accesses. */ - a_bits = MAX(a_bits, s_bits); - a_mask = (1u << a_bits) - 1; + h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); + h->aa.align = MAX(h->aa.align, s_bits); + a_mask = (1u << h->aa.align) - 1; #ifdef CONFIG_SOFTMMU int mem_index = get_mmuidx(oi); @@ -1086,11 +1087,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, cc = TARGET_LONG_BITS == 64 ? 
BPCC_XCC : BPCC_ICC; tcg_out_bpcc0(s, COND_NE, BPCC_PN | cc, 0); #else - if (a_bits != s_bits) { - /* - * Test for at least natural alignment, and defer - * everything else to the helper functions. - */ + /* + * If the size equals the required alignment, we can skip the test + * and allow host SIGBUS to deliver SIGBUS to the guest. + * Otherwise, test for at least natural alignment and defer + * everything else to the helper functions. + */ + if (s_bits != get_alignment_bits(opc)) { tcg_debug_assert(check_fit_tl(a_mask, 13)); tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC); From a1429ca26e13bdfd10f16348c2d9e5d2a23c1377 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 14 May 2023 23:13:46 -0700 Subject: [PATCH 47/74] tcg: Split out memory ops to tcg-op-ldst.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- tcg/meson.build | 1 + tcg/tcg-op-ldst.c | 1006 +++++++++++++++++++++++++++++++++++++++++++++ tcg/tcg-op.c | 974 ------------------------------------------- 3 files changed, 1007 insertions(+), 974 deletions(-) create mode 100644 tcg/tcg-op-ldst.c diff --git a/tcg/meson.build b/tcg/meson.build index c4c63b19d4..f56c465f4d 100644 --- a/tcg/meson.build +++ b/tcg/meson.build @@ -6,6 +6,7 @@ tcg_ss.add(files( 'tcg.c', 'tcg-common.c', 'tcg-op.c', + 'tcg-op-ldst.c', 'tcg-op-gvec.c', 'tcg-op-vec.c', )) diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c new file mode 100644 index 0000000000..d8503d7da4 --- /dev/null +++ b/tcg/tcg-op-ldst.c @@ -0,0 +1,1006 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "exec/exec-all.h" +#include "tcg/tcg.h" +#include "tcg/tcg-temp-internal.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-mo.h" +#include "exec/plugin-gen.h" +#include "tcg-internal.h" + + +static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) +{ + /* Trigger the asserts within as early as possible. 
*/ + unsigned a_bits = get_alignment_bits(op); + + /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */ + if (a_bits == (op & MO_SIZE)) { + op = (op & ~MO_AMASK) | MO_ALIGN; + } + + switch (op & MO_SIZE) { + case MO_8: + op &= ~MO_BSWAP; + break; + case MO_16: + break; + case MO_32: + if (!is64) { + op &= ~MO_SIGN; + } + break; + case MO_64: + if (is64) { + op &= ~MO_SIGN; + break; + } + /* fall through */ + default: + g_assert_not_reached(); + } + if (st) { + op &= ~MO_SIGN; + } + return op; +} + +static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, + MemOp memop, TCGArg idx) +{ + MemOpIdx oi = make_memop_idx(memop, idx); +#if TARGET_LONG_BITS == 32 + tcg_gen_op3i_i32(opc, val, addr, oi); +#else + if (TCG_TARGET_REG_BITS == 32) { + tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi); + } else { + tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_i64_arg(addr), oi); + } +#endif +} + +static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr, + MemOp memop, TCGArg idx) +{ + MemOpIdx oi = make_memop_idx(memop, idx); +#if TARGET_LONG_BITS == 32 + if (TCG_TARGET_REG_BITS == 32) { + tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi); + } else { + tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_i32_arg(addr), oi); + } +#else + if (TCG_TARGET_REG_BITS == 32) { + tcg_gen_op5i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), + TCGV_LOW(addr), TCGV_HIGH(addr), oi); + } else { + tcg_gen_op3i_i64(opc, val, addr, oi); + } +#endif +} + +static void tcg_gen_req_mo(TCGBar type) +{ +#ifdef TCG_GUEST_DEFAULT_MO + type &= TCG_GUEST_DEFAULT_MO; +#endif + type &= ~TCG_TARGET_DEFAULT_MO; + if (type) { + tcg_gen_mb(type | TCG_BAR_SC); + } +} + +static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr) +{ +#ifdef CONFIG_PLUGIN + if (tcg_ctx->plugin_insn != NULL) { + /* Save a copy of the vaddr for use after a load. */ + TCGv temp = tcg_temp_new(); + tcg_gen_mov_tl(temp, vaddr); + return temp; + } +#endif + return vaddr; +} + +static void plugin_gen_mem_callbacks(TCGv vaddr, MemOpIdx oi, + enum qemu_plugin_mem_rw rw) +{ +#ifdef CONFIG_PLUGIN + if (tcg_ctx->plugin_insn != NULL) { + qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw); + plugin_gen_empty_mem_callback(vaddr, info); + tcg_temp_free(vaddr); + } +#endif +} + +void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) +{ + MemOp orig_memop; + MemOpIdx oi; + + tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + memop = tcg_canonicalize_memop(memop, 0, 0); + oi = make_memop_idx(memop, idx); + + orig_memop = memop; + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { + memop &= ~MO_BSWAP; + /* The bswap primitive benefits from zero-extended input. */ + if ((memop & MO_SSIZE) == MO_SW) { + memop &= ~MO_SIGN; + } + } + + addr = plugin_prep_mem_callbacks(addr); + gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); + + if ((orig_memop ^ memop) & MO_BSWAP) { + switch (orig_memop & MO_SIZE) { + case MO_16: + tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN + ? 
TCG_BSWAP_IZ | TCG_BSWAP_OS + : TCG_BSWAP_IZ | TCG_BSWAP_OZ)); + break; + case MO_32: + tcg_gen_bswap32_i32(val, val); + break; + default: + g_assert_not_reached(); + } + } +} + +void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) +{ + TCGv_i32 swap = NULL; + MemOpIdx oi; + + tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); + memop = tcg_canonicalize_memop(memop, 0, 1); + oi = make_memop_idx(memop, idx); + + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { + swap = tcg_temp_ebb_new_i32(); + switch (memop & MO_SIZE) { + case MO_16: + tcg_gen_bswap16_i32(swap, val, 0); + break; + case MO_32: + tcg_gen_bswap32_i32(swap, val); + break; + default: + g_assert_not_reached(); + } + val = swap; + memop &= ~MO_BSWAP; + } + + addr = plugin_prep_mem_callbacks(addr); + if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) { + gen_ldst_i32(INDEX_op_qemu_st8_i32, val, addr, memop, idx); + } else { + gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx); + } + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); + + if (swap) { + tcg_temp_free_i32(swap); + } +} + +void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) +{ + MemOp orig_memop; + MemOpIdx oi; + + if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { + tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); + if (memop & MO_SIGN) { + tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31); + } else { + tcg_gen_movi_i32(TCGV_HIGH(val), 0); + } + return; + } + + tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + memop = tcg_canonicalize_memop(memop, 1, 0); + oi = make_memop_idx(memop, idx); + + orig_memop = memop; + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { + memop &= ~MO_BSWAP; + /* The bswap primitive benefits from zero-extended input. */ + if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) { + memop &= ~MO_SIGN; + } + } + + addr = plugin_prep_mem_callbacks(addr); + gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); + + if ((orig_memop ^ memop) & MO_BSWAP) { + int flags = (orig_memop & MO_SIGN + ? 
TCG_BSWAP_IZ | TCG_BSWAP_OS + : TCG_BSWAP_IZ | TCG_BSWAP_OZ); + switch (orig_memop & MO_SIZE) { + case MO_16: + tcg_gen_bswap16_i64(val, val, flags); + break; + case MO_32: + tcg_gen_bswap32_i64(val, val, flags); + break; + case MO_64: + tcg_gen_bswap64_i64(val, val); + break; + default: + g_assert_not_reached(); + } + } +} + +void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) +{ + TCGv_i64 swap = NULL; + MemOpIdx oi; + + if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { + tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop); + return; + } + + tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); + memop = tcg_canonicalize_memop(memop, 1, 1); + oi = make_memop_idx(memop, idx); + + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { + swap = tcg_temp_ebb_new_i64(); + switch (memop & MO_SIZE) { + case MO_16: + tcg_gen_bswap16_i64(swap, val, 0); + break; + case MO_32: + tcg_gen_bswap32_i64(swap, val, 0); + break; + case MO_64: + tcg_gen_bswap64_i64(swap, val); + break; + default: + g_assert_not_reached(); + } + val = swap; + memop &= ~MO_BSWAP; + } + + addr = plugin_prep_mem_callbacks(addr); + gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx); + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); + + if (swap) { + tcg_temp_free_i64(swap); + } +} + +/* + * Return true if @mop, without knowledge of the pointer alignment, + * does not require 16-byte atomicity, and it would be adventagous + * to avoid a call to a helper function. + */ +static bool use_two_i64_for_i128(MemOp mop) +{ +#ifdef CONFIG_SOFTMMU + /* Two softmmu tlb lookups is larger than one function call. */ + return false; +#else + /* + * For user-only, two 64-bit operations may well be smaller than a call. + * Determine if that would be legal for the requested atomicity. + */ + switch (mop & MO_ATOM_MASK) { + case MO_ATOM_NONE: + case MO_ATOM_IFALIGN_PAIR: + return true; + case MO_ATOM_IFALIGN: + case MO_ATOM_SUBALIGN: + case MO_ATOM_WITHIN16: + case MO_ATOM_WITHIN16_PAIR: + /* In a serialized context, no atomicity is required. */ + return !(tcg_ctx->gen_tb->cflags & CF_PARALLEL); + default: + g_assert_not_reached(); + } +#endif +} + +static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) +{ + MemOp mop_1 = orig, mop_2; + + tcg_debug_assert((orig & MO_SIZE) == MO_128); + tcg_debug_assert((orig & MO_SIGN) == 0); + + /* Reduce the size to 64-bit. */ + mop_1 = (mop_1 & ~MO_SIZE) | MO_64; + + /* Retain the alignment constraints of the original. */ + switch (orig & MO_AMASK) { + case MO_UNALN: + case MO_ALIGN_2: + case MO_ALIGN_4: + mop_2 = mop_1; + break; + case MO_ALIGN_8: + /* Prefer MO_ALIGN+MO_64 to MO_ALIGN_8+MO_64. */ + mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN; + mop_2 = mop_1; + break; + case MO_ALIGN: + /* Second has 8-byte alignment; first has 16-byte alignment. */ + mop_2 = mop_1; + mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN_16; + break; + case MO_ALIGN_16: + case MO_ALIGN_32: + case MO_ALIGN_64: + /* Second has 8-byte alignment; first retains original. */ + mop_2 = (mop_1 & ~MO_AMASK) | MO_ALIGN; + break; + default: + g_assert_not_reached(); + } + + /* Use a memory ordering implemented by the host. 
*/ + if ((orig & MO_BSWAP) && !tcg_target_has_memory_bswap(mop_1)) { + mop_1 &= ~MO_BSWAP; + mop_2 &= ~MO_BSWAP; + } + + ret[0] = mop_1; + ret[1] = mop_2; +} + +#if TARGET_LONG_BITS == 64 +#define tcg_temp_ebb_new tcg_temp_ebb_new_i64 +#else +#define tcg_temp_ebb_new tcg_temp_ebb_new_i32 +#endif + +void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) +{ + const MemOpIdx oi = make_memop_idx(memop, idx); + + tcg_debug_assert((memop & MO_SIZE) == MO_128); + tcg_debug_assert((memop & MO_SIGN) == 0); + + tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + addr = plugin_prep_mem_callbacks(addr); + + /* TODO: For now, force 32-bit hosts to use the helper. */ + if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) { + TCGv_i64 lo, hi; + TCGArg addr_arg; + MemOpIdx adj_oi; + bool need_bswap = false; + + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { + lo = TCGV128_HIGH(val); + hi = TCGV128_LOW(val); + adj_oi = make_memop_idx(memop & ~MO_BSWAP, idx); + need_bswap = true; + } else { + lo = TCGV128_LOW(val); + hi = TCGV128_HIGH(val); + adj_oi = oi; + } + +#if TARGET_LONG_BITS == 32 + addr_arg = tcgv_i32_arg(addr); +#else + addr_arg = tcgv_i64_arg(addr); +#endif + tcg_gen_op4ii_i64(INDEX_op_qemu_ld_i128, lo, hi, addr_arg, adj_oi); + + if (need_bswap) { + tcg_gen_bswap64_i64(lo, lo); + tcg_gen_bswap64_i64(hi, hi); + } + } else if (use_two_i64_for_i128(memop)) { + MemOp mop[2]; + TCGv addr_p8; + TCGv_i64 x, y; + + canonicalize_memop_i128_as_i64(mop, memop); + + /* + * Since there are no global TCGv_i128, there is no visible state + * changed if the second load faults. Load directly into the two + * subwords. + */ + if ((memop & MO_BSWAP) == MO_LE) { + x = TCGV128_LOW(val); + y = TCGV128_HIGH(val); + } else { + x = TCGV128_HIGH(val); + y = TCGV128_LOW(val); + } + + gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr, mop[0], idx); + + if ((mop[0] ^ memop) & MO_BSWAP) { + tcg_gen_bswap64_i64(x, x); + } + + addr_p8 = tcg_temp_ebb_new(); + tcg_gen_addi_tl(addr_p8, addr, 8); + gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8, mop[1], idx); + tcg_temp_free(addr_p8); + + if ((mop[0] ^ memop) & MO_BSWAP) { + tcg_gen_bswap64_i64(y, y); + } + } else { + gen_helper_ld_i128(val, cpu_env, addr, tcg_constant_i32(oi)); + } + + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); +} + +void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) +{ + const MemOpIdx oi = make_memop_idx(memop, idx); + + tcg_debug_assert((memop & MO_SIZE) == MO_128); + tcg_debug_assert((memop & MO_SIGN) == 0); + + tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST); + addr = plugin_prep_mem_callbacks(addr); + + /* TODO: For now, force 32-bit hosts to use the helper. 
*/ + + if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) { + TCGv_i64 lo, hi; + TCGArg addr_arg; + MemOpIdx adj_oi; + bool need_bswap = false; + + if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { + lo = tcg_temp_new_i64(); + hi = tcg_temp_new_i64(); + tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val)); + tcg_gen_bswap64_i64(hi, TCGV128_LOW(val)); + adj_oi = make_memop_idx(memop & ~MO_BSWAP, idx); + need_bswap = true; + } else { + lo = TCGV128_LOW(val); + hi = TCGV128_HIGH(val); + adj_oi = oi; + } + +#if TARGET_LONG_BITS == 32 + addr_arg = tcgv_i32_arg(addr); +#else + addr_arg = tcgv_i64_arg(addr); +#endif + tcg_gen_op4ii_i64(INDEX_op_qemu_st_i128, lo, hi, addr_arg, adj_oi); + + if (need_bswap) { + tcg_temp_free_i64(lo); + tcg_temp_free_i64(hi); + } + } else if (use_two_i64_for_i128(memop)) { + MemOp mop[2]; + TCGv addr_p8; + TCGv_i64 x, y; + + canonicalize_memop_i128_as_i64(mop, memop); + + if ((memop & MO_BSWAP) == MO_LE) { + x = TCGV128_LOW(val); + y = TCGV128_HIGH(val); + } else { + x = TCGV128_HIGH(val); + y = TCGV128_LOW(val); + } + + addr_p8 = tcg_temp_ebb_new(); + if ((mop[0] ^ memop) & MO_BSWAP) { + TCGv_i64 t = tcg_temp_ebb_new_i64(); + + tcg_gen_bswap64_i64(t, x); + gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr, mop[0], idx); + tcg_gen_bswap64_i64(t, y); + tcg_gen_addi_tl(addr_p8, addr, 8); + gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr_p8, mop[1], idx); + tcg_temp_free_i64(t); + } else { + gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr, mop[0], idx); + tcg_gen_addi_tl(addr_p8, addr, 8); + gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8, mop[1], idx); + } + tcg_temp_free(addr_p8); + } else { + gen_helper_st_i128(cpu_env, addr, val, tcg_constant_i32(oi)); + } + + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); +} + +static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) +{ + switch (opc & MO_SSIZE) { + case MO_SB: + tcg_gen_ext8s_i32(ret, val); + break; + case MO_UB: + tcg_gen_ext8u_i32(ret, val); + break; + case MO_SW: + tcg_gen_ext16s_i32(ret, val); + break; + case MO_UW: + tcg_gen_ext16u_i32(ret, val); + break; + default: + tcg_gen_mov_i32(ret, val); + break; + } +} + +static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc) +{ + switch (opc & MO_SSIZE) { + case MO_SB: + tcg_gen_ext8s_i64(ret, val); + break; + case MO_UB: + tcg_gen_ext8u_i64(ret, val); + break; + case MO_SW: + tcg_gen_ext16s_i64(ret, val); + break; + case MO_UW: + tcg_gen_ext16u_i64(ret, val); + break; + case MO_SL: + tcg_gen_ext32s_i64(ret, val); + break; + case MO_UL: + tcg_gen_ext32u_i64(ret, val); + break; + default: + tcg_gen_mov_i64(ret, val); + break; + } +} + +typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, + TCGv_i32, TCGv_i32, TCGv_i32); +typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, + TCGv_i64, TCGv_i64, TCGv_i32); +typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv, + TCGv_i128, TCGv_i128, TCGv_i32); +typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, + TCGv_i32, TCGv_i32); +typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, + TCGv_i64, TCGv_i32); + +#ifdef CONFIG_ATOMIC64 +# define WITH_ATOMIC64(X) X, +#else +# define WITH_ATOMIC64(X) +#endif +#ifdef CONFIG_CMPXCHG128 +# define WITH_ATOMIC128(X) X, +#else +# define WITH_ATOMIC128(X) +#endif + +static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = { + [MO_8] = gen_helper_atomic_cmpxchgb, + [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le, + [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be, + [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le, + [MO_32 | 
MO_BE] = gen_helper_atomic_cmpxchgl_be, + WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le) + WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be) + WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_cmpxchgo_le) + WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be) +}; + +void tcg_gen_nonatomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, + TCGv_i32 newv, TCGArg idx, MemOp memop) +{ + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); + TCGv_i32 t2 = tcg_temp_ebb_new_i32(); + + tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE); + + tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN); + tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1); + tcg_gen_qemu_st_i32(t2, addr, idx, memop); + tcg_temp_free_i32(t2); + + if (memop & MO_SIGN) { + tcg_gen_ext_i32(retv, t1, memop); + } else { + tcg_gen_mov_i32(retv, t1); + } + tcg_temp_free_i32(t1); +} + +void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, + TCGv_i32 newv, TCGArg idx, MemOp memop) +{ + gen_atomic_cx_i32 gen; + MemOpIdx oi; + + if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { + tcg_gen_nonatomic_cmpxchg_i32(retv, addr, cmpv, newv, idx, memop); + return; + } + + memop = tcg_canonicalize_memop(memop, 0, 0); + gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; + tcg_debug_assert(gen != NULL); + + oi = make_memop_idx(memop & ~MO_SIGN, idx); + gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); + + if (memop & MO_SIGN) { + tcg_gen_ext_i32(retv, retv, memop); + } +} + +void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, + TCGv_i64 newv, TCGArg idx, MemOp memop) +{ + TCGv_i64 t1, t2; + + if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { + tcg_gen_nonatomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv), + TCGV_LOW(newv), idx, memop); + if (memop & MO_SIGN) { + tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31); + } else { + tcg_gen_movi_i32(TCGV_HIGH(retv), 0); + } + return; + } + + t1 = tcg_temp_ebb_new_i64(); + t2 = tcg_temp_ebb_new_i64(); + + tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE); + + tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN); + tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1); + tcg_gen_qemu_st_i64(t2, addr, idx, memop); + tcg_temp_free_i64(t2); + + if (memop & MO_SIGN) { + tcg_gen_ext_i64(retv, t1, memop); + } else { + tcg_gen_mov_i64(retv, t1); + } + tcg_temp_free_i64(t1); +} + +void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, + TCGv_i64 newv, TCGArg idx, MemOp memop) +{ + if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { + tcg_gen_nonatomic_cmpxchg_i64(retv, addr, cmpv, newv, idx, memop); + return; + } + + if ((memop & MO_SIZE) == MO_64) { + gen_atomic_cx_i64 gen; + + memop = tcg_canonicalize_memop(memop, 1, 0); + gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; + if (gen) { + MemOpIdx oi = make_memop_idx(memop, idx); + gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); + return; + } + + gen_helper_exit_atomic(cpu_env); + + /* + * Produce a result for a well-formed opcode stream. This satisfies + * liveness for set before used, which happens before this dead code + * is removed. 
+ */ + tcg_gen_movi_i64(retv, 0); + return; + } + + if (TCG_TARGET_REG_BITS == 32) { + tcg_gen_atomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv), + TCGV_LOW(newv), idx, memop); + if (memop & MO_SIGN) { + tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31); + } else { + tcg_gen_movi_i32(TCGV_HIGH(retv), 0); + } + } else { + TCGv_i32 c32 = tcg_temp_ebb_new_i32(); + TCGv_i32 n32 = tcg_temp_ebb_new_i32(); + TCGv_i32 r32 = tcg_temp_ebb_new_i32(); + + tcg_gen_extrl_i64_i32(c32, cmpv); + tcg_gen_extrl_i64_i32(n32, newv); + tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN); + tcg_temp_free_i32(c32); + tcg_temp_free_i32(n32); + + tcg_gen_extu_i32_i64(retv, r32); + tcg_temp_free_i32(r32); + + if (memop & MO_SIGN) { + tcg_gen_ext_i64(retv, retv, memop); + } + } +} + +void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, + TCGv_i128 newv, TCGArg idx, MemOp memop) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* Inline expansion below is simply too large for 32-bit hosts. */ + gen_atomic_cx_i128 gen = ((memop & MO_BSWAP) == MO_LE + ? gen_helper_nonatomic_cmpxchgo_le + : gen_helper_nonatomic_cmpxchgo_be); + MemOpIdx oi = make_memop_idx(memop, idx); + + tcg_debug_assert((memop & MO_SIZE) == MO_128); + tcg_debug_assert((memop & MO_SIGN) == 0); + + gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); + } else { + TCGv_i128 oldv = tcg_temp_ebb_new_i128(); + TCGv_i128 tmpv = tcg_temp_ebb_new_i128(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 z = tcg_constant_i64(0); + + tcg_gen_qemu_ld_i128(oldv, addr, idx, memop); + + /* Compare i128 */ + tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv)); + tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv)); + tcg_gen_or_i64(t0, t0, t1); + + /* tmpv = equal ? newv : oldv */ + tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z, + TCGV128_LOW(newv), TCGV128_LOW(oldv)); + tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z, + TCGV128_HIGH(newv), TCGV128_HIGH(oldv)); + + /* Unconditional writeback. */ + tcg_gen_qemu_st_i128(tmpv, addr, idx, memop); + tcg_gen_mov_i128(retv, oldv); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + tcg_temp_free_i128(tmpv); + tcg_temp_free_i128(oldv); + } +} + +void tcg_gen_atomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, + TCGv_i128 newv, TCGArg idx, MemOp memop) +{ + gen_atomic_cx_i128 gen; + + if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { + tcg_gen_nonatomic_cmpxchg_i128(retv, addr, cmpv, newv, idx, memop); + return; + } + + tcg_debug_assert((memop & MO_SIZE) == MO_128); + tcg_debug_assert((memop & MO_SIGN) == 0); + gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; + + if (gen) { + MemOpIdx oi = make_memop_idx(memop, idx); + gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); + return; + } + + gen_helper_exit_atomic(cpu_env); + + /* + * Produce a result for a well-formed opcode stream. This satisfies + * liveness for set before used, which happens before this dead code + * is removed. 
+ */ + tcg_gen_movi_i64(TCGV128_LOW(retv), 0); + tcg_gen_movi_i64(TCGV128_HIGH(retv), 0); +} + +static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, + TCGArg idx, MemOp memop, bool new_val, + void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); + TCGv_i32 t2 = tcg_temp_ebb_new_i32(); + + memop = tcg_canonicalize_memop(memop, 0, 0); + + tcg_gen_qemu_ld_i32(t1, addr, idx, memop); + tcg_gen_ext_i32(t2, val, memop); + gen(t2, t1, t2); + tcg_gen_qemu_st_i32(t2, addr, idx, memop); + + tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop); + tcg_temp_free_i32(t1); + tcg_temp_free_i32(t2); +} + +static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, + TCGArg idx, MemOp memop, void * const table[]) +{ + gen_atomic_op_i32 gen; + MemOpIdx oi; + + memop = tcg_canonicalize_memop(memop, 0, 0); + + gen = table[memop & (MO_SIZE | MO_BSWAP)]; + tcg_debug_assert(gen != NULL); + + oi = make_memop_idx(memop & ~MO_SIGN, idx); + gen(ret, cpu_env, addr, val, tcg_constant_i32(oi)); + + if (memop & MO_SIGN) { + tcg_gen_ext_i32(ret, ret, memop); + } +} + +static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, + TCGArg idx, MemOp memop, bool new_val, + void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t2 = tcg_temp_ebb_new_i64(); + + memop = tcg_canonicalize_memop(memop, 1, 0); + + tcg_gen_qemu_ld_i64(t1, addr, idx, memop); + tcg_gen_ext_i64(t2, val, memop); + gen(t2, t1, t2); + tcg_gen_qemu_st_i64(t2, addr, idx, memop); + + tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); +} + +static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, + TCGArg idx, MemOp memop, void * const table[]) +{ + memop = tcg_canonicalize_memop(memop, 1, 0); + + if ((memop & MO_SIZE) == MO_64) { +#ifdef CONFIG_ATOMIC64 + gen_atomic_op_i64 gen; + MemOpIdx oi; + + gen = table[memop & (MO_SIZE | MO_BSWAP)]; + tcg_debug_assert(gen != NULL); + + oi = make_memop_idx(memop & ~MO_SIGN, idx); + gen(ret, cpu_env, addr, val, tcg_constant_i32(oi)); +#else + gen_helper_exit_atomic(cpu_env); + /* Produce a result, so that we have a well-formed opcode stream + with respect to uses of the result in the (dead) code following. 
*/ + tcg_gen_movi_i64(ret, 0); +#endif /* CONFIG_ATOMIC64 */ + } else { + TCGv_i32 v32 = tcg_temp_ebb_new_i32(); + TCGv_i32 r32 = tcg_temp_ebb_new_i32(); + + tcg_gen_extrl_i64_i32(v32, val); + do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table); + tcg_temp_free_i32(v32); + + tcg_gen_extu_i32_i64(ret, r32); + tcg_temp_free_i32(r32); + + if (memop & MO_SIGN) { + tcg_gen_ext_i64(ret, ret, memop); + } + } +} + +#define GEN_ATOMIC_HELPER(NAME, OP, NEW) \ +static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \ + [MO_8] = gen_helper_atomic_##NAME##b, \ + [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \ + [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \ + [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le, \ + [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be, \ + WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le) \ + WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ +}; \ +void tcg_gen_atomic_##NAME##_i32 \ + (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \ +{ \ + if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ + do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \ + } else { \ + do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \ + tcg_gen_##OP##_i32); \ + } \ +} \ +void tcg_gen_atomic_##NAME##_i64 \ + (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \ +{ \ + if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ + do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \ + } else { \ + do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \ + tcg_gen_##OP##_i64); \ + } \ +} + +GEN_ATOMIC_HELPER(fetch_add, add, 0) +GEN_ATOMIC_HELPER(fetch_and, and, 0) +GEN_ATOMIC_HELPER(fetch_or, or, 0) +GEN_ATOMIC_HELPER(fetch_xor, xor, 0) +GEN_ATOMIC_HELPER(fetch_smin, smin, 0) +GEN_ATOMIC_HELPER(fetch_umin, umin, 0) +GEN_ATOMIC_HELPER(fetch_smax, smax, 0) +GEN_ATOMIC_HELPER(fetch_umax, umax, 0) + +GEN_ATOMIC_HELPER(add_fetch, add, 1) +GEN_ATOMIC_HELPER(and_fetch, and, 1) +GEN_ATOMIC_HELPER(or_fetch, or, 1) +GEN_ATOMIC_HELPER(xor_fetch, xor, 1) +GEN_ATOMIC_HELPER(smin_fetch, smin, 1) +GEN_ATOMIC_HELPER(umin_fetch, umin, 1) +GEN_ATOMIC_HELPER(smax_fetch, smax, 1) +GEN_ATOMIC_HELPER(umax_fetch, umax, 1) + +static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_mov_i32(r, b); +} + +static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_mov_i64(r, b); +} + +GEN_ATOMIC_HELPER(xchg, mov2, 0) + +#undef GEN_ATOMIC_HELPER diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index c419228cc4..edbd1c61d7 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -27,7 +27,6 @@ #include "tcg/tcg.h" #include "tcg/tcg-temp-internal.h" #include "tcg/tcg-op.h" -#include "tcg/tcg-mo.h" #include "exec/plugin-gen.h" #include "tcg-internal.h" @@ -2841,976 +2840,3 @@ void tcg_gen_lookup_and_goto_ptr(void) tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr)); tcg_temp_free_ptr(ptr); } - -static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) -{ - /* Trigger the asserts within as early as possible. 
*/ - unsigned a_bits = get_alignment_bits(op); - - /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */ - if (a_bits == (op & MO_SIZE)) { - op = (op & ~MO_AMASK) | MO_ALIGN; - } - - switch (op & MO_SIZE) { - case MO_8: - op &= ~MO_BSWAP; - break; - case MO_16: - break; - case MO_32: - if (!is64) { - op &= ~MO_SIGN; - } - break; - case MO_64: - if (is64) { - op &= ~MO_SIGN; - break; - } - /* fall through */ - default: - g_assert_not_reached(); - } - if (st) { - op &= ~MO_SIGN; - } - return op; -} - -static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, - MemOp memop, TCGArg idx) -{ - MemOpIdx oi = make_memop_idx(memop, idx); -#if TARGET_LONG_BITS == 32 - tcg_gen_op3i_i32(opc, val, addr, oi); -#else - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi); - } else { - tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_i64_arg(addr), oi); - } -#endif -} - -static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr, - MemOp memop, TCGArg idx) -{ - MemOpIdx oi = make_memop_idx(memop, idx); -#if TARGET_LONG_BITS == 32 - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi); - } else { - tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_i32_arg(addr), oi); - } -#else - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_op5i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), - TCGV_LOW(addr), TCGV_HIGH(addr), oi); - } else { - tcg_gen_op3i_i64(opc, val, addr, oi); - } -#endif -} - -static void tcg_gen_req_mo(TCGBar type) -{ -#ifdef TCG_GUEST_DEFAULT_MO - type &= TCG_GUEST_DEFAULT_MO; -#endif - type &= ~TCG_TARGET_DEFAULT_MO; - if (type) { - tcg_gen_mb(type | TCG_BAR_SC); - } -} - -static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr) -{ -#ifdef CONFIG_PLUGIN - if (tcg_ctx->plugin_insn != NULL) { - /* Save a copy of the vaddr for use after a load. */ - TCGv temp = tcg_temp_new(); - tcg_gen_mov_tl(temp, vaddr); - return temp; - } -#endif - return vaddr; -} - -static void plugin_gen_mem_callbacks(TCGv vaddr, MemOpIdx oi, - enum qemu_plugin_mem_rw rw) -{ -#ifdef CONFIG_PLUGIN - if (tcg_ctx->plugin_insn != NULL) { - qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw); - plugin_gen_empty_mem_callback(vaddr, info); - tcg_temp_free(vaddr); - } -#endif -} - -void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) -{ - MemOp orig_memop; - MemOpIdx oi; - - tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); - memop = tcg_canonicalize_memop(memop, 0, 0); - oi = make_memop_idx(memop, idx); - - orig_memop = memop; - if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { - memop &= ~MO_BSWAP; - /* The bswap primitive benefits from zero-extended input. */ - if ((memop & MO_SSIZE) == MO_SW) { - memop &= ~MO_SIGN; - } - } - - addr = plugin_prep_mem_callbacks(addr); - gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); - - if ((orig_memop ^ memop) & MO_BSWAP) { - switch (orig_memop & MO_SIZE) { - case MO_16: - tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN - ? 
TCG_BSWAP_IZ | TCG_BSWAP_OS - : TCG_BSWAP_IZ | TCG_BSWAP_OZ)); - break; - case MO_32: - tcg_gen_bswap32_i32(val, val); - break; - default: - g_assert_not_reached(); - } - } -} - -void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) -{ - TCGv_i32 swap = NULL; - MemOpIdx oi; - - tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); - memop = tcg_canonicalize_memop(memop, 0, 1); - oi = make_memop_idx(memop, idx); - - if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { - swap = tcg_temp_ebb_new_i32(); - switch (memop & MO_SIZE) { - case MO_16: - tcg_gen_bswap16_i32(swap, val, 0); - break; - case MO_32: - tcg_gen_bswap32_i32(swap, val); - break; - default: - g_assert_not_reached(); - } - val = swap; - memop &= ~MO_BSWAP; - } - - addr = plugin_prep_mem_callbacks(addr); - if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) { - gen_ldst_i32(INDEX_op_qemu_st8_i32, val, addr, memop, idx); - } else { - gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx); - } - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); - - if (swap) { - tcg_temp_free_i32(swap); - } -} - -void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) -{ - MemOp orig_memop; - MemOpIdx oi; - - if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { - tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); - if (memop & MO_SIGN) { - tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31); - } else { - tcg_gen_movi_i32(TCGV_HIGH(val), 0); - } - return; - } - - tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); - memop = tcg_canonicalize_memop(memop, 1, 0); - oi = make_memop_idx(memop, idx); - - orig_memop = memop; - if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { - memop &= ~MO_BSWAP; - /* The bswap primitive benefits from zero-extended input. */ - if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) { - memop &= ~MO_SIGN; - } - } - - addr = plugin_prep_mem_callbacks(addr); - gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); - - if ((orig_memop ^ memop) & MO_BSWAP) { - int flags = (orig_memop & MO_SIGN - ? 
TCG_BSWAP_IZ | TCG_BSWAP_OS - : TCG_BSWAP_IZ | TCG_BSWAP_OZ); - switch (orig_memop & MO_SIZE) { - case MO_16: - tcg_gen_bswap16_i64(val, val, flags); - break; - case MO_32: - tcg_gen_bswap32_i64(val, val, flags); - break; - case MO_64: - tcg_gen_bswap64_i64(val, val); - break; - default: - g_assert_not_reached(); - } - } -} - -void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) -{ - TCGv_i64 swap = NULL; - MemOpIdx oi; - - if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { - tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop); - return; - } - - tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); - memop = tcg_canonicalize_memop(memop, 1, 1); - oi = make_memop_idx(memop, idx); - - if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { - swap = tcg_temp_ebb_new_i64(); - switch (memop & MO_SIZE) { - case MO_16: - tcg_gen_bswap16_i64(swap, val, 0); - break; - case MO_32: - tcg_gen_bswap32_i64(swap, val, 0); - break; - case MO_64: - tcg_gen_bswap64_i64(swap, val); - break; - default: - g_assert_not_reached(); - } - val = swap; - memop &= ~MO_BSWAP; - } - - addr = plugin_prep_mem_callbacks(addr); - gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); - - if (swap) { - tcg_temp_free_i64(swap); - } -} - -/* - * Return true if @mop, without knowledge of the pointer alignment, - * does not require 16-byte atomicity, and it would be adventagous - * to avoid a call to a helper function. - */ -static bool use_two_i64_for_i128(MemOp mop) -{ -#ifdef CONFIG_SOFTMMU - /* Two softmmu tlb lookups is larger than one function call. */ - return false; -#else - /* - * For user-only, two 64-bit operations may well be smaller than a call. - * Determine if that would be legal for the requested atomicity. - */ - switch (mop & MO_ATOM_MASK) { - case MO_ATOM_NONE: - case MO_ATOM_IFALIGN_PAIR: - return true; - case MO_ATOM_IFALIGN: - case MO_ATOM_SUBALIGN: - case MO_ATOM_WITHIN16: - case MO_ATOM_WITHIN16_PAIR: - /* In a serialized context, no atomicity is required. */ - return !(tcg_ctx->gen_tb->cflags & CF_PARALLEL); - default: - g_assert_not_reached(); - } -#endif -} - -static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) -{ - MemOp mop_1 = orig, mop_2; - - tcg_debug_assert((orig & MO_SIZE) == MO_128); - tcg_debug_assert((orig & MO_SIGN) == 0); - - /* Reduce the size to 64-bit. */ - mop_1 = (mop_1 & ~MO_SIZE) | MO_64; - - /* Retain the alignment constraints of the original. */ - switch (orig & MO_AMASK) { - case MO_UNALN: - case MO_ALIGN_2: - case MO_ALIGN_4: - mop_2 = mop_1; - break; - case MO_ALIGN_8: - /* Prefer MO_ALIGN+MO_64 to MO_ALIGN_8+MO_64. */ - mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN; - mop_2 = mop_1; - break; - case MO_ALIGN: - /* Second has 8-byte alignment; first has 16-byte alignment. */ - mop_2 = mop_1; - mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN_16; - break; - case MO_ALIGN_16: - case MO_ALIGN_32: - case MO_ALIGN_64: - /* Second has 8-byte alignment; first retains original. */ - mop_2 = (mop_1 & ~MO_AMASK) | MO_ALIGN; - break; - default: - g_assert_not_reached(); - } - - /* Use a memory ordering implemented by the host. 
*/ - if ((orig & MO_BSWAP) && !tcg_target_has_memory_bswap(mop_1)) { - mop_1 &= ~MO_BSWAP; - mop_2 &= ~MO_BSWAP; - } - - ret[0] = mop_1; - ret[1] = mop_2; -} - -#if TARGET_LONG_BITS == 64 -#define tcg_temp_ebb_new tcg_temp_ebb_new_i64 -#else -#define tcg_temp_ebb_new tcg_temp_ebb_new_i32 -#endif - -void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) -{ - const MemOpIdx oi = make_memop_idx(memop, idx); - - tcg_debug_assert((memop & MO_SIZE) == MO_128); - tcg_debug_assert((memop & MO_SIGN) == 0); - - tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); - addr = plugin_prep_mem_callbacks(addr); - - /* TODO: For now, force 32-bit hosts to use the helper. */ - if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) { - TCGv_i64 lo, hi; - TCGArg addr_arg; - MemOpIdx adj_oi; - bool need_bswap = false; - - if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { - lo = TCGV128_HIGH(val); - hi = TCGV128_LOW(val); - adj_oi = make_memop_idx(memop & ~MO_BSWAP, idx); - need_bswap = true; - } else { - lo = TCGV128_LOW(val); - hi = TCGV128_HIGH(val); - adj_oi = oi; - } - -#if TARGET_LONG_BITS == 32 - addr_arg = tcgv_i32_arg(addr); -#else - addr_arg = tcgv_i64_arg(addr); -#endif - tcg_gen_op4ii_i64(INDEX_op_qemu_ld_i128, lo, hi, addr_arg, adj_oi); - - if (need_bswap) { - tcg_gen_bswap64_i64(lo, lo); - tcg_gen_bswap64_i64(hi, hi); - } - } else if (use_two_i64_for_i128(memop)) { - MemOp mop[2]; - TCGv addr_p8; - TCGv_i64 x, y; - - canonicalize_memop_i128_as_i64(mop, memop); - - /* - * Since there are no global TCGv_i128, there is no visible state - * changed if the second load faults. Load directly into the two - * subwords. - */ - if ((memop & MO_BSWAP) == MO_LE) { - x = TCGV128_LOW(val); - y = TCGV128_HIGH(val); - } else { - x = TCGV128_HIGH(val); - y = TCGV128_LOW(val); - } - - gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr, mop[0], idx); - - if ((mop[0] ^ memop) & MO_BSWAP) { - tcg_gen_bswap64_i64(x, x); - } - - addr_p8 = tcg_temp_ebb_new(); - tcg_gen_addi_tl(addr_p8, addr, 8); - gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8, mop[1], idx); - tcg_temp_free(addr_p8); - - if ((mop[0] ^ memop) & MO_BSWAP) { - tcg_gen_bswap64_i64(y, y); - } - } else { - gen_helper_ld_i128(val, cpu_env, addr, tcg_constant_i32(oi)); - } - - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); -} - -void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) -{ - const MemOpIdx oi = make_memop_idx(memop, idx); - - tcg_debug_assert((memop & MO_SIZE) == MO_128); - tcg_debug_assert((memop & MO_SIGN) == 0); - - tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST); - addr = plugin_prep_mem_callbacks(addr); - - /* TODO: For now, force 32-bit hosts to use the helper. 
*/ - - if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) { - TCGv_i64 lo, hi; - TCGArg addr_arg; - MemOpIdx adj_oi; - bool need_bswap = false; - - if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { - lo = tcg_temp_new_i64(); - hi = tcg_temp_new_i64(); - tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val)); - tcg_gen_bswap64_i64(hi, TCGV128_LOW(val)); - adj_oi = make_memop_idx(memop & ~MO_BSWAP, idx); - need_bswap = true; - } else { - lo = TCGV128_LOW(val); - hi = TCGV128_HIGH(val); - adj_oi = oi; - } - -#if TARGET_LONG_BITS == 32 - addr_arg = tcgv_i32_arg(addr); -#else - addr_arg = tcgv_i64_arg(addr); -#endif - tcg_gen_op4ii_i64(INDEX_op_qemu_st_i128, lo, hi, addr_arg, adj_oi); - - if (need_bswap) { - tcg_temp_free_i64(lo); - tcg_temp_free_i64(hi); - } - } else if (use_two_i64_for_i128(memop)) { - MemOp mop[2]; - TCGv addr_p8; - TCGv_i64 x, y; - - canonicalize_memop_i128_as_i64(mop, memop); - - if ((memop & MO_BSWAP) == MO_LE) { - x = TCGV128_LOW(val); - y = TCGV128_HIGH(val); - } else { - x = TCGV128_HIGH(val); - y = TCGV128_LOW(val); - } - - addr_p8 = tcg_temp_ebb_new(); - if ((mop[0] ^ memop) & MO_BSWAP) { - TCGv_i64 t = tcg_temp_ebb_new_i64(); - - tcg_gen_bswap64_i64(t, x); - gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr, mop[0], idx); - tcg_gen_bswap64_i64(t, y); - tcg_gen_addi_tl(addr_p8, addr, 8); - gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr_p8, mop[1], idx); - tcg_temp_free_i64(t); - } else { - gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr, mop[0], idx); - tcg_gen_addi_tl(addr_p8, addr, 8); - gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8, mop[1], idx); - } - tcg_temp_free(addr_p8); - } else { - gen_helper_st_i128(cpu_env, addr, val, tcg_constant_i32(oi)); - } - - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); -} - -static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) -{ - switch (opc & MO_SSIZE) { - case MO_SB: - tcg_gen_ext8s_i32(ret, val); - break; - case MO_UB: - tcg_gen_ext8u_i32(ret, val); - break; - case MO_SW: - tcg_gen_ext16s_i32(ret, val); - break; - case MO_UW: - tcg_gen_ext16u_i32(ret, val); - break; - default: - tcg_gen_mov_i32(ret, val); - break; - } -} - -static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc) -{ - switch (opc & MO_SSIZE) { - case MO_SB: - tcg_gen_ext8s_i64(ret, val); - break; - case MO_UB: - tcg_gen_ext8u_i64(ret, val); - break; - case MO_SW: - tcg_gen_ext16s_i64(ret, val); - break; - case MO_UW: - tcg_gen_ext16u_i64(ret, val); - break; - case MO_SL: - tcg_gen_ext32s_i64(ret, val); - break; - case MO_UL: - tcg_gen_ext32u_i64(ret, val); - break; - default: - tcg_gen_mov_i64(ret, val); - break; - } -} - -typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, - TCGv_i32, TCGv_i32, TCGv_i32); -typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, - TCGv_i64, TCGv_i64, TCGv_i32); -typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv, - TCGv_i128, TCGv_i128, TCGv_i32); -typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, - TCGv_i32, TCGv_i32); -typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, - TCGv_i64, TCGv_i32); - -#ifdef CONFIG_ATOMIC64 -# define WITH_ATOMIC64(X) X, -#else -# define WITH_ATOMIC64(X) -#endif -#ifdef CONFIG_CMPXCHG128 -# define WITH_ATOMIC128(X) X, -#else -# define WITH_ATOMIC128(X) -#endif - -static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = { - [MO_8] = gen_helper_atomic_cmpxchgb, - [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le, - [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be, - [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le, - [MO_32 | 
MO_BE] = gen_helper_atomic_cmpxchgl_be, - WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le) - WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be) - WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_cmpxchgo_le) - WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be) -}; - -void tcg_gen_nonatomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, - TCGv_i32 newv, TCGArg idx, MemOp memop) -{ - TCGv_i32 t1 = tcg_temp_ebb_new_i32(); - TCGv_i32 t2 = tcg_temp_ebb_new_i32(); - - tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE); - - tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN); - tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1); - tcg_gen_qemu_st_i32(t2, addr, idx, memop); - tcg_temp_free_i32(t2); - - if (memop & MO_SIGN) { - tcg_gen_ext_i32(retv, t1, memop); - } else { - tcg_gen_mov_i32(retv, t1); - } - tcg_temp_free_i32(t1); -} - -void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, - TCGv_i32 newv, TCGArg idx, MemOp memop) -{ - gen_atomic_cx_i32 gen; - MemOpIdx oi; - - if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { - tcg_gen_nonatomic_cmpxchg_i32(retv, addr, cmpv, newv, idx, memop); - return; - } - - memop = tcg_canonicalize_memop(memop, 0, 0); - gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; - tcg_debug_assert(gen != NULL); - - oi = make_memop_idx(memop & ~MO_SIGN, idx); - gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); - - if (memop & MO_SIGN) { - tcg_gen_ext_i32(retv, retv, memop); - } -} - -void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, - TCGv_i64 newv, TCGArg idx, MemOp memop) -{ - TCGv_i64 t1, t2; - - if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { - tcg_gen_nonatomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv), - TCGV_LOW(newv), idx, memop); - if (memop & MO_SIGN) { - tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31); - } else { - tcg_gen_movi_i32(TCGV_HIGH(retv), 0); - } - return; - } - - t1 = tcg_temp_ebb_new_i64(); - t2 = tcg_temp_ebb_new_i64(); - - tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE); - - tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN); - tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1); - tcg_gen_qemu_st_i64(t2, addr, idx, memop); - tcg_temp_free_i64(t2); - - if (memop & MO_SIGN) { - tcg_gen_ext_i64(retv, t1, memop); - } else { - tcg_gen_mov_i64(retv, t1); - } - tcg_temp_free_i64(t1); -} - -void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, - TCGv_i64 newv, TCGArg idx, MemOp memop) -{ - if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { - tcg_gen_nonatomic_cmpxchg_i64(retv, addr, cmpv, newv, idx, memop); - return; - } - - if ((memop & MO_SIZE) == MO_64) { - gen_atomic_cx_i64 gen; - - memop = tcg_canonicalize_memop(memop, 1, 0); - gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; - if (gen) { - MemOpIdx oi = make_memop_idx(memop, idx); - gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); - return; - } - - gen_helper_exit_atomic(cpu_env); - - /* - * Produce a result for a well-formed opcode stream. This satisfies - * liveness for set before used, which happens before this dead code - * is removed. 
- */ - tcg_gen_movi_i64(retv, 0); - return; - } - - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_atomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv), - TCGV_LOW(newv), idx, memop); - if (memop & MO_SIGN) { - tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31); - } else { - tcg_gen_movi_i32(TCGV_HIGH(retv), 0); - } - } else { - TCGv_i32 c32 = tcg_temp_ebb_new_i32(); - TCGv_i32 n32 = tcg_temp_ebb_new_i32(); - TCGv_i32 r32 = tcg_temp_ebb_new_i32(); - - tcg_gen_extrl_i64_i32(c32, cmpv); - tcg_gen_extrl_i64_i32(n32, newv); - tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN); - tcg_temp_free_i32(c32); - tcg_temp_free_i32(n32); - - tcg_gen_extu_i32_i64(retv, r32); - tcg_temp_free_i32(r32); - - if (memop & MO_SIGN) { - tcg_gen_ext_i64(retv, retv, memop); - } - } -} - -void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, - TCGv_i128 newv, TCGArg idx, MemOp memop) -{ - if (TCG_TARGET_REG_BITS == 32) { - /* Inline expansion below is simply too large for 32-bit hosts. */ - gen_atomic_cx_i128 gen = ((memop & MO_BSWAP) == MO_LE - ? gen_helper_nonatomic_cmpxchgo_le - : gen_helper_nonatomic_cmpxchgo_be); - MemOpIdx oi = make_memop_idx(memop, idx); - - tcg_debug_assert((memop & MO_SIZE) == MO_128); - tcg_debug_assert((memop & MO_SIGN) == 0); - - gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); - } else { - TCGv_i128 oldv = tcg_temp_ebb_new_i128(); - TCGv_i128 tmpv = tcg_temp_ebb_new_i128(); - TCGv_i64 t0 = tcg_temp_ebb_new_i64(); - TCGv_i64 t1 = tcg_temp_ebb_new_i64(); - TCGv_i64 z = tcg_constant_i64(0); - - tcg_gen_qemu_ld_i128(oldv, addr, idx, memop); - - /* Compare i128 */ - tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv)); - tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv)); - tcg_gen_or_i64(t0, t0, t1); - - /* tmpv = equal ? newv : oldv */ - tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z, - TCGV128_LOW(newv), TCGV128_LOW(oldv)); - tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z, - TCGV128_HIGH(newv), TCGV128_HIGH(oldv)); - - /* Unconditional writeback. */ - tcg_gen_qemu_st_i128(tmpv, addr, idx, memop); - tcg_gen_mov_i128(retv, oldv); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i128(tmpv); - tcg_temp_free_i128(oldv); - } -} - -void tcg_gen_atomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, - TCGv_i128 newv, TCGArg idx, MemOp memop) -{ - gen_atomic_cx_i128 gen; - - if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { - tcg_gen_nonatomic_cmpxchg_i128(retv, addr, cmpv, newv, idx, memop); - return; - } - - tcg_debug_assert((memop & MO_SIZE) == MO_128); - tcg_debug_assert((memop & MO_SIGN) == 0); - gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; - - if (gen) { - MemOpIdx oi = make_memop_idx(memop, idx); - gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); - return; - } - - gen_helper_exit_atomic(cpu_env); - - /* - * Produce a result for a well-formed opcode stream. This satisfies - * liveness for set before used, which happens before this dead code - * is removed. 
- */ - tcg_gen_movi_i64(TCGV128_LOW(retv), 0); - tcg_gen_movi_i64(TCGV128_HIGH(retv), 0); -} - -static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, - TCGArg idx, MemOp memop, bool new_val, - void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32)) -{ - TCGv_i32 t1 = tcg_temp_ebb_new_i32(); - TCGv_i32 t2 = tcg_temp_ebb_new_i32(); - - memop = tcg_canonicalize_memop(memop, 0, 0); - - tcg_gen_qemu_ld_i32(t1, addr, idx, memop); - tcg_gen_ext_i32(t2, val, memop); - gen(t2, t1, t2); - tcg_gen_qemu_st_i32(t2, addr, idx, memop); - - tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); -} - -static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, - TCGArg idx, MemOp memop, void * const table[]) -{ - gen_atomic_op_i32 gen; - MemOpIdx oi; - - memop = tcg_canonicalize_memop(memop, 0, 0); - - gen = table[memop & (MO_SIZE | MO_BSWAP)]; - tcg_debug_assert(gen != NULL); - - oi = make_memop_idx(memop & ~MO_SIGN, idx); - gen(ret, cpu_env, addr, val, tcg_constant_i32(oi)); - - if (memop & MO_SIGN) { - tcg_gen_ext_i32(ret, ret, memop); - } -} - -static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, - TCGArg idx, MemOp memop, bool new_val, - void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) -{ - TCGv_i64 t1 = tcg_temp_ebb_new_i64(); - TCGv_i64 t2 = tcg_temp_ebb_new_i64(); - - memop = tcg_canonicalize_memop(memop, 1, 0); - - tcg_gen_qemu_ld_i64(t1, addr, idx, memop); - tcg_gen_ext_i64(t2, val, memop); - gen(t2, t1, t2); - tcg_gen_qemu_st_i64(t2, addr, idx, memop); - - tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); -} - -static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, - TCGArg idx, MemOp memop, void * const table[]) -{ - memop = tcg_canonicalize_memop(memop, 1, 0); - - if ((memop & MO_SIZE) == MO_64) { -#ifdef CONFIG_ATOMIC64 - gen_atomic_op_i64 gen; - MemOpIdx oi; - - gen = table[memop & (MO_SIZE | MO_BSWAP)]; - tcg_debug_assert(gen != NULL); - - oi = make_memop_idx(memop & ~MO_SIGN, idx); - gen(ret, cpu_env, addr, val, tcg_constant_i32(oi)); -#else - gen_helper_exit_atomic(cpu_env); - /* Produce a result, so that we have a well-formed opcode stream - with respect to uses of the result in the (dead) code following. 
*/ - tcg_gen_movi_i64(ret, 0); -#endif /* CONFIG_ATOMIC64 */ - } else { - TCGv_i32 v32 = tcg_temp_ebb_new_i32(); - TCGv_i32 r32 = tcg_temp_ebb_new_i32(); - - tcg_gen_extrl_i64_i32(v32, val); - do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table); - tcg_temp_free_i32(v32); - - tcg_gen_extu_i32_i64(ret, r32); - tcg_temp_free_i32(r32); - - if (memop & MO_SIGN) { - tcg_gen_ext_i64(ret, ret, memop); - } - } -} - -#define GEN_ATOMIC_HELPER(NAME, OP, NEW) \ -static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \ - [MO_8] = gen_helper_atomic_##NAME##b, \ - [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \ - [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \ - [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le, \ - [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be, \ - WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le) \ - WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ -}; \ -void tcg_gen_atomic_##NAME##_i32 \ - (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \ -{ \ - if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ - do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \ - } else { \ - do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \ - tcg_gen_##OP##_i32); \ - } \ -} \ -void tcg_gen_atomic_##NAME##_i64 \ - (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \ -{ \ - if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ - do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \ - } else { \ - do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \ - tcg_gen_##OP##_i64); \ - } \ -} - -GEN_ATOMIC_HELPER(fetch_add, add, 0) -GEN_ATOMIC_HELPER(fetch_and, and, 0) -GEN_ATOMIC_HELPER(fetch_or, or, 0) -GEN_ATOMIC_HELPER(fetch_xor, xor, 0) -GEN_ATOMIC_HELPER(fetch_smin, smin, 0) -GEN_ATOMIC_HELPER(fetch_umin, umin, 0) -GEN_ATOMIC_HELPER(fetch_smax, smax, 0) -GEN_ATOMIC_HELPER(fetch_umax, umax, 0) - -GEN_ATOMIC_HELPER(add_fetch, add, 1) -GEN_ATOMIC_HELPER(and_fetch, and, 1) -GEN_ATOMIC_HELPER(or_fetch, or, 1) -GEN_ATOMIC_HELPER(xor_fetch, xor, 1) -GEN_ATOMIC_HELPER(smin_fetch, smin, 1) -GEN_ATOMIC_HELPER(umin_fetch, umin, 1) -GEN_ATOMIC_HELPER(smax_fetch, smax, 1) -GEN_ATOMIC_HELPER(umax_fetch, umax, 1) - -static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b) -{ - tcg_gen_mov_i32(r, b); -} - -static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b) -{ - tcg_gen_mov_i64(r, b); -} - -GEN_ATOMIC_HELPER(xchg, mov2, 0) - -#undef GEN_ATOMIC_HELPER From c9ad8d27caa01b13f01c22e04788f4e33068afb4 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 8 Mar 2023 12:24:41 -0800 Subject: [PATCH 48/74] tcg: Widen gen_insn_data to uint64_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We already pass uint64_t to restore_state_to_opc; this changes all of the other uses from insn_start through the encoding to decoding. 
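
For illustration only (not part of the change itself), a minimal standalone
sketch of the widened round-trip is below. The ex_* names are local to this
example; the bodies mirror the int64_t encode_sleb128/decode_sleb128 helpers
in this patch, except that the decoder accumulates in uint64_t so that the
shifts stay well-defined, and the encoder assumes an arithmetic right shift
for negative values, as the in-tree helper does. It checks that a full 64-bit
insn_start word survives encoding and decoding independent of TARGET_LONG_BITS.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the widened encode_sleb128(); relies on arithmetic right shift. */
    static uint8_t *ex_encode_sleb128(uint8_t *p, int64_t val)
    {
        int more, byte;

        do {
            byte = val & 0x7f;
            val >>= 7;
            more = !((val == 0 && (byte & 0x40) == 0)
                     || (val == -1 && (byte & 0x40) != 0));
            if (more) {
                byte |= 0x80;
            }
            *p++ = byte;
        } while (more);
        return p;
    }

    /* Mirrors the widened decode_sleb128(); accumulates in uint64_t. */
    static int64_t ex_decode_sleb128(const uint8_t **pp)
    {
        const uint8_t *p = *pp;
        uint64_t val = 0;
        int byte, shift = 0;

        do {
            byte = *p++;
            val |= (uint64_t)(byte & 0x7f) << shift;
            shift += 7;
        } while (byte & 0x80);
        if (shift < 64 && (byte & 0x40)) {
            val |= ~(uint64_t)0 << shift;
        }
        *pp = p;
        return (int64_t)val;
    }

    int main(void)
    {
        /* 64-bit values that a 32-bit target_ulong would have truncated. */
        static const int64_t tests[] = {
            0, 1, -1, 0x7fffffff, -0x80000000LL,
            0x123456789abcdefLL, -0x123456789abcdefLL,
        };
        uint8_t buf[16];

        for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
            const uint8_t *q = buf;

            ex_encode_sleb128(buf, tests[i]);
            assert(ex_decode_sleb128(&q) == tests[i]);
        }
        printf("sleb128 round-trip ok\n");
        return 0;
    }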
Reviewed-by: Anton Johansson Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- accel/tcg/translate-all.c | 28 ++++++++++++++++------------ include/tcg/tcg-op.h | 39 +++++++++------------------------------ include/tcg/tcg-opc.h | 2 +- include/tcg/tcg.h | 30 +++++++++++++++--------------- tcg/tcg.c | 18 ++++-------------- 5 files changed, 45 insertions(+), 72 deletions(-) diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index 5b13281119..7b7d9a5fff 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -72,9 +72,11 @@ QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS > TBContext tb_ctx; -/* Encode VAL as a signed leb128 sequence at P. - Return P incremented past the encoded value. */ -static uint8_t *encode_sleb128(uint8_t *p, target_long val) +/* + * Encode VAL as a signed leb128 sequence at P. + * Return P incremented past the encoded value. + */ +static uint8_t *encode_sleb128(uint8_t *p, int64_t val) { int more, byte; @@ -92,21 +94,23 @@ static uint8_t *encode_sleb128(uint8_t *p, target_long val) return p; } -/* Decode a signed leb128 sequence at *PP; increment *PP past the - decoded value. Return the decoded value. */ -static target_long decode_sleb128(const uint8_t **pp) +/* + * Decode a signed leb128 sequence at *PP; increment *PP past the + * decoded value. Return the decoded value. + */ +static int64_t decode_sleb128(const uint8_t **pp) { const uint8_t *p = *pp; - target_long val = 0; + int64_t val = 0; int byte, shift = 0; do { byte = *p++; - val |= (target_ulong)(byte & 0x7f) << shift; + val |= (int64_t)(byte & 0x7f) << shift; shift += 7; } while (byte & 0x80); if (shift < TARGET_LONG_BITS && (byte & 0x40)) { - val |= -(target_ulong)1 << shift; + val |= -(int64_t)1 << shift; } *pp = p; @@ -132,7 +136,7 @@ static int encode_search(TranslationBlock *tb, uint8_t *block) int i, j, n; for (i = 0, n = tb->icount; i < n; ++i) { - target_ulong prev; + uint64_t prev; for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { if (i == 0) { @@ -444,7 +448,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, /* Dump header and the first instruction */ fprintf(logfile, "OUT: [size=%d]\n", gen_code_size); fprintf(logfile, - " -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n", + " -- guest addr 0x%016" PRIx64 " + tb prologue\n", tcg_ctx->gen_insn_data[insn][0]); chunk_start = tcg_ctx->gen_insn_end_off[insn]; disas(logfile, tb->tc.ptr, chunk_start); @@ -457,7 +461,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, while (insn < tb->icount) { size_t chunk_end = tcg_ctx->gen_insn_end_off[insn]; if (chunk_end > chunk_start) { - fprintf(logfile, " -- guest addr 0x" TARGET_FMT_lx "\n", + fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n", tcg_ctx->gen_insn_data[insn][0]); disas(logfile, tb->tc.ptr + chunk_start, chunk_end - chunk_start); diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h index 4401fa493c..de3b70aa84 100644 --- a/include/tcg/tcg-op.h +++ b/include/tcg/tcg-op.h @@ -723,48 +723,27 @@ static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi) #endif #if TARGET_INSN_START_WORDS == 1 -# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS static inline void tcg_gen_insn_start(target_ulong pc) { - tcg_gen_op1(INDEX_op_insn_start, pc); + TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 64 / TCG_TARGET_REG_BITS); + tcg_set_insn_start_param(op, 0, pc); } -# else -static inline void tcg_gen_insn_start(target_ulong pc) -{ - tcg_gen_op2(INDEX_op_insn_start, (uint32_t)pc, (uint32_t)(pc >> 32)); -} -# endif #elif TARGET_INSN_START_WORDS == 2 -# if 
TARGET_LONG_BITS <= TCG_TARGET_REG_BITS static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1) { - tcg_gen_op2(INDEX_op_insn_start, pc, a1); + TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 2 * 64 / TCG_TARGET_REG_BITS); + tcg_set_insn_start_param(op, 0, pc); + tcg_set_insn_start_param(op, 1, a1); } -# else -static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1) -{ - tcg_gen_op4(INDEX_op_insn_start, - (uint32_t)pc, (uint32_t)(pc >> 32), - (uint32_t)a1, (uint32_t)(a1 >> 32)); -} -# endif #elif TARGET_INSN_START_WORDS == 3 -# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1, target_ulong a2) { - tcg_gen_op3(INDEX_op_insn_start, pc, a1, a2); + TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 3 * 64 / TCG_TARGET_REG_BITS); + tcg_set_insn_start_param(op, 0, pc); + tcg_set_insn_start_param(op, 1, a1); + tcg_set_insn_start_param(op, 2, a2); } -# else -static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1, - target_ulong a2) -{ - tcg_gen_op6(INDEX_op_insn_start, - (uint32_t)pc, (uint32_t)(pc >> 32), - (uint32_t)a1, (uint32_t)(a1 >> 32), - (uint32_t)a2, (uint32_t)(a2 >> 32)); -} -# endif #else # error "Unhandled number of operands to insn_start" #endif diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h index 94cf7c5d6a..29216366d2 100644 --- a/include/tcg/tcg-opc.h +++ b/include/tcg/tcg-opc.h @@ -190,7 +190,7 @@ DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64)) #define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2) /* QEMU specific */ -DEF(insn_start, 0, 0, TLADDR_ARGS * TARGET_INSN_START_WORDS, +DEF(insn_start, 0, 0, DATA64_ARGS * TARGET_INSN_START_WORDS, TCG_OPF_NOT_PRESENT) DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index b19e167e1d..f40de4177d 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -629,7 +629,7 @@ struct TCGContext { TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS]; uint16_t gen_insn_end_off[TCG_MAX_INSNS]; - target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS]; + uint64_t gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS]; /* Exit to translator on overflow. */ sigjmp_buf jmp_trans; @@ -771,24 +771,24 @@ static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v) op->args[arg] = v; } -static inline target_ulong tcg_get_insn_start_param(TCGOp *op, int arg) +static inline uint64_t tcg_get_insn_start_param(TCGOp *op, int arg) { -#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - return tcg_get_insn_param(op, arg); -#else - return tcg_get_insn_param(op, arg * 2) | - ((uint64_t)tcg_get_insn_param(op, arg * 2 + 1) << 32); -#endif + if (TCG_TARGET_REG_BITS == 64) { + return tcg_get_insn_param(op, arg); + } else { + return deposit64(tcg_get_insn_param(op, arg * 2), 32, 32, + tcg_get_insn_param(op, arg * 2 + 1)); + } } -static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v) +static inline void tcg_set_insn_start_param(TCGOp *op, int arg, uint64_t v) { -#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - tcg_set_insn_param(op, arg, v); -#else - tcg_set_insn_param(op, arg * 2, v); - tcg_set_insn_param(op, arg * 2 + 1, v >> 32); -#endif + if (TCG_TARGET_REG_BITS == 64) { + tcg_set_insn_param(op, arg, v); + } else { + tcg_set_insn_param(op, arg * 2, v); + tcg_set_insn_param(op, arg * 2 + 1, v >> 32); + } } /* The last op that was emitted. 
*/ diff --git a/tcg/tcg.c b/tcg/tcg.c index c4a4efc8b8..eb80492e6e 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -2384,13 +2384,8 @@ static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) col += ne_fprintf(f, "\n ----"); for (i = 0; i < TARGET_INSN_START_WORDS; ++i) { - target_ulong a; -#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS - a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]); -#else - a = op->args[i]; -#endif - col += ne_fprintf(f, " " TARGET_FMT_lx, a); + col += ne_fprintf(f, " %016" PRIx64, + tcg_get_insn_start_param(op, i)); } } else if (c == INDEX_op_call) { const TCGHelperInfo *info = tcg_call_info(op); @@ -6081,13 +6076,8 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) } num_insns++; for (i = 0; i < TARGET_INSN_START_WORDS; ++i) { - target_ulong a; -#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS - a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]); -#else - a = op->args[i]; -#endif - s->gen_insn_data[num_insns][i] = a; + s->gen_insn_data[num_insns][i] = + tcg_get_insn_start_param(op, i); } break; case INDEX_op_discard: From 24e46e6c9d9927121ac703db79819c688d2a5c5b Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 26 Apr 2023 22:09:47 +0100 Subject: [PATCH 49/74] accel/tcg: Widen tcg-ldst.h addresses to uint64_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Always pass the target address as uint64_t. Adjust tcg_out_{ld,st}_helper_args to match. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 26 +++++++++--------- accel/tcg/user-exec.c | 26 +++++++++--------- include/tcg/tcg-ldst.h | 26 +++++++++--------- tcg/tcg.c | 62 ++++++++++++++++++++++++++++++++---------- 4 files changed, 87 insertions(+), 53 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 49e49f75a4..5440f68deb 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -2367,7 +2367,7 @@ static uint8_t do_ld1_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra); } -tcg_target_ulong helper_ldub_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr) { tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); @@ -2398,7 +2398,7 @@ static uint16_t do_ld2_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, return ret; } -tcg_target_ulong helper_lduw_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr) { tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); @@ -2425,7 +2425,7 @@ static uint32_t do_ld4_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, return ret; } -tcg_target_ulong helper_ldul_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr) { tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); @@ -2452,7 +2452,7 @@ static uint64_t do_ld8_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, return ret; } -uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, +uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr) { tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); @@ -2464,19 +2464,19 @@ uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 
*/ -tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr) { return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr); } -tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr) { return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr); } -tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr) { return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr); @@ -2544,7 +2544,7 @@ static Int128 do_ld16_mmu(CPUArchState *env, target_ulong addr, return ret; } -Int128 helper_ld16_mmu(CPUArchState *env, target_ulong addr, +Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr, uint32_t oi, uintptr_t retaddr) { tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); @@ -2860,7 +2860,7 @@ static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val, } } -void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val, +void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val, MemOpIdx oi, uintptr_t ra) { MMULookupLocals l; @@ -2895,7 +2895,7 @@ static void do_st2_mmu(CPUArchState *env, target_ulong addr, uint16_t val, do_st_1(env, &l.page[1], b, l.mmu_idx, ra); } -void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, +void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val, MemOpIdx oi, uintptr_t retaddr) { tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); @@ -2922,7 +2922,7 @@ static void do_st4_mmu(CPUArchState *env, target_ulong addr, uint32_t val, (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); } -void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, +void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val, MemOpIdx oi, uintptr_t retaddr) { tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); @@ -2949,7 +2949,7 @@ static void do_st8_mmu(CPUArchState *env, target_ulong addr, uint64_t val, (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); } -void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, +void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val, MemOpIdx oi, uintptr_t retaddr) { tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); @@ -3017,7 +3017,7 @@ static void do_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, } } -void helper_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, +void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi, uintptr_t retaddr) { tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 7b824dcde8..9a7afb6f78 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -920,13 +920,13 @@ static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr, return ret; } -tcg_target_ulong helper_ldub_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t ra) { return do_ld1_mmu(env, addr, get_memop(oi), ra); } -tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t ra) { return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra); @@ -953,7 +953,7 @@ static uint16_t do_ld2_he_mmu(CPUArchState *env, 
abi_ptr addr, return ret; } -tcg_target_ulong helper_lduw_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t ra) { MemOp mop = get_memop(oi); @@ -965,7 +965,7 @@ tcg_target_ulong helper_lduw_mmu(CPUArchState *env, target_ulong addr, return ret; } -tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t ra) { MemOp mop = get_memop(oi); @@ -1014,7 +1014,7 @@ static uint32_t do_ld4_he_mmu(CPUArchState *env, abi_ptr addr, return ret; } -tcg_target_ulong helper_ldul_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t ra) { MemOp mop = get_memop(oi); @@ -1026,7 +1026,7 @@ tcg_target_ulong helper_ldul_mmu(CPUArchState *env, target_ulong addr, return ret; } -tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t ra) { MemOp mop = get_memop(oi); @@ -1075,7 +1075,7 @@ static uint64_t do_ld8_he_mmu(CPUArchState *env, abi_ptr addr, return ret; } -uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, +uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t ra) { MemOp mop = get_memop(oi); @@ -1124,7 +1124,7 @@ static Int128 do_ld16_he_mmu(CPUArchState *env, abi_ptr addr, return ret; } -Int128 helper_ld16_mmu(CPUArchState *env, target_ulong addr, +Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t ra) { MemOp mop = get_memop(oi); @@ -1182,7 +1182,7 @@ static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, clear_helper_retaddr(); } -void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val, +void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val, MemOpIdx oi, uintptr_t ra) { do_st1_mmu(env, addr, val, get_memop(oi), ra); @@ -1206,7 +1206,7 @@ static void do_st2_he_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, clear_helper_retaddr(); } -void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, +void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val, MemOpIdx oi, uintptr_t ra) { MemOp mop = get_memop(oi); @@ -1248,7 +1248,7 @@ static void do_st4_he_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, clear_helper_retaddr(); } -void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, +void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val, MemOpIdx oi, uintptr_t ra) { MemOp mop = get_memop(oi); @@ -1290,7 +1290,7 @@ static void do_st8_he_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, clear_helper_retaddr(); } -void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, +void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val, MemOpIdx oi, uintptr_t ra) { MemOp mop = get_memop(oi); @@ -1332,7 +1332,7 @@ static void do_st16_he_mmu(CPUArchState *env, abi_ptr addr, Int128 val, clear_helper_retaddr(); } -void helper_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, +void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi, uintptr_t ra) { MemOp mop = get_memop(oi); diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h index 7dd57013e9..6ccfe9131d 100644 --- a/include/tcg/tcg-ldst.h +++ b/include/tcg/tcg-ldst.h @@ -26,38 +26,38 @@ #define TCG_LDST_H /* Value zero-extended to tcg register size. 
*/ -tcg_target_ulong helper_ldub_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_lduw_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_ldul_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr); -uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, +uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr); -Int128 helper_ld16_mmu(CPUArchState *env, target_ulong addr, +Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr); /* Value sign-extended to tcg register size. */ -tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, target_ulong addr, +tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr, MemOpIdx oi, uintptr_t retaddr); /* * Value extended to at least uint32_t, so that some ABIs do not require * zero-extension from uint8_t or uint16_t. */ -void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val, +void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val, MemOpIdx oi, uintptr_t retaddr); -void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, +void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val, MemOpIdx oi, uintptr_t retaddr); -void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, +void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val, MemOpIdx oi, uintptr_t retaddr); -void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, +void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val, MemOpIdx oi, uintptr_t retaddr); -void helper_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, +void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi, uintptr_t retaddr); #endif /* TCG_LDST_H */ diff --git a/tcg/tcg.c b/tcg/tcg.c index eb80492e6e..48853e015b 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -865,7 +865,7 @@ static TCGHelperInfo info_helper_ld32_mmu = { .flags = TCG_CALL_NO_WG, .typemask = dh_typemask(ttl, 0) /* return tcg_target_ulong */ | dh_typemask(env, 1) - | dh_typemask(tl, 2) /* target_ulong addr */ + | dh_typemask(i64, 2) /* uint64_t addr */ | dh_typemask(i32, 3) /* unsigned oi */ | dh_typemask(ptr, 4) /* uintptr_t ra */ }; @@ -874,7 +874,7 @@ static TCGHelperInfo info_helper_ld64_mmu = { .flags = TCG_CALL_NO_WG, .typemask = dh_typemask(i64, 0) /* return uint64_t */ | dh_typemask(env, 1) - | dh_typemask(tl, 2) /* target_ulong addr */ + | dh_typemask(i64, 2) /* uint64_t addr */ | dh_typemask(i32, 3) /* unsigned oi */ | dh_typemask(ptr, 4) /* uintptr_t ra */ }; @@ -883,7 +883,7 @@ static TCGHelperInfo info_helper_ld128_mmu = { .flags = TCG_CALL_NO_WG, .typemask = dh_typemask(i128, 0) /* return Int128 */ | dh_typemask(env, 1) - | dh_typemask(tl, 2) /* target_ulong addr */ + | dh_typemask(i64, 2) /* uint64_t addr */ | dh_typemask(i32, 3) /* unsigned oi */ | dh_typemask(ptr, 4) /* uintptr_t ra */ }; @@ 
-892,7 +892,7 @@ static TCGHelperInfo info_helper_st32_mmu = { .flags = TCG_CALL_NO_WG, .typemask = dh_typemask(void, 0) | dh_typemask(env, 1) - | dh_typemask(tl, 2) /* target_ulong addr */ + | dh_typemask(i64, 2) /* uint64_t addr */ | dh_typemask(i32, 3) /* uint32_t data */ | dh_typemask(i32, 4) /* unsigned oi */ | dh_typemask(ptr, 5) /* uintptr_t ra */ @@ -902,7 +902,7 @@ static TCGHelperInfo info_helper_st64_mmu = { .flags = TCG_CALL_NO_WG, .typemask = dh_typemask(void, 0) | dh_typemask(env, 1) - | dh_typemask(tl, 2) /* target_ulong addr */ + | dh_typemask(i64, 2) /* uint64_t addr */ | dh_typemask(i64, 3) /* uint64_t data */ | dh_typemask(i32, 4) /* unsigned oi */ | dh_typemask(ptr, 5) /* uintptr_t ra */ @@ -912,7 +912,7 @@ static TCGHelperInfo info_helper_st128_mmu = { .flags = TCG_CALL_NO_WG, .typemask = dh_typemask(void, 0) | dh_typemask(env, 1) - | dh_typemask(tl, 2) /* target_ulong addr */ + | dh_typemask(i64, 2) /* uint64_t addr */ | dh_typemask(i128, 3) /* Int128 data */ | dh_typemask(i32, 4) /* unsigned oi */ | dh_typemask(ptr, 5) /* uintptr_t ra */ @@ -5597,11 +5597,26 @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, next_arg = 1; loc = &info->in[next_arg]; - nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_TL, TCG_TYPE_TL, - ldst->addrlo_reg, ldst->addrhi_reg); - next_arg += nmov; + if (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 64) { + nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, TCG_TYPE_TL, + ldst->addrlo_reg, ldst->addrhi_reg); + tcg_out_helper_load_slots(s, nmov, mov, parm); + next_arg += nmov; + } else { + /* + * 32-bit host with 32-bit guest: zero-extend the guest address + * to 64-bits for the helper by storing the low part, then + * load a zero for the high part. + */ + tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN, + TCG_TYPE_I32, TCG_TYPE_I32, + ldst->addrlo_reg, -1); + tcg_out_helper_load_slots(s, 1, mov, parm); - tcg_out_helper_load_slots(s, nmov, mov, parm); + tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot, + TCG_TYPE_I32, 0, parm); + next_arg += 2; + } switch (info->out_kind) { case TCG_CALL_RET_NORMAL: @@ -5755,10 +5770,24 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, /* Handle addr argument. */ loc = &info->in[next_arg]; - n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_TL, TCG_TYPE_TL, - ldst->addrlo_reg, ldst->addrhi_reg); - next_arg += n; - nmov += n; + if (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 64) { + n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, TCG_TYPE_TL, + ldst->addrlo_reg, ldst->addrhi_reg); + next_arg += n; + nmov += n; + } else { + /* + * 32-bit host with 32-bit guest: zero-extend the guest address + * to 64-bits for the helper by storing the low part. Later, + * after we have processed the register inputs, we will load a + * zero for the high part. + */ + tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN, + TCG_TYPE_I32, TCG_TYPE_I32, + ldst->addrlo_reg, -1); + next_arg += 2; + nmov += 1; + } /* Handle data argument. 
*/ loc = &info->in[next_arg]; @@ -5803,6 +5832,11 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, g_assert_not_reached(); } + if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32) { + loc = &info->in[1 + !HOST_BIG_ENDIAN]; + tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm); + } + tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg); } From e570597a8a7762bc85196b699c0d733dc33929ec Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 14 Mar 2023 17:02:50 -0700 Subject: [PATCH 50/74] tcg: Widen helper_{ld,st}_i128 addresses to uint64_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Always pass the target address as uint64_t. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 5 ++--- accel/tcg/tcg-runtime.h | 4 ++-- accel/tcg/user-exec.c | 5 ++--- tcg/tcg-op-ldst.c | 26 ++++++++++++++++++++++++-- 4 files changed, 30 insertions(+), 10 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 5440f68deb..ae0fbcdee2 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -2551,7 +2551,7 @@ Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr, return do_ld16_mmu(env, addr, oi, retaddr); } -Int128 helper_ld_i128(CPUArchState *env, target_ulong addr, uint32_t oi) +Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi) { return helper_ld16_mmu(env, addr, oi, GETPC()); } @@ -3024,8 +3024,7 @@ void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val, do_st16_mmu(env, addr, val, oi, retaddr); } -void helper_st_i128(CPUArchState *env, target_ulong addr, Int128 val, - MemOpIdx oi) +void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi) { helper_st16_mmu(env, addr, val, oi, GETPC()); } diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h index d9adc646c1..0e6c5f55fd 100644 --- a/accel/tcg/tcg-runtime.h +++ b/accel/tcg/tcg-runtime.h @@ -39,8 +39,8 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env) DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr) #endif /* IN_HELPER_PROTO */ -DEF_HELPER_FLAGS_3(ld_i128, TCG_CALL_NO_WG, i128, env, tl, i32) -DEF_HELPER_FLAGS_4(st_i128, TCG_CALL_NO_WG, void, env, tl, i128, i32) +DEF_HELPER_FLAGS_3(ld_i128, TCG_CALL_NO_WG, i128, env, i64, i32) +DEF_HELPER_FLAGS_4(st_i128, TCG_CALL_NO_WG, void, env, i64, i128, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG, i32, env, tl, i32, i32, i32) diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 9a7afb6f78..36ad8284a5 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -1136,7 +1136,7 @@ Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr, return ret; } -Int128 helper_ld_i128(CPUArchState *env, target_ulong addr, MemOpIdx oi) +Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi) { return helper_ld16_mmu(env, addr, oi, GETPC()); } @@ -1343,8 +1343,7 @@ void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val, do_st16_he_mmu(env, addr, val, mop, ra); } -void helper_st_i128(CPUArchState *env, target_ulong addr, - Int128 val, MemOpIdx oi) +void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi) { helper_st16_mmu(env, addr, val, oi, GETPC()); } diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c index d8503d7da4..aab6dda454 100644 --- a/tcg/tcg-op-ldst.c +++ b/tcg/tcg-op-ldst.c @@ -393,6 +393,24 @@ static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) #define tcg_temp_ebb_new 
tcg_temp_ebb_new_i32 #endif +static TCGv_i64 maybe_extend_addr64(TCGv addr) +{ +#if TARGET_LONG_BITS == 32 + TCGv_i64 a64 = tcg_temp_ebb_new_i64(); + tcg_gen_extu_i32_i64(a64, addr); + return a64; +#else + return addr; +#endif +} + +static void maybe_free_addr64(TCGv_i64 a64) +{ +#if TARGET_LONG_BITS == 32 + tcg_temp_free_i64(a64); +#endif +} + void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) { const MemOpIdx oi = make_memop_idx(memop, idx); @@ -467,7 +485,9 @@ void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_bswap64_i64(y, y); } } else { - gen_helper_ld_i128(val, cpu_env, addr, tcg_constant_i32(oi)); + TCGv_i64 a64 = maybe_extend_addr64(addr); + gen_helper_ld_i128(val, cpu_env, a64, tcg_constant_i32(oi)); + maybe_free_addr64(a64); } plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); @@ -547,7 +567,9 @@ void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) } tcg_temp_free(addr_p8); } else { - gen_helper_st_i128(cpu_env, addr, val, tcg_constant_i32(oi)); + TCGv_i64 a64 = maybe_extend_addr64(addr); + gen_helper_st_i128(cpu_env, a64, val, tcg_constant_i32(oi)); + maybe_free_addr64(a64); } plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); From ddfdd4178beb56543ac98976efbc885d7e2b5150 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 27 Mar 2023 19:56:31 -0700 Subject: [PATCH 51/74] tcg: Widen helper_atomic_* addresses to uint64_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Always pass the target address as uint64_t. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- accel/tcg/atomic_common.c.inc | 14 +++++------ accel/tcg/tcg-runtime.h | 46 +++++++++++++++++------------------ tcg/tcg-op-ldst.c | 38 ++++++++++++++++++++--------- 3 files changed, 57 insertions(+), 41 deletions(-) diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc index 8f2ce43ee6..fe0eea018f 100644 --- a/accel/tcg/atomic_common.c.inc +++ b/accel/tcg/atomic_common.c.inc @@ -13,20 +13,20 @@ * See the COPYING file in the top-level directory. 
*/ -static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, +static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr, MemOpIdx oi) { qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW); } #if HAVE_ATOMIC128 -static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, +static void atomic_trace_ld_post(CPUArchState *env, uint64_t addr, MemOpIdx oi) { qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); } -static void atomic_trace_st_post(CPUArchState *env, target_ulong addr, +static void atomic_trace_st_post(CPUArchState *env, uint64_t addr, MemOpIdx oi) { qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); @@ -40,7 +40,7 @@ static void atomic_trace_st_post(CPUArchState *env, target_ulong addr, */ #define CMPXCHG_HELPER(OP, TYPE) \ - TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr, \ + TYPE HELPER(atomic_##OP)(CPUArchState *env, uint64_t addr, \ TYPE oldv, TYPE newv, uint32_t oi) \ { return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); } @@ -62,7 +62,7 @@ CMPXCHG_HELPER(cmpxchgo_le, Int128) #undef CMPXCHG_HELPER -Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr, +Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, uint64_t addr, Int128 cmpv, Int128 newv, uint32_t oi) { #if TCG_TARGET_REG_BITS == 32 @@ -82,7 +82,7 @@ Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr, #endif } -Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr, +Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, uint64_t addr, Int128 cmpv, Int128 newv, uint32_t oi) { #if TCG_TARGET_REG_BITS == 32 @@ -103,7 +103,7 @@ Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr, } #define ATOMIC_HELPER(OP, TYPE) \ - TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \ + TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, uint64_t addr, \ TYPE val, uint32_t oi) \ { return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); } diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h index 0e6c5f55fd..6f8c2061d0 100644 --- a/accel/tcg/tcg-runtime.h +++ b/accel/tcg/tcg-runtime.h @@ -43,61 +43,61 @@ DEF_HELPER_FLAGS_3(ld_i128, TCG_CALL_NO_WG, i128, env, i64, i32) DEF_HELPER_FLAGS_4(st_i128, TCG_CALL_NO_WG, void, env, i64, i128, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) + i32, env, i64, i32, i32, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) + i32, env, i64, i32, i32, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) + i32, env, i64, i32, i32, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) + i32, env, i64, i32, i32, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG, - i32, env, tl, i32, i32, i32) + i32, env, i64, i32, i32, i32) #ifdef CONFIG_ATOMIC64 DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG, - i64, env, tl, i64, i64, i32) + i64, env, i64, i64, i64, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG, - i64, env, tl, i64, i64, i32) + i64, env, i64, i64, i64, i32) #endif #ifdef CONFIG_CMPXCHG128 DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG, - i128, env, tl, i128, i128, i32) + i128, env, i64, i128, i128, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG, - i128, env, tl, i128, i128, i32) + i128, env, i64, i128, i128, i32) #endif DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_be, TCG_CALL_NO_WG, - 
i128, env, tl, i128, i128, i32) + i128, env, i64, i128, i128, i32) DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_le, TCG_CALL_NO_WG, - i128, env, tl, i128, i128, i32) + i128, env, i64, i128, i128, i32) #ifdef CONFIG_ATOMIC64 #define GEN_ATOMIC_HELPERS(NAME) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \ - TCG_CALL_NO_WG, i64, env, tl, i64, i32) \ + TCG_CALL_NO_WG, i64, env, i64, i64, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \ - TCG_CALL_NO_WG, i64, env, tl, i64, i32) + TCG_CALL_NO_WG, i64, env, i64, i64, i32) #else #define GEN_ATOMIC_HELPERS(NAME) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + TCG_CALL_NO_WG, i32, env, i64, i32, i32) \ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ - TCG_CALL_NO_WG, i32, env, tl, i32, i32) + TCG_CALL_NO_WG, i32, env, i64, i32, i32) #endif /* CONFIG_ATOMIC64 */ GEN_ATOMIC_HELPERS(fetch_add) diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c index aab6dda454..ca57a2779d 100644 --- a/tcg/tcg-op-ldst.c +++ b/tcg/tcg-op-ldst.c @@ -623,15 +623,15 @@ static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc) } } -typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, +typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv_i64, TCGv_i32, TCGv_i32, TCGv_i32); -typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, +typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i32); -typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv, +typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv_i64, TCGv_i128, TCGv_i128, TCGv_i32); -typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, +typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv_i64, TCGv_i32, TCGv_i32); -typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, +typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64, TCGv_i32); #ifdef CONFIG_ATOMIC64 @@ -682,6 +682,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, TCGv_i32 newv, TCGArg idx, MemOp memop) { gen_atomic_cx_i32 gen; + TCGv_i64 a64; MemOpIdx oi; if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { @@ -694,7 +695,9 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, tcg_debug_assert(gen != NULL); oi = make_memop_idx(memop & ~MO_SIGN, idx); - gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); + a64 = 
maybe_extend_addr64(addr); + gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi)); + maybe_free_addr64(a64); if (memop & MO_SIGN) { tcg_gen_ext_i32(retv, retv, memop); @@ -750,7 +753,9 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; if (gen) { MemOpIdx oi = make_memop_idx(memop, idx); - gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); + TCGv_i64 a64 = maybe_extend_addr64(addr); + gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi)); + maybe_free_addr64(a64); return; } @@ -802,11 +807,14 @@ void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, ? gen_helper_nonatomic_cmpxchgo_le : gen_helper_nonatomic_cmpxchgo_be); MemOpIdx oi = make_memop_idx(memop, idx); + TCGv_i64 a64; tcg_debug_assert((memop & MO_SIZE) == MO_128); tcg_debug_assert((memop & MO_SIGN) == 0); - gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); + a64 = maybe_extend_addr64(addr); + gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi)); + maybe_free_addr64(a64); } else { TCGv_i128 oldv = tcg_temp_ebb_new_i128(); TCGv_i128 tmpv = tcg_temp_ebb_new_i128(); @@ -854,7 +862,9 @@ void tcg_gen_atomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, if (gen) { MemOpIdx oi = make_memop_idx(memop, idx); - gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); + TCGv_i64 a64 = maybe_extend_addr64(addr); + gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi)); + maybe_free_addr64(a64); return; } @@ -892,6 +902,7 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop, void * const table[]) { gen_atomic_op_i32 gen; + TCGv_i64 a64; MemOpIdx oi; memop = tcg_canonicalize_memop(memop, 0, 0); @@ -900,7 +911,9 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, tcg_debug_assert(gen != NULL); oi = make_memop_idx(memop & ~MO_SIGN, idx); - gen(ret, cpu_env, addr, val, tcg_constant_i32(oi)); + a64 = maybe_extend_addr64(addr); + gen(ret, cpu_env, a64, val, tcg_constant_i32(oi)); + maybe_free_addr64(a64); if (memop & MO_SIGN) { tcg_gen_ext_i32(ret, ret, memop); @@ -934,13 +947,16 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, if ((memop & MO_SIZE) == MO_64) { #ifdef CONFIG_ATOMIC64 gen_atomic_op_i64 gen; + TCGv_i64 a64; MemOpIdx oi; gen = table[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); oi = make_memop_idx(memop & ~MO_SIGN, idx); - gen(ret, cpu_env, addr, val, tcg_constant_i32(oi)); + a64 = maybe_extend_addr64(addr); + gen(ret, cpu_env, a64, val, tcg_constant_i32(oi)); + maybe_free_addr64(a64); #else gen_helper_exit_atomic(cpu_env); /* Produce a result, so that we have a well-formed opcode stream From 76cef4b233da82641fca61cb7783a30fc7096053 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 8 Mar 2023 16:48:02 -0800 Subject: [PATCH 52/74] tcg: Widen tcg_gen_code pc_start argument to uint64_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- include/tcg/tcg.h | 2 +- tcg/tcg.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index f40de4177d..42a3c05607 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -852,7 +852,7 @@ void tcg_register_thread(void); void tcg_prologue_init(TCGContext *s); void tcg_func_start(TCGContext *s); -int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start); +int tcg_gen_code(TCGContext 
*s, TranslationBlock *tb, uint64_t pc_start); void tb_target_set_jmp_target(const TranslationBlock *, int, uintptr_t, uintptr_t); diff --git a/tcg/tcg.c b/tcg/tcg.c index 48853e015b..45e8aa67ec 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -5949,7 +5949,7 @@ int64_t tcg_cpu_exec_time(void) #endif -int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) +int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start) { #ifdef CONFIG_PROFILER TCGProfile *prof = &s->prof; From f5c346ac41759aea9dbce333a0dbfa9cc52f9f11 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 8 Mar 2023 20:03:30 -0800 Subject: [PATCH 53/74] accel/tcg: Merge gen_mem_wrapped with plugin_gen_empty_mem_callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As gen_mem_wrapped is only used in plugin_gen_empty_mem_callback, we can avoid the curiosity of union mem_gen_fn by inlining it. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- accel/tcg/plugin-gen.c | 30 ++++++------------------------ 1 file changed, 6 insertions(+), 24 deletions(-) diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c index 5efb8db258..04facd6305 100644 --- a/accel/tcg/plugin-gen.c +++ b/accel/tcg/plugin-gen.c @@ -202,35 +202,17 @@ static void plugin_gen_empty_callback(enum plugin_gen_from from) } } -union mem_gen_fn { - void (*mem_fn)(TCGv, uint32_t); - void (*inline_fn)(void); -}; - -static void gen_mem_wrapped(enum plugin_gen_cb type, - const union mem_gen_fn *f, TCGv addr, - uint32_t info, bool is_mem) +void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) { enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info); - gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw); - if (is_mem) { - f->mem_fn(addr, info); - } else { - f->inline_fn(); - } + gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw); + gen_empty_mem_cb(addr, info); tcg_gen_plugin_cb_end(); -} -void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) -{ - union mem_gen_fn fn; - - fn.mem_fn = gen_empty_mem_cb; - gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true); - - fn.inline_fn = gen_empty_inline_cb; - gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false); + gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw); + gen_empty_inline_cb(); + tcg_gen_plugin_cb_end(); } static TCGOp *find_op(TCGOp *op, TCGOpcode opc) From b6d9164518b377bf77a6962f1a8bcc8cf6c085f1 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 8 Mar 2023 20:10:16 -0800 Subject: [PATCH 54/74] accel/tcg: Merge do_gen_mem_cb into caller MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As do_gen_mem_cb is called once, merge it into gen_empty_mem_cb. 
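Illustration only, not part of the patch: the change folds a static helper with exactly one caller into that caller. A minimal standalone sketch of that pattern, using invented names (do_cb, gen_cb_before and gen_cb_after are not QEMU functions):

    #include <stdio.h>

    /* Before: gen_cb_before() is a thin wrapper around a single-use helper. */
    static void do_cb(int arg)
    {
        printf("callback %d\n", arg);
    }

    static void gen_cb_before(int arg)
    {
        do_cb(arg);
    }

    /* After: the helper body is folded directly into its only caller. */
    static void gen_cb_after(int arg)
    {
        printf("callback %d\n", arg);
    }

    int main(void)
    {
        gen_cb_before(1);
        gen_cb_after(2);
        return 0;
    }
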
Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- accel/tcg/plugin-gen.c | 39 +++++++++++++++++---------------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c index 04facd6305..907c5004a4 100644 --- a/accel/tcg/plugin-gen.c +++ b/accel/tcg/plugin-gen.c @@ -92,27 +92,6 @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index, void *userdata) { } -static void do_gen_mem_cb(TCGv vaddr, uint32_t info) -{ - TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); - TCGv_i32 meminfo = tcg_temp_ebb_new_i32(); - TCGv_i64 vaddr64 = tcg_temp_ebb_new_i64(); - TCGv_ptr udata = tcg_temp_ebb_new_ptr(); - - tcg_gen_movi_i32(meminfo, info); - tcg_gen_movi_ptr(udata, 0); - tcg_gen_ld_i32(cpu_index, cpu_env, - -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); - tcg_gen_extu_tl_i64(vaddr64, vaddr); - - gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata); - - tcg_temp_free_ptr(udata); - tcg_temp_free_i64(vaddr64); - tcg_temp_free_i32(meminfo); - tcg_temp_free_i32(cpu_index); -} - static void gen_empty_udata_cb(void) { TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); @@ -147,7 +126,23 @@ static void gen_empty_inline_cb(void) static void gen_empty_mem_cb(TCGv addr, uint32_t info) { - do_gen_mem_cb(addr, info); + TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); + TCGv_i32 meminfo = tcg_temp_ebb_new_i32(); + TCGv_i64 addr64 = tcg_temp_ebb_new_i64(); + TCGv_ptr udata = tcg_temp_ebb_new_ptr(); + + tcg_gen_movi_i32(meminfo, info); + tcg_gen_movi_ptr(udata, 0); + tcg_gen_ld_i32(cpu_index, cpu_env, + -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); + tcg_gen_extu_tl_i64(addr64, addr); + + gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr64, udata); + + tcg_temp_free_ptr(udata); + tcg_temp_free_i64(addr64); + tcg_temp_free_i32(meminfo); + tcg_temp_free_i32(cpu_index); } /* From eb9d02f24b1ce877a60ffaf6cc1ecc8484740b37 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Fri, 10 Mar 2023 07:56:42 -0800 Subject: [PATCH 55/74] tcg: Reduce copies for plugin_gen_mem_callbacks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We only need to make copies for loads, when the destination overlaps the address. For now, only eliminate the copy for stores and 128-bit loads. Rename plugin_prep_mem_callbacks to plugin_maybe_preserve_addr, returning NULL if no copy is made. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- tcg/tcg-op-ldst.c | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c index ca57a2779d..b695d2954e 100644 --- a/tcg/tcg-op-ldst.c +++ b/tcg/tcg-op-ldst.c @@ -114,7 +114,8 @@ static void tcg_gen_req_mo(TCGBar type) } } -static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr) +/* Only required for loads, where value might overlap addr. 
*/ +static TCGv plugin_maybe_preserve_addr(TCGv vaddr) { #ifdef CONFIG_PLUGIN if (tcg_ctx->plugin_insn != NULL) { @@ -124,17 +125,20 @@ static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr) return temp; } #endif - return vaddr; + return NULL; } -static void plugin_gen_mem_callbacks(TCGv vaddr, MemOpIdx oi, - enum qemu_plugin_mem_rw rw) +static void +plugin_gen_mem_callbacks(TCGv copy_addr, TCGv orig_addr, MemOpIdx oi, + enum qemu_plugin_mem_rw rw) { #ifdef CONFIG_PLUGIN if (tcg_ctx->plugin_insn != NULL) { qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw); - plugin_gen_empty_mem_callback(vaddr, info); - tcg_temp_free(vaddr); + plugin_gen_empty_mem_callback(copy_addr ? : orig_addr, info); + if (copy_addr) { + tcg_temp_free(copy_addr); + } } #endif } @@ -143,6 +147,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; MemOpIdx oi; + TCGv copy_addr; tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 0, 0); @@ -157,9 +162,9 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) } } - addr = plugin_prep_mem_callbacks(addr); + copy_addr = plugin_maybe_preserve_addr(addr); gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); + plugin_gen_mem_callbacks(copy_addr, addr, oi, QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { switch (orig_memop & MO_SIZE) { @@ -202,13 +207,12 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) memop &= ~MO_BSWAP; } - addr = plugin_prep_mem_callbacks(addr); if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) { gen_ldst_i32(INDEX_op_qemu_st8_i32, val, addr, memop, idx); } else { gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx); } - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); + plugin_gen_mem_callbacks(NULL, addr, oi, QEMU_PLUGIN_MEM_W); if (swap) { tcg_temp_free_i32(swap); @@ -219,6 +223,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; MemOpIdx oi; + TCGv copy_addr; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); @@ -243,9 +248,9 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) } } - addr = plugin_prep_mem_callbacks(addr); + copy_addr = plugin_maybe_preserve_addr(addr); gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); + plugin_gen_mem_callbacks(copy_addr, addr, oi, QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { int flags = (orig_memop & MO_SIGN @@ -300,9 +305,8 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) memop &= ~MO_BSWAP; } - addr = plugin_prep_mem_callbacks(addr); gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); + plugin_gen_mem_callbacks(NULL, addr, oi, QEMU_PLUGIN_MEM_W); if (swap) { tcg_temp_free_i64(swap); @@ -419,7 +423,6 @@ void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) tcg_debug_assert((memop & MO_SIGN) == 0); tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); - addr = plugin_prep_mem_callbacks(addr); /* TODO: For now, force 32-bit hosts to use the helper. 
*/ if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) { @@ -490,7 +493,7 @@ void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) maybe_free_addr64(a64); } - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); + plugin_gen_mem_callbacks(NULL, addr, oi, QEMU_PLUGIN_MEM_R); } void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) @@ -501,7 +504,6 @@ void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) tcg_debug_assert((memop & MO_SIGN) == 0); tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST); - addr = plugin_prep_mem_callbacks(addr); /* TODO: For now, force 32-bit hosts to use the helper. */ @@ -572,7 +574,7 @@ void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) maybe_free_addr64(a64); } - plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); + plugin_gen_mem_callbacks(NULL, addr, oi, QEMU_PLUGIN_MEM_W); } static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) From fcdab382c8b92bcc689b18f8ba5cd036139945bf Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 13 Mar 2023 17:35:29 -0700 Subject: [PATCH 56/74] accel/tcg: Widen plugin_gen_empty_mem_callback to i64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we do this inside gen_empty_mem_cb anyway, let's do this earlier inside tcg expansion. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- accel/tcg/plugin-gen.c | 9 +++------ include/exec/plugin-gen.h | 4 ++-- tcg/tcg-op-ldst.c | 28 ++++++++++++++++++++-------- 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c index 907c5004a4..34be1b940c 100644 --- a/accel/tcg/plugin-gen.c +++ b/accel/tcg/plugin-gen.c @@ -124,23 +124,20 @@ static void gen_empty_inline_cb(void) tcg_temp_free_i64(val); } -static void gen_empty_mem_cb(TCGv addr, uint32_t info) +static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info) { TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); TCGv_i32 meminfo = tcg_temp_ebb_new_i32(); - TCGv_i64 addr64 = tcg_temp_ebb_new_i64(); TCGv_ptr udata = tcg_temp_ebb_new_ptr(); tcg_gen_movi_i32(meminfo, info); tcg_gen_movi_ptr(udata, 0); tcg_gen_ld_i32(cpu_index, cpu_env, -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); - tcg_gen_extu_tl_i64(addr64, addr); - gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr64, udata); + gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata); tcg_temp_free_ptr(udata); - tcg_temp_free_i64(addr64); tcg_temp_free_i32(meminfo); tcg_temp_free_i32(cpu_index); } @@ -197,7 +194,7 @@ static void plugin_gen_empty_callback(enum plugin_gen_from from) } } -void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) +void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info) { enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info); diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h index 5f5506f1cc..3af0168e65 100644 --- a/include/exec/plugin-gen.h +++ b/include/exec/plugin-gen.h @@ -27,7 +27,7 @@ void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db); void plugin_gen_insn_end(void); void plugin_gen_disable_mem_helpers(void); -void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info); +void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info); static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size) { @@ -69,7 +69,7 @@ static inline void plugin_gen_tb_end(CPUState *cpu) static inline void plugin_gen_disable_mem_helpers(void) { } 
-static inline void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) +static inline void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info) { } static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size) diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c index b695d2954e..2d5e98971d 100644 --- a/tcg/tcg-op-ldst.c +++ b/tcg/tcg-op-ldst.c @@ -115,13 +115,13 @@ static void tcg_gen_req_mo(TCGBar type) } /* Only required for loads, where value might overlap addr. */ -static TCGv plugin_maybe_preserve_addr(TCGv vaddr) +static TCGv_i64 plugin_maybe_preserve_addr(TCGv vaddr) { #ifdef CONFIG_PLUGIN if (tcg_ctx->plugin_insn != NULL) { /* Save a copy of the vaddr for use after a load. */ - TCGv temp = tcg_temp_new(); - tcg_gen_mov_tl(temp, vaddr); + TCGv_i64 temp = tcg_temp_ebb_new_i64(); + tcg_gen_extu_tl_i64(temp, vaddr); return temp; } #endif @@ -129,16 +129,28 @@ static TCGv plugin_maybe_preserve_addr(TCGv vaddr) } static void -plugin_gen_mem_callbacks(TCGv copy_addr, TCGv orig_addr, MemOpIdx oi, +plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGv orig_addr, MemOpIdx oi, enum qemu_plugin_mem_rw rw) { #ifdef CONFIG_PLUGIN if (tcg_ctx->plugin_insn != NULL) { qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw); - plugin_gen_empty_mem_callback(copy_addr ? : orig_addr, info); + +#if TARGET_LONG_BITS == 64 if (copy_addr) { - tcg_temp_free(copy_addr); + plugin_gen_empty_mem_callback(copy_addr, info); + tcg_temp_free_i64(copy_addr); + } else { + plugin_gen_empty_mem_callback(orig_addr, info); } +#else + if (!copy_addr) { + copy_addr = tcg_temp_ebb_new_i64(); + tcg_gen_extu_tl_i64(copy_addr, orig_addr); + } + plugin_gen_empty_mem_callback(copy_addr, info); + tcg_temp_free_i64(copy_addr); +#endif } #endif } @@ -147,7 +159,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; MemOpIdx oi; - TCGv copy_addr; + TCGv_i64 copy_addr; tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 0, 0); @@ -223,7 +235,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; MemOpIdx oi; - TCGv copy_addr; + TCGv_i64 copy_addr; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); From 4baf3978c02b387c39dc6a75d323126ab386aece Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Thu, 9 Mar 2023 17:46:16 -0800 Subject: [PATCH 57/74] tcg: Add addr_type to TCGContext MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will enable replacement of TARGET_LONG_BITS within tcg/. 
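Illustration only, not part of the patch: once each TCGContext records its address type, code under tcg/ can test it at run time rather than branching on the compile-time TARGET_LONG_BITS. A schematic fragment of that shift, with placeholder comments standing in for the real 32-bit and 64-bit paths; the run-time form matches what later patches in this series use:

    /* Before: chosen at build time for the whole binary. */
    #if TARGET_LONG_BITS == 32
        /* ... handle a 32-bit guest address ... */
    #else
        /* ... handle a 64-bit guest address ... */
    #endif

    /* After: chosen per context via the new field. */
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        /* ... handle a 32-bit guest address ... */
    } else {
        /* ... handle a 64-bit guest address ... */
    }
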
Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- accel/tcg/translate-all.c | 2 ++ include/tcg/tcg.h | 1 + tcg/tcg.c | 3 +++ 3 files changed, 6 insertions(+) diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index 7b7d9a5fff..99a9d0e34f 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -356,6 +356,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu, tb_set_page_addr0(tb, phys_pc); tb_set_page_addr1(tb, -1); tcg_ctx->gen_tb = tb; + tcg_ctx->addr_type = TCG_TYPE_TL; + tb_overflow: #ifdef CONFIG_PROFILER diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index 42a3c05607..b9748fd0c5 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -558,6 +558,7 @@ struct TCGContext { int nb_temps; int nb_indirects; int nb_ops; + TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */ TCGRegSet reserved_regs; intptr_t current_frame_offset; diff --git a/tcg/tcg.c b/tcg/tcg.c index 45e8aa67ec..ff860b5469 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -1520,6 +1520,9 @@ void tcg_func_start(TCGContext *s) QTAILQ_INIT(&s->ops); QTAILQ_INIT(&s->free_ops); QSIMPLEQ_INIT(&s->labels); + + tcg_debug_assert(s->addr_type == TCG_TYPE_I32 || + s->addr_type == TCG_TYPE_I64); } static TCGTemp *tcg_temp_alloc(TCGContext *s) From d5920b7280762d4d696bff87f50dbce453adef06 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 14 Mar 2023 16:46:55 -0700 Subject: [PATCH 58/74] tcg: Remove TCGv from tcg_gen_qemu_{ld,st}_* MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Expand from TCGv to TCGTemp inline in the translators, and validate that the size matches tcg_ctx->addr_type. These inlines will eventually be seen only by target-specific code. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- include/tcg/tcg-op.h | 50 ++++++- tcg/tcg-op-ldst.c | 343 ++++++++++++++++++++++++++----------------- 2 files changed, 251 insertions(+), 142 deletions(-) diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h index de3b70aa84..e556450ba9 100644 --- a/include/tcg/tcg-op.h +++ b/include/tcg/tcg-op.h @@ -803,22 +803,60 @@ static inline void tcg_gen_plugin_cb_end(void) #define tcg_temp_new() tcg_temp_new_i32() #define tcg_global_mem_new tcg_global_mem_new_i32 #define tcg_temp_free tcg_temp_free_i32 +#define tcgv_tl_temp tcgv_i32_temp #define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32 #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32 #else #define tcg_temp_new() tcg_temp_new_i64() #define tcg_global_mem_new tcg_global_mem_new_i64 #define tcg_temp_free tcg_temp_free_i64 +#define tcgv_tl_temp tcgv_i64_temp #define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64 #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64 #endif -void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, MemOp); -void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, MemOp); -void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, MemOp); -void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, MemOp); -void tcg_gen_qemu_ld_i128(TCGv_i128, TCGv, TCGArg, MemOp); -void tcg_gen_qemu_st_i128(TCGv_i128, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_ld_i32_chk(TCGv_i32, TCGTemp *, TCGArg, MemOp, TCGType); +void tcg_gen_qemu_st_i32_chk(TCGv_i32, TCGTemp *, TCGArg, MemOp, TCGType); +void tcg_gen_qemu_ld_i64_chk(TCGv_i64, TCGTemp *, TCGArg, MemOp, TCGType); +void tcg_gen_qemu_st_i64_chk(TCGv_i64, TCGTemp *, TCGArg, MemOp, TCGType); +void tcg_gen_qemu_ld_i128_chk(TCGv_i128, TCGTemp *, TCGArg, MemOp, TCGType); +void tcg_gen_qemu_st_i128_chk(TCGv_i128, TCGTemp *, TCGArg, MemOp, TCGType); + +static inline void 
+tcg_gen_qemu_ld_i32(TCGv_i32 v, TCGv a, TCGArg i, MemOp m) +{ + tcg_gen_qemu_ld_i32_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL); +} + +static inline void +tcg_gen_qemu_st_i32(TCGv_i32 v, TCGv a, TCGArg i, MemOp m) +{ + tcg_gen_qemu_st_i32_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL); +} + +static inline void +tcg_gen_qemu_ld_i64(TCGv_i64 v, TCGv a, TCGArg i, MemOp m) +{ + tcg_gen_qemu_ld_i64_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL); +} + +static inline void +tcg_gen_qemu_st_i64(TCGv_i64 v, TCGv a, TCGArg i, MemOp m) +{ + tcg_gen_qemu_st_i64_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL); +} + +static inline void +tcg_gen_qemu_ld_i128(TCGv_i128 v, TCGv a, TCGArg i, MemOp m) +{ + tcg_gen_qemu_ld_i128_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL); +} + +static inline void +tcg_gen_qemu_st_i128(TCGv_i128 v, TCGv a, TCGArg i, MemOp m) +{ + tcg_gen_qemu_st_i128_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL); +} void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32, TCGArg, MemOp); diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c index 2d5e98971d..0fd7acf258 100644 --- a/tcg/tcg-op-ldst.c +++ b/tcg/tcg-op-ldst.c @@ -68,39 +68,38 @@ static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) return op; } -static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, - MemOp memop, TCGArg idx) +static void gen_ldst(TCGOpcode opc, TCGTemp *vl, TCGTemp *vh, + TCGTemp *addr, MemOpIdx oi) { - MemOpIdx oi = make_memop_idx(memop, idx); -#if TARGET_LONG_BITS == 32 - tcg_gen_op3i_i32(opc, val, addr, oi); -#else - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi); + if (TCG_TARGET_REG_BITS == 64 || tcg_ctx->addr_type == TCG_TYPE_I32) { + if (vh) { + tcg_gen_op4(opc, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi); + } else { + tcg_gen_op3(opc, temp_arg(vl), temp_arg(addr), oi); + } } else { - tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_i64_arg(addr), oi); + /* See TCGV_LOW/HIGH. */ + TCGTemp *al = addr + HOST_BIG_ENDIAN; + TCGTemp *ah = addr + !HOST_BIG_ENDIAN; + + if (vh) { + tcg_gen_op5(opc, temp_arg(vl), temp_arg(vh), + temp_arg(al), temp_arg(ah), oi); + } else { + tcg_gen_op4(opc, temp_arg(vl), temp_arg(al), temp_arg(ah), oi); + } } -#endif } -static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr, - MemOp memop, TCGArg idx) +static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi) { - MemOpIdx oi = make_memop_idx(memop, idx); -#if TARGET_LONG_BITS == 32 if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi); + TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v)); + TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v)); + gen_ldst(opc, vl, vh, addr, oi); } else { - tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_i32_arg(addr), oi); + gen_ldst(opc, tcgv_i64_temp(v), NULL, addr, oi); } -#else - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_op5i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), - TCGV_LOW(addr), TCGV_HIGH(addr), oi); - } else { - tcg_gen_op3i_i64(opc, val, addr, oi); - } -#endif } static void tcg_gen_req_mo(TCGBar type) @@ -115,13 +114,17 @@ static void tcg_gen_req_mo(TCGBar type) } /* Only required for loads, where value might overlap addr. */ -static TCGv_i64 plugin_maybe_preserve_addr(TCGv vaddr) +static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr) { #ifdef CONFIG_PLUGIN if (tcg_ctx->plugin_insn != NULL) { /* Save a copy of the vaddr for use after a load. 
*/ TCGv_i64 temp = tcg_temp_ebb_new_i64(); - tcg_gen_extu_tl_i64(temp, vaddr); + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + tcg_gen_extu_i32_i64(temp, temp_tcgv_i32(addr)); + } else { + tcg_gen_mov_i64(temp, temp_tcgv_i64(addr)); + } return temp; } #endif @@ -129,54 +132,55 @@ static TCGv_i64 plugin_maybe_preserve_addr(TCGv vaddr) } static void -plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGv orig_addr, MemOpIdx oi, +plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi, enum qemu_plugin_mem_rw rw) { #ifdef CONFIG_PLUGIN if (tcg_ctx->plugin_insn != NULL) { qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw); -#if TARGET_LONG_BITS == 64 - if (copy_addr) { + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + if (!copy_addr) { + copy_addr = tcg_temp_ebb_new_i64(); + tcg_gen_extu_i32_i64(copy_addr, temp_tcgv_i32(orig_addr)); + } plugin_gen_empty_mem_callback(copy_addr, info); tcg_temp_free_i64(copy_addr); } else { - plugin_gen_empty_mem_callback(orig_addr, info); + if (copy_addr) { + plugin_gen_empty_mem_callback(copy_addr, info); + tcg_temp_free_i64(copy_addr); + } else { + plugin_gen_empty_mem_callback(temp_tcgv_i64(orig_addr), info); + } } -#else - if (!copy_addr) { - copy_addr = tcg_temp_ebb_new_i64(); - tcg_gen_extu_tl_i64(copy_addr, orig_addr); - } - plugin_gen_empty_mem_callback(copy_addr, info); - tcg_temp_free_i64(copy_addr); -#endif } #endif } -void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) +static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr, + TCGArg idx, MemOp memop) { MemOp orig_memop; - MemOpIdx oi; + MemOpIdx orig_oi, oi; TCGv_i64 copy_addr; tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); - memop = tcg_canonicalize_memop(memop, 0, 0); - oi = make_memop_idx(memop, idx); + orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0); + orig_oi = oi = make_memop_idx(memop, idx); - orig_memop = memop; if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { memop &= ~MO_BSWAP; /* The bswap primitive benefits from zero-extended input. 
*/ if ((memop & MO_SSIZE) == MO_SW) { memop &= ~MO_SIGN; } + oi = make_memop_idx(memop, idx); } copy_addr = plugin_maybe_preserve_addr(addr); - gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); - plugin_gen_mem_callbacks(copy_addr, addr, oi, QEMU_PLUGIN_MEM_R); + gen_ldst(INDEX_op_qemu_ld_i32, tcgv_i32_temp(val), NULL, addr, oi); + plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { switch (orig_memop & MO_SIZE) { @@ -194,14 +198,24 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) } } -void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) +void tcg_gen_qemu_ld_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx, + MemOp memop, TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & MO_SIZE) <= MO_32); + tcg_gen_qemu_ld_i32_int(val, addr, idx, memop); +} + +static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr, + TCGArg idx, MemOp memop) { TCGv_i32 swap = NULL; - MemOpIdx oi; + MemOpIdx orig_oi, oi; + TCGOpcode opc; tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 0, 1); - oi = make_memop_idx(memop, idx); + orig_oi = oi = make_memop_idx(memop, idx); if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { swap = tcg_temp_ebb_new_i32(); @@ -217,28 +231,39 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) } val = swap; memop &= ~MO_BSWAP; + oi = make_memop_idx(memop, idx); } if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) { - gen_ldst_i32(INDEX_op_qemu_st8_i32, val, addr, memop, idx); + opc = INDEX_op_qemu_st8_i32; } else { - gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx); + opc = INDEX_op_qemu_st_i32; } - plugin_gen_mem_callbacks(NULL, addr, oi, QEMU_PLUGIN_MEM_W); + gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi); + plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W); if (swap) { tcg_temp_free_i32(swap); } } -void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) +void tcg_gen_qemu_st_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx, + MemOp memop, TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & MO_SIZE) <= MO_32); + tcg_gen_qemu_st_i32_int(val, addr, idx, memop); +} + +static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr, + TCGArg idx, MemOp memop) { MemOp orig_memop; - MemOpIdx oi; + MemOpIdx orig_oi, oi; TCGv_i64 copy_addr; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { - tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); + tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop); if (memop & MO_SIGN) { tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31); } else { @@ -248,21 +273,21 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) } tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); - memop = tcg_canonicalize_memop(memop, 1, 0); - oi = make_memop_idx(memop, idx); + orig_memop = memop = tcg_canonicalize_memop(memop, 1, 0); + orig_oi = oi = make_memop_idx(memop, idx); - orig_memop = memop; if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { memop &= ~MO_BSWAP; /* The bswap primitive benefits from zero-extended input. 
*/ if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) { memop &= ~MO_SIGN; } + oi = make_memop_idx(memop, idx); } copy_addr = plugin_maybe_preserve_addr(addr); - gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); - plugin_gen_mem_callbacks(copy_addr, addr, oi, QEMU_PLUGIN_MEM_R); + gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, oi); + plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { int flags = (orig_memop & MO_SIGN @@ -284,19 +309,28 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) } } -void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) +void tcg_gen_qemu_ld_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx, + MemOp memop, TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & MO_SIZE) <= MO_64); + tcg_gen_qemu_ld_i64_int(val, addr, idx, memop); +} + +static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr, + TCGArg idx, MemOp memop) { TCGv_i64 swap = NULL; - MemOpIdx oi; + MemOpIdx orig_oi, oi; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { - tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop); + tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop); return; } tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 1, 1); - oi = make_memop_idx(memop, idx); + orig_oi = oi = make_memop_idx(memop, idx); if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { swap = tcg_temp_ebb_new_i64(); @@ -315,16 +349,25 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) } val = swap; memop &= ~MO_BSWAP; + oi = make_memop_idx(memop, idx); } - gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx); - plugin_gen_mem_callbacks(NULL, addr, oi, QEMU_PLUGIN_MEM_W); + gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, oi); + plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W); if (swap) { tcg_temp_free_i64(swap); } } +void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx, + MemOp memop, TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & MO_SIZE) <= MO_64); + tcg_gen_qemu_st_i64_int(val, addr, idx, memop); +} + /* * Return true if @mop, without knowledge of the pointer alignment, * does not require 16-byte atomicity, and it would be adventagous @@ -360,9 +403,6 @@ static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) { MemOp mop_1 = orig, mop_2; - tcg_debug_assert((orig & MO_SIZE) == MO_128); - tcg_debug_assert((orig & MO_SIGN) == 0); - /* Reduce the size to 64-bit. 
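 * The two MemOps returned here describe the 128-bit access as a pair
 * of 64-bit accesses: in the use_two_i64_for_i128 paths of the i128
 * load/store expansions, mop[0] is used at the original address and
 * mop[1] at address + 8.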
*/ mop_1 = (mop_1 & ~MO_SIZE) | MO_64; @@ -403,12 +443,6 @@ static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) ret[1] = mop_2; } -#if TARGET_LONG_BITS == 64 -#define tcg_temp_ebb_new tcg_temp_ebb_new_i64 -#else -#define tcg_temp_ebb_new tcg_temp_ebb_new_i32 -#endif - static TCGv_i64 maybe_extend_addr64(TCGv addr) { #if TARGET_LONG_BITS == 32 @@ -427,39 +461,32 @@ static void maybe_free_addr64(TCGv_i64 a64) #endif } -void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) +static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr, + TCGArg idx, MemOp memop) { - const MemOpIdx oi = make_memop_idx(memop, idx); - - tcg_debug_assert((memop & MO_SIZE) == MO_128); - tcg_debug_assert((memop & MO_SIGN) == 0); + const MemOpIdx orig_oi = make_memop_idx(memop, idx); + TCGv_i64 ext_addr = NULL; tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); /* TODO: For now, force 32-bit hosts to use the helper. */ if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) { TCGv_i64 lo, hi; - TCGArg addr_arg; - MemOpIdx adj_oi; bool need_bswap = false; + MemOpIdx oi = orig_oi; if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { lo = TCGV128_HIGH(val); hi = TCGV128_LOW(val); - adj_oi = make_memop_idx(memop & ~MO_BSWAP, idx); + oi = make_memop_idx(memop & ~MO_BSWAP, idx); need_bswap = true; } else { lo = TCGV128_LOW(val); hi = TCGV128_HIGH(val); - adj_oi = oi; } -#if TARGET_LONG_BITS == 32 - addr_arg = tcgv_i32_arg(addr); -#else - addr_arg = tcgv_i64_arg(addr); -#endif - tcg_gen_op4ii_i64(INDEX_op_qemu_ld_i128, lo, hi, addr_arg, adj_oi); + gen_ldst(INDEX_op_qemu_ld_i128, tcgv_i64_temp(lo), + tcgv_i64_temp(hi), addr, oi); if (need_bswap) { tcg_gen_bswap64_i64(lo, lo); @@ -467,10 +494,12 @@ void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) } } else if (use_two_i64_for_i128(memop)) { MemOp mop[2]; - TCGv addr_p8; + TCGTemp *addr_p8; TCGv_i64 x, y; + bool need_bswap; canonicalize_memop_i128_as_i64(mop, memop); + need_bswap = (mop[0] ^ memop) & MO_BSWAP; /* * Since there are no global TCGv_i128, there is no visible state @@ -485,35 +514,57 @@ void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) y = TCGV128_LOW(val); } - gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr, mop[0], idx); + gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr, + make_memop_idx(mop[0], idx)); - if ((mop[0] ^ memop) & MO_BSWAP) { + if (need_bswap) { tcg_gen_bswap64_i64(x, x); } - addr_p8 = tcg_temp_ebb_new(); - tcg_gen_addi_tl(addr_p8, addr, 8); - gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8, mop[1], idx); - tcg_temp_free(addr_p8); + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + TCGv_i32 t = tcg_temp_ebb_new_i32(); + tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8); + addr_p8 = tcgv_i32_temp(t); + } else { + TCGv_i64 t = tcg_temp_ebb_new_i64(); + tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8); + addr_p8 = tcgv_i64_temp(t); + } - if ((mop[0] ^ memop) & MO_BSWAP) { + gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8, + make_memop_idx(mop[1], idx)); + tcg_temp_free_internal(addr_p8); + + if (need_bswap) { tcg_gen_bswap64_i64(y, y); } } else { - TCGv_i64 a64 = maybe_extend_addr64(addr); - gen_helper_ld_i128(val, cpu_env, a64, tcg_constant_i32(oi)); - maybe_free_addr64(a64); + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + ext_addr = tcg_temp_ebb_new_i64(); + tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr)); + addr = tcgv_i64_temp(ext_addr); + } + gen_helper_ld_i128(val, cpu_env, temp_tcgv_i64(addr), + tcg_constant_i32(orig_oi)); } - plugin_gen_mem_callbacks(NULL, addr, 
oi, QEMU_PLUGIN_MEM_R); + plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R); } -void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) +void tcg_gen_qemu_ld_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx, + MemOp memop, TCGType addr_type) { - const MemOpIdx oi = make_memop_idx(memop, idx); - + tcg_debug_assert(addr_type == tcg_ctx->addr_type); tcg_debug_assert((memop & MO_SIZE) == MO_128); tcg_debug_assert((memop & MO_SIGN) == 0); + tcg_gen_qemu_ld_i128_int(val, addr, idx, memop); +} + +static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr, + TCGArg idx, MemOp memop) +{ + const MemOpIdx orig_oi = make_memop_idx(memop, idx); + TCGv_i64 ext_addr = NULL; tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST); @@ -521,29 +572,23 @@ void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) { TCGv_i64 lo, hi; - TCGArg addr_arg; - MemOpIdx adj_oi; + MemOpIdx oi = orig_oi; bool need_bswap = false; if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) { - lo = tcg_temp_new_i64(); - hi = tcg_temp_new_i64(); + lo = tcg_temp_ebb_new_i64(); + hi = tcg_temp_ebb_new_i64(); tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val)); tcg_gen_bswap64_i64(hi, TCGV128_LOW(val)); - adj_oi = make_memop_idx(memop & ~MO_BSWAP, idx); + oi = make_memop_idx(memop & ~MO_BSWAP, idx); need_bswap = true; } else { lo = TCGV128_LOW(val); hi = TCGV128_HIGH(val); - adj_oi = oi; } -#if TARGET_LONG_BITS == 32 - addr_arg = tcgv_i32_arg(addr); -#else - addr_arg = tcgv_i64_arg(addr); -#endif - tcg_gen_op4ii_i64(INDEX_op_qemu_st_i128, lo, hi, addr_arg, adj_oi); + gen_ldst(INDEX_op_qemu_st_i128, tcgv_i64_temp(lo), + tcgv_i64_temp(hi), addr, oi); if (need_bswap) { tcg_temp_free_i64(lo); @@ -551,8 +596,8 @@ void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) } } else if (use_two_i64_for_i128(memop)) { MemOp mop[2]; - TCGv addr_p8; - TCGv_i64 x, y; + TCGTemp *addr_p8; + TCGv_i64 x, y, b = NULL; canonicalize_memop_i128_as_i64(mop, memop); @@ -564,29 +609,55 @@ void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) y = TCGV128_LOW(val); } - addr_p8 = tcg_temp_ebb_new(); if ((mop[0] ^ memop) & MO_BSWAP) { - TCGv_i64 t = tcg_temp_ebb_new_i64(); - - tcg_gen_bswap64_i64(t, x); - gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr, mop[0], idx); - tcg_gen_bswap64_i64(t, y); - tcg_gen_addi_tl(addr_p8, addr, 8); - gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr_p8, mop[1], idx); - tcg_temp_free_i64(t); - } else { - gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr, mop[0], idx); - tcg_gen_addi_tl(addr_p8, addr, 8); - gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8, mop[1], idx); + b = tcg_temp_ebb_new_i64(); + tcg_gen_bswap64_i64(b, x); + x = b; } - tcg_temp_free(addr_p8); + gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr, + make_memop_idx(mop[0], idx)); + + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + TCGv_i32 t = tcg_temp_ebb_new_i32(); + tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8); + addr_p8 = tcgv_i32_temp(t); + } else { + TCGv_i64 t = tcg_temp_ebb_new_i64(); + tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8); + addr_p8 = tcgv_i64_temp(t); + } + + if (b) { + tcg_gen_bswap64_i64(b, y); + y = b; + } + gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8, + make_memop_idx(mop[1], idx)); + + if (b) { + tcg_temp_free_i64(b); + } + tcg_temp_free_internal(addr_p8); } else { - TCGv_i64 a64 = maybe_extend_addr64(addr); - gen_helper_st_i128(cpu_env, a64, val, tcg_constant_i32(oi)); - maybe_free_addr64(a64); + if 
(tcg_ctx->addr_type == TCG_TYPE_I32) { + ext_addr = tcg_temp_ebb_new_i64(); + tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr)); + addr = tcgv_i64_temp(ext_addr); + } + gen_helper_st_i128(cpu_env, temp_tcgv_i64(addr), val, + tcg_constant_i32(orig_oi)); } - plugin_gen_mem_callbacks(NULL, addr, oi, QEMU_PLUGIN_MEM_W); + plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_W); +} + +void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx, + MemOp memop, TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & MO_SIZE) == MO_128); + tcg_debug_assert((memop & MO_SIGN) == 0); + tcg_gen_qemu_st_i128_int(val, addr, idx, memop); } static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) From 0700ceb3939f08619d7f323209597ef62b489514 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 28 Mar 2023 17:25:10 -0700 Subject: [PATCH 59/74] tcg: Remove TCGv from tcg_gen_atomic_* MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Expand from TCGv to TCGTemp inline in the translators, and validate that the size matches tcg_ctx->addr_type. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- include/tcg/tcg-op.h | 184 ++++++++++++++++++++++++++++++---------- tcg/tcg-op-ldst.c | 198 ++++++++++++++++++++++++++++--------------- 2 files changed, 267 insertions(+), 115 deletions(-) diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h index e556450ba9..35c5700183 100644 --- a/include/tcg/tcg-op.h +++ b/include/tcg/tcg-op.h @@ -858,56 +858,148 @@ tcg_gen_qemu_st_i128(TCGv_i128 v, TCGv a, TCGArg i, MemOp m) tcg_gen_qemu_st_i128_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL); } -void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32, - TCGArg, MemOp); -void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64, - TCGArg, MemOp); -void tcg_gen_atomic_cmpxchg_i128(TCGv_i128, TCGv, TCGv_i128, TCGv_i128, - TCGArg, MemOp); +void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128, + TCGv_i128, TCGArg, MemOp, TCGType); -void tcg_gen_nonatomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32, - TCGArg, MemOp); -void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64, - TCGArg, MemOp); -void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128, TCGv, TCGv_i128, TCGv_i128, - TCGArg, MemOp); +void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128, + TCGv_i128, TCGArg, MemOp, TCGType); -void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_xchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_xchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); -void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void 
tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_add_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_add_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_and_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_and_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_or_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_or_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_xor_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_xor_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_smin_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_smin_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_umin_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_umin_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_smax_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_smax_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_umax_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_umax_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); -void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, 
TCGArg, MemOp); -void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); -void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_add_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_add_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_and_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_and_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_or_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_or_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_xor_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_xor_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_smin_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_smin_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_umin_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_umin_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_smax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_smax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_umax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_umax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, + TCGArg, MemOp, TCGType); + +#define DEF_ATOMIC2(N, S) \ + static inline void N##_##S(TCGv_##S r, TCGv a, TCGv_##S v, \ + TCGArg i, MemOp m) \ + { N##_##S##_chk(r, tcgv_tl_temp(a), v, i, m, TCG_TYPE_TL); } + +#define DEF_ATOMIC3(N, S) \ + static inline void N##_##S(TCGv_##S r, TCGv a, TCGv_##S o, \ + TCGv_##S n, TCGArg i, MemOp m) \ + { N##_##S##_chk(r, tcgv_tl_temp(a), o, n, i, m, TCG_TYPE_TL); } + +DEF_ATOMIC3(tcg_gen_atomic_cmpxchg, i32) +DEF_ATOMIC3(tcg_gen_atomic_cmpxchg, i64) +DEF_ATOMIC3(tcg_gen_atomic_cmpxchg, i128) + +DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i32) +DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i64) +DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i128) + +DEF_ATOMIC2(tcg_gen_atomic_xchg, i32) +DEF_ATOMIC2(tcg_gen_atomic_xchg, i64) + +DEF_ATOMIC2(tcg_gen_atomic_fetch_add, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_add, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_xor, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_xor, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_smin, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_smin, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_umin, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_umin, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_smax, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_smax, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_umax, i32) +DEF_ATOMIC2(tcg_gen_atomic_fetch_umax, i64) + +DEF_ATOMIC2(tcg_gen_atomic_add_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_add_fetch, i64) +DEF_ATOMIC2(tcg_gen_atomic_and_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_and_fetch, i64) +DEF_ATOMIC2(tcg_gen_atomic_or_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_or_fetch, i64) +DEF_ATOMIC2(tcg_gen_atomic_xor_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_xor_fetch, i64) 
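/*
 * For example, DEF_ATOMIC2(tcg_gen_atomic_xchg, i32) expands to
 *
 *   static inline void tcg_gen_atomic_xchg_i32(TCGv_i32 r, TCGv a,
 *                                              TCGv_i32 v, TCGArg i, MemOp m)
 *   { tcg_gen_atomic_xchg_i32_chk(r, tcgv_tl_temp(a), v, i, m, TCG_TYPE_TL); }
 *
 * so front ends keep the TCGv-based interface while the _chk entry
 * points receive the address as a TCGTemp together with the expected
 * TCGType.
 */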
+DEF_ATOMIC2(tcg_gen_atomic_smin_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_smin_fetch, i64) +DEF_ATOMIC2(tcg_gen_atomic_umin_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_umin_fetch, i64) +DEF_ATOMIC2(tcg_gen_atomic_smax_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_smax_fetch, i64) +DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i32) +DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64) + +#undef DEF_ATOMIC2 +#undef DEF_ATOMIC3 void tcg_gen_mov_vec(TCGv_vec, TCGv_vec); void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32); diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c index 0fd7acf258..679be51e90 100644 --- a/tcg/tcg-op-ldst.c +++ b/tcg/tcg-op-ldst.c @@ -443,22 +443,21 @@ static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) ret[1] = mop_2; } -static TCGv_i64 maybe_extend_addr64(TCGv addr) +static TCGv_i64 maybe_extend_addr64(TCGTemp *addr) { -#if TARGET_LONG_BITS == 32 - TCGv_i64 a64 = tcg_temp_ebb_new_i64(); - tcg_gen_extu_i32_i64(a64, addr); - return a64; -#else - return addr; -#endif + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + TCGv_i64 a64 = tcg_temp_ebb_new_i64(); + tcg_gen_extu_i32_i64(a64, temp_tcgv_i32(addr)); + return a64; + } + return temp_tcgv_i64(addr); } static void maybe_free_addr64(TCGv_i64 a64) { -#if TARGET_LONG_BITS == 32 - tcg_temp_free_i64(a64); -#endif + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + tcg_temp_free_i64(a64); + } } static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr, @@ -742,17 +741,18 @@ static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = { WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be) }; -void tcg_gen_nonatomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, - TCGv_i32 newv, TCGArg idx, MemOp memop) +static void tcg_gen_nonatomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr, + TCGv_i32 cmpv, TCGv_i32 newv, + TCGArg idx, MemOp memop) { TCGv_i32 t1 = tcg_temp_ebb_new_i32(); TCGv_i32 t2 = tcg_temp_ebb_new_i32(); tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE); - tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN); + tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop & ~MO_SIGN); tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1); - tcg_gen_qemu_st_i32(t2, addr, idx, memop); + tcg_gen_qemu_st_i32_int(t2, addr, idx, memop); tcg_temp_free_i32(t2); if (memop & MO_SIGN) { @@ -763,15 +763,26 @@ void tcg_gen_nonatomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, tcg_temp_free_i32(t1); } -void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, - TCGv_i32 newv, TCGArg idx, MemOp memop) +void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr, + TCGv_i32 cmpv, TCGv_i32 newv, + TCGArg idx, MemOp memop, + TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & MO_SIZE) <= MO_32); + tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop); +} + +static void tcg_gen_atomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr, + TCGv_i32 cmpv, TCGv_i32 newv, + TCGArg idx, MemOp memop) { gen_atomic_cx_i32 gen; TCGv_i64 a64; MemOpIdx oi; if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { - tcg_gen_nonatomic_cmpxchg_i32(retv, addr, cmpv, newv, idx, memop); + tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop); return; } @@ -789,14 +800,25 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, } } -void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, - TCGv_i64 newv, TCGArg idx, MemOp memop) +void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr, + TCGv_i32 cmpv, TCGv_i32 newv, + 
TCGArg idx, MemOp memop, + TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & MO_SIZE) <= MO_32); + tcg_gen_atomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop); +} + +static void tcg_gen_nonatomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr, + TCGv_i64 cmpv, TCGv_i64 newv, + TCGArg idx, MemOp memop) { TCGv_i64 t1, t2; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { - tcg_gen_nonatomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv), - TCGV_LOW(newv), idx, memop); + tcg_gen_nonatomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv), + TCGV_LOW(newv), idx, memop); if (memop & MO_SIGN) { tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31); } else { @@ -810,9 +832,9 @@ void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE); - tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN); + tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop & ~MO_SIGN); tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1); - tcg_gen_qemu_st_i64(t2, addr, idx, memop); + tcg_gen_qemu_st_i64_int(t2, addr, idx, memop); tcg_temp_free_i64(t2); if (memop & MO_SIGN) { @@ -823,11 +845,22 @@ void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, tcg_temp_free_i64(t1); } -void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, - TCGv_i64 newv, TCGArg idx, MemOp memop) +void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr, + TCGv_i64 cmpv, TCGv_i64 newv, + TCGArg idx, MemOp memop, + TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & MO_SIZE) <= MO_64); + tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop); +} + +static void tcg_gen_atomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr, + TCGv_i64 cmpv, TCGv_i64 newv, + TCGArg idx, MemOp memop) { if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { - tcg_gen_nonatomic_cmpxchg_i64(retv, addr, cmpv, newv, idx, memop); + tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop); return; } @@ -856,8 +889,8 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, } if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_atomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv), - TCGV_LOW(newv), idx, memop); + tcg_gen_atomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv), + TCGV_LOW(newv), idx, memop); if (memop & MO_SIGN) { tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31); } else { @@ -870,7 +903,8 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, tcg_gen_extrl_i64_i32(c32, cmpv); tcg_gen_extrl_i64_i32(n32, newv); - tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN); + tcg_gen_atomic_cmpxchg_i32_int(r32, addr, c32, n32, + idx, memop & ~MO_SIGN); tcg_temp_free_i32(c32); tcg_temp_free_i32(n32); @@ -883,8 +917,18 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, } } -void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, - TCGv_i128 newv, TCGArg idx, MemOp memop) +void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr, + TCGv_i64 cmpv, TCGv_i64 newv, + TCGArg idx, MemOp memop, TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & MO_SIZE) <= MO_64); + tcg_gen_atomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop); +} + +static void tcg_gen_nonatomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr, + TCGv_i128 cmpv, TCGv_i128 newv, + TCGArg idx, MemOp 
memop) { if (TCG_TARGET_REG_BITS == 32) { /* Inline expansion below is simply too large for 32-bit hosts. */ @@ -892,12 +936,8 @@ void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, ? gen_helper_nonatomic_cmpxchgo_le : gen_helper_nonatomic_cmpxchgo_be); MemOpIdx oi = make_memop_idx(memop, idx); - TCGv_i64 a64; + TCGv_i64 a64 = maybe_extend_addr64(addr); - tcg_debug_assert((memop & MO_SIZE) == MO_128); - tcg_debug_assert((memop & MO_SIGN) == 0); - - a64 = maybe_extend_addr64(addr); gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi)); maybe_free_addr64(a64); } else { @@ -907,7 +947,7 @@ void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, TCGv_i64 t1 = tcg_temp_ebb_new_i64(); TCGv_i64 z = tcg_constant_i64(0); - tcg_gen_qemu_ld_i128(oldv, addr, idx, memop); + tcg_gen_qemu_ld_i128_int(oldv, addr, idx, memop); /* Compare i128 */ tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv)); @@ -921,7 +961,7 @@ void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, TCGV128_HIGH(newv), TCGV128_HIGH(oldv)); /* Unconditional writeback. */ - tcg_gen_qemu_st_i128(tmpv, addr, idx, memop); + tcg_gen_qemu_st_i128_int(tmpv, addr, idx, memop); tcg_gen_mov_i128(retv, oldv); tcg_temp_free_i64(t0); @@ -931,20 +971,28 @@ void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, } } -void tcg_gen_atomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, - TCGv_i128 newv, TCGArg idx, MemOp memop) +void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr, + TCGv_i128 cmpv, TCGv_i128 newv, + TCGArg idx, MemOp memop, + TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128); + tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop); +} + +static void tcg_gen_atomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr, + TCGv_i128 cmpv, TCGv_i128 newv, + TCGArg idx, MemOp memop) { gen_atomic_cx_i128 gen; if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { - tcg_gen_nonatomic_cmpxchg_i128(retv, addr, cmpv, newv, idx, memop); + tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop); return; } - tcg_debug_assert((memop & MO_SIZE) == MO_128); - tcg_debug_assert((memop & MO_SIGN) == 0); gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; - if (gen) { MemOpIdx oi = make_memop_idx(memop, idx); TCGv_i64 a64 = maybe_extend_addr64(addr); @@ -964,7 +1012,17 @@ void tcg_gen_atomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, tcg_gen_movi_i64(TCGV128_HIGH(retv), 0); } -static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, +void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr, + TCGv_i128 cmpv, TCGv_i128 newv, + TCGArg idx, MemOp memop, + TCGType addr_type) +{ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); + tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128); + tcg_gen_atomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop); +} + +static void do_nonatomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val, TCGArg idx, MemOp memop, bool new_val, void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32)) { @@ -973,17 +1031,17 @@ static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, memop = tcg_canonicalize_memop(memop, 0, 0); - tcg_gen_qemu_ld_i32(t1, addr, idx, memop); + tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop); tcg_gen_ext_i32(t2, val, memop); gen(t2, t1, t2); - tcg_gen_qemu_st_i32(t2, addr, idx, memop); + tcg_gen_qemu_st_i32_int(t2, addr, idx, memop); 
tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); } -static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, +static void do_atomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val, TCGArg idx, MemOp memop, void * const table[]) { gen_atomic_op_i32 gen; @@ -1005,7 +1063,7 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, } } -static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, +static void do_nonatomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val, TCGArg idx, MemOp memop, bool new_val, void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) { @@ -1014,40 +1072,36 @@ static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, memop = tcg_canonicalize_memop(memop, 1, 0); - tcg_gen_qemu_ld_i64(t1, addr, idx, memop); + tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop); tcg_gen_ext_i64(t2, val, memop); gen(t2, t1, t2); - tcg_gen_qemu_st_i64(t2, addr, idx, memop); + tcg_gen_qemu_st_i64_int(t2, addr, idx, memop); tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop); tcg_temp_free_i64(t1); tcg_temp_free_i64(t2); } -static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, +static void do_atomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val, TCGArg idx, MemOp memop, void * const table[]) { memop = tcg_canonicalize_memop(memop, 1, 0); if ((memop & MO_SIZE) == MO_64) { -#ifdef CONFIG_ATOMIC64 - gen_atomic_op_i64 gen; - TCGv_i64 a64; - MemOpIdx oi; + gen_atomic_op_i64 gen = table[memop & (MO_SIZE | MO_BSWAP)]; - gen = table[memop & (MO_SIZE | MO_BSWAP)]; - tcg_debug_assert(gen != NULL); + if (gen) { + MemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx); + TCGv_i64 a64 = maybe_extend_addr64(addr); + gen(ret, cpu_env, a64, val, tcg_constant_i32(oi)); + maybe_free_addr64(a64); + return; + } - oi = make_memop_idx(memop & ~MO_SIGN, idx); - a64 = maybe_extend_addr64(addr); - gen(ret, cpu_env, a64, val, tcg_constant_i32(oi)); - maybe_free_addr64(a64); -#else gen_helper_exit_atomic(cpu_env); /* Produce a result, so that we have a well-formed opcode stream with respect to uses of the result in the (dead) code following. 
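 * gen_helper_exit_atomic raises EXCP_ATOMIC, so at runtime the
 * instruction is restarted under the exclusive (serialized) slow path
 * and the zero stored into ret here is never observed.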
*/ tcg_gen_movi_i64(ret, 0); -#endif /* CONFIG_ATOMIC64 */ } else { TCGv_i32 v32 = tcg_temp_ebb_new_i32(); TCGv_i32 r32 = tcg_temp_ebb_new_i32(); @@ -1075,9 +1129,12 @@ static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \ WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le) \ WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ }; \ -void tcg_gen_atomic_##NAME##_i32 \ - (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \ +void tcg_gen_atomic_##NAME##_i32_chk(TCGv_i32 ret, TCGTemp *addr, \ + TCGv_i32 val, TCGArg idx, \ + MemOp memop, TCGType addr_type) \ { \ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); \ + tcg_debug_assert((memop & MO_SIZE) <= MO_32); \ if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \ } else { \ @@ -1085,9 +1142,12 @@ void tcg_gen_atomic_##NAME##_i32 \ tcg_gen_##OP##_i32); \ } \ } \ -void tcg_gen_atomic_##NAME##_i64 \ - (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \ +void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr, \ + TCGv_i64 val, TCGArg idx, \ + MemOp memop, TCGType addr_type) \ { \ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); \ + tcg_debug_assert((memop & MO_SIZE) <= MO_64); \ if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \ } else { \ From fecccfcc542370caedbfc09fe9963d128f5e73a0 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 16 May 2023 20:07:20 -0700 Subject: [PATCH 60/74] tcg: Split INDEX_op_qemu_{ld,st}* for guest address size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For 32-bit hosts, we cannot simply rely on TCGContext.addr_bits, as we need one or two host registers to represent the guest address. Create the new opcodes and update all users. Since we have not yet eliminated TARGET_LONG_BITS, only one of the two opcodes will ever be used, so we can get away with treating them the same in the backends. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- include/tcg/tcg-opc.h | 35 +++++++++---- tcg/aarch64/tcg-target.c.inc | 24 ++++++--- tcg/arm/tcg-target.c.inc | 83 ++++++++++++++++--------------- tcg/i386/tcg-target.c.inc | 79 ++++++++++++++++++----------- tcg/loongarch64/tcg-target.c.inc | 24 ++++++--- tcg/mips/tcg-target.c.inc | 66 ++++++++++++++++--------- tcg/optimize.c | 21 +++++--- tcg/ppc/tcg-target.c.inc | 77 +++++++++++++++++------------ tcg/riscv/tcg-target.c.inc | 24 ++++++--- tcg/s390x/tcg-target.c.inc | 24 ++++++--- tcg/sparc64/tcg-target.c.inc | 24 ++++++--- tcg/tcg-op-ldst.c | 85 ++++++++++++++++++++++++-------- tcg/tcg.c | 42 ++++++++++------ tcg/tci.c | 32 +++++++----- tcg/tci/tcg-target.c.inc | 44 +++++++++-------- 15 files changed, 436 insertions(+), 248 deletions(-) diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h index 29216366d2..21594c1590 100644 --- a/include/tcg/tcg-opc.h +++ b/include/tcg/tcg-opc.h @@ -186,7 +186,6 @@ DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64)) DEF(muluh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muluh_i64)) DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64)) -#define TLADDR_ARGS (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 1 : 2) #define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 
1 : 2) /* QEMU specific */ @@ -199,25 +198,44 @@ DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) DEF(plugin_cb_start, 0, 0, 3, TCG_OPF_NOT_PRESENT) DEF(plugin_cb_end, 0, 0, 0, TCG_OPF_NOT_PRESENT) -DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1, +/* Replicate ld/st ops for 32 and 64-bit guest addresses. */ +DEF(qemu_ld_a32_i32, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) -DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 1, +DEF(qemu_st_a32_i32, 0, 1 + 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) -DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 1, +DEF(qemu_ld_a32_i64, DATA64_ARGS, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) -DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 1, +DEF(qemu_st_a32_i64, 0, DATA64_ARGS + 1, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) + +DEF(qemu_ld_a64_i32, 1, DATA64_ARGS, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) +DEF(qemu_st_a64_i32, 0, 1 + DATA64_ARGS, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) +DEF(qemu_ld_a64_i64, DATA64_ARGS, DATA64_ARGS, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) +DEF(qemu_st_a64_i64, 0, DATA64_ARGS + DATA64_ARGS, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) /* Only used by i386 to cope with stupid register constraints. */ -DEF(qemu_st8_i32, 0, TLADDR_ARGS + 1, 1, +DEF(qemu_st8_a32_i32, 0, 1 + 1, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | + IMPL(TCG_TARGET_HAS_qemu_st8_i32)) +DEF(qemu_st8_a64_i32, 0, 1 + DATA64_ARGS, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | IMPL(TCG_TARGET_HAS_qemu_st8_i32)) /* Only for 64-bit hosts at the moment. */ -DEF(qemu_ld_i128, 2, 1, 1, +DEF(qemu_ld_a32_i128, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) -DEF(qemu_st_i128, 0, 3, 1, +DEF(qemu_ld_a64_i128, 2, 1, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | + IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) +DEF(qemu_st_a32_i128, 0, 3, 1, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | + IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) +DEF(qemu_st_a64_i128, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) @@ -291,7 +309,6 @@ DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT) DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT) #endif -#undef TLADDR_ARGS #undef DATA64_ARGS #undef IMPL #undef IMPL64 diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index ea4108d59c..a1c91162e8 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -2164,12 +2164,16 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]); break; - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: tcg_out_qemu_ld(s, a0, a1, a2, ext); break; - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: tcg_out_qemu_st(s, REG0(0), a1, a2, ext); break; @@ -2806,11 +2810,15 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_movcond_i64: return C_O1_I4(r, r, rA, rZ, rZ); - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: return C_O1_I1(r, l); - case 
INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: return C_O0_I2(lZ, l); case INDEX_op_deposit_i32: diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index add8cc1fd5..47f3ff18fa 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -1985,41 +1985,36 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, ARITH_MOV, args[0], 0, 0); break; - case INDEX_op_qemu_ld_i32: - if (TARGET_LONG_BITS == 32) { - tcg_out_qemu_ld(s, args[0], -1, args[1], -1, - args[2], TCG_TYPE_I32); - } else { - tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], - args[3], TCG_TYPE_I32); - } + case INDEX_op_qemu_ld_a32_i32: + tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); break; - case INDEX_op_qemu_ld_i64: - if (TARGET_LONG_BITS == 32) { - tcg_out_qemu_ld(s, args[0], args[1], args[2], -1, - args[3], TCG_TYPE_I64); - } else { - tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], - args[4], TCG_TYPE_I64); - } + case INDEX_op_qemu_ld_a64_i32: + tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], + args[3], TCG_TYPE_I32); break; - case INDEX_op_qemu_st_i32: - if (TARGET_LONG_BITS == 32) { - tcg_out_qemu_st(s, args[0], -1, args[1], -1, - args[2], TCG_TYPE_I32); - } else { - tcg_out_qemu_st(s, args[0], -1, args[1], args[2], - args[3], TCG_TYPE_I32); - } + case INDEX_op_qemu_ld_a32_i64: + tcg_out_qemu_ld(s, args[0], args[1], args[2], -1, + args[3], TCG_TYPE_I64); break; - case INDEX_op_qemu_st_i64: - if (TARGET_LONG_BITS == 32) { - tcg_out_qemu_st(s, args[0], args[1], args[2], -1, - args[3], TCG_TYPE_I64); - } else { - tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], - args[4], TCG_TYPE_I64); - } + case INDEX_op_qemu_ld_a64_i64: + tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], + args[4], TCG_TYPE_I64); + break; + + case INDEX_op_qemu_st_a32_i32: + tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); + break; + case INDEX_op_qemu_st_a64_i32: + tcg_out_qemu_st(s, args[0], -1, args[1], args[2], + args[3], TCG_TYPE_I32); + break; + case INDEX_op_qemu_st_a32_i64: + tcg_out_qemu_st(s, args[0], args[1], args[2], -1, + args[3], TCG_TYPE_I64); + break; + case INDEX_op_qemu_st_a64_i64: + tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], + args[4], TCG_TYPE_I64); break; case INDEX_op_bswap16_i32: @@ -2160,14 +2155,22 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_setcond2_i32: return C_O1_I4(r, r, r, rI, rI); - case INDEX_op_qemu_ld_i32: - return TARGET_LONG_BITS == 32 ? C_O1_I1(r, q) : C_O1_I2(r, q, q); - case INDEX_op_qemu_ld_i64: - return TARGET_LONG_BITS == 32 ? C_O2_I1(e, p, q) : C_O2_I2(e, p, q, q); - case INDEX_op_qemu_st_i32: - return TARGET_LONG_BITS == 32 ? C_O0_I2(q, q) : C_O0_I3(q, q, q); - case INDEX_op_qemu_st_i64: - return TARGET_LONG_BITS == 32 ? 
C_O0_I3(Q, p, q) : C_O0_I4(Q, p, q, q); + case INDEX_op_qemu_ld_a32_i32: + return C_O1_I1(r, q); + case INDEX_op_qemu_ld_a64_i32: + return C_O1_I2(r, q, q); + case INDEX_op_qemu_ld_a32_i64: + return C_O2_I1(e, p, q); + case INDEX_op_qemu_ld_a64_i64: + return C_O2_I2(e, p, q, q); + case INDEX_op_qemu_st_a32_i32: + return C_O0_I2(q, q); + case INDEX_op_qemu_st_a64_i32: + return C_O0_I3(q, q, q); + case INDEX_op_qemu_st_a32_i64: + return C_O0_I3(Q, p, q); + case INDEX_op_qemu_st_a64_i64: + return C_O0_I4(Q, p, q, q); case INDEX_op_st_vec: return C_O0_I2(w, r); diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index 3b8528e332..d8fd38d9e7 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -2475,35 +2475,51 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0); break; - case INDEX_op_qemu_ld_i32: - if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { - tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32); - } else { + case INDEX_op_qemu_ld_a64_i32: + if (TCG_TARGET_REG_BITS == 32) { tcg_out_qemu_ld(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32); + break; } + /* fall through */ + case INDEX_op_qemu_ld_a32_i32: + tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); - } else if (TARGET_LONG_BITS == 32) { + } else { tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64); + } + break; + case INDEX_op_qemu_ld_a64_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); } else { tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); } break; - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st8_i32: - if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { - tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32); - } else { + + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st8_a64_i32: + if (TCG_TARGET_REG_BITS == 32) { tcg_out_qemu_st(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32); + break; } + /* fall through */ + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st8_a32_i32: + tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); - } else if (TARGET_LONG_BITS == 32) { + } else { tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64); + } + break; + case INDEX_op_qemu_st_a64_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); } else { tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); } @@ -3181,26 +3197,29 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_clz_i64: return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r); - case INDEX_op_qemu_ld_i32: - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - ? C_O1_I1(r, L) : C_O1_I2(r, L, L)); + case INDEX_op_qemu_ld_a32_i32: + return C_O1_I1(r, L); + case INDEX_op_qemu_ld_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O1_I2(r, L, L); - case INDEX_op_qemu_st_i32: - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - ? C_O0_I2(L, L) : C_O0_I3(L, L, L)); - case INDEX_op_qemu_st8_i32: - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - ? C_O0_I2(s, L) : C_O0_I3(s, L, L)); + case INDEX_op_qemu_st_a32_i32: + return C_O0_I2(L, L); + case INDEX_op_qemu_st_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? 
C_O0_I2(L, L) : C_O0_I3(L, L, L); + case INDEX_op_qemu_st8_a32_i32: + return C_O0_I2(s, L); + case INDEX_op_qemu_st8_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(s, L) : C_O0_I3(s, L, L); - case INDEX_op_qemu_ld_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L) - : C_O2_I2(r, r, L, L)); + case INDEX_op_qemu_ld_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I1(r, r, L); + case INDEX_op_qemu_ld_a64_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I2(r, r, L, L); - case INDEX_op_qemu_st_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(L, L, L) - : C_O0_I4(L, L, L, L)); + case INDEX_op_qemu_st_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I3(L, L, L); + case INDEX_op_qemu_st_a64_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I4(L, L, L, L); case INDEX_op_brcond2_i32: return C_O0_I4(r, r, ri, ri); diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc index 07d35f92fa..0c4ef72d6f 100644 --- a/tcg/loongarch64/tcg-target.c.inc +++ b/tcg/loongarch64/tcg-target.c.inc @@ -1443,16 +1443,20 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_ldst(s, OPC_ST_D, a0, a1, a2); break; - case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); break; - case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); break; @@ -1492,8 +1496,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_st32_i64: case INDEX_op_st_i32: case INDEX_op_st_i64: - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: return C_O0_I2(rZ, r); case INDEX_op_brcond_i32: @@ -1535,8 +1541,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_ld32u_i64: case INDEX_op_ld_i32: case INDEX_op_ld_i64: - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: return C_O1_I1(r, r); case INDEX_op_andc_i32: diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index 3f3fe5b991..7ff4e2ff71 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -1954,34 +1954,49 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]); break; - case INDEX_op_qemu_ld_i32: - if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { - tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I32); - } else { + case INDEX_op_qemu_ld_a64_i32: + if (TCG_TARGET_REG_BITS == 32) { tcg_out_qemu_ld(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32); + break; } + /* fall through */ + case INDEX_op_qemu_ld_a32_i32: + tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, 
TCG_TYPE_I64); - } else if (TARGET_LONG_BITS == 32) { + } else { tcg_out_qemu_ld(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64); + } + break; + case INDEX_op_qemu_ld_a64_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64); } else { tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); } break; - case INDEX_op_qemu_st_i32: - if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { - tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I32); - } else { + + case INDEX_op_qemu_st_a64_i32: + if (TCG_TARGET_REG_BITS == 32) { tcg_out_qemu_st(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32); + break; } + /* fall through */ + case INDEX_op_qemu_st_a32_i32: + tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64); - } else if (TARGET_LONG_BITS == 32) { + } else { tcg_out_qemu_st(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64); + } + break; + case INDEX_op_qemu_st_a64_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64); } else { tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); } @@ -2140,19 +2155,22 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_brcond2_i32: return C_O0_I4(rZ, rZ, rZ, rZ); - case INDEX_op_qemu_ld_i32: - return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 - ? C_O1_I1(r, r) : C_O1_I2(r, r, r)); - case INDEX_op_qemu_st_i32: - return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 - ? C_O0_I2(rZ, r) : C_O0_I3(rZ, r, r)); - case INDEX_op_qemu_ld_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) - : TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, r) - : C_O2_I2(r, r, r, r)); - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_ld_a32_i32: + return C_O1_I1(r, r); + case INDEX_op_qemu_ld_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r); + case INDEX_op_qemu_st_a32_i32: + return C_O0_I2(rZ, r); + case INDEX_op_qemu_st_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, r, r); + case INDEX_op_qemu_ld_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r); + case INDEX_op_qemu_ld_a64_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r); + case INDEX_op_qemu_st_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, rZ, r); + case INDEX_op_qemu_st_a64_i64: return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) - : TARGET_LONG_BITS == 32 ? 
C_O0_I3(rZ, rZ, r) : C_O0_I4(rZ, rZ, r, r)); default: diff --git a/tcg/optimize.c b/tcg/optimize.c index da400b9668..bf975a3a6c 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2184,15 +2184,22 @@ void tcg_optimize(TCGContext *s) CASE_OP_32_64_VEC(orc): done = fold_orc(&ctx, op); break; - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: - case INDEX_op_qemu_ld_i128: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: + case INDEX_op_qemu_ld_a32_i128: + case INDEX_op_qemu_ld_a64_i128: done = fold_qemu_ld(&ctx, op); break; - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st8_i32: - case INDEX_op_qemu_st_i64: - case INDEX_op_qemu_st_i128: + case INDEX_op_qemu_st8_a32_i32: + case INDEX_op_qemu_st8_a64_i32: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: + case INDEX_op_qemu_st_a32_i128: + case INDEX_op_qemu_st_a64_i128: done = fold_qemu_st(&ctx, op); break; CASE_OP_32_64(rem): diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc index b5c49895f3..e2851c5dcd 100644 --- a/tcg/ppc/tcg-target.c.inc +++ b/tcg/ppc/tcg-target.c.inc @@ -2832,43 +2832,58 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out32(s, MODUD | TAB(args[0], args[1], args[2])); break; - case INDEX_op_qemu_ld_i32: - if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { - tcg_out_qemu_ld(s, args[0], -1, args[1], -1, - args[2], TCG_TYPE_I32); - } else { + case INDEX_op_qemu_ld_a64_i32: + if (TCG_TARGET_REG_BITS == 32) { tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], args[3], TCG_TYPE_I32); + break; } + /* fall through */ + case INDEX_op_qemu_ld_a32_i32: + tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); break; - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I64); - } else if (TARGET_LONG_BITS == 32) { + } else { tcg_out_qemu_ld(s, args[0], args[1], args[2], -1, args[3], TCG_TYPE_I64); + } + break; + case INDEX_op_qemu_ld_a64_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_ld(s, args[0], -1, args[1], -1, + args[2], TCG_TYPE_I64); } else { tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], args[4], TCG_TYPE_I64); } break; - case INDEX_op_qemu_st_i32: - if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { - tcg_out_qemu_st(s, args[0], -1, args[1], -1, - args[2], TCG_TYPE_I32); - } else { + + case INDEX_op_qemu_st_a64_i32: + if (TCG_TARGET_REG_BITS == 32) { tcg_out_qemu_st(s, args[0], -1, args[1], args[2], args[3], TCG_TYPE_I32); + break; } + /* fall through */ + case INDEX_op_qemu_st_a32_i32: + tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); break; - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I64); - } else if (TARGET_LONG_BITS == 32) { + } else { tcg_out_qemu_st(s, args[0], args[1], args[2], -1, args[3], TCG_TYPE_I64); + } + break; + case INDEX_op_qemu_st_a64_i64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_qemu_st(s, args[0], -1, args[1], -1, + args[2], TCG_TYPE_I64); } else { tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], args[4], TCG_TYPE_I64); @@ -3689,25 +3704,23 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_sub2_i32: return C_O2_I4(r, r, rI, rZM, r, r); - case INDEX_op_qemu_ld_i32: - return (TCG_TARGET_REG_BITS == 64 || 
TARGET_LONG_BITS == 32 - ? C_O1_I1(r, r) - : C_O1_I2(r, r, r)); + case INDEX_op_qemu_ld_a32_i32: + return C_O1_I1(r, r); + case INDEX_op_qemu_ld_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r); + case INDEX_op_qemu_ld_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r); + case INDEX_op_qemu_ld_a64_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r); - case INDEX_op_qemu_st_i32: - return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 - ? C_O0_I2(r, r) - : C_O0_I3(r, r, r)); - - case INDEX_op_qemu_ld_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) - : TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, r) - : C_O2_I2(r, r, r, r)); - - case INDEX_op_qemu_st_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) - : TARGET_LONG_BITS == 32 ? C_O0_I3(r, r, r) - : C_O0_I4(r, r, r, r)); + case INDEX_op_qemu_st_a32_i32: + return C_O0_I2(r, r); + case INDEX_op_qemu_st_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r); + case INDEX_op_qemu_st_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r); + case INDEX_op_qemu_st_a64_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r); case INDEX_op_add_vec: case INDEX_op_sub_vec: diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index 1fc1a9199b..de61edb5df 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -1382,16 +1382,20 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_setcond(s, args[3], a0, a1, a2); break; - case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); break; - case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); break; @@ -1533,11 +1537,15 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_sub2_i64: return C_O2_I4(r, r, rZ, rZ, rM, rM); - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: return C_O1_I1(r, r); - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: return C_O0_I2(rZ, r); default: diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc index 8e34b214fc..466d8e7e53 100644 --- a/tcg/s390x/tcg-target.c.inc +++ b/tcg/s390x/tcg-target.c.inc @@ -2210,16 +2210,20 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, args[2], const_args[2], args[3], const_args[3], args[4]); break; - case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32); break; - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64); break; - case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: tcg_out_qemu_st(s, args[0], args[1], args[2], 
TCG_TYPE_I32); break; - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64); break; @@ -3093,11 +3097,15 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_ctpop_i64: return C_O1_I1(r, r); - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: return C_O1_I1(r, r); - case INDEX_op_qemu_st_i64: - case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: return C_O0_I2(r, r); case INDEX_op_deposit_i32: diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index 9676b745a2..6e6c26d470 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -1376,16 +1376,20 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX); break; - case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); break; - case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); break; - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); break; @@ -1507,8 +1511,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_extu_i32_i64: case INDEX_op_extrl_i64_i32: case INDEX_op_extrh_i64_i32: - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: return C_O1_I1(r, r); case INDEX_op_st8_i32: @@ -1518,8 +1524,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_st_i32: case INDEX_op_st32_i64: case INDEX_op_st_i64: - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: return C_O0_I2(rZ, r); case INDEX_op_add_i32: diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c index 679be51e90..f4e508cb68 100644 --- a/tcg/tcg-op-ldst.c +++ b/tcg/tcg-op-ldst.c @@ -164,6 +164,7 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr, MemOp orig_memop; MemOpIdx orig_oi, oi; TCGv_i64 copy_addr; + TCGOpcode opc; tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0); @@ -179,7 +180,12 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr, } copy_addr = plugin_maybe_preserve_addr(addr); - gen_ldst(INDEX_op_qemu_ld_i32, tcgv_i32_temp(val), NULL, addr, oi); + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_ld_a32_i32; + } else { + opc = INDEX_op_qemu_ld_a64_i32; + } + gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi); plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { @@ -235,9 +241,17 @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr, } if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) { - opc = INDEX_op_qemu_st8_i32; + if 
(tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_st8_a32_i32; + } else { + opc = INDEX_op_qemu_st8_a64_i32; + } } else { - opc = INDEX_op_qemu_st_i32; + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_st_a32_i32; + } else { + opc = INDEX_op_qemu_st_a64_i32; + } } gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi); plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W); @@ -261,6 +275,7 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr, MemOp orig_memop; MemOpIdx orig_oi, oi; TCGv_i64 copy_addr; + TCGOpcode opc; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop); @@ -286,7 +301,12 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr, } copy_addr = plugin_maybe_preserve_addr(addr); - gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, oi); + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_ld_a32_i64; + } else { + opc = INDEX_op_qemu_ld_a64_i64; + } + gen_ldst_i64(opc, val, addr, oi); plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { @@ -322,6 +342,7 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr, { TCGv_i64 swap = NULL; MemOpIdx orig_oi, oi; + TCGOpcode opc; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop); @@ -352,7 +373,12 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr, oi = make_memop_idx(memop, idx); } - gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, oi); + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_st_a32_i64; + } else { + opc = INDEX_op_qemu_st_a64_i64; + } + gen_ldst_i64(opc, val, addr, oi); plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W); if (swap) { @@ -465,6 +491,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr, { const MemOpIdx orig_oi = make_memop_idx(memop, idx); TCGv_i64 ext_addr = NULL; + TCGOpcode opc; tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); @@ -484,8 +511,12 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr, hi = TCGV128_HIGH(val); } - gen_ldst(INDEX_op_qemu_ld_i128, tcgv_i64_temp(lo), - tcgv_i64_temp(hi), addr, oi); + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_ld_a32_i128; + } else { + opc = INDEX_op_qemu_ld_a64_i128; + } + gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi); if (need_bswap) { tcg_gen_bswap64_i64(lo, lo); @@ -500,6 +531,12 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr, canonicalize_memop_i128_as_i64(mop, memop); need_bswap = (mop[0] ^ memop) & MO_BSWAP; + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_ld_a32_i64; + } else { + opc = INDEX_op_qemu_ld_a64_i64; + } + /* * Since there are no global TCGv_i128, there is no visible state * changed if the second load faults. 
Load directly into the two @@ -513,8 +550,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr, y = TCGV128_LOW(val); } - gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr, - make_memop_idx(mop[0], idx)); + gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx)); if (need_bswap) { tcg_gen_bswap64_i64(x, x); @@ -530,8 +566,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr, addr_p8 = tcgv_i64_temp(t); } - gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8, - make_memop_idx(mop[1], idx)); + gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx)); tcg_temp_free_internal(addr_p8); if (need_bswap) { @@ -564,6 +599,7 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr, { const MemOpIdx orig_oi = make_memop_idx(memop, idx); TCGv_i64 ext_addr = NULL; + TCGOpcode opc; tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST); @@ -586,8 +622,12 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr, hi = TCGV128_HIGH(val); } - gen_ldst(INDEX_op_qemu_st_i128, tcgv_i64_temp(lo), - tcgv_i64_temp(hi), addr, oi); + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_st_a32_i128; + } else { + opc = INDEX_op_qemu_st_a64_i128; + } + gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi); if (need_bswap) { tcg_temp_free_i64(lo); @@ -600,6 +640,12 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr, canonicalize_memop_i128_as_i64(mop, memop); + if (tcg_ctx->addr_type == TCG_TYPE_I32) { + opc = INDEX_op_qemu_st_a32_i64; + } else { + opc = INDEX_op_qemu_st_a64_i64; + } + if ((memop & MO_BSWAP) == MO_LE) { x = TCGV128_LOW(val); y = TCGV128_HIGH(val); @@ -613,8 +659,8 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr, tcg_gen_bswap64_i64(b, x); x = b; } - gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr, - make_memop_idx(mop[0], idx)); + + gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx)); if (tcg_ctx->addr_type == TCG_TYPE_I32) { TCGv_i32 t = tcg_temp_ebb_new_i32(); @@ -628,13 +674,10 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr, if (b) { tcg_gen_bswap64_i64(b, y); - y = b; - } - gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8, - make_memop_idx(mop[1], idx)); - - if (b) { + gen_ldst_i64(opc, b, addr_p8, make_memop_idx(mop[1], idx)); tcg_temp_free_i64(b); + } else { + gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx)); } tcg_temp_free_internal(addr_p8); } else { diff --git a/tcg/tcg.c b/tcg/tcg.c index ff860b5469..d33d5cfe38 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -1839,17 +1839,24 @@ bool tcg_op_supported(TCGOpcode op) case INDEX_op_exit_tb: case INDEX_op_goto_tb: case INDEX_op_goto_ptr: - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_ld_i64: - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: return true; - case INDEX_op_qemu_st8_i32: + case INDEX_op_qemu_st8_a32_i32: + case INDEX_op_qemu_st8_a64_i32: return TCG_TARGET_HAS_qemu_st8_i32; - case INDEX_op_qemu_ld_i128: - case INDEX_op_qemu_st_i128: + case INDEX_op_qemu_ld_a32_i128: + case INDEX_op_qemu_ld_a64_i128: + case INDEX_op_qemu_st_a32_i128: + case INDEX_op_qemu_st_a64_i128: return TCG_TARGET_HAS_qemu_ldst_i128; case INDEX_op_mov_i32: @@ -2466,13 +2473,20 @@ static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) } i = 1; break; - case 
INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st8_i32: - case INDEX_op_qemu_ld_i64: - case INDEX_op_qemu_st_i64: - case INDEX_op_qemu_ld_i128: - case INDEX_op_qemu_st_i128: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: + case INDEX_op_qemu_st8_a32_i32: + case INDEX_op_qemu_st8_a64_i32: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: + case INDEX_op_qemu_ld_a32_i128: + case INDEX_op_qemu_ld_a64_i128: + case INDEX_op_qemu_st_a32_i128: + case INDEX_op_qemu_st_a64_i128: { const char *s_al, *s_op, *s_at; MemOpIdx oi = op->args[k++]; diff --git a/tcg/tci.c b/tcg/tci.c index 15f2f8c463..742c791726 100644 --- a/tcg/tci.c +++ b/tcg/tci.c @@ -922,7 +922,8 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, tb_ptr = ptr; break; - case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; @@ -934,7 +935,8 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, regs[r0] = tmp32; break; - case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: if (TCG_TARGET_REG_BITS == 64) { tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; @@ -954,7 +956,8 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, } break; - case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; @@ -966,7 +969,8 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, tci_qemu_st(env, taddr, tmp32, oi, tb_ptr); break; - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: if (TCG_TARGET_REG_BITS == 64) { tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; @@ -1251,15 +1255,21 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info) str_r(r3), str_r(r4), str_r(r5)); break; - case INDEX_op_qemu_ld_i64: - case INDEX_op_qemu_st_i64: - len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS); + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_st_a32_i32: + len = 1 + 1; + goto do_qemu_ldst; + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_st_a64_i32: + len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS); + goto do_qemu_ldst; + case INDEX_op_qemu_ld_a64_i64: + case INDEX_op_qemu_st_a64_i64: + len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS); goto do_qemu_ldst; - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_st_i32: - len = 1; do_qemu_ldst: - len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS); switch (len) { case 2: tci_args_rrm(insn, &r0, &r1, &oi); diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc index 41fbf042da..586b2e6a08 100644 --- a/tcg/tci/tcg-target.c.inc +++ b/tcg/tci/tcg-target.c.inc @@ -156,22 +156,22 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_setcond2_i32: return C_O1_I4(r, r, r, r, r); - case INDEX_op_qemu_ld_i32: - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - ? C_O1_I1(r, r) - : C_O1_I2(r, r, r)); - case INDEX_op_qemu_ld_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 
C_O2_I1(r, r, r) - : C_O2_I2(r, r, r, r)); - case INDEX_op_qemu_st_i32: - return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - ? C_O0_I2(r, r) - : C_O0_I3(r, r, r)); - case INDEX_op_qemu_st_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(r, r, r) - : C_O0_I4(r, r, r, r)); + case INDEX_op_qemu_ld_a32_i32: + return C_O1_I1(r, r); + case INDEX_op_qemu_ld_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r); + case INDEX_op_qemu_ld_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r); + case INDEX_op_qemu_ld_a64_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r); + case INDEX_op_qemu_st_a32_i32: + return C_O0_I2(r, r); + case INDEX_op_qemu_st_a64_i32: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r); + case INDEX_op_qemu_st_a32_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r); + case INDEX_op_qemu_st_a64_i64: + return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r); default: g_assert_not_reached(); @@ -849,8 +849,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]); break; - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld_a64_i32: + case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st_a64_i32: if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); } else { @@ -858,8 +860,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, } break; - case INDEX_op_qemu_ld_i64: - case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_ld_a64_i64: + case INDEX_op_qemu_st_a32_i64: + case INDEX_op_qemu_st_a64_i64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { From dd7dc93ef056509edd1ec47f7a1d8a7bde9c411f Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 20 Mar 2023 07:48:09 -0700 Subject: [PATCH 61/74] tcg/tci: Elimnate TARGET_LONG_BITS, target_ulong MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We now have the address size as part of the opcode, so we no longer need to test TARGET_LONG_BITS. We can use uint64_t for target_ulong, as passed into load/store helpers. 
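For illustration, address truncation now follows the opcode rather than a
compile-time test. Roughly, the interpreter pattern becomes (a simplified
sketch of the qemu_ld a32/a64 cases changed below; argument decoding via
tci_args_* omitted, names as in tci.c):

    uint64_t taddr;

    switch (opc) {
    case INDEX_op_qemu_ld_a32_i32:
        taddr = (uint32_t)regs[r1];   /* 32-bit guest address: truncate */
        break;
    case INDEX_op_qemu_ld_a64_i32:
        taddr = regs[r1];             /* 64-bit guest address: use as-is */
        break;
    }
    regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
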
Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- tcg/tci.c | 61 +++++++++++++++++++++++++--------------- tcg/tci/tcg-target.c.inc | 15 +++++----- 2 files changed, 46 insertions(+), 30 deletions(-) diff --git a/tcg/tci.c b/tcg/tci.c index 742c791726..bab4397bc5 100644 --- a/tcg/tci.c +++ b/tcg/tci.c @@ -286,7 +286,7 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) return result; } -static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, +static uint64_t tci_qemu_ld(CPUArchState *env, uint64_t taddr, MemOpIdx oi, const void *tb_ptr) { MemOp mop = get_memop(oi); @@ -312,7 +312,7 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, } } -static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, +static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val, MemOpIdx oi, const void *tb_ptr) { MemOp mop = get_memop(oi); @@ -372,10 +372,9 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, TCGReg r0, r1, r2, r3, r4, r5; tcg_target_ulong t1; TCGCond condition; - target_ulong taddr; uint8_t pos, len; uint32_t tmp32; - uint64_t tmp64; + uint64_t tmp64, taddr; uint64_t T1, T2; MemOpIdx oi; int32_t ofs; @@ -923,31 +922,40 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, break; case INDEX_op_qemu_ld_a32_i32: + tci_args_rrm(insn, &r0, &r1, &oi); + taddr = (uint32_t)regs[r1]; + goto do_ld_i32; case INDEX_op_qemu_ld_a64_i32: - if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { + if (TCG_TARGET_REG_BITS == 64) { tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; } else { tci_args_rrrm(insn, &r0, &r1, &r2, &oi); taddr = tci_uint64(regs[r2], regs[r1]); } - tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr); - regs[r0] = tmp32; + do_ld_i32: + regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr); break; case INDEX_op_qemu_ld_a32_i64: + if (TCG_TARGET_REG_BITS == 64) { + tci_args_rrm(insn, &r0, &r1, &oi); + taddr = (uint32_t)regs[r1]; + } else { + tci_args_rrrm(insn, &r0, &r1, &r2, &oi); + taddr = (uint32_t)regs[r2]; + } + goto do_ld_i64; case INDEX_op_qemu_ld_a64_i64: if (TCG_TARGET_REG_BITS == 64) { tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; - } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { - tci_args_rrrm(insn, &r0, &r1, &r2, &oi); - taddr = regs[r2]; } else { tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); taddr = tci_uint64(regs[r3], regs[r2]); oi = regs[r4]; } + do_ld_i64: tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr); if (TCG_TARGET_REG_BITS == 32) { tci_write_reg64(regs, r1, r0, tmp64); @@ -957,35 +965,44 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, break; case INDEX_op_qemu_st_a32_i32: + tci_args_rrm(insn, &r0, &r1, &oi); + taddr = (uint32_t)regs[r1]; + goto do_st_i32; case INDEX_op_qemu_st_a64_i32: - if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { + if (TCG_TARGET_REG_BITS == 64) { tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; } else { tci_args_rrrm(insn, &r0, &r1, &r2, &oi); taddr = tci_uint64(regs[r2], regs[r1]); } - tmp32 = regs[r0]; - tci_qemu_st(env, taddr, tmp32, oi, tb_ptr); + do_st_i32: + tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr); break; case INDEX_op_qemu_st_a32_i64: + if (TCG_TARGET_REG_BITS == 64) { + tci_args_rrm(insn, &r0, &r1, &oi); + tmp64 = regs[r0]; + taddr = (uint32_t)regs[r1]; + } else { + tci_args_rrrm(insn, &r0, &r1, &r2, &oi); + tmp64 = tci_uint64(regs[r1], regs[r0]); + taddr = (uint32_t)regs[r2]; + } + goto do_st_i64; case INDEX_op_qemu_st_a64_i64: if (TCG_TARGET_REG_BITS == 64) { tci_args_rrm(insn, &r0, &r1, &oi); - taddr = 
regs[r1]; tmp64 = regs[r0]; + taddr = regs[r1]; } else { - if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { - tci_args_rrrm(insn, &r0, &r1, &r2, &oi); - taddr = regs[r2]; - } else { - tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); - taddr = tci_uint64(regs[r3], regs[r2]); - oi = regs[r4]; - } + tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); tmp64 = tci_uint64(regs[r1], regs[r0]); + taddr = tci_uint64(regs[r3], regs[r2]); + oi = regs[r4]; } + do_st_i64: tci_qemu_st(env, taddr, tmp64, oi, tb_ptr); break; diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc index 586b2e6a08..c9516a5e8b 100644 --- a/tcg/tci/tcg-target.c.inc +++ b/tcg/tci/tcg-target.c.inc @@ -243,7 +243,7 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, return false; } -static void stack_bounds_check(TCGReg base, target_long offset) +static void stack_bounds_check(TCGReg base, intptr_t offset) { if (base == TCG_REG_CALL_STACK) { tcg_debug_assert(offset >= 0); @@ -850,24 +850,23 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: case INDEX_op_qemu_st_a32_i32: + tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); + break; + case INDEX_op_qemu_ld_a64_i32: case INDEX_op_qemu_st_a64_i32: - if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { + case INDEX_op_qemu_ld_a32_i64: + case INDEX_op_qemu_st_a32_i64: + if (TCG_TARGET_REG_BITS == 64) { tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); } else { tcg_out_op_rrrm(s, opc, args[0], args[1], args[2], args[3]); } break; - - case INDEX_op_qemu_ld_a32_i64: case INDEX_op_qemu_ld_a64_i64: - case INDEX_op_qemu_st_a32_i64: case INDEX_op_qemu_st_a64_i64: if (TCG_TARGET_REG_BITS == 64) { tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); - } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { - tcg_out_op_rrrm(s, opc, args[0], args[1], args[2], args[3]); } else { tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[4]); tcg_out_op_rrrrr(s, opc, args[0], args[1], From 7a9ccb869c31430ac8a852ae45ea95023b2d0d34 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 20 Mar 2023 07:54:45 -0700 Subject: [PATCH 62/74] tcg/i386: Always enable TCG_TARGET_HAS_extr[lh]_i64_i32 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Keep all 32-bit values zero extended in the register, not solely when addresses are 32 bits. This eliminates a dependency on TARGET_LONG_BITS. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- tcg/i386/tcg-target.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h index 943af6775e..0b5a2c68c5 100644 --- a/tcg/i386/tcg-target.h +++ b/tcg/i386/tcg-target.h @@ -154,9 +154,9 @@ extern bool have_atomic16; #define TCG_TARGET_HAS_mulsh_i32 0 #if TCG_TARGET_REG_BITS == 64 -/* Keep target addresses zero-extended in a register. */ -#define TCG_TARGET_HAS_extrl_i64_i32 (TARGET_LONG_BITS == 32) -#define TCG_TARGET_HAS_extrh_i64_i32 (TARGET_LONG_BITS == 32) +/* Keep 32-bit values zero-extended in a register. 
*/ +#define TCG_TARGET_HAS_extrl_i64_i32 1 +#define TCG_TARGET_HAS_extrh_i64_i32 1 #define TCG_TARGET_HAS_div2_i64 1 #define TCG_TARGET_HAS_rot_i64 1 #define TCG_TARGET_HAS_ext8s_i64 1 From b2485530d8873f8c3a4f32bf718a52527e7d2981 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 5 Apr 2023 19:00:43 -0700 Subject: [PATCH 63/74] tcg/i386: Conditionalize tcg_out_extu_i32_i64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since TCG_TYPE_I32 values are kept zero-extended in registers, via omission of the REXW bit, we need not extend if the register matches. This is already relied upon by qemu_{ld,st}. Reviewed-by: Alex Bennée Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- tcg/i386/tcg-target.c.inc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index d8fd38d9e7..5d7fe5b300 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -1315,7 +1315,9 @@ static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src) static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src) { - tcg_out_ext32u(s, dest, src); + if (dest != src) { + tcg_out_ext32u(s, dest, src); + } } static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg dest, TCGReg src) From c60ad6e3b9be0c9622811715ffdd921bfd6ea7d2 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 20 Mar 2023 09:36:31 -0700 Subject: [PATCH 64/74] tcg/i386: Adjust type of tlb_mask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because of its use on tgen_arithi, this value must be a signed 32-bit quantity, as that is what may be encoded in the insn. The truncation of the value to unsigned for 32-bit guests is done via the REX bit via 'trexw'. Removes the only uses of target_ulong from this tcg backend. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- tcg/i386/tcg-target.c.inc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index 5d7fe5b300..d85f980883 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -1920,7 +1920,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, unsigned mem_index = get_mmuidx(oi); unsigned s_bits = opc & MO_SIZE; unsigned s_mask = (1 << s_bits) - 1; - target_ulong tlb_mask; + int tlb_mask; ldst = new_ldst_label(s); ldst->is_ld = is_ld; @@ -1965,7 +1965,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1, addrlo, s_mask - a_mask); } - tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask; + tlb_mask = TARGET_PAGE_MASK | a_mask; tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0); /* cmp 0(TCG_REG_L0), TCG_REG_L1 */ From 63f4da91f955f6ca1d1d3264a2e0d9fdd22bc33e Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Thu, 27 Apr 2023 13:55:11 +0100 Subject: [PATCH 65/74] tcg/i386: Remove TARGET_LONG_BITS, TCG_TYPE_TL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All uses can be infered from the INDEX_op_qemu_*_a{32,64}_* opcode being used. Add a field into TCGLabelQemuLdst to record the usage. 
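In practice the backend now reads the address width from TCGContext where it
previously consulted TARGET_LONG_BITS; the heart of the change is a selection
along the lines of (sketch of the prepare_host_addr hunk below):

    if (TCG_TARGET_REG_BITS == 64) {
        ttype = s->addr_type;               /* TCG_TYPE_I32 or TCG_TYPE_I64 */
        trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW);
    }

with the 32-bit-host high-part comparison likewise keyed off s->addr_type
rather than the macro.
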
Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- tcg/i386/tcg-target.c.inc | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index d85f980883..14bddc8c33 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -1929,10 +1929,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->addrhi_reg = addrhi; if (TCG_TARGET_REG_BITS == 64) { - if (TARGET_LONG_BITS == 64) { - ttype = TCG_TYPE_I64; - trexw = P_REXW; - } + ttype = s->addr_type; + trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW); if (TCG_TYPE_PTR == TCG_TYPE_I64) { hrexw = P_REXW; if (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32) { @@ -1977,7 +1975,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->label_ptr[0] = s->code_ptr; s->code_ptr += 4; - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { + if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) { /* cmp 4(TCG_REG_L0), addrhi */ tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, TCG_REG_L0, cmp_ofs + 4); From 03a2ecdab17e8410483e7c41af7386ccfce4dd6e Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 22 Mar 2023 18:13:12 -0700 Subject: [PATCH 66/74] tcg/arm: Remove TARGET_LONG_BITS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All uses can be infered from the INDEX_op_qemu_*_a{32,64}_* opcode being used. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- tcg/arm/tcg-target.c.inc | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index 47f3ff18fa..3c38e868e2 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -1431,18 +1431,18 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, * Load the tlb comparator into R2/R3 and the fast path addend into R1. */ if (cmp_off == 0) { - if (TARGET_LONG_BITS == 64) { - tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); - } else { + if (s->addr_type == TCG_TYPE_I32) { tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); + } else { + tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); } } else { tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); - if (TARGET_LONG_BITS == 64) { - tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); - } else { + if (s->addr_type == TCG_TYPE_I32) { tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); + } else { + tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); } } @@ -1485,7 +1485,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, SHIFT_IMM_LSL(TARGET_PAGE_BITS)); } - if (TARGET_LONG_BITS == 64) { + if (s->addr_type != TCG_TYPE_I32) { tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); } #else From 52533157568c1b8df04821759bdcf24b29a25b83 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Thu, 23 Mar 2023 01:48:06 +0000 Subject: [PATCH 67/74] tcg/aarch64: Remove USE_GUEST_BASE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Eliminate the test vs TARGET_LONG_BITS by considering this predicate to be always true, and simplify accordingly. 
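Concretely, the prologue now materialises the guest base unconditionally, and
each access selects it as the base register whenever the address may need
zero-extension; roughly (a sketch of the two hunks below):

    /* prologue: always reserve and load the guest_base register */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);

    /* per access */
    if (guest_base || addr_type == TCG_TYPE_I32) {
        h->base = TCG_REG_GUEST_BASE;
        h->index = addr_reg;
        h->index_ext = addr_type;
    }
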
Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- tcg/aarch64/tcg-target.c.inc | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index a1c91162e8..ba0748a46e 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -84,11 +84,6 @@ bool have_lse2; #define TCG_VEC_TMP TCG_REG_V31 #ifndef CONFIG_SOFTMMU -/* Note that XZR cannot be encoded in the address base register slot, - as that actaully encodes SP. So if we need to zero-extend the guest - address, via the address index register slot, we need to load even - a zero guest base into a register. */ -#define USE_GUEST_BASE (guest_base != 0 || TARGET_LONG_BITS == 32) #define TCG_REG_GUEST_BASE TCG_REG_X28 #endif @@ -1738,7 +1733,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); } - if (USE_GUEST_BASE) { + if (guest_base || addr_type == TCG_TYPE_I32) { h->base = TCG_REG_GUEST_BASE; h->index = addr_reg; h->index_ext = addr_type; @@ -2993,10 +2988,14 @@ static void tcg_target_qemu_prologue(TCGContext *s) CPU_TEMP_BUF_NLONGS * sizeof(long)); #if !defined(CONFIG_SOFTMMU) - if (USE_GUEST_BASE) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); - } + /* + * Note that XZR cannot be encoded in the address base register slot, + * as that actaully encodes SP. Depending on the guest, we may need + * to zero-extend the guest address via the address index register slot, + * therefore we need to load even a zero guest base into a register. + */ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); #endif tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); From 60c452a1a2aaf100830256e02dc09ff7a85ad6a7 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Thu, 27 Apr 2023 14:45:09 +0100 Subject: [PATCH 68/74] tcg/aarch64: Remove TARGET_LONG_BITS, TCG_TYPE_TL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All uses replaced with TCGContext.addr_type. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- tcg/aarch64/tcg-target.c.inc | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index ba0748a46e..462663fad8 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -1639,7 +1639,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, TCGReg addr_reg, MemOpIdx oi, bool is_ld) { - TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; + TCGType addr_type = s->addr_type; TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); unsigned a_mask; @@ -1683,7 +1683,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_insn(s, 3502, ADD, 1, TCG_REG_X1, TCG_REG_X1, TCG_REG_X0); /* Load the tlb comparator into X0, and the fast path addend into X1. */ - tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_X0, TCG_REG_X1, + tcg_out_ld(s, addr_type, TCG_REG_X0, TCG_REG_X1, is_ld ? 
offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write)); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_X1, TCG_REG_X1, @@ -1697,18 +1697,17 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, if (a_mask >= s_mask) { x3 = addr_reg; } else { - tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64, + tcg_out_insn(s, 3401, ADDI, addr_type, TCG_REG_X3, addr_reg, s_mask - a_mask); x3 = TCG_REG_X3; } compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; /* Store the page mask part of the address into X3. */ - tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64, - TCG_REG_X3, x3, compare_mask); + tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_X3, x3, compare_mask); /* Perform the address comparison. */ - tcg_out_cmp(s, TARGET_LONG_BITS == 64, TCG_REG_X0, TCG_REG_X3, 0); + tcg_out_cmp(s, addr_type, TCG_REG_X0, TCG_REG_X3, 0); /* If not equal, we jump to the slow path. */ ldst->label_ptr[0] = s->code_ptr; From 6e2a21b70b9308292b8bc0e67c2b0b42f0cdb5b8 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Thu, 27 Apr 2023 15:08:12 +0100 Subject: [PATCH 69/74] tcg/loongarch64: Remove TARGET_LONG_BITS, TCG_TYPE_TL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All uses replaced with TCGContext.addr_type. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- tcg/loongarch64/tcg-target.c.inc | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc index 0c4ef72d6f..e5f98845a0 100644 --- a/tcg/loongarch64/tcg-target.c.inc +++ b/tcg/loongarch64/tcg-target.c.inc @@ -844,6 +844,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, TCGReg addr_reg, MemOpIdx oi, bool is_ld) { + TCGType addr_type = s->addr_type; TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); MemOp a_bits; @@ -874,7 +875,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); /* Load the tlb comparator and the addend. */ - tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2, + tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, is_ld ? offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write)); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, @@ -888,9 +889,9 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, if (a_bits < s_bits) { unsigned a_mask = (1u << a_bits) - 1; unsigned s_mask = (1u << s_bits) - 1; - tcg_out_addi(s, TCG_TYPE_TL, TCG_REG_TMP1, addr_reg, s_mask - a_mask); + tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask); } else { - tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_TMP1, addr_reg); + tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg); } tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO, a_bits, TARGET_PAGE_BITS - 1); @@ -923,7 +924,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, h->index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; #endif - if (TARGET_LONG_BITS == 32) { + if (addr_type == TCG_TYPE_I32) { h->base = TCG_REG_TMP0; tcg_out_ext32u(s, h->base, addr_reg); } else { From 5e983cbc2e18b471c26f68dad83853330393da35 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Thu, 27 Apr 2023 15:27:06 +0100 Subject: [PATCH 70/74] tcg/mips: Remove TARGET_LONG_BITS, TCG_TYPE_TL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All uses replaced with TCGContext.addr_type. 
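Concretely, the ALIAS_TSRL/ALIAS_TADDI compile-time aliases give way to a
run-time choice on s->addr_type; for example the TLB-index extraction becomes
(sketch of the hunk below, with SHIFT standing in for
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS):

    if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo, SHIFT);  /* 32-bit shift */
    } else {
        tcg_out_dsrl(s, TCG_TMP3, addrlo, SHIFT);             /* 64-bit shift */
    }
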
Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- tcg/mips/tcg-target.c.inc | 42 +++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index 7ff4e2ff71..209d95992e 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -354,10 +354,6 @@ typedef enum { /* Aliases for convenience. */ ALIAS_PADD = sizeof(void *) == 4 ? OPC_ADDU : OPC_DADDU, ALIAS_PADDI = sizeof(void *) == 4 ? OPC_ADDIU : OPC_DADDIU, - ALIAS_TSRL = TARGET_LONG_BITS == 32 || TCG_TARGET_REG_BITS == 32 - ? OPC_SRL : OPC_DSRL, - ALIAS_TADDI = TARGET_LONG_BITS == 32 || TCG_TARGET_REG_BITS == 32 - ? OPC_ADDIU : OPC_DADDIU, } MIPSInsn; /* @@ -1156,6 +1152,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, TCGReg addrlo, TCGReg addrhi, MemOpIdx oi, bool is_ld) { + TCGType addr_type = s->addr_type; TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); MemOp a_bits; @@ -1190,23 +1187,26 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off); /* Extract the TLB index from the address into TMP3. */ - tcg_out_opc_sa(s, ALIAS_TSRL, TCG_TMP3, addrlo, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + } else { + tcg_out_dsrl(s, TCG_TMP3, addrlo, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + } tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0); /* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */ tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1); - /* Load the (low-half) tlb comparator. */ - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { - tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF); - } else { - tcg_out_ld(s, TCG_TYPE_TL, TCG_TMP0, TCG_TMP3, cmp_off); - } - - if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { + if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) { + /* Load the tlb comparator. */ + tcg_out_ld(s, addr_type, TCG_TMP0, TCG_TMP3, cmp_off); /* Load the tlb addend for the fast path. */ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off); + } else { + /* Load the low half of the tlb comparator. */ + tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF); } /* @@ -1214,16 +1214,20 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, * For unaligned accesses, compare against the end of the access to * verify that it does not cross a page boundary. */ - tcg_out_movi(s, TCG_TYPE_TL, TCG_TMP1, TARGET_PAGE_MASK | a_mask); + tcg_out_movi(s, addr_type, TCG_TMP1, TARGET_PAGE_MASK | a_mask); if (a_mask < s_mask) { - tcg_out_opc_imm(s, ALIAS_TADDI, TCG_TMP2, addrlo, s_mask - a_mask); + if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { + tcg_out_opc_imm(s, OPC_ADDIU, TCG_TMP2, addrlo, s_mask - a_mask); + } else { + tcg_out_opc_imm(s, OPC_DADDIU, TCG_TMP2, addrlo, s_mask - a_mask); + } tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2); } else { tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo); } /* Zero extend a 32-bit guest address for a 64-bit host. 
*/ - if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { + if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { tcg_out_ext32u(s, TCG_TMP2, addrlo); addrlo = TCG_TMP2; } @@ -1232,7 +1236,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0); /* Load and test the high half tlb comparator. */ - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) { /* delay slot */ tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF); @@ -1269,7 +1273,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, } base = addrlo; - if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { + if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { tcg_out_ext32u(s, TCG_REG_A0, base); base = TCG_REG_A0; } From c31e5fa44d0ebd2e78f1ead2147e30cd137ae5e7 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Fri, 28 Apr 2023 09:14:17 +0100 Subject: [PATCH 71/74] tcg: Remove TARGET_LONG_BITS, TCG_TYPE_TL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All uses replaced with TCGContext.addr_type. Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson --- tcg/tcg.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/tcg/tcg.c b/tcg/tcg.c index d33d5cfe38..a0bec13582 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -5614,12 +5614,7 @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, next_arg = 1; loc = &info->in[next_arg]; - if (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 64) { - nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, TCG_TYPE_TL, - ldst->addrlo_reg, ldst->addrhi_reg); - tcg_out_helper_load_slots(s, nmov, mov, parm); - next_arg += nmov; - } else { + if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) { /* * 32-bit host with 32-bit guest: zero-extend the guest address * to 64-bits for the helper by storing the low part, then @@ -5633,6 +5628,11 @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot, TCG_TYPE_I32, 0, parm); next_arg += 2; + } else { + nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type, + ldst->addrlo_reg, ldst->addrhi_reg); + tcg_out_helper_load_slots(s, nmov, mov, parm); + next_arg += nmov; } switch (info->out_kind) { @@ -5787,12 +5787,7 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, /* Handle addr argument. */ loc = &info->in[next_arg]; - if (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 64) { - n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, TCG_TYPE_TL, - ldst->addrlo_reg, ldst->addrhi_reg); - next_arg += n; - nmov += n; - } else { + if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) { /* * 32-bit host with 32-bit guest: zero-extend the guest address * to 64-bits for the helper by storing the low part. Later, @@ -5804,6 +5799,11 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, ldst->addrlo_reg, -1); next_arg += 2; nmov += 1; + } else { + n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type, + ldst->addrlo_reg, ldst->addrhi_reg); + next_arg += n; + nmov += n; } /* Handle data argument. 
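Concretely, the helper-argument marshalling now tests the 32-bit-host with
32-bit-guest special case directly against the context (sketch of the
tcg_out_ld_helper_args hunk below; the zero-extension branch is abbreviated):

    if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
        /* zero-extend the 32-bit guest address for the 64-bit helper slot */
        ...
    } else {
        nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
                                      ldst->addrlo_reg, ldst->addrhi_reg);
        tcg_out_helper_load_slots(s, nmov, mov, parm);
    }
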
*/ @@ -5849,7 +5849,8 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, g_assert_not_reached(); } - if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32) { + if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) { + /* Zero extend the address by loading a zero for the high part. */ loc = &info->in[1 + !HOST_BIG_ENDIAN]; tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm); } From aece72b76bfeffcab715cd62742fd7f366ceb079 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Thu, 23 Mar 2023 21:06:22 -0700 Subject: [PATCH 72/74] tcg: Add page_bits and page_mask to TCGContext Disconnect guest page size from TCG compilation. While this could be done via exec/target_page.h, we want to cache the value across multiple memory access operations, so we might as well initialize this early. The changes within tcg/ are entirely mechanical: sed -i s/TARGET_PAGE_BITS/s->page_bits/g sed -i s/TARGET_PAGE_MASK/s->page_mask/g Reviewed-by: Anton Johansson Signed-off-by: Richard Henderson --- accel/tcg/translate-all.c | 4 ++++ include/tcg/tcg.h | 5 +++++ tcg/aarch64/tcg-target.c.inc | 6 +++--- tcg/arm/tcg-target.c.inc | 10 +++++----- tcg/i386/tcg-target.c.inc | 6 +++--- tcg/loongarch64/tcg-target.c.inc | 4 ++-- tcg/mips/tcg-target.c.inc | 6 +++--- tcg/ppc/tcg-target.c.inc | 14 +++++++------- tcg/riscv/tcg-target.c.inc | 4 ++-- tcg/s390x/tcg-target.c.inc | 4 ++-- tcg/sparc64/tcg-target.c.inc | 4 ++-- 11 files changed, 38 insertions(+), 29 deletions(-) diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index 99a9d0e34f..ca306f67da 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -357,6 +357,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu, tb_set_page_addr1(tb, -1); tcg_ctx->gen_tb = tb; tcg_ctx->addr_type = TCG_TYPE_TL; +#ifdef CONFIG_SOFTMMU + tcg_ctx->page_bits = TARGET_PAGE_BITS; + tcg_ctx->page_mask = TARGET_PAGE_MASK; +#endif tb_overflow: diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index b9748fd0c5..db57c4d492 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -560,6 +560,11 @@ struct TCGContext { int nb_ops; TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */ +#ifdef CONFIG_SOFTMMU + int page_mask; + uint8_t page_bits; +#endif + TCGRegSet reserved_regs; intptr_t current_frame_offset; intptr_t frame_start; diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index 462663fad8..c2a1f09f17 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -1663,7 +1663,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->oi = oi; ldst->addrlo_reg = addr_reg; - mask_type = (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32 + mask_type = (s->page_bits + CPU_TLB_DYN_MAX_BITS > 32 ? TCG_TYPE_I64 : TCG_TYPE_I32); /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {x0,x1}. */ @@ -1677,7 +1677,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, /* Extract the TLB index from the address into X0. */ tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64, TCG_REG_X0, TCG_REG_X0, addr_reg, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + s->page_bits - CPU_TLB_ENTRY_BITS); /* Add the tlb_table pointer, creating the CPUTLBEntry address into X1. 
*/ tcg_out_insn(s, 3502, ADD, 1, TCG_REG_X1, TCG_REG_X1, TCG_REG_X0); @@ -1701,7 +1701,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, TCG_REG_X3, addr_reg, s_mask - a_mask); x3 = TCG_REG_X3; } - compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; + compare_mask = (uint64_t)s->page_mask | a_mask; /* Store the page mask part of the address into X3. */ tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_X3, x3, compare_mask); diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index 3c38e868e2..20cc1cc477 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -1424,7 +1424,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, /* Extract the tlb index from the address into R0. */ tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, - SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)); + SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS)); /* * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. @@ -1468,8 +1468,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr, addrlo, s_mask - a_mask); } - if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) { - tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask)); + if (use_armv7_instructions && s->page_bits <= 16) { + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask)); tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, t_addr, TCG_REG_TMP, 0); tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0); @@ -1479,10 +1479,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); } tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr, - SHIFT_IMM_LSR(TARGET_PAGE_BITS)); + SHIFT_IMM_LSR(s->page_bits)); tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, - SHIFT_IMM_LSL(TARGET_PAGE_BITS)); + SHIFT_IMM_LSL(s->page_bits)); } if (s->addr_type != TCG_TYPE_I32) { diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index 14bddc8c33..c5d4570ba2 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -1933,7 +1933,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, trexw = (ttype == TCG_TYPE_I32 ? 
0 : P_REXW); if (TCG_TYPE_PTR == TCG_TYPE_I64) { hrexw = P_REXW; - if (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32) { + if (s->page_bits + CPU_TLB_DYN_MAX_BITS > 32) { tlbtype = TCG_TYPE_I64; tlbrexw = P_REXW; } @@ -1942,7 +1942,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo); tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + s->page_bits - CPU_TLB_ENTRY_BITS); tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0, TLB_MASK_TABLE_OFS(mem_index) + @@ -1963,7 +1963,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1, addrlo, s_mask - a_mask); } - tlb_mask = TARGET_PAGE_MASK | a_mask; + tlb_mask = s->page_mask | a_mask; tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0); /* cmp 0(TCG_REG_L0), TCG_REG_L1 */ diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc index e5f98845a0..0bae922982 100644 --- a/tcg/loongarch64/tcg-target.c.inc +++ b/tcg/loongarch64/tcg-target.c.inc @@ -870,7 +870,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + s->page_bits - CPU_TLB_ENTRY_BITS); tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); @@ -894,7 +894,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg); } tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO, - a_bits, TARGET_PAGE_BITS - 1); + a_bits, s->page_bits - 1); /* Compare masked address with the TLB entry. */ ldst->label_ptr[0] = s->code_ptr; diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index 209d95992e..ef146b193c 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -1189,10 +1189,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, /* Extract the TLB index from the address into TMP3. */ if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + s->page_bits - CPU_TLB_ENTRY_BITS); } else { tcg_out_dsrl(s, TCG_TMP3, addrlo, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + s->page_bits - CPU_TLB_ENTRY_BITS); } tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0); @@ -1214,7 +1214,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, * For unaligned accesses, compare against the end of the access to * verify that it does not cross a page boundary. */ - tcg_out_movi(s, addr_type, TCG_TMP1, TARGET_PAGE_MASK | a_mask); + tcg_out_movi(s, addr_type, TCG_TMP1, s->page_mask | a_mask); if (a_mask < s_mask) { if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { tcg_out_opc_imm(s, OPC_ADDIU, TCG_TMP2, addrlo, s_mask - a_mask); diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc index e2851c5dcd..d4269dffcf 100644 --- a/tcg/ppc/tcg-target.c.inc +++ b/tcg/ppc/tcg-target.c.inc @@ -2077,10 +2077,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, /* Extract the page index, shifted into place for tlb index. 
*/ if (TCG_TARGET_REG_BITS == 32) { tcg_out_shri32(s, TCG_REG_R0, addrlo, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + s->page_bits - CPU_TLB_ENTRY_BITS); } else { tcg_out_shri64(s, TCG_REG_R0, addrlo, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + s->page_bits - CPU_TLB_ENTRY_BITS); } tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0)); @@ -2119,7 +2119,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, a_bits = s_bits; } tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0, - (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS); + (32 - a_bits) & 31, 31 - s->page_bits); } else { TCGReg t = addrlo; @@ -2140,13 +2140,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, /* Mask the address for the requested alignment. */ if (TARGET_LONG_BITS == 32) { tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0, - (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS); + (32 - a_bits) & 31, 31 - s->page_bits); } else if (a_bits == 0) { - tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS); + tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits); } else { tcg_out_rld(s, RLDICL, TCG_REG_R0, t, - 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits); - tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0); + 64 - s->page_bits, s->page_bits - a_bits); + tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0); } } diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index de61edb5df..ff6334980f 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -937,7 +937,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + s->page_bits - CPU_TLB_ENTRY_BITS); tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); @@ -952,7 +952,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, tcg_out_opc_imm(s, TARGET_LONG_BITS == 32 ? OPC_ADDIW : OPC_ADDI, addr_adj, addr_reg, s_mask - a_mask); } - compare_mask = TARGET_PAGE_MASK | a_mask; + compare_mask = s->page_mask | a_mask; if (compare_mask == sextreg(compare_mask, 0, 12)) { tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask); } else { diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc index 466d8e7e53..dfaa34c264 100644 --- a/tcg/s390x/tcg-target.c.inc +++ b/tcg/s390x/tcg-target.c.inc @@ -1755,7 +1755,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->addrlo_reg = addr_reg; tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + s->page_bits - CPU_TLB_ENTRY_BITS); QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19)); @@ -1768,7 +1768,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, * cross pages using the address of the last byte of the access. */ a_off = (a_mask >= s_mask ? 
0 : s_mask - a_mask); - tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; + tlb_mask = (uint64_t)s->page_mask | a_mask; if (a_off == 0) { tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask); } else { diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index 6e6c26d470..d2d0f604c2 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -1056,7 +1056,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, /* Extract the page index, shifted into place for tlb index. */ tcg_out_arithi(s, TCG_REG_T1, addr_reg, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL); + s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL); tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND); /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */ @@ -1068,7 +1068,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, h->base = TCG_REG_T1; /* Mask out the page offset, except for the required alignment. */ - compare_mask = TARGET_PAGE_MASK | a_mask; + compare_mask = s->page_mask | a_mask; if (check_fit_tl(compare_mask, 13)) { tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND); } else { From a66efde188e43041277822e582de1c897b966792 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 2 Apr 2023 10:07:57 -0700 Subject: [PATCH 73/74] tcg: Add tlb_dyn_max_bits to TCGContext Disconnect guest tlb parameters from TCG compilation. Reviewed-by: Anton Johansson Signed-off-by: Richard Henderson --- accel/tcg/translate-all.c | 1 + include/tcg/tcg.h | 1 + tcg/aarch64/tcg-target.c.inc | 2 +- tcg/i386/tcg-target.c.inc | 2 +- 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index ca306f67da..353849ca6d 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -360,6 +360,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, #ifdef CONFIG_SOFTMMU tcg_ctx->page_bits = TARGET_PAGE_BITS; tcg_ctx->page_mask = TARGET_PAGE_MASK; + tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS; #endif tb_overflow: diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index db57c4d492..cd6327b175 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -563,6 +563,7 @@ struct TCGContext { #ifdef CONFIG_SOFTMMU int page_mask; uint8_t page_bits; + uint8_t tlb_dyn_max_bits; #endif TCGRegSet reserved_regs; diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index c2a1f09f17..bc6b99a1bd 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -1663,7 +1663,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->oi = oi; ldst->addrlo_reg = addr_reg; - mask_type = (s->page_bits + CPU_TLB_DYN_MAX_BITS > 32 + mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32 ? TCG_TYPE_I64 : TCG_TYPE_I32); /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {x0,x1}. */ diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index c5d4570ba2..8b9a5f00e5 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -1933,7 +1933,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, trexw = (ttype == TCG_TYPE_I32 ? 
0 : P_REXW); if (TCG_TYPE_PTR == TCG_TYPE_I64) { hrexw = P_REXW; - if (s->page_bits + CPU_TLB_DYN_MAX_BITS > 32) { + if (s->page_bits + s->tlb_dyn_max_bits > 32) { tlbtype = TCG_TYPE_I64; tlbrexw = P_REXW; } From 7d478306e84259678b2941e8af7496ef32a9c4c5 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 30 Apr 2023 08:24:36 +0100 Subject: [PATCH 74/74] tcg: Split out exec/user/guest-base.h TCG will need this declaration, without all of the other bits that come with cpu-all.h. Reviewed-by: Thomas Huth Signed-off-by: Richard Henderson --- include/exec/cpu-all.h | 5 +---- include/exec/user/guest-base.h | 12 ++++++++++++ tcg/tcg.c | 3 +++ 3 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 include/exec/user/guest-base.h diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index ad824fee52..78d258af44 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -84,11 +84,8 @@ #if defined(CONFIG_USER_ONLY) #include "exec/user/abitypes.h" +#include "exec/user/guest-base.h" -/* On some host systems the guest address space is reserved on the host. - * This allows the guest address space to be offset to a convenient location. - */ -extern uintptr_t guest_base; extern bool have_guest_base; /* diff --git a/include/exec/user/guest-base.h b/include/exec/user/guest-base.h new file mode 100644 index 0000000000..afe2ab7fbb --- /dev/null +++ b/include/exec/user/guest-base.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Declaration of guest_base. + * Copyright (c) 2003 Fabrice Bellard + */ + +#ifndef EXEC_USER_GUEST_BASE_H +#define EXEC_USER_GUEST_BASE_H + +extern uintptr_t guest_base; + +#endif diff --git a/tcg/tcg.c b/tcg/tcg.c index a0bec13582..0b0fe9c7ad 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -63,6 +63,9 @@ #include "tcg/tcg-temp-internal.h" #include "tcg-internal.h" #include "accel/tcg/perf.h" +#ifdef CONFIG_USER_ONLY +#include "exec/user/guest-base.h" +#endif /* Forward declarations for functions declared in tcg-target.c.inc and used here. */