From 0ccbac336b74c3006d4234bb6b5c4b3c32c61171 Mon Sep 17 00:00:00 2001
From: Ilya Leoshkevich
Date: Thu, 10 Oct 2024 10:58:55 +0200
Subject: [PATCH 01/72] tests/tcg: Do not use inttypes.h in
 multiarch/system/memory.c

make check-tcg fails on Fedora with the following error message:

  alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
  qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
     17 | #include <inttypes.h>
        |          ^~~~~~~~~~~~
  compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich
Reviewed-by: Paolo Bonzini
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson
---
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index 65a6038a24..7508f6b916 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -14,7 +14,6 @@
 
 #include <stdint.h>
 #include <stdbool.h>
-#include <inttypes.h>
 #include <minilib.h>
 
 #ifndef CHECK_UNALIGNED
@@ -511,8 +510,8 @@ int main(void)
     int i;
     bool ok = true;
 
-    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
-    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+    ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+    ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);
 
     /* Run through the unsigned tests first */
     for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -529,8 +528,8 @@ int main(void)
         ok = do_signed_reads(true);
     }
 
-    ml_printf("Test data read: %"PRId32"\n", test_read_count);
-    ml_printf("Test data write: %"PRId32"\n", test_write_count);
+    ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+    ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
     ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
     return ok ? 0 : -1;
 }

From dbf408b6678a6076bd2412159d0ce665dce6acd0 Mon Sep 17 00:00:00 2001
From: Pierrick Bouvier
Date: Thu, 28 Nov 2024 13:38:43 -0800
Subject: [PATCH 02/72] plugins: optimize cpu_index code generation

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier
Reviewed-by: Richard Henderson
Signed-off-by: Richard Henderson
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index 1ef075552c..7e5f040bf7 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -102,6 +102,15 @@ static void gen_disable_mem_helper(void)
 
 static TCGv_i32 gen_cpu_index(void)
 {
+    /*
+     * Optimize when we run with a single vcpu. All values using cpu_index,
+     * including scoreboard index, will be optimized out.
+     * User-mode calls tb_flush when setting this flag. In system-mode, all
+     * vcpus are created before generating code.
+     */
+    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+        return tcg_constant_i32(current_cpu->cpu_index);
+    }
     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
     tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
From 1526855c012a7f1314278e4f8fbb0741ec74a372 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 07:45:11 -0600
Subject: [PATCH 03/72] tcg/optimize: Split out finish_bb, finish_ebb

Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags. Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 47 +++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index e9ef16b3c6..453e8c43bd 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -964,24 +964,25 @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }
 
+static void finish_bb(OptContext *ctx)
+{
+    /* We only optimize memory barriers across basic blocks. */
+    ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+    finish_bb(ctx);
+    /* We only optimize across extended basic blocks. */
+    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+    remove_mem_copy_all(ctx);
+}
+
 static void finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
 
-    /*
-     * We only optimize extended basic blocks.  If the opcode ends a BB
-     * and is not a conditional branch, reset all temp data.
-     */
-    if (def->flags & TCG_OPF_BB_END) {
-        ctx->prev_mb = NULL;
-        if (!(def->flags & TCG_OPF_COND_BRANCH)) {
-            memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
-            remove_mem_copy_all(ctx);
-        }
-        return;
-    }
-
     nb_oargs = def->nb_oargs;
     for (i = 0; i < nb_oargs; i++) {
         TCGTemp *ts = arg_temp(op->args[i]);
@@ -1351,8 +1352,11 @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
     if (i > 0) {
         op->opc = INDEX_op_br;
         op->args[0] = op->args[3];
+        finish_ebb(ctx);
+    } else {
+        finish_bb(ctx);
     }
-    return false;
+    return true;
 }
 
 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -1443,9 +1447,12 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
         }
         op->opc = INDEX_op_br;
         op->args[0] = label;
-        break;
+        finish_ebb(ctx);
+        return true;
     }
-    return false;
+
+    finish_bb(ctx);
+    return true;
 }
 
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -3037,6 +3044,14 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
             break;
+        case INDEX_op_set_label:
+        case INDEX_op_br:
+        case INDEX_op_exit_tb:
+        case INDEX_op_goto_tb:
+        case INDEX_op_goto_ptr:
+            finish_ebb(&ctx);
+            done = true;
+            break;
         default:
             break;
         }

From 045ace35a8587f9552b528595f2d67a76b77f1b5 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Thu, 19 Dec 2024 10:33:51 -0800
Subject: [PATCH 04/72] tcg/optimize: Split out fold_affected_mask

There are only a few logical operations which can compute an
"affected" mask. Split out handling of this optimization to a
separate function, only to be called when applicable.

Remove the a_mask field from OptContext, as the mask is no
longer stored anywhere.
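
As a standalone illustration (an editorial sketch, not part of the
original commit; the masks and values below are invented examples),
the "affected" mask rule for AND with a constant can be checked
exhaustively: when z1 & ~m2 is zero, the AND is a plain copy.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t z1 = 0x3f;          /* known-zeros mask of arg1: high bits zero */
        uint64_t m2 = 0xff;          /* constant second operand of the AND */
        uint64_t a_mask = z1 & ~m2;  /* bit is 0 iff result == arg1 in that bit */

        assert(a_mask == 0);
        for (uint64_t v = 0; v <= z1; v++) {   /* every value fitting z1 */
            assert((v & m2) == v);             /* the AND degenerates to a copy */
        }
        return 0;
    }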
Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 453e8c43bd..6757fe0036 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -64,7 +64,6 @@ typedef struct OptContext {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
 
     /* In flight values from optimization. */
-    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
     uint64_t s_mask;  /* mask of clrsb(value) bits */
     TCGType type;
@@ -1047,7 +1046,6 @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
-    uint64_t a_mask = ctx->a_mask;
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
 
@@ -1059,7 +1057,6 @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
      * type changing opcodes.
      */
     if (ctx->type == TCG_TYPE_I32) {
-        a_mask = (int32_t)a_mask;
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
         ctx->z_mask = z_mask;
@@ -1069,6 +1066,19 @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
+    return false;
+}
+
+/*
+ * An "affected" mask bit is 0 if and only if the result is identical
+ * to the first input.  Thus if the entire mask is 0, the operation
+ * is equivalent to a copy.
+ */
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
+{
+    if (ctx->type == TCG_TYPE_I32) {
+        a_mask = (uint32_t)a_mask;
+    }
     if (a_mask == 0) {
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
     }
@@ -1305,8 +1315,9 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
     */
-    if (arg_is_const(op->args[2])) {
-        ctx->a_mask = z1 & ~z2;
+    if (arg_is_const(op->args[2]) &&
+        fold_affected_mask(ctx, op, z1 & ~z2)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -1331,7 +1342,9 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
      */
     if (arg_is_const(op->args[2])) {
         uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        ctx->a_mask = z1 & ~z2;
+        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+            return true;
+        }
         z1 &= z2;
     }
     ctx->z_mask = z1;
@@ -1709,8 +1722,8 @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
 
     z_mask_old = arg_info(op->args[1])->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
-    if (pos == 0) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
@@ -1777,8 +1790,8 @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -1819,8 +1832,8 @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
-    if (!type_change) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     return fold_masks(ctx, op);
 }
@@ -2482,8 +2495,8 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -2843,7 +2856,6 @@ void tcg_optimize(TCGContext *s)
         }
 
         /* Assume all bits affected, no bits known zero, no sign reps. */
-        ctx.a_mask = -1;
        ctx.z_mask = -1;
        ctx.s_mask = 0;

From 56e06ecfa5f13816b68b850f4ce46d8756e2c32b Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 18:26:48 -0600
Subject: [PATCH 05/72] tcg/optimize: Copy mask writeback to fold_masks

Use of fold_masks should be restricted to those opcodes that can
reliably make use of it -- those with a single output, and from
higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 6757fe0036..2aa57afd64 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1048,6 +1048,12 @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    TCGTemp *ts;
+    TempOptInfo *ti;
+
+    /* Only single-output opcodes are supported here. */
+    tcg_debug_assert(def->nb_oargs == 1);
 
     /*
      * 32-bit ops generate 32-bit results, which for the purpose of
@@ -1059,14 +1065,19 @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
-        ctx->z_mask = z_mask;
-        ctx->s_mask = s_mask;
     }
 
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
-    return false;
+
+    ts = arg_temp(op->args[0]);
+    reset_ts(ctx, ts);
+
+    ti = ts_info(ts);
+    ti->z_mask = z_mask;
+    ti->s_mask = s_mask;
+    return true;
 }

From d582b14d808ee9b9c624140a1d253b6381406a9e Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Thu, 19 Dec 2024 10:43:26 -0800
Subject: [PATCH 06/72] tcg/optimize: Split out fold_masks_zs

Add a routine to which masks can be passed directly, rather than
storing them into OptContext. To be used in upcoming patches.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 2aa57afd64..d70127b88d 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1044,10 +1044,14 @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask and s_mask.
+ * If z_mask allows, fold the output to constant zero.
+ */
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+                          uint64_t z_mask, uint64_t s_mask)
 {
-    uint64_t z_mask = ctx->z_mask;
-    uint64_t s_mask = ctx->s_mask;
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
@@ -1080,6 +1084,11 @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+    return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
+}
+
 /*
  * An "affected" mask bit is 0 if and only if the result is identical
  * to the first input.  Thus if the entire mask is 0, the operation

From 75c3bf324d21f8f57be1349007b0252ae64b4c51 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Thu, 19 Dec 2024 10:50:40 -0800
Subject: [PATCH 07/72] tcg/optimize: Augment s_mask from z_mask in fold_masks_zs

Consider the passed s_mask to be a minimum deduced from either existing
s_mask or from a sign-extension operation. We may be able to deduce
more from the set of known zeros. Remove identical logic from several
opcode folders.
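
The deduction itself is plain bit arithmetic and can be modeled outside
of TCG. The 8-bit sketch below (an editorial illustration, not part of
the original commit) checks exhaustively that known-zero high bits imply
at least that many leading bits equal to the sign bit:

    #include <assert.h>
    #include <stdint.h>

    /* Count leading bits of an 8-bit value that equal bit 7. */
    static int sign_reps(uint8_t v)
    {
        int n = 1;
        while (n < 8 && ((v >> (7 - n)) & 1) == (v >> 7)) {
            n++;
        }
        return n;
    }

    int main(void)
    {
        uint8_t z_mask = 0x1f;               /* top 3 bits known zero */
        int known_zero_top = 3;

        for (unsigned v = 0; v <= 0xff; v++) {
            if ((v & ~z_mask) == 0) {        /* value consistent with z_mask */
                /* msb is zero, so the top 3 bits all repeat the sign */
                assert(sign_reps((uint8_t)v) >= known_zero_top);
            }
        }
        return 0;
    }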
Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index d70127b88d..d8f6542c4f 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1048,6 +1048,7 @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * Record "zero" and "sign" masks for the single output of @op.
  * See TempOptInfo definition of z_mask and s_mask.
  * If z_mask allows, fold the output to constant zero.
+ * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                           uint64_t z_mask, uint64_t s_mask)
@@ -1081,7 +1082,7 @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask;
+    ti->s_mask = s_mask | smask_from_zmask(z_mask);
     return true;
 }
 
@@ -1519,8 +1520,8 @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    s_mask = smask_from_zmask(z_mask);
+    s_mask = 0;
     switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
     case TCG_BSWAP_OZ:
         break;
@@ -1534,7 +1535,6 @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
-        s_mask = 0;
         break;
     }
     ctx->z_mask = z_mask;
@@ -1605,7 +1605,6 @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
     ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -1625,7 +1624,6 @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -1746,7 +1744,6 @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
         return true;
     }
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
 
     return fold_masks(ctx, op);
 }
@@ -1851,7 +1848,6 @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
@@ -2116,10 +2112,10 @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
     int width = 8 * memop_size(mop);
 
     if (width < 64) {
-        ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
-        if (!(mop & MO_SIGN)) {
+        if (mop & MO_SIGN) {
+            ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+        } else {
             ctx->z_mask = MAKE_64BIT_MASK(0, width);
-            ctx->s_mask <<= 1;
         }
     }
 
@@ -2354,7 +2350,6 @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
     fold_setcond_tst_pow2(ctx, op, false);
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 }
 
@@ -2455,7 +2450,6 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 
  do_setcond_const:
@@ -2649,21 +2643,18 @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
         break;
     CASE_OP_32_64(ld8u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 8);
-        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
         break;
     CASE_OP_32_64(ld16s):
         ctx->s_mask = MAKE_64BIT_MASK(16, 48);
         break;
     CASE_OP_32_64(ld16u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 16);
-        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
         break;
     case INDEX_op_ld32s_i64:
         ctx->s_mask = MAKE_64BIT_MASK(32, 32);
         break;
     case INDEX_op_ld32u_i64:
         ctx->z_mask = MAKE_64BIT_MASK(0, 32);
-        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
         break;
     default:
         g_assert_not_reached();

From 6d70ddc6350361c38e7720d1ffc594e5cc648900 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sat, 21 Dec 2024 21:08:10 -0800
Subject: [PATCH 08/72] tcg/optimize: Change representation of s_mask

Change the representation from sign bit repetitions to all bits equal
to the sign bit, including the sign bit itself.

The previous format has a problem in that it is difficult to recreate
a valid sign mask after a shift operation: the "repetitions" part of
the previous format meant that applying the same shift as for the value
led to an off-by-one value.

The new format, including the sign bit itself, means that the sign mask
can be manipulated in exactly the same way as the value, and
canonicalization is easier.

Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
to do so. Treat 0 as a non-canonical but typeless input for no sign
information, which will be reset as appropriate for the data type.
We can easily fold in the data from z_mask while canonicalizing.

Temporarily disable optimizations using s_mask while each operation is
converted to use fold_masks_zs and to the new form.
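
The claim that the new form shifts like the value can be checked on a
small model. The 8-bit sketch below (an editorial illustration, not
part of the original commit; it assumes arithmetic right shift of
signed values, as TCG does) verifies that shifting the mask exactly
like the value never claims a wrong bit:

    #include <assert.h>
    #include <stdint.h>

    /* Redundant sign bits of an 8-bit value (8-bit analogue of clrsb). */
    static int clrsb8(int8_t v)
    {
        int n = 0;
        while (n < 7 && ((v >> (6 - n)) & 1) == ((v >> 7) & 1)) {
            n++;
        }
        return n;
    }

    /* New-format sign mask: all bits equal to bit 7, sign bit included. */
    static uint8_t s_mask8(int8_t v)
    {
        return (uint8_t)((int8_t)0x80 >> clrsb8(v));
    }

    int main(void)
    {
        for (int v = -128; v < 128; v++) {
            for (int k = 0; k < 8; k++) {
                uint8_t shifted_mask = (uint8_t)((int8_t)s_mask8((int8_t)v) >> k);
                uint8_t true_mask = s_mask8((int8_t)((int8_t)v >> k));
                /* No off-by-one correction is needed in the new format. */
                assert((shifted_mask & ~true_mask) == 0);
            }
        }
        return 0;
    }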
Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 64 ++++++++++++--------------------------------------
 1 file changed, 15 insertions(+), 49 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index d8f6542c4f..fbc0dc5588 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -52,7 +52,7 @@ typedef struct TempOptInfo {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
     uint64_t val;
     uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
-    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
 } TempOptInfo;
 
 typedef struct OptContext {
@@ -65,49 +65,10 @@ typedef struct OptContext {
 
     /* In flight values from optimization. */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
-    uint64_t s_mask;  /* mask of clrsb(value) bits */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
     TCGType type;
 } OptContext;
 
-/* Calculate the smask for a specific value. */
-static uint64_t smask_from_value(uint64_t value)
-{
-    int rep = clrsb64(value);
-    return ~(~0ull >> rep);
-}
-
-/*
- * Calculate the smask for a given set of known-zeros.
- * If there are lots of zeros on the left, we can consider the remainder
- * an unsigned field, and thus the corresponding signed field is one bit
- * larger.
- */
-static uint64_t smask_from_zmask(uint64_t zmask)
-{
-    /*
-     * Only the 0 bits are significant for zmask, thus the msb itself
-     * must be zero, else we have no sign information.
-     */
-    int rep = clz64(zmask);
-    if (rep == 0) {
-        return 0;
-    }
-    rep -= 1;
-    return ~(~0ull >> rep);
-}
-
-/*
- * Recreate a properly left-aligned smask after manipulation.
- * Some bit-shuffling, particularly shifts and rotates, may
- * retain sign bits on the left, but may scatter disconnected
- * sign bits on the right.  Retain only what remains to the left.
- */
-static uint64_t smask_from_smask(int64_t smask)
-{
-    /* Only the 1 bits are significant for smask */
-    return smask_from_zmask(~smask);
-}
-
 static inline TempOptInfo *ts_info(TCGTemp *ts)
 {
     return ts->state_ptr;
@@ -173,7 +134,7 @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
         ti->is_const = true;
         ti->val = ts->val;
         ti->z_mask = ts->val;
-        ti->s_mask = smask_from_value(ts->val);
+        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
     } else {
         ti->is_const = false;
         ti->z_mask = -1;
@@ -992,7 +953,6 @@ static void finish_folding(OptContext *ctx, TCGOp *op)
          */
         if (i == 0) {
             ts_info(ts)->z_mask = ctx->z_mask;
-            ts_info(ts)->s_mask = ctx->s_mask;
         }
     }
 }
@@ -1051,11 +1011,12 @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
-                          uint64_t z_mask, uint64_t s_mask)
+                          uint64_t z_mask, int64_t s_mask)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
+    int rep;
 
     /* Only single-output opcodes are supported here. */
     tcg_debug_assert(def->nb_oargs == 1);
@@ -1069,7 +1030,7 @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
      */
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
-        s_mask |= MAKE_64BIT_MASK(32, 32);
+        s_mask |= INT32_MIN;
     }
 
     if (z_mask == 0) {
@@ -1081,7 +1042,13 @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask | smask_from_zmask(z_mask);
+
+    /* Canonicalize s_mask and incorporate data from z_mask. */
+    rep = clz64(~s_mask);
+    rep = MAX(rep, clz64(z_mask));
+    rep = MAX(rep - 1, 0);
+    ti->s_mask = INT64_MIN >> rep;
+
     return true;
 }
 
@@ -1807,7 +1774,7 @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
 
@@ -2509,7 +2476,7 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
 
     return fold_masks(ctx, op);
@@ -2535,7 +2502,6 @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
     s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
-    ctx->s_mask = smask_from_smask(s_mask);
 
     return fold_masks(ctx, op);
 }

From f3ed3cffb96f96875755ac4b057a15fd50ed0f32 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 18:39:47 -0600
Subject: [PATCH 09/72] tcg/optimize: Use finish_folding in fold_add,
 fold_add_vec, fold_addsub2

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index fbc0dc5588..26d1c5d4a1 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -938,7 +938,7 @@ static void finish_ebb(OptContext *ctx)
     remove_mem_copy_all(ctx);
 }
 
-static void finish_folding(OptContext *ctx, TCGOp *op)
+static bool finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
@@ -955,6 +955,7 @@ static void finish_folding(OptContext *ctx, TCGOp *op)
             ts_info(ts)->z_mask = ctx->z_mask;
         }
     }
+    return true;
 }
 
 /*
@@ -1188,7 +1189,7 @@ static bool fold_add(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 /* We cannot as yet do_constant_folding with vectors. */
@@ -1198,7 +1199,7 @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
@@ -1265,7 +1266,7 @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
         op->args[4] = arg_new_constant(ctx, bl);
         op->args[5] = arg_new_constant(ctx, bh);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_add2(OptContext *ctx, TCGOp *op)

From e1b6c141e98034d44d7e9004dc35545b87ebcade Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 22 Dec 2024 10:26:14 -0800
Subject: [PATCH 10/72] tcg/optimize: Introduce const value accessors for
 TempOptInfo

Introduce ti_is_const, ti_const_val, ti_is_const_val.
*/ tcg_debug_assert(def->nb_oargs == 1); @@ -1069,7 +1030,7 @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op, */ if (ctx->type == TCG_TYPE_I32) { z_mask = (int32_t)z_mask; - s_mask |= MAKE_64BIT_MASK(32, 32); + s_mask |= INT32_MIN; } if (z_mask == 0) { @@ -1081,7 +1042,13 @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op, ti = ts_info(ts); ti->z_mask = z_mask; - ti->s_mask = s_mask | smask_from_zmask(z_mask); + + /* Canonicalize s_mask and incorporate data from z_mask. */ + rep = clz64(~s_mask); + rep = MAX(rep, clz64(z_mask)); + rep = MAX(rep - 1, 0); + ti->s_mask = INT64_MIN >> rep; + return true; } @@ -1807,7 +1774,7 @@ static bool fold_exts(OptContext *ctx, TCGOp *op) ctx->z_mask = z_mask; ctx->s_mask = s_mask; - if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) { + if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) { return true; } @@ -2509,7 +2476,7 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op) s_mask |= MAKE_64BIT_MASK(len, 64 - len); ctx->s_mask = s_mask; - if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) { + if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) { return true; } @@ -2535,7 +2502,6 @@ static bool fold_shift(OptContext *ctx, TCGOp *op) ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh); s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh); - ctx->s_mask = smask_from_smask(s_mask); return fold_masks(ctx, op); } From f3ed3cffb96f96875755ac4b057a15fd50ed0f32 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 18:39:47 -0600 Subject: [PATCH 09/72] tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2 Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index fbc0dc5588..26d1c5d4a1 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -938,7 +938,7 @@ static void finish_ebb(OptContext *ctx) remove_mem_copy_all(ctx); } -static void finish_folding(OptContext *ctx, TCGOp *op) +static bool finish_folding(OptContext *ctx, TCGOp *op) { const TCGOpDef *def = &tcg_op_defs[op->opc]; int i, nb_oargs; @@ -955,6 +955,7 @@ static void finish_folding(OptContext *ctx, TCGOp *op) ts_info(ts)->z_mask = ctx->z_mask; } } + return true; } /* @@ -1188,7 +1189,7 @@ static bool fold_add(OptContext *ctx, TCGOp *op) fold_xi_to_x(ctx, op, 0)) { return true; } - return false; + return finish_folding(ctx, op); } /* We cannot as yet do_constant_folding with vectors. */ @@ -1198,7 +1199,7 @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op) fold_xi_to_x(ctx, op, 0)) { return true; } - return false; + return finish_folding(ctx, op); } static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add) @@ -1265,7 +1266,7 @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add) op->args[4] = arg_new_constant(ctx, bl); op->args[5] = arg_new_constant(ctx, bh); } - return false; + return finish_folding(ctx, op); } static bool fold_add2(OptContext *ctx, TCGOp *op) From e1b6c141e98034d44d7e9004dc35545b87ebcade Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 22 Dec 2024 10:26:14 -0800 Subject: [PATCH 10/72] tcg/optimize: Introduce const value accessors for TempOptInfo Introduce ti_is_const, ti_const_val, ti_is_const_val. 
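
For illustration (an editorial sketch, not part of the original commit;
the struct below is a simplified stand-in, not the real TempOptInfo),
the accessor pattern lets a folder that already holds the info pointer
test it directly instead of re-deriving it from a TCGArg each time:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        bool is_const;
        uint64_t val;
    } TempOptInfo;

    static inline bool ti_is_const(TempOptInfo *ti) { return ti->is_const; }
    static inline uint64_t ti_const_val(TempOptInfo *ti) { return ti->val; }
    static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
    {
        return ti_is_const(ti) && ti_const_val(ti) == val;
    }

    int main(void)
    {
        TempOptInfo t = { .is_const = true, .val = 42 };

        assert(ti_is_const(&t));
        assert(ti_is_const_val(&t, 42));
        assert(!ti_is_const_val(&t, 0));
        return 0;
    }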
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 26d1c5d4a1..5090f6e759 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -79,15 +79,29 @@ static inline TempOptInfo *arg_info(TCGArg arg)
     return ts_info(arg_temp(arg));
 }
 
+static inline bool ti_is_const(TempOptInfo *ti)
+{
+    return ti->is_const;
+}
+
+static inline uint64_t ti_const_val(TempOptInfo *ti)
+{
+    return ti->val;
+}
+
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
+{
+    return ti_is_const(ti) && ti_const_val(ti) == val;
+}
+
 static inline bool ts_is_const(TCGTemp *ts)
 {
-    return ts_info(ts)->is_const;
+    return ti_is_const(ts_info(ts));
 }
 
 static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
 {
-    TempOptInfo *ti = ts_info(ts);
-    return ti->is_const && ti->val == val;
+    return ti_is_const_val(ts_info(ts), val);
 }
 
 static inline bool arg_is_const(TCGArg arg)

From 1ca7372c033d8d958add8f4f4c7d8e37c06e6ef7 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 18:47:15 -0600
Subject: [PATCH 11/72] tcg/optimize: Use fold_masks_zs in fold_and

Avoid the use of the OptContext slots. Find TempOptInfo once.
Sink mask computation below fold_affected_mask early exit.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 5090f6e759..4a5b52916a 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1294,7 +1294,8 @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
 
 static bool fold_and(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z1, z2;
+    uint64_t z1, z2, z_mask, s_mask;
+    TempOptInfo *t1, *t2;
 
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_i(ctx, op, 0) ||
@@ -1303,27 +1304,28 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    z1 = arg_info(op->args[1])->z_mask;
-    z2 = arg_info(op->args[2])->z_mask;
-    ctx->z_mask = z1 & z2;
-
-    /*
-     * Sign repetitions are perforce all identical, whether they are 1 or 0.
-     * Bitwise operations preserve the relative quantity of the repetitions.
-     */
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    z1 = t1->z_mask;
+    z2 = t2->z_mask;
 
     /*
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2]) &&
-        fold_affected_mask(ctx, op, z1 & ~z2)) {
+    if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
         return true;
     }
 
-    return fold_masks(ctx, op);
+    z_mask = z1 & z2;
+
+    /*
+     * Sign repetitions are perforce all identical, whether they are 1 or 0.
+     * Bitwise operations preserve the relative quantity of the repetitions.
+     */
+    s_mask = t1->s_mask & t2->s_mask;
+
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }

From 21e2b5f9fa79eb122cb7240436b84a56263547aa Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 18:56:55 -0600
Subject: [PATCH 12/72] tcg/optimize: Use fold_masks_zs in fold_andc

Avoid the use of the OptContext slots. Find TempOptInfo once.
Avoid double inversion of the value of second const operand.
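
The identity behind the change is simple: with z2 = ~v2, the affected
mask z1 & ~z2 equals z1 & v2, so the constant value can be used
directly. A standalone check (an editorial sketch, not part of the
original commit; masks and values are invented examples):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t z1 = 0x00ff00ff;      /* known-zeros mask of arg1 */
        uint64_t v2 = 0x0000ffff;      /* constant value of arg2 */
        uint64_t z2 = ~v2;             /* what the old code computed first */

        /* Affected bits: identical either way, without double inversion. */
        assert((z1 & ~z2) == (z1 & v2));

        /* Known zeros of (x & ~v2): clear the bits v2 knocks out. */
        uint64_t z_mask = z1 & ~v2;
        for (uint64_t x = 0; x < 0x10000; x++) {
            uint64_t r = (x & z1) & ~v2;     /* any arg1 fitting z1 */
            assert((r & ~z_mask) == 0);      /* result fits z_mask */
        }
        return 0;
    }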
Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 4a5b52916a..2096d705bd 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1330,7 +1330,8 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 
 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z1;
+    uint64_t z_mask, s_mask;
+    TempOptInfo *t1, *t2;
 
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
@@ -1339,24 +1340,24 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    z1 = arg_info(op->args[1])->z_mask;
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    z_mask = t1->z_mask;
 
     /*
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer anything from it.
     */
-    if (arg_is_const(op->args[2])) {
-        uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+    if (ti_is_const(t2)) {
+        uint64_t v2 = ti_const_val(t2);
+        if (fold_affected_mask(ctx, op, z_mask & v2)) {
             return true;
         }
-        z1 &= z2;
+        z_mask &= ~v2;
     }
-    ctx->z_mask = z1;
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return fold_masks(ctx, op);
+    s_mask = t1->s_mask & t2->s_mask;
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }

From c1e7b989c8f05a4e78896a8856530492d87b51b4 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 19:42:20 -0600
Subject: [PATCH 13/72] tcg/optimize: Use fold_masks_zs in fold_bswap

Avoid the use of the OptContext slots. Find TempOptInfo once.
Always set s_mask along the BSWAP_OS path, since the result is
being explicitly sign-extended.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 2096d705bd..054109d347 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1477,16 +1477,16 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask, s_mask, sign;
+    TempOptInfo *t1 = arg_info(op->args[1]);
 
-    if (arg_is_const(op->args[1])) {
-        uint64_t t = arg_info(op->args[1])->val;
-
-        t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
-        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    if (ti_is_const(t1)) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0],
+                                do_constant_folding(op->opc, ctx->type,
+                                                    ti_const_val(t1),
+                                                    op->args[2]));
     }
 
-    z_mask = arg_info(op->args[1])->z_mask;
-
+    z_mask = t1->z_mask;
     switch (op->opc) {
     case INDEX_op_bswap16_i32:
     case INDEX_op_bswap16_i64:
@@ -1514,18 +1514,17 @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
         /* If the sign bit may be 1, force all the bits above to 1. */
         if (z_mask & sign) {
             z_mask |= sign;
-            s_mask = sign << 1;
         }
+        /* The value and therefore s_mask is explicitly sign-extended. */
+        s_mask = sign;
         break;
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
         break;
     }
-    ctx->z_mask = z_mask;
-    ctx->s_mask = s_mask;
 
-    return fold_masks(ctx, op);
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }

From ce1d663ff8a4535e4c72471cab3e52cb24fb7eb1 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 19:47:51 -0600
Subject: [PATCH 14/72] tcg/optimize: Use fold_masks_zs in fold_count_zeros

Avoid the use of the OptContext slots. Find TempOptInfo once.
Compute s_mask from the union of the maximum count and the
op2 fallback for op1 being zero.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 054109d347..0766a452b5 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1566,10 +1566,12 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
 
 static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z_mask;
+    uint64_t z_mask, s_mask;
+    TempOptInfo *t1 = arg_info(op->args[1]);
+    TempOptInfo *t2 = arg_info(op->args[2]);
 
-    if (arg_is_const(op->args[1])) {
-        uint64_t t = arg_info(op->args[1])->val;
+    if (ti_is_const(t1)) {
+        uint64_t t = ti_const_val(t1);
 
         if (t != 0) {
             t = do_constant_folding(op->opc, ctx->type, t, 0);
@@ -1588,8 +1590,11 @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    return false;
+    s_mask = ~z_mask;
+    z_mask |= t2->z_mask;
+    s_mask &= t2->s_mask;
+
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
From 81be07f905b187743b69adeb2877e5a9efc00d8e Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 19:49:17 -0600
Subject: [PATCH 15/72] tcg/optimize: Use fold_masks_z in fold_ctpop

Add fold_masks_z as a trivial wrapper around fold_masks_zs.
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 0766a452b5..2f5030c899 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1067,6 +1067,11 @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
     return true;
 }
 
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
+{
+    return fold_masks_zs(ctx, op, z_mask, 0);
+}
+
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
@@ -1599,21 +1604,23 @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
 
 static bool fold_ctpop(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask;
+
     if (fold_const1(ctx, op)) {
         return true;
     }
 
     switch (ctx->type) {
     case TCG_TYPE_I32:
-        ctx->z_mask = 32 | 31;
+        z_mask = 32 | 31;
         break;
     case TCG_TYPE_I64:
-        ctx->z_mask = 64 | 63;
+        z_mask = 64 | 63;
         break;
     default:
         g_assert_not_reached();
     }
-    return false;
+    return fold_masks_z(ctx, op, z_mask);
 }

From c7739ab83e02b93cb15f54984c3f66ba3c5bd8d2 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 19:57:28 -0600
Subject: [PATCH 16/72] tcg/optimize: Use fold_and and fold_masks_z in
 fold_deposit

Avoid the use of the OptContext slots. Find TempOptInfo once.
When we fold to and, use fold_and.
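
The two deposit-to-AND special cases are plain identities and can be
checked standalone. In the sketch below (an editorial illustration,
not part of the original commit) the deposit64 helper is a local copy
of the usual semantics, valid for 0 < len < 64:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t deposit64(uint64_t dst, int pos, int len, uint64_t src)
    {
        uint64_t mask = (~0ull >> (64 - len)) << pos;
        return (dst & ~mask) | ((src << pos) & mask);
    }

    int main(void)
    {
        for (uint64_t x = 0; x < 0x100; x++) {
            /* Inserting x into zero at offset 0 is an AND with a mask... */
            assert(deposit64(0, 0, 8, x) == (x & 0xff));
            /* ...and inserting zero into x is an AND with the hole mask. */
            assert(deposit64(x, 4, 8, 0) == (x & deposit64(-1ull, 4, 8, 0)));
        }
        return 0;
    }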
Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 35 +++++++++++++++++------------------
 1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 2f5030c899..c0f0390431 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1625,14 +1625,17 @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
 
 static bool fold_deposit(OptContext *ctx, TCGOp *op)
 {
+    TempOptInfo *t1 = arg_info(op->args[1]);
+    TempOptInfo *t2 = arg_info(op->args[2]);
+    int ofs = op->args[3];
+    int len = op->args[4];
     TCGOpcode and_opc;
+    uint64_t z_mask;
 
-    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-        uint64_t t1 = arg_info(op->args[1])->val;
-        uint64_t t2 = arg_info(op->args[2])->val;
-
-        t1 = deposit64(t1, op->args[3], op->args[4], t2);
-        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
+    if (ti_is_const(t1) && ti_is_const(t2)) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0],
+                                deposit64(ti_const_val(t1), ofs, len,
+                                          ti_const_val(t2)));
     }
 
     switch (ctx->type) {
@@ -1647,30 +1650,26 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     }
 
     /* Inserting a value into zero at offset 0. */
-    if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
-        uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
+    if (ti_is_const_val(t1, 0) && ofs == 0) {
+        uint64_t mask = MAKE_64BIT_MASK(0, len);
 
         op->opc = and_opc;
         op->args[1] = op->args[2];
         op->args[2] = arg_new_constant(ctx, mask);
-        ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
-        return false;
+        return fold_and(ctx, op);
     }
 
     /* Inserting zero into a value. */
-    if (arg_is_const_val(op->args[2], 0)) {
-        uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
+    if (ti_is_const_val(t2, 0)) {
+        uint64_t mask = deposit64(-1, ofs, len, 0);
 
         op->opc = and_opc;
         op->args[2] = arg_new_constant(ctx, mask);
-        ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
-        return false;
+        return fold_and(ctx, op);
     }
 
-    ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
-                            op->args[3], op->args[4],
-                            arg_info(op->args[2])->z_mask);
-    return false;
+    z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
+    return fold_masks_z(ctx, op, z_mask);
 }

From edb832cb51dd98d955dc33973063853bd333752e Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Thu, 19 Dec 2024 17:56:05 -0800
Subject: [PATCH 17/72] tcg/optimize: Compute sign mask in fold_deposit

The input which overlaps the sign bit of the output can have its input
s_mask propagated to the output s_mask.
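
Both branches of the rule can be brute-forced on an 8-bit model. The
sketch below (an editorial illustration, not part of the original
commit; it assumes arithmetic right shift of signed values and uses
canonical top-run sign masks) checks that every claimed sign bit really
matches the result's msb:

    #include <assert.h>
    #include <stdint.h>

    static int clrsb8(int8_t v)
    {
        int n = 0;
        while (n < 7 && ((v >> (6 - n)) & 1) == ((v >> 7) & 1)) {
            n++;
        }
        return n;
    }

    /* Canonical sign mask: the top run of bits equal to bit 7. */
    static uint8_t s_mask8(int8_t v)
    {
        return (uint8_t)((int8_t)0x80 >> clrsb8(v));
    }

    static uint8_t deposit8(uint8_t dst, int pos, int len, uint8_t src)
    {
        uint8_t mask = (uint8_t)(((1u << len) - 1) << pos);
        return (dst & ~mask) | ((uint8_t)(src << pos) & mask);
    }

    int main(void)
    {
        for (unsigned t1 = 0; t1 < 0x100; t1++) {
            for (unsigned t2 = 0; t2 < 0x100; t2++) {
                /* Field reaches the sign bit: t2's s_mask, shifted. */
                uint8_t r = deposit8(t1, 4, 4, t2);
                assert(((uint8_t)(s_mask8((int8_t)t2) << 4) & ~s_mask8((int8_t)r)) == 0);

                /* Field below the sign bit: t1's s_mask above it. */
                r = deposit8(t1, 2, 3, t2);
                assert(((s_mask8((int8_t)t1) & (uint8_t)~0x1f) & ~s_mask8((int8_t)r)) == 0);
            }
        }
        return 0;
    }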
Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index c0f0390431..b774c96f49 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1629,8 +1629,9 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     TempOptInfo *t2 = arg_info(op->args[2]);
     int ofs = op->args[3];
     int len = op->args[4];
+    int width;
     TCGOpcode and_opc;
-    uint64_t z_mask;
+    uint64_t z_mask, s_mask;
 
     if (ti_is_const(t1) && ti_is_const(t2)) {
         return tcg_opt_gen_movi(ctx, op, op->args[0],
@@ -1641,9 +1642,11 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     switch (ctx->type) {
     case TCG_TYPE_I32:
         and_opc = INDEX_op_and_i32;
+        width = 32;
         break;
     case TCG_TYPE_I64:
         and_opc = INDEX_op_and_i64;
+        width = 64;
         break;
     default:
         g_assert_not_reached();
@@ -1668,8 +1671,15 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
         return fold_and(ctx, op);
     }
 
+    /* The s_mask from the top portion of the deposit is still valid. */
+    if (ofs + len == width) {
+        s_mask = t2->s_mask << ofs;
+    } else {
+        s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
+    }
+
     z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
-    return fold_masks_z(ctx, op, z_mask);
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }

From 3d5ec804da6e119ad16632675f4ffbbc880ea291 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 19:59:15 -0600
Subject: [PATCH 18/72] tcg/optimize: Use finish_folding in fold_divide

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index b774c96f49..a68221a027 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1688,7 +1688,7 @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 1)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }

From e089d694e1d3a7b4406535b748a324fa28bfaf0f Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:00:51 -0600
Subject: [PATCH 19/72] tcg/optimize: Use finish_folding in fold_dup, fold_dup2

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index a68221a027..803bceb4bd 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1698,7 +1698,7 @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
         t = dup_const(TCGOP_VECE(op), t);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_dup2(OptContext *ctx, TCGOp *op)
@@ -1713,7 +1713,7 @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
         op->opc = INDEX_op_dup_vec;
         TCGOP_VECE(op) = MO_32;
     }
-    return false;
+    return finish_folding(ctx, op);
 }

From ef6be624f6bfc655bbc2dd7c86f50a46ab90a414 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:03:15 -0600
Subject: [PATCH 20/72] tcg/optimize: Use fold_masks_s in fold_eqv

Add fold_masks_s as a trivial wrapper around fold_masks_zs.
Avoid the use of the OptContext slots.
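
The s1 & s2 rule used here holds for any bitwise operation; for eqv it
can be verified exhaustively on an 8-bit model (an editorial sketch,
not part of the original commit):

    #include <assert.h>
    #include <stdint.h>

    /* All bits of v that are equal to its most significant bit. */
    static uint8_t match_msb(uint8_t v)
    {
        uint8_t sign = (v & 0x80) ? 0xff : 0x00;
        return (uint8_t)~(v ^ sign);
    }

    int main(void)
    {
        for (unsigned a = 0; a < 0x100; a++) {
            for (unsigned b = 0; b < 0x100; b++) {
                uint8_t r = (uint8_t)~(a ^ b);   /* eqv */
                /* A bit agreeing with the sign in both inputs agrees
                 * with the sign in the result. */
                assert(((match_msb(a) & match_msb(b)) & ~match_msb(r)) == 0);
            }
        }
        return 0;
    }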
Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 803bceb4bd..f948cc48c9 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1072,6 +1072,11 @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
     return fold_masks_zs(ctx, op, z_mask, 0);
 }
 
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
+{
+    return fold_masks_zs(ctx, op, -1, s_mask);
+}
+
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
@@ -1718,15 +1723,17 @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_x(ctx, op, -1) ||
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }

From b6cd00f1ef6d7d0b789094559f8d8a4537356515 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:05:11 -0600
Subject: [PATCH 21/72] tcg/optimize: Use fold_masks_z in fold_extract

Avoid the use of the OptContext slots. Find TempOptInfo once.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index f948cc48c9..8111c120af 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1739,25 +1739,22 @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
 static bool fold_extract(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask_old, z_mask;
+    TempOptInfo *t1 = arg_info(op->args[1]);
     int pos = op->args[2];
     int len = op->args[3];
 
-    if (arg_is_const(op->args[1])) {
-        uint64_t t;
-
-        t = arg_info(op->args[1])->val;
-        t = extract64(t, pos, len);
-        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    if (ti_is_const(t1)) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0],
+                                extract64(ti_const_val(t1), pos, len));
     }
 
-    z_mask_old = arg_info(op->args[1])->z_mask;
+    z_mask_old = t1->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
     if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
-    ctx->z_mask = z_mask;
 
-    return fold_masks(ctx, op);
+    return fold_masks_z(ctx, op, z_mask);
 }

From c9df99ee8d549875cd274db10c072dc0b373a168 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:06:42 -0600
Subject: [PATCH 22/72] tcg/optimize: Use finish_folding in fold_extract2

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 8111c120af..04ec6fdcef 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1773,7 +1773,7 @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
         }
         return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
     }
-    return false;
+    return finish_folding(ctx, op);
 }

From a96219204f6944def14bdb3a6b2a0bbba172f88a Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:08:46 -0600
Subject: [PATCH 23/72] tcg/optimize: Use fold_masks_zs in fold_exts

Avoid the use of the OptContext slots. Find TempOptInfo once.
Explicitly sign-extend z_mask instead of doing that manually.
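
The soundness of sign-extending the z_mask as if it were a value can
be brute-forced on a small model (an editorial sketch, not part of the
original commit; it assumes the usual two's complement conversions):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* z is the known-zeros mask of the 8-bit input. */
        for (unsigned z = 0; z < 0x100; z++) {
            uint16_t z_ext = (uint16_t)(int16_t)(int8_t)z;  /* ext8s of mask */
            for (unsigned v = 0; v < 0x100; v++) {
                if ((v & ~z) != 0) {
                    continue;                /* v not representable under z */
                }
                uint16_t r = (uint16_t)(int16_t)(int8_t)v;  /* ext8s of value */
                assert((r & ~z_ext) == 0);   /* extended value fits mask */
            }
        }
        return 0;
    }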
Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 04ec6fdcef..3aafe039ed 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1778,49 +1778,44 @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
 
 static bool fold_exts(OptContext *ctx, TCGOp *op)
 {
-    uint64_t s_mask_old, s_mask, z_mask, sign;
+    uint64_t s_mask_old, s_mask, z_mask;
     bool type_change = false;
+    TempOptInfo *t1;
 
     if (fold_const1(ctx, op)) {
         return true;
     }
 
-    z_mask = arg_info(op->args[1])->z_mask;
-    s_mask = arg_info(op->args[1])->s_mask;
+    t1 = arg_info(op->args[1]);
+    z_mask = t1->z_mask;
+    s_mask = t1->s_mask;
     s_mask_old = s_mask;
 
     switch (op->opc) {
     CASE_OP_32_64(ext8s):
-        sign = INT8_MIN;
-        z_mask = (uint8_t)z_mask;
+        s_mask |= INT8_MIN;
+        z_mask = (int8_t)z_mask;
         break;
     CASE_OP_32_64(ext16s):
-        sign = INT16_MIN;
-        z_mask = (uint16_t)z_mask;
+        s_mask |= INT16_MIN;
+        z_mask = (int16_t)z_mask;
         break;
     case INDEX_op_ext_i32_i64:
         type_change = true;
         QEMU_FALLTHROUGH;
     case INDEX_op_ext32s_i64:
-        sign = INT32_MIN;
-        z_mask = (uint32_t)z_mask;
+        s_mask |= INT32_MIN;
+        z_mask = (int32_t)z_mask;
         break;
     default:
         g_assert_not_reached();
     }
 
-    if (z_mask & sign) {
-        z_mask |= sign;
-    }
-    s_mask |= sign << 1;
-
-    ctx->z_mask = z_mask;
-    ctx->s_mask = s_mask;
     if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
 
-    return fold_masks(ctx, op);
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }

From 08abe2908fa597fb3af298408c261e17378c54d6 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:11:44 -0600
Subject: [PATCH 24/72] tcg/optimize: Use fold_masks_z in fold_extu

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 3aafe039ed..f62e7adfe1 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1851,11 +1851,11 @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
 
-    ctx->z_mask = z_mask;
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
-    return fold_masks(ctx, op);
+
+    return fold_masks_z(ctx, op, z_mask);
 }

From 322027841f2e35adef592e28ee1288d90232185f Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:16:38 -0600
Subject: [PATCH 25/72] tcg/optimize: Use fold_masks_zs in fold_movcond

Avoid the use of the OptContext slots. Find TempOptInfo once.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index f62e7adfe1..0104582b3a 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1889,6 +1889,8 @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
 
 static bool fold_movcond(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask, s_mask;
+    TempOptInfo *tt, *ft;
     int i;
 
     /* If true and false values are the same, eliminate the cmp. */
@@ -1910,14 +1912,14 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
     }
 
-    ctx->z_mask = arg_info(op->args[3])->z_mask
-                | arg_info(op->args[4])->z_mask;
-    ctx->s_mask = arg_info(op->args[3])->s_mask
-                & arg_info(op->args[4])->s_mask;
+    tt = arg_info(op->args[3]);
+    ft = arg_info(op->args[4]);
+    z_mask = tt->z_mask | ft->z_mask;
+    s_mask = tt->s_mask & ft->s_mask;
 
-    if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
-        uint64_t tv = arg_info(op->args[3])->val;
-        uint64_t fv = arg_info(op->args[4])->val;
+    if (ti_is_const(tt) && ti_is_const(ft)) {
+        uint64_t tv = ti_const_val(tt);
+        uint64_t fv = ti_const_val(ft);
         TCGOpcode opc, negopc = 0;
         TCGCond cond = op->args[5];
 
@@ -1956,7 +1958,8 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
             }
         }
     }
-    return false;
+
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
From cd9c5834d83ccde38268a52d3201659a6286428b Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:18:02 -0600
Subject: [PATCH 26/72] tcg/optimize: Use finish_folding in fold_mul*

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 0104582b3a..10d1376f62 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1969,7 +1969,7 @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 1)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
@@ -1978,7 +1978,7 @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
         fold_xi_to_i(ctx, op, 0)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_multiply2(OptContext *ctx, TCGOp *op)
@@ -2023,7 +2023,7 @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
         tcg_opt_gen_movi(ctx, op2, rh, h);
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }

From fa3168ee93e4a9cabe31824f7918bfe4b7a56369 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:20:40 -0600
Subject: [PATCH 27/72] tcg/optimize: Use fold_masks_s in fold_nand

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 10d1376f62..7fe5bd6012 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2028,14 +2028,16 @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
 
 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }

From d151fd34b090ddb40b073f1bd2ac4c893a67d1eb Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:23:11 -0600
Subject: [PATCH 28/72] tcg/optimize: Use fold_masks_z in fold_neg_no_const

Avoid the use of the OptContext slots.
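
The z_mask rule for negation can be checked on a small model (an
editorial sketch, not part of the original commit; the mask is an
invented example): -x keeps the lowest set bit of x in place and may
set anything above it.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t z_mask = 0x68;                         /* example mask */
        uint8_t neg_z = (uint8_t)-(z_mask & -z_mask);  /* bits left of
                                                          the rightmost */

        for (unsigned v = 0; v < 0x100; v++) {
            if ((v & ~z_mask) != 0) {
                continue;                    /* v not allowed by z_mask */
            }
            uint8_t r = (uint8_t)-v;
            assert((r & ~neg_z) == 0);       /* result fits the new mask */
        }
        return 0;
    }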
Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 7fe5bd6012..fbaaece152 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2044,14 +2044,9 @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
 {
     /* Set to 1 all bits to the left of the rightmost.  */
     uint64_t z_mask = arg_info(op->args[1])->z_mask;
-    ctx->z_mask = -(z_mask & -z_mask);
+    z_mask = -(z_mask & -z_mask);
 
-    /*
-     * Because of fold_sub_to_neg, we want to always return true,
-     * via finish_folding.
-     */
-    finish_folding(ctx, op);
-    return true;
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_neg(OptContext *ctx, TCGOp *op)

From 2b7b69575733d5568e86d850a1c17e3414be974b Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:25:21 -0600
Subject: [PATCH 29/72] tcg/optimize: Use fold_masks_s in fold_nor

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index fbaaece152..acff3985f3 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2056,14 +2056,16 @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
 
 static bool fold_nor(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }

From 608e75fc0c957e6d07cb65d8203c2646a6723dc9 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:27:02 -0600
Subject: [PATCH 30/72] tcg/optimize: Use fold_masks_s in fold_not

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index acff3985f3..4ede218bfc 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2073,12 +2073,7 @@ static bool fold_not(OptContext *ctx, TCGOp *op)
     if (fold_const1(ctx, op)) {
         return true;
     }
-
-    ctx->s_mask = arg_info(op->args[1])->s_mask;
-
-    /* Because of fold_to_not, we want to always return true, via finish. */
-    finish_folding(ctx, op);
-    return true;
+    return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
 }

From 83b1ba3696a4c647d500705cdd11e79b69462cd9 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:28:59 -0600
Subject: [PATCH 31/72] tcg/optimize: Use fold_masks_zs in fold_or

Avoid the use of the OptContext slots. Find TempOptInfo once.
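
Both mask rules used here (z_or = z1 | z2, s_or = s1 & s2) can be
brute-forced on an 8-bit model (an editorial sketch, not part of the
original commit; the z-masks are invented examples):

    #include <assert.h>
    #include <stdint.h>

    /* All bits of v equal to its most significant bit. */
    static uint8_t match_msb(uint8_t v)
    {
        uint8_t sign = (v & 0x80) ? 0xff : 0x00;
        return (uint8_t)~(v ^ sign);
    }

    int main(void)
    {
        uint8_t z1 = 0x8f, z2 = 0x3c;        /* example known-zero masks */
        for (unsigned a = 0; a < 0x100; a++) {
            for (unsigned b = 0; b < 0x100; b++) {
                uint8_t r = a | b;
                /* s_mask rule: s1 & s2 is sound for OR. */
                assert(((match_msb(a) & match_msb(b)) & ~match_msb(r)) == 0);
                /* z_mask rule: values within z1/z2 OR into z1 | z2. */
                assert((((a & z1) | (b & z2)) & ~(z1 | z2)) == 0);
            }
        }
        return 0;
    }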
Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 4ede218bfc..e284d79fb1 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2078,17 +2078,20 @@ static bool fold_not(OptContext *ctx, TCGOp *op)
 
 static bool fold_or(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask, s_mask;
+    TempOptInfo *t1, *t2;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_x(ctx, op, 0) ||
         fold_xx_to_x(ctx, op)) {
         return true;
     }
 
-    ctx->z_mask = arg_info(op->args[1])->z_mask
-                | arg_info(op->args[2])->z_mask;
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return fold_masks(ctx, op);
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    z_mask = t1->z_mask | t2->z_mask;
+    s_mask = t1->s_mask & t2->s_mask;
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }

From 54e26b292bbf9602f49a66c0c022a623d0beec4b Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:30:20 -0600
Subject: [PATCH 32/72] tcg/optimize: Use fold_masks_zs in fold_orc

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index e284d79fb1..81ed26a376 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2096,6 +2096,8 @@ static bool fold_or(OptContext *ctx, TCGOp *op)
 
 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, -1) ||
         fold_xi_to_x(ctx, op, -1) ||
@@ -2103,9 +2105,9 @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }

From 6813be9b9bb962865eb6770555f34d4b0d6066f3 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sun, 8 Dec 2024 20:33:30 -0600
Subject: [PATCH 33/72] tcg/optimize: Use fold_masks_zs in fold_qemu_ld

Avoid the use of the OptContext slots.

Be careful not to call fold_masks_zs when the memory operation
is wide enough to require multiple outputs, so split into two
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.

Reviewed-by: Pierrick Bouvier
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 81ed26a376..7bd17a36c7 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2110,24 +2110,33 @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
     return fold_masks_s(ctx, op, s_mask);
 }
 
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
     MemOp mop = get_memop(oi);
     int width = 8 * memop_size(mop);
+    uint64_t z_mask = -1, s_mask = 0;
 
     if (width < 64) {
         if (mop & MO_SIGN) {
-            ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+            s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
         } else {
-            ctx->z_mask = MAKE_64BIT_MASK(0, width);
+            z_mask = MAKE_64BIT_MASK(0, width);
         }
     }
 
     /* Opcodes that touch guest memory stop the mb optimization. */
     ctx->prev_mb = NULL;
-    return false;
+
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
+}
+
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
+{
+    /* Opcodes that touch guest memory stop the mb optimization. */
+    ctx->prev_mb = NULL;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
@@ -3012,11 +3021,18 @@ void tcg_optimize(TCGContext *s)
             break;
         case INDEX_op_qemu_ld_a32_i32:
         case INDEX_op_qemu_ld_a64_i32:
+            done = fold_qemu_ld_1reg(&ctx, op);
+            break;
         case INDEX_op_qemu_ld_a32_i64:
         case INDEX_op_qemu_ld_a64_i64:
+            if (TCG_TARGET_REG_BITS == 64) {
+                done = fold_qemu_ld_1reg(&ctx, op);
+                break;
+            }
+            QEMU_FALLTHROUGH;
         case INDEX_op_qemu_ld_a32_i128:
         case INDEX_op_qemu_ld_a64_i128:
-            done = fold_qemu_ld(&ctx, op);
+            done = fold_qemu_ld_2reg(&ctx, op);
             break;
         case INDEX_op_qemu_st8_a32_i32:
         case INDEX_op_qemu_st8_a64_i32:
*/ ctx->prev_mb = NULL; - return false; + + return fold_masks_zs(ctx, op, z_mask, s_mask); +} + +static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op) +{ + /* Opcodes that touch guest memory stop the mb optimization. */ + ctx->prev_mb = NULL; + return finish_folding(ctx, op); } static bool fold_qemu_st(OptContext *ctx, TCGOp *op) @@ -3012,11 +3021,18 @@ void tcg_optimize(TCGContext *s) break; case INDEX_op_qemu_ld_a32_i32: case INDEX_op_qemu_ld_a64_i32: + done = fold_qemu_ld_1reg(&ctx, op); + break; case INDEX_op_qemu_ld_a32_i64: case INDEX_op_qemu_ld_a64_i64: + if (TCG_TARGET_REG_BITS == 64) { + done = fold_qemu_ld_1reg(&ctx, op); + break; + } + QEMU_FALLTHROUGH; case INDEX_op_qemu_ld_a32_i128: case INDEX_op_qemu_ld_a64_i128: - done = fold_qemu_ld(&ctx, op); + done = fold_qemu_ld_2reg(&ctx, op); break; case INDEX_op_qemu_st8_a32_i32: case INDEX_op_qemu_st8_a64_i32: From 082b3ef9195571d543c32ab4afe5fc516153a9e5 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 20:34:57 -0600 Subject: [PATCH 34/72] tcg/optimize: Return true from fold_qemu_st, fold_tcg_st Stores have no output operands, and so need no further work. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 7bd17a36c7..07792c5351 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2143,7 +2143,7 @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op) { /* Opcodes that touch guest memory stop the mb optimization. */ ctx->prev_mb = NULL; - return false; + return true; } static bool fold_remainder(OptContext *ctx, TCGOp *op) @@ -2706,7 +2706,7 @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op) if (op->args[1] != tcgv_ptr_arg(tcg_env)) { remove_mem_copy_all(ctx); - return false; + return true; } switch (op->opc) { @@ -2730,7 +2730,7 @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op) g_assert_not_reached(); } remove_mem_copy_in(ctx, ofs, ofs + lm1); - return false; + return true; } static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op) @@ -2740,8 +2740,7 @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op) TCGType type; if (op->args[1] != tcgv_ptr_arg(tcg_env)) { - fold_tcg_st(ctx, op); - return false; + return fold_tcg_st(ctx, op); } src = arg_temp(op->args[0]); @@ -2763,7 +2762,7 @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op) last = ofs + tcg_type_size(type) - 1; remove_mem_copy_in(ctx, ofs, last); record_mem_copy(ctx, type, src, ofs, last); - return false; + return true; } static bool fold_xor(OptContext *ctx, TCGOp *op) From f9e3934903a8e388559c373f93544e3f9e6a9fc0 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 20:36:50 -0600 Subject: [PATCH 35/72] tcg/optimize: Use finish_folding in fold_remainder Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 07792c5351..e78f5a79a3 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2152,7 +2152,7 @@ static bool fold_remainder(OptContext *ctx, TCGOp *op) fold_xx_to_i(ctx, op, 0)) { return true; } - return false; + return finish_folding(ctx, op); } static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg) From 95eb229363f28aaacf506974cdb3047d816345fe Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 20:47:59 -0600 Subject: [PATCH 36/72] tcg/optimize: Distinguish simplification in fold_setcond_zmask 
Change return from bool to int; distinguish between complete folding, simplification, and no change. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index e78f5a79a3..678015a94a 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2155,7 +2155,8 @@ static bool fold_remainder(OptContext *ctx, TCGOp *op) return finish_folding(ctx, op); } -static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg) +/* Return 1 if finished, -1 if simplified, 0 if unchanged. */ +static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg) { uint64_t a_zmask, b_val; TCGCond cond; @@ -2250,11 +2251,10 @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg) op->opc = xor_opc; op->args[2] = arg_new_constant(ctx, 1); } - return false; + return -1; } } - - return false; + return 0; } static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg) @@ -2359,10 +2359,13 @@ static bool fold_setcond(OptContext *ctx, TCGOp *op) return tcg_opt_gen_movi(ctx, op, op->args[0], i); } - if (fold_setcond_zmask(ctx, op, false)) { + i = fold_setcond_zmask(ctx, op, false); + if (i > 0) { return true; } - fold_setcond_tst_pow2(ctx, op, false); + if (i == 0) { + fold_setcond_tst_pow2(ctx, op, false); + } ctx->z_mask = 1; return false; @@ -2376,10 +2379,13 @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op) return tcg_opt_gen_movi(ctx, op, op->args[0], -i); } - if (fold_setcond_zmask(ctx, op, true)) { + i = fold_setcond_zmask(ctx, op, true); + if (i > 0) { return true; } - fold_setcond_tst_pow2(ctx, op, true); + if (i == 0) { + fold_setcond_tst_pow2(ctx, op, true); + } /* Value is {0,-1} so all bits are repetitions of the sign. */ ctx->s_mask = -1; From 2c8a28398d65e2e4ff31061533873ed09b894543 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 20:50:37 -0600 Subject: [PATCH 37/72] tcg/optimize: Use fold_masks_z in fold_setcond Avoid the use of the OptContext slots. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 678015a94a..74be827f51 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2367,8 +2367,7 @@ static bool fold_setcond(OptContext *ctx, TCGOp *op) fold_setcond_tst_pow2(ctx, op, false); } - ctx->z_mask = 1; - return false; + return fold_masks_z(ctx, op, 1); } static bool fold_negsetcond(OptContext *ctx, TCGOp *op) From 081cf08b09edf0bea704126b607220150c9b5630 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 20:50:58 -0600 Subject: [PATCH 38/72] tcg/optimize: Use fold_masks_s in fold_negsetcond Avoid the use of the OptContext slots. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 74be827f51..7e909791e1 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2387,8 +2387,7 @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op) } /* Value is {0,-1} so all bits are repetitions of the sign. 
*/ - ctx->s_mask = -1; - return false; + return fold_masks_s(ctx, op, -1); } static bool fold_setcond2(OptContext *ctx, TCGOp *op) From a53502c0b4b63bc6dd5bf891231e145cf7a637ff Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 20:56:36 -0600 Subject: [PATCH 39/72] tcg/optimize: Use fold_masks_z in fold_setcond2 Avoid the use of the OptContext slots. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 7e909791e1..c61d0eae4e 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2468,8 +2468,7 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op) return fold_setcond(ctx, op); } - ctx->z_mask = 1; - return false; + return fold_masks_z(ctx, op, 1); do_setcond_const: return tcg_opt_gen_movi(ctx, op, op->args[0], i); From 4d20104f9f2deef6d30109f0bba5725c0dcc08da Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 20:57:53 -0600 Subject: [PATCH 40/72] tcg/optimize: Use finish_folding in fold_cmp_vec Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index c61d0eae4e..ccdac7b7d7 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2480,7 +2480,7 @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op) if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) { op->args[3] = tcg_swap_cond(op->args[3]); } - return false; + return finish_folding(ctx, op); } static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op) From 210c70b7ac449f2eabc55893eca15fe36d36391f Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 20:59:15 -0600 Subject: [PATCH 41/72] tcg/optimize: Use finish_folding in fold_cmpsel_vec Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index ccdac7b7d7..4090ffe12c 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2501,7 +2501,7 @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op) if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { op->args[5] = tcg_invert_cond(op->args[5]); } - return false; + return finish_folding(ctx, op); } static bool fold_sextract(OptContext *ctx, TCGOp *op) From baff507e50fbcb270a1a7b448c2cc37cc7f9ec05 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 21:09:30 -0600 Subject: [PATCH 42/72] tcg/optimize: Use fold_masks_zs in fold_sextract Avoid the use of the OptContext slots. Find TempOptInfo once. 
Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 4090ffe12c..2d634c8925 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2507,31 +2507,25 @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op) static bool fold_sextract(OptContext *ctx, TCGOp *op) { uint64_t z_mask, s_mask, s_mask_old; + TempOptInfo *t1 = arg_info(op->args[1]); int pos = op->args[2]; int len = op->args[3]; - if (arg_is_const(op->args[1])) { - uint64_t t; - - t = arg_info(op->args[1])->val; - t = sextract64(t, pos, len); - return tcg_opt_gen_movi(ctx, op, op->args[0], t); + if (ti_is_const(t1)) { + return tcg_opt_gen_movi(ctx, op, op->args[0], + sextract64(ti_const_val(t1), pos, len)); } - z_mask = arg_info(op->args[1])->z_mask; - z_mask = sextract64(z_mask, pos, len); - ctx->z_mask = z_mask; - - s_mask_old = arg_info(op->args[1])->s_mask; - s_mask = sextract64(s_mask_old, pos, len); - s_mask |= MAKE_64BIT_MASK(len, 64 - len); - ctx->s_mask = s_mask; + s_mask_old = t1->s_mask; + s_mask = s_mask_old >> pos; + s_mask |= -1ull << (len - 1); if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) { return true; } - return fold_masks(ctx, op); + z_mask = sextract64(t1->z_mask, pos, len); + return fold_masks_zs(ctx, op, z_mask, s_mask); } static bool fold_shift(OptContext *ctx, TCGOp *op) From 4e9ce6a2ec73d42e12aedf21b255dc00b378fc8d Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 21:13:41 -0600 Subject: [PATCH 43/72] tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift Avoid the use of the OptContext slots. Find TempOptInfo once. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 2d634c8925..b70e9bdaf5 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2531,6 +2531,7 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op) static bool fold_shift(OptContext *ctx, TCGOp *op) { uint64_t s_mask, z_mask, sign; + TempOptInfo *t1, *t2; if (fold_const2(ctx, op) || fold_ix_to_i(ctx, op, 0) || @@ -2538,17 +2539,18 @@ static bool fold_shift(OptContext *ctx, TCGOp *op) return true; } - s_mask = arg_info(op->args[1])->s_mask; - z_mask = arg_info(op->args[1])->z_mask; + t1 = arg_info(op->args[1]); + t2 = arg_info(op->args[2]); + s_mask = t1->s_mask; + z_mask = t1->z_mask; - if (arg_is_const(op->args[2])) { - int sh = arg_info(op->args[2])->val; - - ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh); + if (ti_is_const(t2)) { + int sh = ti_const_val(t2); + z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh); s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh); - return fold_masks(ctx, op); + return fold_masks_zs(ctx, op, z_mask, s_mask); } switch (op->opc) { @@ -2557,23 +2559,22 @@ static bool fold_shift(OptContext *ctx, TCGOp *op) * Arithmetic right shift will not reduce the number of * input sign repetitions. */ - ctx->s_mask = s_mask; - break; + return fold_masks_s(ctx, op, s_mask); CASE_OP_32_64(shr): /* * If the sign bit is known zero, then logical right shift - * will not reduced the number of input sign repetitions. + * will not reduce the number of input sign repetitions. 
*/ - sign = (s_mask & -s_mask) >> 1; + sign = -s_mask; if (sign && !(z_mask & sign)) { - ctx->s_mask = s_mask; + return fold_masks_s(ctx, op, s_mask); } break; default: break; } - return false; + return finish_folding(ctx, op); } static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op) From 4ed2ba3f4abe6b3c03a905d44cdd18b3a3c1ce33 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Thu, 19 Dec 2024 19:38:54 -0800 Subject: [PATCH 44/72] tcg/optimize: Simplify sign bit test in fold_shift Merge the two conditions, sign != 0 && !(z_mask & sign), by testing ~z_mask & sign. If sign == 0, the logical and will produce false. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index b70e9bdaf5..26790f7c27 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2530,7 +2530,7 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op) static bool fold_shift(OptContext *ctx, TCGOp *op) { - uint64_t s_mask, z_mask, sign; + uint64_t s_mask, z_mask; TempOptInfo *t1, *t2; if (fold_const2(ctx, op) || @@ -2565,8 +2565,7 @@ static bool fold_shift(OptContext *ctx, TCGOp *op) * If the sign bit is known zero, then logical right shift * will not reduce the number of input sign repetitions. */ - sign = -s_mask; - if (sign && !(z_mask & sign)) { + if (~z_mask & -s_mask) { return fold_masks_s(ctx, op, s_mask); } break; From fe1d0074b5cc64e0a548dfba8ab322bd8710c7e5 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 21:15:22 -0600 Subject: [PATCH 45/72] tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec Duplicate fold_sub_vec into fold_sub instead of calling it, now that fold_sub_vec always returns true. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 26790f7c27..cd052a2dbf 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2620,12 +2620,15 @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op) fold_sub_to_neg(ctx, op)) { return true; } - return false; + return finish_folding(ctx, op); } static bool fold_sub(OptContext *ctx, TCGOp *op) { - if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) { + if (fold_const2(ctx, op) || + fold_xx_to_i(ctx, op, 0) || + fold_xi_to_x(ctx, op, 0) || + fold_sub_to_neg(ctx, op)) { return true; } @@ -2637,7 +2640,7 @@ static bool fold_sub(OptContext *ctx, TCGOp *op) ? INDEX_op_add_i32 : INDEX_op_add_i64); op->args[2] = arg_new_constant(ctx, -val); } - return false; + return finish_folding(ctx, op); } static bool fold_sub2(OptContext *ctx, TCGOp *op) From d33e0f01db0f75c890a8ed1f1116d45080ca1c3c Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 9 Dec 2024 08:53:20 -0600 Subject: [PATCH 46/72] tcg/optimize: Use fold_masks_zs in fold_tcg_ld Avoid the use of the OptContext slots. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index cd052a2dbf..7141b18496 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2650,30 +2650,32 @@ static bool fold_sub2(OptContext *ctx, TCGOp *op) static bool fold_tcg_ld(OptContext *ctx, TCGOp *op) { + uint64_t z_mask = -1, s_mask = 0; + /* We can't do any folding with a load, but we can record bits. 
*/ switch (op->opc) { CASE_OP_32_64(ld8s): - ctx->s_mask = MAKE_64BIT_MASK(8, 56); + s_mask = INT8_MIN; break; CASE_OP_32_64(ld8u): - ctx->z_mask = MAKE_64BIT_MASK(0, 8); + z_mask = MAKE_64BIT_MASK(0, 8); break; CASE_OP_32_64(ld16s): - ctx->s_mask = MAKE_64BIT_MASK(16, 48); + s_mask = INT16_MIN; break; CASE_OP_32_64(ld16u): - ctx->z_mask = MAKE_64BIT_MASK(0, 16); + z_mask = MAKE_64BIT_MASK(0, 16); break; case INDEX_op_ld32s_i64: - ctx->s_mask = MAKE_64BIT_MASK(32, 32); + s_mask = INT32_MIN; break; case INDEX_op_ld32u_i64: - ctx->z_mask = MAKE_64BIT_MASK(0, 32); + z_mask = MAKE_64BIT_MASK(0, 32); break; default: g_assert_not_reached(); } - return false; + return fold_masks_zs(ctx, op, z_mask, s_mask); } static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op) From 0fb5b757c344cb57d7c81922262bc8546c3ab504 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 9 Dec 2024 09:44:40 -0600 Subject: [PATCH 47/72] tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 7141b18496..047cb5a1ee 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2685,7 +2685,7 @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op) TCGType type; if (op->args[1] != tcgv_ptr_arg(tcg_env)) { - return false; + return finish_folding(ctx, op); } type = ctx->type; From c890fd71794601431694ce0650055fbe927a1d8e Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 21:39:01 -0600 Subject: [PATCH 48/72] tcg/optimize: Use fold_masks_zs in fold_xor Avoid the use of the OptContext slots. Find TempOptInfo once. Remove fold_masks as the function becomes unused. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 047cb5a1ee..d543266b8d 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -1077,11 +1077,6 @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask) return fold_masks_zs(ctx, op, -1, s_mask); } -static bool fold_masks(OptContext *ctx, TCGOp *op) -{ - return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask); -} - /* * An "affected" mask bit is 0 if and only if the result is identical * to the first input. 
Thus if the entire mask is 0, the operation @@ -2769,6 +2764,9 @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op) static bool fold_xor(OptContext *ctx, TCGOp *op) { + uint64_t z_mask, s_mask; + TempOptInfo *t1, *t2; + if (fold_const2_commutative(ctx, op) || fold_xx_to_i(ctx, op, 0) || fold_xi_to_x(ctx, op, 0) || @@ -2776,11 +2774,11 @@ static bool fold_xor(OptContext *ctx, TCGOp *op) return true; } - ctx->z_mask = arg_info(op->args[1])->z_mask - | arg_info(op->args[2])->z_mask; - ctx->s_mask = arg_info(op->args[1])->s_mask - & arg_info(op->args[2])->s_mask; - return fold_masks(ctx, op); + t1 = arg_info(op->args[1]); + t2 = arg_info(op->args[2]); + z_mask = t1->z_mask | t2->z_mask; + s_mask = t1->s_mask & t2->s_mask; + return fold_masks_zs(ctx, op, z_mask, s_mask); } static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op) From 4fcd14ca64aaaed88a8b6a5a22b517397a7053b1 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 21:40:25 -0600 Subject: [PATCH 49/72] tcg/optimize: Use finish_folding in fold_bitsel_vec Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index d543266b8d..4271d14d2c 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2833,7 +2833,7 @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op) return fold_orc(ctx, op); } } - return false; + return finish_folding(ctx, op); } /* Propagate constants and copies, fold constant expressions. */ From 0ae564288947d3670aaa75c931e838d5265d2a64 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 21:42:53 -0600 Subject: [PATCH 50/72] tcg/optimize: Use finish_folding as default in tcg_optimize All non-default cases now finish folding within each function. Do the same with the default case and assert it is done after. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 4271d14d2c..51cfcb15d2 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -3096,11 +3096,9 @@ void tcg_optimize(TCGContext *s) done = true; break; default: + done = finish_folding(&ctx, op); break; } - - if (!done) { - finish_folding(&ctx, op); - } + tcg_debug_assert(done); } } From a3a88b17c2b2f682554a113e87e764c516e93e08 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 9 Dec 2024 13:57:09 -0600 Subject: [PATCH 51/72] tcg/optimize: Remove z_mask, s_mask from OptContext All mask setting is now done with parameters via fold_masks_*. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 51cfcb15d2..98b41975af 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -64,8 +64,6 @@ typedef struct OptContext { QSIMPLEQ_HEAD(, MemCopyInfo) mem_free; /* In flight values from optimization. */ - uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */ - uint64_t s_mask; /* mask bit is 1 if value bit matches msb */ TCGType type; } OptContext; @@ -961,13 +959,6 @@ static bool finish_folding(OptContext *ctx, TCGOp *op) for (i = 0; i < nb_oargs; i++) { TCGTemp *ts = arg_temp(op->args[i]); reset_ts(ctx, ts); - /* - * Save the corresponding known-zero/sign bits mask for the - * first output argument (only one supported so far). 
- */ - if (i == 0) { - ts_info(ts)->z_mask = ctx->z_mask; - } } return true; } @@ -2879,10 +2870,6 @@ void tcg_optimize(TCGContext *s) ctx.type = TCG_TYPE_I32; } - /* Assume all bits affected, no bits known zero, no sign reps. */ - ctx.z_mask = -1; - ctx.s_mask = 0; - /* * Process each opcode. * Sorted alphabetically by opcode as much as possible. From aa9e0501a445d1897b960f5014050497e8e70105 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 21 Dec 2024 22:03:53 -0800 Subject: [PATCH 52/72] tcg/optimize: Re-enable sign-mask optimizations All instances of s_mask have been converted to the new representation. We can now re-enable usage. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 98b41975af..182be7e63c 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -1797,7 +1797,7 @@ static bool fold_exts(OptContext *ctx, TCGOp *op) g_assert_not_reached(); } - if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) { + if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) { return true; } @@ -2506,7 +2506,7 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op) s_mask = s_mask_old >> pos; s_mask |= -1ull << (len - 1); - if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) { + if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) { return true; } From 7d3c63aca11ee365b36524c95d2d75d70516c6bd Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 9 Dec 2024 14:06:08 -0600 Subject: [PATCH 53/72] tcg/optimize: Move fold_bitsel_vec into alphabetic sort The big comment just above says functions should be sorted. Add forward declarations as needed. Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------ 1 file changed, 59 insertions(+), 55 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 182be7e63c..1df61378ea 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -1193,6 +1193,10 @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op) * 3) those that produce information about the result value. */ +static bool fold_or(OptContext *ctx, TCGOp *op); +static bool fold_orc(OptContext *ctx, TCGOp *op); +static bool fold_xor(OptContext *ctx, TCGOp *op); + static bool fold_add(OptContext *ctx, TCGOp *op) { if (fold_const2_commutative(ctx, op) || @@ -1356,6 +1360,61 @@ static bool fold_andc(OptContext *ctx, TCGOp *op) return fold_masks_zs(ctx, op, z_mask, s_mask); } +static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op) +{ + /* If true and false values are the same, eliminate the cmp. 
*/ + if (args_are_copies(op->args[2], op->args[3])) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]); + } + + if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { + uint64_t tv = arg_info(op->args[2])->val; + uint64_t fv = arg_info(op->args[3])->val; + + if (tv == -1 && fv == 0) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); + } + if (tv == 0 && fv == -1) { + if (TCG_TARGET_HAS_not_vec) { + op->opc = INDEX_op_not_vec; + return fold_not(ctx, op); + } else { + op->opc = INDEX_op_xor_vec; + op->args[2] = arg_new_constant(ctx, -1); + return fold_xor(ctx, op); + } + } + } + if (arg_is_const(op->args[2])) { + uint64_t tv = arg_info(op->args[2])->val; + if (tv == -1) { + op->opc = INDEX_op_or_vec; + op->args[2] = op->args[3]; + return fold_or(ctx, op); + } + if (tv == 0 && TCG_TARGET_HAS_andc_vec) { + op->opc = INDEX_op_andc_vec; + op->args[2] = op->args[1]; + op->args[1] = op->args[3]; + return fold_andc(ctx, op); + } + } + if (arg_is_const(op->args[3])) { + uint64_t fv = arg_info(op->args[3])->val; + if (fv == 0) { + op->opc = INDEX_op_and_vec; + return fold_and(ctx, op); + } + if (fv == -1 && TCG_TARGET_HAS_orc_vec) { + op->opc = INDEX_op_orc_vec; + op->args[2] = op->args[1]; + op->args[1] = op->args[3]; + return fold_orc(ctx, op); + } + } + return finish_folding(ctx, op); +} + static bool fold_brcond(OptContext *ctx, TCGOp *op) { int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0], @@ -2772,61 +2831,6 @@ static bool fold_xor(OptContext *ctx, TCGOp *op) return fold_masks_zs(ctx, op, z_mask, s_mask); } -static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op) -{ - /* If true and false values are the same, eliminate the cmp. */ - if (args_are_copies(op->args[2], op->args[3])) { - return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]); - } - - if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { - uint64_t tv = arg_info(op->args[2])->val; - uint64_t fv = arg_info(op->args[3])->val; - - if (tv == -1 && fv == 0) { - return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); - } - if (tv == 0 && fv == -1) { - if (TCG_TARGET_HAS_not_vec) { - op->opc = INDEX_op_not_vec; - return fold_not(ctx, op); - } else { - op->opc = INDEX_op_xor_vec; - op->args[2] = arg_new_constant(ctx, -1); - return fold_xor(ctx, op); - } - } - } - if (arg_is_const(op->args[2])) { - uint64_t tv = arg_info(op->args[2])->val; - if (tv == -1) { - op->opc = INDEX_op_or_vec; - op->args[2] = op->args[3]; - return fold_or(ctx, op); - } - if (tv == 0 && TCG_TARGET_HAS_andc_vec) { - op->opc = INDEX_op_andc_vec; - op->args[2] = op->args[1]; - op->args[1] = op->args[3]; - return fold_andc(ctx, op); - } - } - if (arg_is_const(op->args[3])) { - uint64_t fv = arg_info(op->args[3])->val; - if (fv == 0) { - op->opc = INDEX_op_and_vec; - return fold_and(ctx, op); - } - if (fv == -1 && TCG_TARGET_HAS_orc_vec) { - op->opc = INDEX_op_orc_vec; - op->args[2] = op->args[1]; - op->args[1] = op->args[3]; - return fold_orc(ctx, op); - } - } - return finish_folding(ctx, op); -} - /* Propagate constants and copies, fold constant expressions. */ void tcg_optimize(TCGContext *s) { From 29f6586f6167a0ef6c8eaeb8c3cbdf4ff4c9d762 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 9 Dec 2024 14:09:49 -0600 Subject: [PATCH 54/72] tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort The big comment just above says functions should be sorted. 
Reviewed-by: Pierrick Bouvier Signed-off-by: Richard Henderson --- tcg/optimize.c | 60 +++++++++++++++++++++++++------------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/tcg/optimize.c b/tcg/optimize.c index 1df61378ea..c23f0d1392 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -1619,6 +1619,36 @@ static bool fold_call(OptContext *ctx, TCGOp *op) return true; } +static bool fold_cmp_vec(OptContext *ctx, TCGOp *op) +{ + /* Canonicalize the comparison to put immediate second. */ + if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) { + op->args[3] = tcg_swap_cond(op->args[3]); + } + return finish_folding(ctx, op); +} + +static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op) +{ + /* If true and false values are the same, eliminate the cmp. */ + if (args_are_copies(op->args[3], op->args[4])) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]); + } + + /* Canonicalize the comparison to put immediate second. */ + if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) { + op->args[5] = tcg_swap_cond(op->args[5]); + } + /* + * Canonicalize the "false" input reg to match the destination, + * so that the tcg backend can implement "move if true". + */ + if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { + op->args[5] = tcg_invert_cond(op->args[5]); + } + return finish_folding(ctx, op); +} + static bool fold_count_zeros(OptContext *ctx, TCGOp *op) { uint64_t z_mask, s_mask; @@ -2519,36 +2549,6 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op) return tcg_opt_gen_movi(ctx, op, op->args[0], i); } -static bool fold_cmp_vec(OptContext *ctx, TCGOp *op) -{ - /* Canonicalize the comparison to put immediate second. */ - if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) { - op->args[3] = tcg_swap_cond(op->args[3]); - } - return finish_folding(ctx, op); -} - -static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op) -{ - /* If true and false values are the same, eliminate the cmp. */ - if (args_are_copies(op->args[3], op->args[4])) { - return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]); - } - - /* Canonicalize the comparison to put immediate second. */ - if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) { - op->args[5] = tcg_swap_cond(op->args[5]); - } - /* - * Canonicalize the "false" input reg to match the destination, - * so that the tcg backend can implement "move if true". - */ - if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { - op->args[5] = tcg_invert_cond(op->args[5]); - } - return finish_folding(ctx, op); -} - static bool fold_sextract(OptContext *ctx, TCGOp *op) { uint64_t z_mask, s_mask, s_mask_old; From 910556bbf4ffe41c9de5cf7f2c3a269ac2de5324 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 7 Dec 2024 17:21:25 -0600 Subject: [PATCH 55/72] softfloat: Add float{16,32,64}_muladd_scalbn MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We currently have a flag, float_muladd_halve_result, to scale the result by 2**-1. Extend this to handle arbitrary scaling. 
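For illustration, the previous halve-result behaviour corresponds to a scale of -1; the scale is applied to the exponent before the single rounding step. A minimal sketch, assuming only the signatures added by this patch (the helper name is hypothetical):

    /* (a * b + c) * 2**-1, rounded once. */
    static float64 muladd_halved(float64 a, float64 b, float64 c,
                                 float_status *st)
    {
        return float64_muladd_scalbn(a, b, c, -1, 0, st);
    }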
Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- fpu/softfloat-parts.c.inc | 7 +++-- fpu/softfloat.c | 58 ++++++++++++++++++++++----------------- include/fpu/softfloat.h | 6 ++++ 3 files changed, 44 insertions(+), 27 deletions(-) diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc index ba8de7be76..4a62d6ca24 100644 --- a/fpu/softfloat-parts.c.inc +++ b/fpu/softfloat-parts.c.inc @@ -562,8 +562,9 @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b, * Requires A and C extracted into a double-sized structure to provide the * extra space for the widening multiply. */ -static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b, - FloatPartsN *c, int flags, float_status *s) +static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b, + FloatPartsN *c, int scale, + int flags, float_status *s) { int ab_mask, abc_mask; FloatPartsW p_widen, c_widen; @@ -652,9 +653,11 @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b, a->exp = p_widen.exp; return_normal: + /* TODO: Replace all use of float_muladd_halve_result with scale. */ if (flags & float_muladd_halve_result) { a->exp -= 1; } + a->exp += scale; finish_sign: if (flags & float_muladd_negate_result) { a->sign ^= 1; diff --git a/fpu/softfloat.c b/fpu/softfloat.c index 8de8d5f342..b5936cc0f8 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -789,15 +789,15 @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b, #define parts_mul(A, B, S) \ PARTS_GENERIC_64_128(mul, A)(A, B, S) -static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b, - FloatParts64 *c, int flags, - float_status *s); -static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b, - FloatParts128 *c, int flags, - float_status *s); +static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b, + FloatParts64 *c, int scale, + int flags, float_status *s); +static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b, + FloatParts128 *c, int scale, + int flags, float_status *s); -#define parts_muladd(A, B, C, Z, S) \ - PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S) +#define parts_muladd_scalbn(A, B, C, Z, Y, S) \ + PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S) static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b, float_status *s); @@ -2212,43 +2212,50 @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status) * Fused multiply-add */ -float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c, - int flags, float_status *status) +float16 QEMU_FLATTEN +float16_muladd_scalbn(float16 a, float16 b, float16 c, + int scale, int flags, float_status *status) { FloatParts64 pa, pb, pc, *pr; float16_unpack_canonical(&pa, a, status); float16_unpack_canonical(&pb, b, status); float16_unpack_canonical(&pc, c, status); - pr = parts_muladd(&pa, &pb, &pc, flags, status); + pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status); return float16_round_pack_canonical(pr, status); } -static float32 QEMU_SOFTFLOAT_ATTR -soft_f32_muladd(float32 a, float32 b, float32 c, int flags, - float_status *status) +float16 float16_muladd(float16 a, float16 b, float16 c, + int flags, float_status *status) +{ + return float16_muladd_scalbn(a, b, c, 0, flags, status); +} + +float32 QEMU_SOFTFLOAT_ATTR +float32_muladd_scalbn(float32 a, float32 b, float32 c, + int scale, int flags, float_status *status) { FloatParts64 pa, pb, pc, *pr; float32_unpack_canonical(&pa, a, status); float32_unpack_canonical(&pb, b, status); 
float32_unpack_canonical(&pc, c, status); - pr = parts_muladd(&pa, &pb, &pc, flags, status); + pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status); return float32_round_pack_canonical(pr, status); } -static float64 QEMU_SOFTFLOAT_ATTR -soft_f64_muladd(float64 a, float64 b, float64 c, int flags, - float_status *status) +float64 QEMU_SOFTFLOAT_ATTR +float64_muladd_scalbn(float64 a, float64 b, float64 c, + int scale, int flags, float_status *status) { FloatParts64 pa, pb, pc, *pr; float64_unpack_canonical(&pa, a, status); float64_unpack_canonical(&pb, b, status); float64_unpack_canonical(&pc, c, status); - pr = parts_muladd(&pa, &pb, &pc, flags, status); + pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status); return float64_round_pack_canonical(pr, status); } @@ -2323,7 +2330,7 @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s) return ur.s; soft: - return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s); + return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s); } float64 QEMU_FLATTEN @@ -2394,7 +2401,7 @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s) return ur.s; soft: - return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s); + return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s); } float64 float64r32_muladd(float64 a, float64 b, float64 c, @@ -2405,7 +2412,7 @@ float64 float64r32_muladd(float64 a, float64 b, float64 c, float64_unpack_canonical(&pa, a, status); float64_unpack_canonical(&pb, b, status); float64_unpack_canonical(&pc, c, status); - pr = parts_muladd(&pa, &pb, &pc, flags, status); + pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status); return float64r32_round_pack_canonical(pr, status); } @@ -2418,7 +2425,7 @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c, bfloat16_unpack_canonical(&pa, a, status); bfloat16_unpack_canonical(&pb, b, status); bfloat16_unpack_canonical(&pc, c, status); - pr = parts_muladd(&pa, &pb, &pc, flags, status); + pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status); return bfloat16_round_pack_canonical(pr, status); } @@ -2431,7 +2438,7 @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c, float128_unpack_canonical(&pa, a, status); float128_unpack_canonical(&pb, b, status); float128_unpack_canonical(&pc, c, status); - pr = parts_muladd(&pa, &pb, &pc, flags, status); + pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status); return float128_round_pack_canonical(pr, status); } @@ -5249,8 +5256,9 @@ float32 float32_exp2(float32 a, float_status *status) float64_unpack_canonical(&rp, float64_one, status); for (i = 0 ; i < 15 ; i++) { + float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status); - rp = *parts_muladd(&tp, &xnp, &rp, 0, status); + rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status); xnp = *parts_mul(&xnp, &xp, status); } diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h index eb64075b9c..c34ce0477d 100644 --- a/include/fpu/softfloat.h +++ b/include/fpu/softfloat.h @@ -238,6 +238,8 @@ float16 float16_add(float16, float16, float_status *status); float16 float16_sub(float16, float16, float_status *status); float16 float16_mul(float16, float16, float_status *status); float16 float16_muladd(float16, float16, float16, int, float_status *status); +float16 float16_muladd_scalbn(float16, float16, float16, + int, int, float_status *status); float16 float16_div(float16, float16, float_status *status); float16 float16_scalbn(float16, int, float_status *status); float16 float16_min(float16, 
float16, float_status *status); @@ -597,6 +599,8 @@ float32 float32_mul(float32, float32, float_status *status); float32 float32_div(float32, float32, float_status *status); float32 float32_rem(float32, float32, float_status *status); float32 float32_muladd(float32, float32, float32, int, float_status *status); +float32 float32_muladd_scalbn(float32, float32, float32, + int, int, float_status *status); float32 float32_sqrt(float32, float_status *status); float32 float32_exp2(float32, float_status *status); float32 float32_log2(float32, float_status *status); @@ -792,6 +796,8 @@ float64 float64_mul(float64, float64, float_status *status); float64 float64_div(float64, float64, float_status *status); float64 float64_rem(float64, float64, float_status *status); float64 float64_muladd(float64, float64, float64, int, float_status *status); +float64 float64_muladd_scalbn(float64, float64, float64, + int, int, float_status *status); float64 float64_sqrt(float64, float_status *status); float64 float64_log2(float64, float_status *status); FloatRelation float64_compare(float64, float64, float_status *status); From 912400a362e6d25a137e8314fa78ca0429d908aa Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 7 Dec 2024 17:43:10 -0600 Subject: [PATCH 56/72] target/arm: Use float*_muladd_scalbn MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the scalbn interface instead of float_muladd_halve_result. Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- target/arm/tcg/helper-a64.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c index 0e130501fd..3b226daee7 100644 --- a/target/arm/tcg/helper-a64.c +++ b/target/arm/tcg/helper-a64.c @@ -262,7 +262,7 @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst) (float16_is_infinity(b) && float16_is_zero(a))) { return float16_one_point_five; } - return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst); + return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst); } float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst) @@ -275,7 +275,7 @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst) (float32_is_infinity(b) && float32_is_zero(a))) { return float32_one_point_five; } - return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst); + return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst); } float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst) @@ -288,7 +288,7 @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst) (float64_is_infinity(b) && float64_is_zero(a))) { return float64_one_point_five; } - return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst); + return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst); } /* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */ From 88d5f550bd570cd2837c0316bea6bae8cb4b745a Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 7 Dec 2024 18:00:59 -0600 Subject: [PATCH 57/72] target/sparc: Use float*_muladd_scalbn MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the scalbn interface instead of float_muladd_halve_result. 
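As a sketch of the resulting call shape (illustrative; "r", "s1", "s2" and "env" are placeholders), FHADDs now passes the halving as a scale operand instead of a flag:

    /* (1.0 * s1 + s2) * 2**-1 with one rounding: sc = -1, op = 0. */
    r = helper_fmadds(env, float32_one, s1, s2, -1, 0);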
Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- target/sparc/fop_helper.c | 8 ++-- target/sparc/helper.h | 4 +- target/sparc/translate.c | 80 +++++++++++++++++++++++---------------- 3 files changed, 54 insertions(+), 38 deletions(-) diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c index 236d27b19c..c25097d07f 100644 --- a/target/sparc/fop_helper.c +++ b/target/sparc/fop_helper.c @@ -344,17 +344,17 @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src) } float32 helper_fmadds(CPUSPARCState *env, float32 s1, - float32 s2, float32 s3, uint32_t op) + float32 s2, float32 s3, int32_t sc, uint32_t op) { - float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status); + float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status); check_ieee_exceptions(env, GETPC()); return ret; } float64 helper_fmaddd(CPUSPARCState *env, float64 s1, - float64 s2, float64 s3, uint32_t op) + float64 s2, float64 s3, int32_t sc, uint32_t op) { - float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status); + float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status); check_ieee_exceptions(env, GETPC()); return ret; } diff --git a/target/sparc/helper.h b/target/sparc/helper.h index 1ae3f0c467..3a7f7dc129 100644 --- a/target/sparc/helper.h +++ b/target/sparc/helper.h @@ -59,7 +59,7 @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64) DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64) DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64) DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64) -DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32) +DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32) DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64) DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64) @@ -72,7 +72,7 @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32) -DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32) +DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32) DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32) diff --git a/target/sparc/translate.c b/target/sparc/translate.c index 9be26c804e..465e20f4f3 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -1359,93 +1359,109 @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src) static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3) { - gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0)); + TCGv_i32 z = tcg_constant_i32(0); + gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z); } static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3) { - gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0)); + TCGv_i32 z = tcg_constant_i32(0); + gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z); } static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3) { - int op = float_muladd_negate_c; - gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op)); + TCGv_i32 z = tcg_constant_i32(0); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c); + gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op); } static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3) { - int op = 
float_muladd_negate_c; - gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op)); + TCGv_i32 z = tcg_constant_i32(0); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c); + gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op); } static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3) { - int op = float_muladd_negate_c | float_muladd_negate_result; - gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op)); + TCGv_i32 z = tcg_constant_i32(0); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c | + float_muladd_negate_result); + gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op); } static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3) { - int op = float_muladd_negate_c | float_muladd_negate_result; - gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op)); + TCGv_i32 z = tcg_constant_i32(0); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c | + float_muladd_negate_result); + gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op); } static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3) { - int op = float_muladd_negate_result; - gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op)); + TCGv_i32 z = tcg_constant_i32(0); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result); + gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op); } static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3) { - int op = float_muladd_negate_result; - gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op)); + TCGv_i32 z = tcg_constant_i32(0); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result); + gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op); } /* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */ static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2) { - TCGv_i32 one = tcg_constant_i32(float32_one); - int op = float_muladd_halve_result; - gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op)); + TCGv_i32 fone = tcg_constant_i32(float32_one); + TCGv_i32 mone = tcg_constant_i32(-1); + TCGv_i32 op = tcg_constant_i32(0); + gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op); } static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2) { - TCGv_i64 one = tcg_constant_i64(float64_one); - int op = float_muladd_halve_result; - gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op)); + TCGv_i64 fone = tcg_constant_i64(float64_one); + TCGv_i32 mone = tcg_constant_i32(-1); + TCGv_i32 op = tcg_constant_i32(0); + gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op); } /* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. 
*/ static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2) { - TCGv_i32 one = tcg_constant_i32(float32_one); - int op = float_muladd_negate_c | float_muladd_halve_result; - gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op)); + TCGv_i32 fone = tcg_constant_i32(float32_one); + TCGv_i32 mone = tcg_constant_i32(-1); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c); + gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op); } static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2) { - TCGv_i64 one = tcg_constant_i64(float64_one); - int op = float_muladd_negate_c | float_muladd_halve_result; - gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op)); + TCGv_i64 fone = tcg_constant_i64(float64_one); + TCGv_i32 mone = tcg_constant_i32(-1); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c); + gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op); } /* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */ static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2) { - TCGv_i32 one = tcg_constant_i32(float32_one); - int op = float_muladd_negate_result | float_muladd_halve_result; - gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op)); + TCGv_i32 fone = tcg_constant_i32(float32_one); + TCGv_i32 mone = tcg_constant_i32(-1); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result); + gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op); } static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2) { - TCGv_i64 one = tcg_constant_i64(float64_one); - int op = float_muladd_negate_result | float_muladd_halve_result; - gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op)); + TCGv_i64 fone = tcg_constant_i64(float64_one); + TCGv_i32 mone = tcg_constant_i32(-1); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result); + gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op); } static void gen_op_fpexception_im(DisasContext *dc, int ftt) From 6a243913aa46f3d60ce36c7a826562c6e40b64d7 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 7 Dec 2024 18:06:30 -0600 Subject: [PATCH 58/72] softfloat: Remove float_muladd_halve_result MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All uses have been converted to float*_muladd_scalbn. Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- fpu/softfloat-parts.c.inc | 4 ---- fpu/softfloat.c | 6 ------ include/fpu/softfloat.h | 3 --- 3 files changed, 13 deletions(-) diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc index 4a62d6ca24..a724f317c5 100644 --- a/fpu/softfloat-parts.c.inc +++ b/fpu/softfloat-parts.c.inc @@ -653,10 +653,6 @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b, a->exp = p_widen.exp; return_normal: - /* TODO: Replace all use of float_muladd_halve_result with scale.
*/ - if (flags & float_muladd_halve_result) { - a->exp -= 1; - } a->exp += scale; finish_sign: if (flags & float_muladd_negate_result) { diff --git a/fpu/softfloat.c b/fpu/softfloat.c index b5936cc0f8..6967fb5c9f 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -2274,9 +2274,6 @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s) if (unlikely(!can_use_fpu(s))) { goto soft; } - if (unlikely(flags & float_muladd_halve_result)) { - goto soft; - } float32_input_flush3(&ua.s, &ub.s, &uc.s, s); if (unlikely(!f32_is_zon3(ua, ub, uc))) { @@ -2345,9 +2342,6 @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s) if (unlikely(!can_use_fpu(s))) { goto soft; } - if (unlikely(flags & float_muladd_halve_result)) { - goto soft; - } float64_input_flush3(&ua.s, &ub.s, &uc.s, s); if (unlikely(!f64_is_zon3(ua, ub, uc))) { diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h index c34ce0477d..aa69aecfb0 100644 --- a/include/fpu/softfloat.h +++ b/include/fpu/softfloat.h @@ -120,14 +120,11 @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status); | Using these differs from negating an input or output before calling | the muladd function in that this means that a NaN doesn't have its | sign bit inverted before it is propagated. -| We also support halving the result before rounding, as a special -| case to support the ARM fused-sqrt-step instruction FRSQRTS. *----------------------------------------------------------------------------*/ enum { float_muladd_negate_c = 1, float_muladd_negate_product = 2, float_muladd_negate_result = 4, - float_muladd_halve_result = 8, }; /*---------------------------------------------------------------------------- From 72330260cdb42015ae72096bae37e6fdaf361737 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 08:54:41 -0600 Subject: [PATCH 59/72] softfloat: Add float_round_nearest_even_max This rounding mode is used by Hexagon. Signed-off-by: Richard Henderson --- fpu/softfloat-parts.c.inc | 3 +++ include/fpu/softfloat-types.h | 2 ++ 2 files changed, 5 insertions(+) diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc index a724f317c5..37d046cfe9 100644 --- a/fpu/softfloat-parts.c.inc +++ b/fpu/softfloat-parts.c.inc @@ -241,6 +241,9 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s, int exp, flags = 0; switch (s->float_rounding_mode) { + case float_round_nearest_even_max: + overflow_norm = true; + /* fall through */ case float_round_nearest_even: if (N > 64 && frac_lsb == 0) { inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1 diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h index 79ca44dcc3..9d37cdfaa8 100644 --- a/include/fpu/softfloat-types.h +++ b/include/fpu/softfloat-types.h @@ -140,6 +140,8 @@ typedef enum __attribute__((__packed__)) { float_round_to_odd = 5, /* Not an IEEE rounding mode: round to closest odd, overflow to inf */ float_round_to_odd_inf = 6, + /* Not an IEEE rounding mode: round to nearest even, overflow to max */ + float_round_nearest_even_max = 7, } FloatRoundMode; /* From 82f898f3b660fb11e601ee5ea1cab4b2fdafacc8 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 09:13:45 -0600 Subject: [PATCH 60/72] softfloat: Add float_muladd_suppress_add_product_zero Certain Hexagon instructions suppress changes to the result when the product of fma() is a true zero. 
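A minimal sketch of the semantics, assuming x is a finite, positive float32 and st is a default-initialized float_status:

    /* Without the flag, (+0) + (-0) folds to +0 in round-to-nearest. */
    float32 r1 = float32_muladd(float32_zero, x, float32_chs(float32_zero),
                                0, &st);                       /* +0.0 */
    /* With the flag, C is returned without the addition, keeping -0. */
    float32 r2 = float32_muladd(float32_zero, x, float32_chs(float32_zero),
                                float_muladd_suppress_add_product_zero,
                                &st);                          /* -0.0 */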
Signed-off-by: Richard Henderson --- fpu/softfloat-parts.c.inc | 4 +++- fpu/softfloat.c | 3 +++ include/fpu/softfloat.h | 5 +++++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc index 37d046cfe9..ebde42992f 100644 --- a/fpu/softfloat-parts.c.inc +++ b/fpu/softfloat-parts.c.inc @@ -615,7 +615,9 @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b, goto return_normal; } if (c->cls == float_class_zero) { - if (a->sign != c->sign) { + if (flags & float_muladd_suppress_add_product_zero) { + a->sign = c->sign; + } else if (a->sign != c->sign) { goto return_sub_zero; } goto return_zero; diff --git a/fpu/softfloat.c b/fpu/softfloat.c index 6967fb5c9f..8d75d66817 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -2274,6 +2274,9 @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s) if (unlikely(!can_use_fpu(s))) { goto soft; } + if (unlikely(flags & float_muladd_suppress_add_product_zero)) { + goto soft; + } float32_input_flush3(&ua.s, &ub.s, &uc.s, s); if (unlikely(!f32_is_zon3(ua, ub, uc))) { diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h index aa69aecfb0..09a40b4310 100644 --- a/include/fpu/softfloat.h +++ b/include/fpu/softfloat.h @@ -120,11 +120,16 @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status); | Using these differs from negating an input or output before calling | the muladd function in that this means that a NaN doesn't have its | sign bit inverted before it is propagated. +| +| With float_muladd_suppress_add_product_zero, if A or B is zero +| such that the product is a true zero, then return C without addition. +| This preserves the sign of C when C is +/- 0. Used for Hexagon. *----------------------------------------------------------------------------*/ enum { float_muladd_negate_c = 1, float_muladd_negate_product = 2, float_muladd_negate_result = 4, + float_muladd_suppress_add_product_zero = 8, }; /*---------------------------------------------------------------------------- From 6e7422dc22fd2a3bd581e6a496470f6edecc6357 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 09:27:44 -0600 Subject: [PATCH 61/72] target/hexagon: Use float32_mul in helper_sfmpy There are no special cases for this instruction. Remove internal_mpyf as unused. 
Reviewed-by: Brian Cain Signed-off-by: Richard Henderson --- target/hexagon/fma_emu.c | 8 -------- target/hexagon/fma_emu.h | 1 - target/hexagon/op_helper.c | 2 +- 3 files changed, 1 insertion(+), 10 deletions(-) diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c index 05a56d8c10..35971b8b99 100644 --- a/target/hexagon/fma_emu.c +++ b/target/hexagon/fma_emu.c @@ -655,14 +655,6 @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale, return accum_round_float32(result, fp_status); } -float32 internal_mpyf(float32 a, float32 b, float_status *fp_status) -{ - if (float32_is_zero(a) || float32_is_zero(b)) { - return float32_mul(a, b, fp_status); - } - return internal_fmafx(a, b, float32_zero, 0, fp_status); -} - float64 internal_mpyhh(float64 a, float64 b, unsigned long long int accumulated, float_status *fp_status) diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h index 91591d6050..ad5df5d038 100644 --- a/target/hexagon/fma_emu.h +++ b/target/hexagon/fma_emu.h @@ -32,7 +32,6 @@ int32_t float32_getexp(float32 f32); float32 infinite_float32(uint8_t sign); float32 internal_fmafx(float32 a, float32 b, float32 c, int scale, float_status *fp_status); -float32 internal_mpyf(float32 a, float32 b, float_status *fp_status); float64 internal_mpyhh(float64 a, float64 b, unsigned long long int accumulated, float_status *fp_status); diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c index 01d1a1b1a7..d257097091 100644 --- a/target/hexagon/op_helper.c +++ b/target/hexagon/op_helper.c @@ -1045,7 +1045,7 @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV) { float32 RdV; arch_fpop_start(env); - RdV = internal_mpyf(RsV, RtV, &env->fp_status); + RdV = float32_mul(RsV, RtV, &env->fp_status); arch_fpop_end(env); return RdV; } From 655a83cac128574c7ea386042f8eefa5be5708e5 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 09:32:05 -0600 Subject: [PATCH 62/72] target/hexagon: Use float32_muladd for helper_sffma There are no special cases for this instruction. Reviewed-by: Brian Cain Signed-off-by: Richard Henderson --- target/hexagon/op_helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c index d257097091..15b143a568 100644 --- a/target/hexagon/op_helper.c +++ b/target/hexagon/op_helper.c @@ -1054,7 +1054,7 @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV, float32 RsV, float32 RtV) { arch_fpop_start(env); - RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status); + RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status); arch_fpop_end(env); return RxV; } From 2eca1928f9afb7cbc8e72a59dffb964c8319469a Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 13:51:04 -0600 Subject: [PATCH 63/72] target/hexagon: Use float32_muladd for helper_sffms There are no special cases for this instruction. Since hexagon always uses default-nan mode, explicitly negating the first input is unnecessary. Use float_muladd_negate_product instead. 
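Sketch of the equivalence being relied on (illustrative names; st has default-nan mode enabled, as Hexagon always does):

    /* Negating the input by hand:       (-s) * t + x */
    float32 r1 = float32_muladd(float32_chs(s), t, x, 0, &st);
    /* Negating the product in the core: -(s * t) + x */
    float32 r2 = float32_muladd(s, t, x, float_muladd_negate_product, &st);
    /* The two differ only in the sign bit of a propagated NaN, which
     * default-nan mode discards, so r1 == r2 for all inputs here. */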
Reviewed-by: Brian Cain Signed-off-by: Richard Henderson --- target/hexagon/op_helper.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c index 15b143a568..95bfa5d029 100644 --- a/target/hexagon/op_helper.c +++ b/target/hexagon/op_helper.c @@ -1096,10 +1096,9 @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV, float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV, float32 RsV, float32 RtV) { - float32 neg_RsV; arch_fpop_start(env); - neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1); - RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status); + RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product, + &env->fp_status); arch_fpop_end(env); return RxV; } From 904624ab8e1f41dd97bb6e2e524605f4c61bc795 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 14:14:36 -0600 Subject: [PATCH 64/72] target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc This instruction has a special case that 0 * x + c returns c without the normal sign folding that comes with 0 + -0. Use the new float_muladd_suppress_add_product_zero to describe this. Reviewed-by: Brian Cain Signed-off-by: Richard Henderson --- target/hexagon/op_helper.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c index 95bfa5d029..53c65e852e 100644 --- a/target/hexagon/op_helper.c +++ b/target/hexagon/op_helper.c @@ -1080,15 +1080,10 @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status) float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV, float32 RsV, float32 RtV, float32 PuV) { - size4s_t tmp; arch_fpop_start(env); - RxV = check_nan(RxV, RxV, &env->fp_status); - RxV = check_nan(RxV, RsV, &env->fp_status); - RxV = check_nan(RxV, RtV, &env->fp_status); - tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status); - if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) { - RxV = tmp; - } + RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV), + float_muladd_suppress_add_product_zero, + &env->fp_status); arch_fpop_end(env); return RxV; } From 316dca398579f2de2f433db02685e6799159c498 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 14:33:22 -0600 Subject: [PATCH 65/72] target/hexagon: Use float32_muladd for helper_sffm[as]_lib There are multiple special cases for this instruction. (1) The saturate to normal maximum instead of overflow to infinity is handled by the new float_round_nearest_even_max rounding mode. (2) The 0 * n + c special case is handled by the new float_muladd_suppress_add_product_zero flag. (3) The Inf - Inf -> 0 special case can be detected after the fact by examining float_flag_invalid_isi. 
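The detect-after-the-fact pattern in (3) has a rough analogue on the host FPU. The following standalone C99 sketch is an analogy only: softfloat accumulates its flags in float_status rather than the hardware environment, and fenv has no equivalent of the fine-grained float_flag_invalid_isi, so FE_INVALID stands in for it here:

    #include <fenv.h>   /* may require #pragma STDC FENV_ACCESS ON */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        volatile float inf = INFINITY;  /* volatile defeats constant folding */
        float r;

        feclearexcept(FE_ALL_EXCEPT);
        r = fmaf(inf, 1.0f, -inf);      /* Inf - Inf raises invalid */
        if (fetestexcept(FE_INVALID)) {
            r = 0.0f;                   /* mimic the Inf - Inf -> 0 case */
            feclearexcept(FE_ALL_EXCEPT);  /* flags are suppressed */
        }
        printf("%g\n", r);              /* prints 0 */
        return 0;
    }
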
Reviewed-by: Brian Cain Signed-off-by: Richard Henderson --- target/hexagon/op_helper.c | 105 +++++++++---------------------------- 1 file changed, 26 insertions(+), 79 deletions(-) diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c index 53c65e852e..6da8db8ea5 100644 --- a/target/hexagon/op_helper.c +++ b/target/hexagon/op_helper.c @@ -1059,24 +1059,6 @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV, return RxV; } -static bool is_zero_prod(float32 a, float32 b) -{ - return ((float32_is_zero(a) && is_finite(b)) || - (float32_is_zero(b) && is_finite(a))); -} - -static float32 check_nan(float32 dst, float32 x, float_status *fp_status) -{ - float32 ret = dst; - if (float32_is_any_nan(x)) { - if (extract32(x, 22, 1) == 0) { - float_raise(float_flag_invalid, fp_status); - } - ret = make_float32(0xffffffff); /* nan */ - } - return ret; -} - float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV, float32 RsV, float32 RtV, float32 PuV) { @@ -1098,78 +1080,43 @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV, return RxV; } -static bool is_inf_prod(int32_t a, int32_t b) +static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV, + float32 RsV, float32 RtV, int negate) { - return (float32_is_infinity(a) && float32_is_infinity(b)) || - (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) || - (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a)); + int flags; + + arch_fpop_start(env); + + set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status); + RxV = float32_muladd(RsV, RtV, RxV, + negate | float_muladd_suppress_add_product_zero, + &env->fp_status); + + flags = get_float_exception_flags(&env->fp_status); + if (flags) { + /* Flags are suppressed by this instruction. */ + set_float_exception_flags(0, &env->fp_status); + + /* Return 0 for Inf - Inf. 
*/ + if (flags & float_flag_invalid_isi) { + RxV = 0; + } + } + + arch_fpop_end(env); + return RxV; } float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV, float32 RsV, float32 RtV) { - bool infinp; - bool infminusinf; - float32 tmp; - - arch_fpop_start(env); - set_float_rounding_mode(float_round_nearest_even, &env->fp_status); - infminusinf = float32_is_infinity(RxV) && - is_inf_prod(RsV, RtV) && - (fGETBIT(31, RsV ^ RxV ^ RtV) != 0); - infinp = float32_is_infinity(RxV) || - float32_is_infinity(RtV) || - float32_is_infinity(RsV); - RxV = check_nan(RxV, RxV, &env->fp_status); - RxV = check_nan(RxV, RsV, &env->fp_status); - RxV = check_nan(RxV, RtV, &env->fp_status); - tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status); - if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) { - RxV = tmp; - } - set_float_exception_flags(0, &env->fp_status); - if (float32_is_infinity(RxV) && !infinp) { - RxV = RxV - 1; - } - if (infminusinf) { - RxV = 0; - } - arch_fpop_end(env); - return RxV; + return do_sffma_lib(env, RxV, RsV, RtV, 0); } float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV, float32 RsV, float32 RtV) { - bool infinp; - bool infminusinf; - float32 tmp; - - arch_fpop_start(env); - set_float_rounding_mode(float_round_nearest_even, &env->fp_status); - infminusinf = float32_is_infinity(RxV) && - is_inf_prod(RsV, RtV) && - (fGETBIT(31, RsV ^ RxV ^ RtV) == 0); - infinp = float32_is_infinity(RxV) || - float32_is_infinity(RtV) || - float32_is_infinity(RsV); - RxV = check_nan(RxV, RxV, &env->fp_status); - RxV = check_nan(RxV, RsV, &env->fp_status); - RxV = check_nan(RxV, RtV, &env->fp_status); - float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status); - tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status); - if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) { - RxV = tmp; - } - set_float_exception_flags(0, &env->fp_status); - if (float32_is_infinity(RxV) && !infinp) { - RxV = RxV - 1; - } - if (infminusinf) { - RxV = 0; - } - arch_fpop_end(env); - return RxV; + return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product); } float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV) From 813437e5002b0726535df4c77527986acac5de3b Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 14:52:52 -0600 Subject: [PATCH 66/72] target/hexagon: Remove internal_fmafx The function is now unused. 
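(A quick way to gain confidence in this kind of removal, sketched below with a hypothetical helper rather than the real one: once the last caller is gone, temporarily marking the function static in a trial build lets -Wunused-function report it, since the compiler cannot otherwise see that no other file uses an extern symbol.)

    /* demo.c -- hypothetical example, not QEMU code */
    static int helper_now_unused(int x) { return x + 1; }

    int main(void) { return 0; }

    /* $ gcc -Wunused-function -c demo.c
     *   warning: 'helper_now_unused' defined but not used */
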
Reviewed-by: Brian Cain Signed-off-by: Richard Henderson --- target/hexagon/fma_emu.c | 171 --------------------------------------- target/hexagon/fma_emu.h | 2 - 2 files changed, 173 deletions(-) diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c index 35971b8b99..0c7c7f636c 100644 --- a/target/hexagon/fma_emu.c +++ b/target/hexagon/fma_emu.c @@ -90,21 +90,6 @@ int32_t float64_getexp(float64 f64) return -1; } -static uint64_t float32_getmant(float32 f32) -{ - Float a = { .i = f32 }; - if (float32_is_normal(f32)) { - return a.mant | 1ULL << 23; - } - if (float32_is_zero(f32)) { - return 0; - } - if (float32_is_denormal(f32)) { - return a.mant; - } - return ~0ULL; -} - int32_t float32_getexp(float32 f32) { Float a = { .i = f32 }; @@ -369,25 +354,6 @@ float32 infinite_float32(uint8_t sign) } /* Return a maximum finite value with the requested sign */ -static float32 maxfinite_float32(uint8_t sign) -{ - if (sign) { - return make_float32(SF_MINUS_MAXF); - } else { - return make_float32(SF_MAXF); - } -} - -/* Return a zero value with requested sign */ -static float32 zero_float32(uint8_t sign) -{ - if (sign) { - return make_float32(0x80000000); - } else { - return float32_zero; - } -} - #define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \ { \ @@ -517,143 +483,6 @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \ } GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double) -GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float) - -static bool is_inf_prod(float64 a, float64 b) -{ - return ((float64_is_infinity(a) && float64_is_infinity(b)) || - (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) || - (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a)))); -} - -static float64 special_fma(float64 a, float64 b, float64 c, - float_status *fp_status) -{ - float64 ret = make_float64(0); - - /* - * If A multiplied by B is an exact infinity and C is also an infinity - * but with the opposite sign, FMA returns NaN and raises invalid. - */ - uint8_t a_sign = float64_is_neg(a); - uint8_t b_sign = float64_is_neg(b); - uint8_t c_sign = float64_is_neg(c); - if (is_inf_prod(a, b) && float64_is_infinity(c)) { - if ((a_sign ^ b_sign) != c_sign) { - ret = make_float64(DF_NAN); - float_raise(float_flag_invalid, fp_status); - return ret; - } - } - if ((float64_is_infinity(a) && float64_is_zero(b)) || - (float64_is_zero(a) && float64_is_infinity(b))) { - ret = make_float64(DF_NAN); - float_raise(float_flag_invalid, fp_status); - return ret; - } - /* - * If none of the above checks are true and C is a NaN, - * a NaN shall be returned - * If A or B are NaN, a NAN shall be returned. - */ - if (float64_is_any_nan(a) || - float64_is_any_nan(b) || - float64_is_any_nan(c)) { - if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) { - float_raise(float_flag_invalid, fp_status); - } - if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) { - float_raise(float_flag_invalid, fp_status); - } - if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) { - float_raise(float_flag_invalid, fp_status); - } - ret = make_float64(DF_NAN); - return ret; - } - /* - * We have checked for adding opposite-signed infinities. 
- * Other infinities return infinity with the correct sign - */ - if (float64_is_infinity(c)) { - ret = infinite_float64(c_sign); - return ret; - } - if (float64_is_infinity(a) || float64_is_infinity(b)) { - ret = infinite_float64(a_sign ^ b_sign); - return ret; - } - g_assert_not_reached(); -} - -static float32 special_fmaf(float32 a, float32 b, float32 c, - float_status *fp_status) -{ - float64 aa, bb, cc; - aa = float32_to_float64(a, fp_status); - bb = float32_to_float64(b, fp_status); - cc = float32_to_float64(c, fp_status); - return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status); -} - -float32 internal_fmafx(float32 a, float32 b, float32 c, int scale, - float_status *fp_status) -{ - Accum prod; - Accum acc; - Accum result; - accum_init(&prod); - accum_init(&acc); - accum_init(&result); - - uint8_t a_sign = float32_is_neg(a); - uint8_t b_sign = float32_is_neg(b); - uint8_t c_sign = float32_is_neg(c); - if (float32_is_infinity(a) || - float32_is_infinity(b) || - float32_is_infinity(c)) { - return special_fmaf(a, b, c, fp_status); - } - if (float32_is_any_nan(a) || - float32_is_any_nan(b) || - float32_is_any_nan(c)) { - return special_fmaf(a, b, c, fp_status); - } - if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) { - float32 tmp = float32_mul(a, b, fp_status); - tmp = float32_add(tmp, c, fp_status); - return tmp; - } - - /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */ - prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b)); - - /* - * Note: extracting the mantissa into an int is multiplying by - * 2**23, so adjust here - */ - prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23; - prod.sign = a_sign ^ b_sign; - if (float32_is_zero(a) || float32_is_zero(b)) { - prod.exp = -2 * WAY_BIG_EXP; - } - if ((scale > 0) && float32_is_denormal(c)) { - acc.mant = int128_mul_6464(0, 0); - acc.exp = -WAY_BIG_EXP; - acc.sign = c_sign; - acc.sticky = 1; - result = accum_add(prod, acc); - } else if (!float32_is_zero(c)) { - acc.mant = int128_mul_6464(float32_getmant(c), 1); - acc.exp = float32_getexp(c); - acc.sign = c_sign; - result = accum_add(prod, acc); - } else { - result = prod; - } - result.exp += scale; - return accum_round_float32(result, fp_status); -} float64 internal_mpyhh(float64 a, float64 b, unsigned long long int accumulated, diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h index ad5df5d038..fed054b609 100644 --- a/target/hexagon/fma_emu.h +++ b/target/hexagon/fma_emu.h @@ -30,8 +30,6 @@ static inline uint32_t float32_getexp_raw(float32 f32) } int32_t float32_getexp(float32 f32); float32 infinite_float32(uint8_t sign); -float32 internal_fmafx(float32 a, float32 b, float32 c, - int scale, float_status *fp_status); float64 internal_mpyhh(float64 a, float64 b, unsigned long long int accumulated, float_status *fp_status); From 795d6a2c4960325c514323147e13a22d5fe21ddf Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 15:19:19 -0600 Subject: [PATCH 67/72] target/hexagon: Expand GEN_XF_ROUND This massive macro is now only used once. Expand it for use only by float64. 
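The shape of the transformation, shown on a toy generator macro (hypothetical names, for illustration only):

    /* Before: a generator macro with a single remaining instantiation:
     *
     *   #define GEN_SQUARE(SUFFIX, TYPE) \
     *       static TYPE square_##SUFFIX(TYPE x) { return x * x; }
     *   GEN_SQUARE(double, double)
     *
     * After: the expansion written out directly, so diagnostics point
     * at real lines and the backslash continuations disappear. */
    #include <stdio.h>

    static double square_double(double x) { return x * x; }

    int main(void)
    {
        printf("%g\n", square_double(1.5));   /* prints 2.25 */
        return 0;
    }
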
Reviewed-by: Brian Cain Signed-off-by: Richard Henderson --- target/hexagon/fma_emu.c | 255 +++++++++++++++++++-------------------- 1 file changed, 127 insertions(+), 128 deletions(-) diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c index 0c7c7f636c..0769de43de 100644 --- a/target/hexagon/fma_emu.c +++ b/target/hexagon/fma_emu.c @@ -354,136 +354,135 @@ float32 infinite_float32(uint8_t sign) } /* Return a maximum finite value with the requested sign */ -#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \ -static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \ -{ \ - if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \ - && ((a.guard | a.round | a.sticky) == 0)) { \ - /* result zero */ \ - switch (fp_status->float_rounding_mode) { \ - case float_round_down: \ - return zero_##SUFFIX(1); \ - default: \ - return zero_##SUFFIX(0); \ - } \ - } \ - /* Normalize right */ \ - /* We want MANTBITS bits of mantissa plus the leading one. */ \ - /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \ - /* So we need to normalize right while the high word is non-zero and \ - * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \ - while ((int128_gethi(a.mant) != 0) || \ - ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \ - a = accum_norm_right(a, 1); \ - } \ - /* \ - * OK, now normalize left \ - * We want to normalize left until we have a leading one in bit 24 \ - * Theoretically, we only need to shift a maximum of one to the left if we \ - * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \ - * should be 0 \ - */ \ - while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \ - a = accum_norm_left(a); \ - } \ - /* \ - * OK, now we might need to denormalize because of potential underflow. \ - * We need to do this before rounding, and rounding might make us normal \ - * again \ - */ \ - while (a.exp <= 0) { \ - a = accum_norm_right(a, 1 - a.exp); \ - /* \ - * Do we have underflow? \ - * That's when we get an inexact answer because we ran out of bits \ - * in a denormal. \ - */ \ - if (a.guard || a.round || a.sticky) { \ - float_raise(float_flag_underflow, fp_status); \ - } \ - } \ - /* OK, we're relatively canonical... now we need to round */ \ - if (a.guard || a.round || a.sticky) { \ - float_raise(float_flag_inexact, fp_status); \ - switch (fp_status->float_rounding_mode) { \ - case float_round_to_zero: \ - /* Chop and we're done */ \ - break; \ - case float_round_up: \ - if (a.sign == 0) { \ - a.mant = int128_add(a.mant, int128_one()); \ - } \ - break; \ - case float_round_down: \ - if (a.sign != 0) { \ - a.mant = int128_add(a.mant, int128_one()); \ - } \ - break; \ - default: \ - if (a.round || a.sticky) { \ - /* round up if guard is 1, down if guard is zero */ \ - a.mant = int128_add(a.mant, int128_make64(a.guard)); \ - } else if (a.guard) { \ - /* exactly .5, round up if odd */ \ - a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \ - } \ - break; \ - } \ - } \ - /* \ - * OK, now we might have carried all the way up. \ - * So we might need to shr once \ - * at least we know that the lsb should be zero if we rounded and \ - * got a carry out... \ - */ \ - if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \ - a = accum_norm_right(a, 1); \ - } \ - /* Overflow? 
*/ \ - if (a.exp >= INF_EXP) { \ - /* Yep, inf result */ \ - float_raise(float_flag_overflow, fp_status); \ - float_raise(float_flag_inexact, fp_status); \ - switch (fp_status->float_rounding_mode) { \ - case float_round_to_zero: \ - return maxfinite_##SUFFIX(a.sign); \ - case float_round_up: \ - if (a.sign == 0) { \ - return infinite_##SUFFIX(a.sign); \ - } else { \ - return maxfinite_##SUFFIX(a.sign); \ - } \ - case float_round_down: \ - if (a.sign != 0) { \ - return infinite_##SUFFIX(a.sign); \ - } else { \ - return maxfinite_##SUFFIX(a.sign); \ - } \ - default: \ - return infinite_##SUFFIX(a.sign); \ - } \ - } \ - /* Underflow? */ \ - if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \ - /* Leading one means: No, we're normal. So, we should be done... */ \ - INTERNAL_TYPE ret; \ - ret.i = 0; \ - ret.sign = a.sign; \ - ret.exp = a.exp; \ - ret.mant = int128_getlo(a.mant); \ - return ret.i; \ - } \ - assert(a.exp == 1); \ - INTERNAL_TYPE ret; \ - ret.i = 0; \ - ret.sign = a.sign; \ - ret.exp = 0; \ - ret.mant = int128_getlo(a.mant); \ - return ret.i; \ +static float64 accum_round_float64(Accum a, float_status *fp_status) +{ + if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) + && ((a.guard | a.round | a.sticky) == 0)) { + /* result zero */ + switch (fp_status->float_rounding_mode) { + case float_round_down: + return zero_float64(1); + default: + return zero_float64(0); + } + } + /* + * Normalize right + * We want DF_MANTBITS bits of mantissa plus the leading one. + * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF + * So we need to normalize right while the high word is non-zero and + * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 + */ + while ((int128_gethi(a.mant) != 0) || + ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) { + a = accum_norm_right(a, 1); + } + /* + * OK, now normalize left + * We want to normalize left until we have a leading one in bit 24 + * Theoretically, we only need to shift a maximum of one to the left if we + * shifted out lots of bits from B, or if we had no shift / 1 shift sticky + * should be 0 + */ + while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) { + a = accum_norm_left(a); + } + /* + * OK, now we might need to denormalize because of potential underflow. + * We need to do this before rounding, and rounding might make us normal + * again + */ + while (a.exp <= 0) { + a = accum_norm_right(a, 1 - a.exp); + /* + * Do we have underflow? + * That's when we get an inexact answer because we ran out of bits + * in a denormal. + */ + if (a.guard || a.round || a.sticky) { + float_raise(float_flag_underflow, fp_status); + } + } + /* OK, we're relatively canonical... now we need to round */ + if (a.guard || a.round || a.sticky) { + float_raise(float_flag_inexact, fp_status); + switch (fp_status->float_rounding_mode) { + case float_round_to_zero: + /* Chop and we're done */ + break; + case float_round_up: + if (a.sign == 0) { + a.mant = int128_add(a.mant, int128_one()); + } + break; + case float_round_down: + if (a.sign != 0) { + a.mant = int128_add(a.mant, int128_one()); + } + break; + default: + if (a.round || a.sticky) { + /* round up if guard is 1, down if guard is zero */ + a.mant = int128_add(a.mant, int128_make64(a.guard)); + } else if (a.guard) { + /* exactly .5, round up if odd */ + a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); + } + break; + } + } + /* + * OK, now we might have carried all the way up. 
+ * So we might need to shr once + * at least we know that the lsb should be zero if we rounded and + * got a carry out... + */ + if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) { + a = accum_norm_right(a, 1); + } + /* Overflow? */ + if (a.exp >= DF_INF_EXP) { + /* Yep, inf result */ + float_raise(float_flag_overflow, fp_status); + float_raise(float_flag_inexact, fp_status); + switch (fp_status->float_rounding_mode) { + case float_round_to_zero: + return maxfinite_float64(a.sign); + case float_round_up: + if (a.sign == 0) { + return infinite_float64(a.sign); + } else { + return maxfinite_float64(a.sign); + } + case float_round_down: + if (a.sign != 0) { + return infinite_float64(a.sign); + } else { + return maxfinite_float64(a.sign); + } + default: + return infinite_float64(a.sign); + } + } + /* Underflow? */ + if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) { + /* Leading one means: No, we're normal. So, we should be done... */ + Double ret; + ret.i = 0; + ret.sign = a.sign; + ret.exp = a.exp; + ret.mant = int128_getlo(a.mant); + return ret.i; + } + assert(a.exp == 1); + Double ret; + ret.i = 0; + ret.sign = a.sign; + ret.exp = 0; + ret.mant = int128_getlo(a.mant); + return ret.i; } -GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double) - float64 internal_mpyhh(float64 a, float64 b, unsigned long long int accumulated, float_status *fp_status) From fefc9702e618cef00d199e6ddd43f4b2d4c2fad6 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 15:23:36 -0600 Subject: [PATCH 68/72] target/hexagon: Remove Float This structure, with bitfields, is incorrect for big-endian. Use the existing float32_getexp_raw which uses extract32. Reviewed-by: Brian Cain Signed-off-by: Richard Henderson --- target/hexagon/fma_emu.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c index 0769de43de..2a8f72fee3 100644 --- a/target/hexagon/fma_emu.c +++ b/target/hexagon/fma_emu.c @@ -53,16 +53,6 @@ typedef union { }; } Double; -typedef union { - float f; - uint32_t i; - struct { - uint32_t mant:23; - uint32_t exp:8; - uint32_t sign:1; - }; -} Float; - static uint64_t float64_getmant(float64 f64) { Double a = { .i = f64 }; @@ -92,12 +82,12 @@ int32_t float64_getexp(float64 f64) int32_t float32_getexp(float32 f32) { - Float a = { .i = f32 }; + int exp = float32_getexp_raw(f32); if (float32_is_normal(f32)) { - return a.exp; + return exp; } if (float32_is_denormal(f32)) { - return a.exp + 1; + return exp + 1; } return -1; } From 8429306c327e59786d96497bd6c36c42ccf58a06 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 15:34:33 -0600 Subject: [PATCH 69/72] target/hexagon: Remove Double This structure, with bitfields, is incorrect for big-endian. Use extract64 and deposit64 instead. 
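For reference, minimal stand-ins for the two primitives (the real ones live in include/qemu/bitops.h): shifts and masks select the same bits regardless of host endianness, which is exactly what the bitfield layout failed to guarantee:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t extract64_demo(uint64_t v, int start, int len)
    {
        return (v >> start) & (~0ULL >> (64 - len));
    }

    static uint64_t deposit64_demo(uint64_t v, int start, int len, uint64_t f)
    {
        uint64_t mask = (~0ULL >> (64 - len)) << start;
        return (v & ~mask) | ((f << start) & mask);
    }

    int main(void)
    {
        uint64_t one = 0x3ff0000000000000ULL;            /* double 1.0 */
        printf("exp  = %llu\n",                          /* 1023 */
               (unsigned long long)extract64_demo(one, 52, 11));
        printf("-1.0 = %016llx\n",                       /* bff0... */
               (unsigned long long)deposit64_demo(one, 63, 1, 1));
        return 0;
    }
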
Reviewed-by: Brian Cain Signed-off-by: Richard Henderson --- target/hexagon/fma_emu.c | 46 ++++++++++++++-------------------------- 1 file changed, 16 insertions(+), 30 deletions(-) diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c index 2a8f72fee3..ddc793fe14 100644 --- a/target/hexagon/fma_emu.c +++ b/target/hexagon/fma_emu.c @@ -43,39 +43,29 @@ #define WAY_BIG_EXP 4096 -typedef union { - double f; - uint64_t i; - struct { - uint64_t mant:52; - uint64_t exp:11; - uint64_t sign:1; - }; -} Double; - static uint64_t float64_getmant(float64 f64) { - Double a = { .i = f64 }; + uint64_t mant = extract64(f64, 0, 52); if (float64_is_normal(f64)) { - return a.mant | 1ULL << 52; + return mant | 1ULL << 52; } if (float64_is_zero(f64)) { return 0; } if (float64_is_denormal(f64)) { - return a.mant; + return mant; } return ~0ULL; } int32_t float64_getexp(float64 f64) { - Double a = { .i = f64 }; + int exp = extract64(f64, 52, 11); if (float64_is_normal(f64)) { - return a.exp; + return exp; } if (float64_is_denormal(f64)) { - return a.exp + 1; + return exp + 1; } return -1; } @@ -346,6 +336,8 @@ float32 infinite_float32(uint8_t sign) /* Return a maximum finite value with the requested sign */ static float64 accum_round_float64(Accum a, float_status *fp_status) { + uint64_t ret; + if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) && ((a.guard | a.round | a.sticky) == 0)) { /* result zero */ @@ -455,22 +447,16 @@ static float64 accum_round_float64(Accum a, float_status *fp_status) } } /* Underflow? */ - if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) { + ret = int128_getlo(a.mant); + if (ret & (1ULL << DF_MANTBITS)) { /* Leading one means: No, we're normal. So, we should be done... */ - Double ret; - ret.i = 0; - ret.sign = a.sign; - ret.exp = a.exp; - ret.mant = int128_getlo(a.mant); - return ret.i; + ret = deposit64(ret, 52, 11, a.exp); + } else { + assert(a.exp == 1); + ret = deposit64(ret, 52, 11, 0); } - assert(a.exp == 1); - Double ret; - ret.i = 0; - ret.sign = a.sign; - ret.exp = 0; - ret.mant = int128_getlo(a.mant); - return ret.i; + ret = deposit64(ret, 63, 1, a.sign); + return ret; } float64 internal_mpyhh(float64 a, float64 b, From 65b4dce393cddb5fb0295bf6666f7db8512b8cff Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 16:11:42 -0600 Subject: [PATCH 70/72] target/hexagon: Use mulu64 for int128_mul_6464 No need to open-code 64x64->128-bit multiplication. 
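mulu64 (from include/qemu/host-utils.h) yields the full 128-bit product of two 64-bit values. A sketch of what it boils down to on compilers that provide __int128 (illustration only; QEMU also carries a portable fallback for hosts without it):

    #include <stdint.h>
    #include <stdio.h>

    static void mulu64_demo(uint64_t *plow, uint64_t *phigh,
                            uint64_t a, uint64_t b)
    {
        unsigned __int128 r = (unsigned __int128)a * b;
        *plow  = (uint64_t)r;
        *phigh = (uint64_t)(r >> 64);
    }

    int main(void)
    {
        uint64_t lo, hi;

        /* (2^64 - 1)^2 = 0xfffffffffffffffe_0000000000000001 */
        mulu64_demo(&lo, &hi, ~0ULL, ~0ULL);
        printf("%016llx%016llx\n", (unsigned long long)hi,
                                   (unsigned long long)lo);
        return 0;
    }
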
Reviewed-by: Brian Cain Signed-off-by: Richard Henderson --- target/hexagon/fma_emu.c | 32 +++----------------------------- 1 file changed, 3 insertions(+), 29 deletions(-) diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c index ddc793fe14..07d2880776 100644 --- a/target/hexagon/fma_emu.c +++ b/target/hexagon/fma_emu.c @@ -82,38 +82,12 @@ int32_t float32_getexp(float32 f32) return -1; } -static uint32_t int128_getw0(Int128 x) -{ - return int128_getlo(x); -} - -static uint32_t int128_getw1(Int128 x) -{ - return int128_getlo(x) >> 32; -} - static Int128 int128_mul_6464(uint64_t ai, uint64_t bi) { - Int128 a, b; - uint64_t pp0, pp1a, pp1b, pp1s, pp2; + uint64_t l, h; - a = int128_make64(ai); - b = int128_make64(bi); - pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b); - pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b); - pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a); - pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b); - - pp1s = pp1a + pp1b; - if ((pp1s < pp1a) || (pp1s < pp1b)) { - pp2 += (1ULL << 32); - } - uint64_t ret_low = pp0 + (pp1s << 32); - if ((ret_low < pp0) || (ret_low < (pp1s << 32))) { - pp2 += 1; - } - - return int128_make128(ret_low, pp2 + (pp1s >> 32)); + mulu64(&l, &h, ai, bi); + return int128_make128(l, h); } static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow) From 59abfb444e1d9654e15f85c50d09a3366e4c1c1e Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 8 Dec 2024 16:15:30 -0600 Subject: [PATCH 71/72] target/hexagon: Simplify internal_mpyhh setup Initialize x with accumulated via direct assignment, rather than multiplying by 1. Reviewed-by: Brian Cain Signed-off-by: Richard Henderson --- target/hexagon/fma_emu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c index 07d2880776..c557141f11 100644 --- a/target/hexagon/fma_emu.c +++ b/target/hexagon/fma_emu.c @@ -455,7 +455,7 @@ float64 internal_mpyhh(float64 a, float64 b, float64_is_infinity(b)) { return float64_mul(a, b, fp_status); } - x.mant = int128_mul_6464(accumulated, 1); + x.mant = int128_make64(accumulated); x.sticky = sticky; prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b)); x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL)); From e4a8e093dc74be049f4829831dce76e5edab0003 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 21 Dec 2024 16:50:26 +0000 Subject: [PATCH 72/72] accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert all targets simultaneously, as the gen_intermediate_code function disappears from the target. While there are possible workarounds, they're larger than simply performing the conversion. 
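A toy model of the conversion (hypothetical names): the per-target entry point stops being one well-known global symbol and becomes a mandatory hook in the target's ops structure, which the core asserts and then calls through:

    #include <assert.h>
    #include <stdio.h>

    typedef struct DemoTCGOps {
        void (*translate_code)(int max_insns);
    } DemoTCGOps;

    static void demo_target_translate_code(int max_insns)
    {
        printf("translating up to %d insns\n", max_insns);
    }

    static const DemoTCGOps demo_ops = {
        .translate_code = demo_target_translate_code,
    };

    int main(void)
    {
        assert(demo_ops.translate_code);   /* mandatory handler check */
        demo_ops.translate_code(512);
        return 0;
    }

This mirrors the assert(tcg_ops->translate_code) added to tcg_exec_realizefn and the indirect call in setjmp_gen_code below.
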
Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- accel/tcg/cpu-exec.c | 8 +++++--- accel/tcg/translate-all.c | 8 +++++--- include/exec/translator.h | 14 -------------- include/hw/core/tcg-cpu-ops.h | 13 +++++++++++++ target/alpha/cpu.c | 1 + target/alpha/cpu.h | 2 ++ target/alpha/translate.c | 4 ++-- target/arm/cpu.c | 1 + target/arm/internals.h | 2 ++ target/arm/tcg/cpu-v7m.c | 1 + target/arm/tcg/translate.c | 5 ++--- target/avr/cpu.c | 1 + target/avr/cpu.h | 2 ++ target/avr/translate.c | 6 +++--- target/hexagon/cpu.c | 1 + target/hexagon/cpu.h | 2 ++ target/hexagon/translate.c | 4 ++-- target/hppa/cpu.c | 1 + target/hppa/cpu.h | 2 ++ target/hppa/translate.c | 4 ++-- target/i386/tcg/helper-tcg.h | 2 ++ target/i386/tcg/tcg-cpu.c | 1 + target/i386/tcg/translate.c | 5 ++--- target/loongarch/cpu.c | 1 + target/loongarch/internals.h | 2 ++ target/loongarch/tcg/translate.c | 4 ++-- target/m68k/cpu.c | 1 + target/m68k/cpu.h | 2 ++ target/m68k/translate.c | 4 ++-- target/microblaze/cpu.c | 1 + target/microblaze/cpu.h | 2 ++ target/microblaze/translate.c | 4 ++-- target/mips/cpu.c | 1 + target/mips/tcg/tcg-internal.h | 2 ++ target/mips/tcg/translate.c | 4 ++-- target/openrisc/cpu.c | 1 + target/openrisc/cpu.h | 2 ++ target/openrisc/translate.c | 4 ++-- target/ppc/cpu.h | 2 ++ target/ppc/cpu_init.c | 1 + target/ppc/translate.c | 4 ++-- target/riscv/cpu.h | 3 +++ target/riscv/tcg/tcg-cpu.c | 1 + target/riscv/translate.c | 4 ++-- target/rx/cpu.c | 1 + target/rx/cpu.h | 2 ++ target/rx/translate.c | 4 ++-- target/s390x/cpu.c | 1 + target/s390x/s390x-internal.h | 2 ++ target/s390x/tcg/translate.c | 4 ++-- target/sh4/cpu.c | 1 + target/sh4/cpu.h | 2 ++ target/sh4/translate.c | 4 ++-- target/sparc/cpu.c | 1 + target/sparc/cpu.h | 2 ++ target/sparc/translate.c | 4 ++-- target/tricore/cpu.c | 1 + target/tricore/cpu.h | 2 ++ target/tricore/translate.c | 5 ++--- target/xtensa/cpu.c | 1 + target/xtensa/cpu.h | 2 ++ target/xtensa/translate.c | 4 ++-- 62 files changed, 121 insertions(+), 62 deletions(-) diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index b507049ddb..d48b82a932 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -1088,11 +1088,13 @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp) if (!tcg_target_initialized) { /* Check mandatory TCGCPUOps handlers */ + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; #ifndef CONFIG_USER_ONLY - assert(cpu->cc->tcg_ops->cpu_exec_halt); - assert(cpu->cc->tcg_ops->cpu_exec_interrupt); + assert(tcg_ops->cpu_exec_halt); + assert(tcg_ops->cpu_exec_interrupt); #endif /* !CONFIG_USER_ONLY */ - cpu->cc->tcg_ops->initialize(); + assert(tcg_ops->translate_code); + tcg_ops->initialize(); tcg_target_initialized = true; } diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index 572a8a8797..453eb20ec9 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -276,8 +276,10 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb, tcg_func_start(tcg_ctx); - tcg_ctx->cpu = env_cpu(env); - gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc); + CPUState *cs = env_cpu(env); + tcg_ctx->cpu = cs; + cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc); + assert(tb->size != 0); tcg_ctx->cpu = NULL; *max_insns = tb->icount; @@ -364,7 +366,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, /* * Overflow of code_gen_buffer, or the current slice of it. 
* - * TODO: We don't need to re-do gen_intermediate_code, nor + * TODO: We don't need to re-do tcg_ops->translate_code, nor * should we re-do the tcg optimization currently hidden * inside tcg_gen_code. All that should be required is to * flush the TBs, allocate a new TB, re-initialize it per diff --git a/include/exec/translator.h b/include/exec/translator.h index 41e2a41180..d70942a10f 100644 --- a/include/exec/translator.h +++ b/include/exec/translator.h @@ -21,20 +21,6 @@ #include "qemu/bswap.h" #include "exec/vaddr.h" -/** - * gen_intermediate_code - * @cpu: cpu context - * @tb: translation block - * @max_insns: max number of instructions to translate - * @pc: guest virtual program counter address - * @host_pc: host physical program counter address - * - * This function must be provided by the target, which should create - * the target-specific DisasContext, and then invoke translator_loop. - */ -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc); - /** * DisasJumpType: * @DISAS_NEXT: Next instruction in program order. diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h index 663efb9133..2e3f1690f1 100644 --- a/include/hw/core/tcg-cpu-ops.h +++ b/include/hw/core/tcg-cpu-ops.h @@ -24,6 +24,19 @@ struct TCGCPUOps { * Called when the first CPU is realized. */ void (*initialize)(void); + /** + * @translate_code: Translate guest instructions to TCGOps + * @cpu: cpu context + * @tb: translation block + * @max_insns: max number of instructions to translate + * @pc: guest virtual program counter address + * @host_pc: host physical program counter address + * + * This function must be provided by the target, which should create + * the target-specific DisasContext, and then invoke translator_loop. 
+ */ + void (*translate_code)(CPUState *cpu, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); /** * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock * diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c index 9fa506bff9..e1b898e575 100644 --- a/target/alpha/cpu.c +++ b/target/alpha/cpu.c @@ -224,6 +224,7 @@ static const struct SysemuCPUOps alpha_sysemu_ops = { static const TCGCPUOps alpha_tcg_ops = { .initialize = alpha_translate_init, + .translate_code = alpha_translate_code, .synchronize_from_tb = alpha_cpu_synchronize_from_tb, .restore_state_to_opc = alpha_restore_state_to_opc, diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h index 3556d3227f..80562adfb5 100644 --- a/target/alpha/cpu.h +++ b/target/alpha/cpu.h @@ -431,6 +431,8 @@ enum { }; void alpha_translate_init(void); +void alpha_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); #define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU diff --git a/target/alpha/translate.c b/target/alpha/translate.c index 629ff3cde9..2156c02214 100644 --- a/target/alpha/translate.c +++ b/target/alpha/translate.c @@ -2955,8 +2955,8 @@ static const TranslatorOps alpha_tr_ops = { .tb_stop = alpha_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void alpha_translate_code(CPUState *cpu, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc; translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base); diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 019183c9ea..dcedadc89e 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -2682,6 +2682,7 @@ static const struct SysemuCPUOps arm_sysemu_ops = { #ifdef CONFIG_TCG static const TCGCPUOps arm_tcg_ops = { .initialize = arm_translate_init, + .translate_code = arm_translate_code, .synchronize_from_tb = arm_cpu_synchronize_from_tb, .debug_excp_handler = arm_debug_excp_handler, .restore_state_to_opc = arm_restore_state_to_opc, diff --git a/target/arm/internals.h b/target/arm/internals.h index c3a5b1385f..863a84edf8 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -357,6 +357,8 @@ void init_cpreg_list(ARMCPU *cpu); void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu); void arm_translate_init(void); +void arm_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); void arm_cpu_register_gdb_commands(ARMCPU *cpu); void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *, diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c index 58e54578d6..03acdf83e0 100644 --- a/target/arm/tcg/cpu-v7m.c +++ b/target/arm/tcg/cpu-v7m.c @@ -234,6 +234,7 @@ static void cortex_m55_initfn(Object *obj) static const TCGCPUOps arm_v7m_tcg_ops = { .initialize = arm_translate_init, + .translate_code = arm_translate_code, .synchronize_from_tb = arm_cpu_synchronize_from_tb, .debug_excp_handler = arm_debug_excp_handler, .restore_state_to_opc = arm_restore_state_to_opc, diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c index 9ee761fc64..c16b59ab88 100644 --- a/target/arm/tcg/translate.c +++ b/target/arm/tcg/translate.c @@ -8093,9 +8093,8 @@ static const TranslatorOps thumb_translator_ops = { .tb_stop = arm_tr_tb_stop, }; -/* generate intermediate code for basic block 'tb'. 
*/ -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void arm_translate_code(CPUState *cpu, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc = { }; const TranslatorOps *ops = &arm_translator_ops; diff --git a/target/avr/cpu.c b/target/avr/cpu.c index 2dccb09c5e..8a126ff322 100644 --- a/target/avr/cpu.c +++ b/target/avr/cpu.c @@ -207,6 +207,7 @@ static const struct SysemuCPUOps avr_sysemu_ops = { static const TCGCPUOps avr_tcg_ops = { .initialize = avr_cpu_tcg_init, + .translate_code = avr_cpu_translate_code, .synchronize_from_tb = avr_cpu_synchronize_from_tb, .restore_state_to_opc = avr_restore_state_to_opc, .cpu_exec_interrupt = avr_cpu_exec_interrupt, diff --git a/target/avr/cpu.h b/target/avr/cpu.h index 4725535102..06f5ae4d1b 100644 --- a/target/avr/cpu.h +++ b/target/avr/cpu.h @@ -183,6 +183,8 @@ static inline void set_avr_feature(CPUAVRState *env, int feature) } void avr_cpu_tcg_init(void); +void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); int cpu_avr_exec(CPUState *cpu); diff --git a/target/avr/translate.c b/target/avr/translate.c index f13b997f8d..4ab71d8138 100644 --- a/target/avr/translate.c +++ b/target/avr/translate.c @@ -2599,7 +2599,7 @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a) * * - translate() * - canonicalize_skip() - * - gen_intermediate_code() + * - translate_code() * - restore_state_to_opc() * */ @@ -2795,8 +2795,8 @@ static const TranslatorOps avr_tr_ops = { .tb_stop = avr_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc = { }; translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base); diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c index c9817c7192..0b7fc98f6c 100644 --- a/target/hexagon/cpu.c +++ b/target/hexagon/cpu.c @@ -325,6 +325,7 @@ static void hexagon_cpu_init(Object *obj) static const TCGCPUOps hexagon_tcg_ops = { .initialize = hexagon_translate_init, + .translate_code = hexagon_translate_code, .synchronize_from_tb = hexagon_cpu_synchronize_from_tb, .restore_state_to_opc = hexagon_restore_state_to_opc, }; diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h index 14e6e819c2..79e60d4bfa 100644 --- a/target/hexagon/cpu.h +++ b/target/hexagon/cpu.h @@ -150,6 +150,8 @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc, typedef HexagonCPU ArchCPU; void hexagon_translate_init(void); +void hexagon_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); #include "exec/cpu-all.h" diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c index 562105705a..fe7858703c 100644 --- a/target/hexagon/translate.c +++ b/target/hexagon/translate.c @@ -1026,8 +1026,8 @@ static const TranslatorOps hexagon_tr_ops = { .tb_stop = hexagon_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void hexagon_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext ctx; diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c index c9062e60b6..47d0160955 100644 --- a/target/hppa/cpu.c +++ b/target/hppa/cpu.c @@ -223,6 +223,7 @@ static const struct SysemuCPUOps hppa_sysemu_ops = { static const TCGCPUOps hppa_tcg_ops = { 
.initialize = hppa_translate_init, + .translate_code = hppa_translate_code, .synchronize_from_tb = hppa_cpu_synchronize_from_tb, .restore_state_to_opc = hppa_restore_state_to_opc, diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h index e45ba50a59..22a6510e08 100644 --- a/target/hppa/cpu.h +++ b/target/hppa/cpu.h @@ -303,6 +303,8 @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env) } void hppa_translate_init(void); +void hppa_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); #define CPU_RESOLVING_TYPE TYPE_HPPA_CPU diff --git a/target/hppa/translate.c b/target/hppa/translate.c index d13f80fe3e..dc04f9f3c0 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -4869,8 +4869,8 @@ static const TranslatorOps hppa_tr_ops = { #endif }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void hppa_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext ctx = { }; translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base); diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h index 696d6ef016..54d845379c 100644 --- a/target/i386/tcg/helper-tcg.h +++ b/target/i386/tcg/helper-tcg.h @@ -59,6 +59,8 @@ static inline target_long lshift(target_long x, int n) /* translate.c */ void tcg_x86_init(void); +void x86_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); /* excp_helper.c */ G_NORETURN void raise_exception(CPUX86State *env, int exception_index); diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c index 231ecac37d..14ee038079 100644 --- a/target/i386/tcg/tcg-cpu.c +++ b/target/i386/tcg/tcg-cpu.c @@ -109,6 +109,7 @@ static bool x86_debug_check_breakpoint(CPUState *cs) static const TCGCPUOps x86_tcg_ops = { .initialize = tcg_x86_init, + .translate_code = x86_translate_code, .synchronize_from_tb = x86_cpu_synchronize_from_tb, .restore_state_to_opc = x86_restore_state_to_opc, .cpu_exec_enter = x86_cpu_exec_enter, diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index 903553dc88..834aea1e59 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -3814,9 +3814,8 @@ static const TranslatorOps i386_tr_ops = { .tb_stop = i386_tr_tb_stop, }; -/* generate intermediate code for basic block 'tb'. 
*/ -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void x86_translate_code(CPUState *cpu, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc; diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c index f5bc8720d1..58415ffe99 100644 --- a/target/loongarch/cpu.c +++ b/target/loongarch/cpu.c @@ -795,6 +795,7 @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) static const TCGCPUOps loongarch_tcg_ops = { .initialize = loongarch_translate_init, + .translate_code = loongarch_translate_code, .synchronize_from_tb = loongarch_cpu_synchronize_from_tb, .restore_state_to_opc = loongarch_restore_state_to_opc, diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h index 0655ac948b..ad9cf4fc7a 100644 --- a/target/loongarch/internals.h +++ b/target/loongarch/internals.h @@ -17,6 +17,8 @@ #define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS) void loongarch_translate_init(void); +void loongarch_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); void G_NORETURN do_raise_exception(CPULoongArchState *env, uint32_t exception, diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c index 1fca4afc73..68be999410 100644 --- a/target/loongarch/tcg/translate.c +++ b/target/loongarch/tcg/translate.c @@ -333,8 +333,8 @@ static const TranslatorOps loongarch_tr_ops = { .tb_stop = loongarch_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void loongarch_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext ctx; diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c index 9de8ce6707..41dfdf5804 100644 --- a/target/m68k/cpu.c +++ b/target/m68k/cpu.c @@ -551,6 +551,7 @@ static const struct SysemuCPUOps m68k_sysemu_ops = { static const TCGCPUOps m68k_tcg_ops = { .initialize = m68k_tcg_init, + .translate_code = m68k_translate_code, .restore_state_to_opc = m68k_restore_state_to_opc, #ifndef CONFIG_USER_ONLY diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h index b5bbeedb7a..ddb0f29f4a 100644 --- a/target/m68k/cpu.h +++ b/target/m68k/cpu.h @@ -193,6 +193,8 @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void m68k_tcg_init(void); +void m68k_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); void m68k_cpu_init_gdb(M68kCPU *cpu); uint32_t cpu_m68k_get_ccr(CPUM68KState *env); void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t); diff --git a/target/m68k/translate.c b/target/m68k/translate.c index 077151c62d..dec2967fce 100644 --- a/target/m68k/translate.c +++ b/target/m68k/translate.c @@ -6118,8 +6118,8 @@ static const TranslatorOps m68k_tr_ops = { .tb_stop = m68k_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void m68k_translate_code(CPUState *cpu, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc; translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base); diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c index eba819378d..f114789abd 100644 --- a/target/microblaze/cpu.c +++ b/target/microblaze/cpu.c @@ -423,6 +423,7 @@ static const struct SysemuCPUOps mb_sysemu_ops = { static const TCGCPUOps mb_tcg_ops = { 
.initialize = mb_tcg_init, + .translate_code = mb_translate_code, .synchronize_from_tb = mb_cpu_synchronize_from_tb, .restore_state_to_opc = mb_restore_state_to_opc, diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h index 3e5a3e5c60..f6879eee35 100644 --- a/target/microblaze/cpu.h +++ b/target/microblaze/cpu.h @@ -398,6 +398,8 @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val) } void mb_tcg_init(void); +void mb_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); #define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c index d53995c26d..24005f05b2 100644 --- a/target/microblaze/translate.c +++ b/target/microblaze/translate.c @@ -1779,8 +1779,8 @@ static const TranslatorOps mb_tr_ops = { .tb_stop = mb_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void mb_translate_code(CPUState *cpu, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc; translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base); diff --git a/target/mips/cpu.c b/target/mips/cpu.c index 1b0cf6df9c..e3af02a4e6 100644 --- a/target/mips/cpu.c +++ b/target/mips/cpu.c @@ -547,6 +547,7 @@ static const Property mips_cpu_properties[] = { #include "hw/core/tcg-cpu-ops.h" static const TCGCPUOps mips_tcg_ops = { .initialize = mips_tcg_init, + .translate_code = mips_translate_code, .synchronize_from_tb = mips_cpu_synchronize_from_tb, .restore_state_to_opc = mips_restore_state_to_opc, diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h index aef032c48d..74fc1309a7 100644 --- a/target/mips/tcg/tcg-internal.h +++ b/target/mips/tcg/tcg-internal.h @@ -16,6 +16,8 @@ #include "cpu.h" void mips_tcg_init(void); +void mips_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb); G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c index bd1ef4e1fc..78b848a6d9 100644 --- a/target/mips/tcg/translate.c +++ b/target/mips/tcg/translate.c @@ -15231,8 +15231,8 @@ static const TranslatorOps mips_tr_ops = { .tb_stop = mips_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void mips_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext ctx; diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c index 7913a0c3e1..b7bab0d7ab 100644 --- a/target/openrisc/cpu.c +++ b/target/openrisc/cpu.c @@ -236,6 +236,7 @@ static const struct SysemuCPUOps openrisc_sysemu_ops = { static const TCGCPUOps openrisc_tcg_ops = { .initialize = openrisc_translate_init, + .translate_code = openrisc_translate_code, .synchronize_from_tb = openrisc_cpu_synchronize_from_tb, .restore_state_to_opc = openrisc_restore_state_to_opc, diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h index c9fe9ae12d..b97d2ffdd2 100644 --- a/target/openrisc/cpu.h +++ b/target/openrisc/cpu.h @@ -301,6 +301,8 @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags); int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void openrisc_translate_init(void); +void 
openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+                             int *max_insns, vaddr pc, void *host_pc);
 int print_insn_or1k(bfd_vma addr, disassemble_info *info);
 
 #ifndef CONFIG_USER_ONLY
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index 028ba66631..7a6af183ae 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -1646,8 +1646,8 @@ static const TranslatorOps openrisc_tr_ops = {
     .tb_stop            = openrisc_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+                             int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
 
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 2ffac2ed03..0b8b4c0517 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1581,6 +1581,8 @@ extern const VMStateDescription vmstate_ppc_cpu;
 
 /*****************************************************************************/
 void ppc_translate_init(void);
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);
 
 #if !defined(CONFIG_USER_ONLY)
 void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index 0fcef630f1..c05c2dc42d 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -7431,6 +7431,7 @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
 
 static const TCGCPUOps ppc_tcg_ops = {
     .initialize = ppc_translate_init,
+    .translate_code = ppc_translate_code,
     .restore_state_to_opc = ppc_restore_state_to_opc,
 
 #ifdef CONFIG_USER_ONLY
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 8ab87f42d6..80638ab535 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -6669,8 +6669,8 @@ static const TranslatorOps ppc_tr_ops = {
     .tb_stop            = ppc_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
 
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 284b112821..252fdb8672 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -602,6 +602,9 @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
 
 void riscv_translate_init(void);
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);
+
 G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                       uint32_t exception, uintptr_t pc);
 
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index f0129811fd..8b89c99c0f 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -135,6 +135,7 @@ static void riscv_restore_state_to_opc(CPUState *cs,
 
 static const TCGCPUOps riscv_tcg_ops = {
     .initialize = riscv_translate_init,
+    .translate_code = riscv_translate_code,
     .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
     .restore_state_to_opc = riscv_restore_state_to_opc,
 
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index a76f67c5dd..a992d4f3c6 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -1346,8 +1346,8 @@ static const TranslatorOps riscv_tr_ops = {
     .tb_stop            = riscv_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
 
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index 558280c794..8c50c7a1bc 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -196,6 +196,7 @@ static const struct SysemuCPUOps rx_sysemu_ops = {
 
 static const TCGCPUOps rx_tcg_ops = {
     .initialize = rx_translate_init,
+    .translate_code = rx_translate_code,
     .synchronize_from_tb = rx_cpu_synchronize_from_tb,
     .restore_state_to_opc = rx_restore_state_to_opc,
     .tlb_fill = rx_cpu_tlb_fill,
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
index c53593d7aa..5ba1874bd7 100644
--- a/target/rx/cpu.h
+++ b/target/rx/cpu.h
@@ -139,6 +139,8 @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 
 void rx_translate_init(void);
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+                       int *max_insns, vaddr pc, void *host_pc);
 void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
 
 #include "exec/cpu-all.h"
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 4f43654bad..bbda703be8 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -2258,8 +2258,8 @@ static const TranslatorOps rx_tr_ops = {
     .tb_stop            = rx_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+                       int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;
 
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index 0a6847b027..97d41c23de 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -362,6 +362,7 @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
 
 static const TCGCPUOps s390_tcg_ops = {
     .initialize = s390x_translate_init,
+    .translate_code = s390x_translate_code,
     .restore_state_to_opc = s390x_restore_state_to_opc,
 
 #ifdef CONFIG_USER_ONLY
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
index 4cc435042c..a750e7a343 100644
--- a/target/s390x/s390x-internal.h
+++ b/target/s390x/s390x-internal.h
@@ -399,6 +399,8 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
 
 /* translate.c */
 void s390x_translate_init(void);
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);
 void s390x_restore_state_to_opc(CPUState *cs,
                                 const TranslationBlock *tb,
                                 const uint64_t *data);
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index 81554f2ad9..00073c5560 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -6481,8 +6481,8 @@ static const TranslatorOps s390x_tr_ops = {
     .disas_log          = s390x_tr_disas_log,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;
 
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index e9d3e12a62..24a22724c6 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -251,6 +251,7 @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
 
 static const TCGCPUOps superh_tcg_ops = {
     .initialize = sh4_translate_init,
+    .translate_code = sh4_translate_code,
     .synchronize_from_tb = superh_cpu_synchronize_from_tb,
     .restore_state_to_opc = superh_restore_state_to_opc,
 
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index d928bcf006..d536d5d715 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -248,6 +248,8 @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                                uintptr_t retaddr);
 
 void sh4_translate_init(void);
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);
 
 #if !defined(CONFIG_USER_ONLY)
 hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index f076da9bac..bcdd558818 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -2318,8 +2318,8 @@ static const TranslatorOps sh4_tr_ops = {
     .tb_stop            = sh4_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
 
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index 373a335c39..fbd38ec334 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -996,6 +996,7 @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
 
 static const TCGCPUOps sparc_tcg_ops = {
     .initialize = sparc_tcg_init,
+    .translate_code = sparc_translate_code,
     .synchronize_from_tb = sparc_cpu_synchronize_from_tb,
     .restore_state_to_opc = sparc_restore_state_to_opc,
 
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index 5c981234bb..dda811503b 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -609,6 +609,8 @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
 
 /* translate.c */
 void sparc_tcg_init(void);
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);
 
 /* fop_helper.c */
 target_ulong cpu_get_fsr(CPUSPARCState *);
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 465e20f4f3..7e5c7351cb 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -5819,8 +5819,8 @@ static const TranslatorOps sparc_tr_ops = {
     .tb_stop            = sparc_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc = {};
 
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index 95fb546666..95202fadbf 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -172,6 +172,7 @@ static const struct SysemuCPUOps tricore_sysemu_ops = {
 
 static const TCGCPUOps tricore_tcg_ops = {
     .initialize = tricore_tcg_init,
+    .translate_code = tricore_translate_code,
     .synchronize_from_tb = tricore_cpu_synchronize_from_tb,
     .restore_state_to_opc = tricore_restore_state_to_opc,
     .tlb_fill = tricore_cpu_tlb_fill,
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
index 220af69fc2..8e431d7922 100644
--- a/target/tricore/cpu.h
+++ b/target/tricore/cpu.h
@@ -252,6 +252,8 @@ FIELD(TB_FLAGS, PRIV, 0, 2)
 
 void cpu_state_reset(CPUTriCoreState *s);
 void tricore_tcg_init(void);
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc);
 
 static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
                                         uint64_t *cs_base, uint32_t *flags)
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 2b67395c09..0ef3743f3e 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -8460,9 +8460,8 @@ static const TranslatorOps tricore_tr_ops = {
     .tb_stop            = tricore_tr_tb_stop,
 };
 
-
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
     translator_loop(cs, tb, max_insns, pc, host_pc,
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index 0d4d79b58b..0910a3d290 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -232,6 +232,7 @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
 
 static const TCGCPUOps xtensa_tcg_ops = {
     .initialize = xtensa_translate_init,
+    .translate_code = xtensa_translate_code,
     .debug_excp_handler = xtensa_breakpoint_handler,
     .restore_state_to_opc = xtensa_restore_state_to_opc,
 
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index 77e48eef19..0e6302c5bd 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -617,6 +617,8 @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
 
 void xtensa_collect_sr_names(const XtensaConfig *config);
 void xtensa_translate_init(void);
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
+                           int *max_insns, vaddr pc, void *host_pc);
 void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
 void xtensa_breakpoint_handler(CPUState *cs);
 void xtensa_register_core(XtensaConfigList *node);
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 3c62c99b4f..4f02cefde3 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -1228,8 +1228,8 @@ static const TranslatorOps xtensa_translator_ops = {
     .tb_stop            = xtensa_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
+                           int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc = {};
     translator_loop(cpu, tb, max_insns, pc, host_pc,