From f0c29a02343d733154a64271124609cb5f117797 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 11 Feb 2025 12:58:27 +0000 Subject: [PATCH 01/68] target/alpha: Don't corrupt error_code with unknown softfloat flags MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In do_cvttq() we set env->error_code with what is supposed to be a set of FPCR exception bit values. However, if the set of float exception flags we get back from softfloat for the conversion includes a flag which is not one of the three we expect here (invalid_cvti, invalid, inexact) then we will fall through the if-ladder and set env->error_code to the unconverted softfloat exception_flag value. This will then cause us to take a spurious exception. This is harmless now, but when we add new floating point exception flags to softfloat it will cause problems. Add an else clause to the if-ladder to make it ignore any float exception flags it doesn't care about. Specifically, without this fix, 'make check-tcg' will fail for Alpha when the commit adding float_flag_input_denormal_used lands. Fixes: aa3bad5b59e7 ("target/alpha: Use float64_to_int64_modulo for CVTTQ") Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé --- target/alpha/fpu_helper.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/target/alpha/fpu_helper.c b/target/alpha/fpu_helper.c index 63d9e9ce39..f810a9b6a4 100644 --- a/target/alpha/fpu_helper.c +++ b/target/alpha/fpu_helper.c @@ -476,6 +476,8 @@ static uint64_t do_cvttq(CPUAlphaState *env, uint64_t a, int roundmode) exc = FPCR_INV; } else if (exc & float_flag_inexact) { exc = FPCR_INE; + } else { + exc = 0; } } env->error_code = exc; From 7ad6a567570ed8cd829c4f05a006e2ba2c86fa4a Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:06 +0000 Subject: [PATCH 02/68] fpu: Add float_class_denormal Currently in softfloat we canonicalize input denormals and so the code that implements floating point operations does not need to care whether the input value was originally normal or denormal. However, both x86 and Arm FEAT_AFP require that an exception flag is set if: * an input is denormal * that input is not squashed to zero * that input is actually used in the calculation (e.g. we did not find the other input was a NaN) So we need to track that the input was a non-squashed denormal. To do this we add a new value to the FloatClass enum. In this commit we add the value and adjust the code everywhere that looks at FloatClass values so that the new float_class_denormal behaves identically to float_class_normal. We will add the code that does the "raise a new float exception flag if an input was an unsquashed denormal and we used it" in a subsequent commit. There should be no behavioural change in this commit. 
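As a purely illustrative sketch (not extra code added by this patch), the conversion in the diff below is mechanical: wherever code previously singled out float_class_normal, it now treats the two classes identically, using the cmask_is_only_normals() and is_anynorm() helpers that this patch introduces:

    /* sketch of the pattern applied throughout the diff below */
    if (likely(cmask_is_only_normals(ab_mask))) {   /* was: ab_mask == float_cmask_normal */
        /* fast path for normal operands, now including canonicalized denormals */
    }
    switch (p->cls) {
    case float_class_normal:
    case float_class_denormal:      /* new case, handled exactly like normal */
        /* existing normal-number handling */
        break;
    default:
        /* zero / inf / NaN handling unchanged */
        break;
    }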
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- fpu/softfloat-parts.c.inc | 40 ++++++++++++++++++++++++--------------- fpu/softfloat.c | 32 ++++++++++++++++++++++++++++--- 2 files changed, 54 insertions(+), 18 deletions(-) diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc index 73621f4a97..8621cb8718 100644 --- a/fpu/softfloat-parts.c.inc +++ b/fpu/softfloat-parts.c.inc @@ -204,7 +204,7 @@ static void partsN(canonicalize)(FloatPartsN *p, float_status *status, frac_clear(p); } else { int shift = frac_normalize(p); - p->cls = float_class_normal; + p->cls = float_class_denormal; p->exp = fmt->frac_shift - fmt->exp_bias - shift + !fmt->m68k_denormal; } @@ -395,7 +395,7 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s, static void partsN(uncanon)(FloatPartsN *p, float_status *s, const FloatFmt *fmt) { - if (likely(p->cls == float_class_normal)) { + if (likely(is_anynorm(p->cls))) { parts_uncanon_normal(p, s, fmt); } else { switch (p->cls) { @@ -435,7 +435,7 @@ static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b, if (a->sign != b_sign) { /* Subtraction */ - if (likely(ab_mask == float_cmask_normal)) { + if (likely(cmask_is_only_normals(ab_mask))) { if (parts_sub_normal(a, b)) { return a; } @@ -468,7 +468,7 @@ static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b, } } else { /* Addition */ - if (likely(ab_mask == float_cmask_normal)) { + if (likely(cmask_is_only_normals(ab_mask))) { parts_add_normal(a, b); return a; } @@ -488,12 +488,12 @@ static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b, } if (b->cls == float_class_zero) { - g_assert(a->cls == float_class_normal); + g_assert(is_anynorm(a->cls)); return a; } g_assert(a->cls == float_class_zero); - g_assert(b->cls == float_class_normal); + g_assert(is_anynorm(b->cls)); return_b: b->sign = b_sign; return b; @@ -513,7 +513,7 @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b, int ab_mask = float_cmask(a->cls) | float_cmask(b->cls); bool sign = a->sign ^ b->sign; - if (likely(ab_mask == float_cmask_normal)) { + if (likely(cmask_is_only_normals(ab_mask))) { FloatPartsW tmp; frac_mulw(&tmp, a, b); @@ -596,7 +596,7 @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b, a->sign ^= 1; } - if (unlikely(ab_mask != float_cmask_normal)) { + if (unlikely(!cmask_is_only_normals(ab_mask))) { if (unlikely(ab_mask == float_cmask_infzero)) { float_raise(float_flag_invalid | float_flag_invalid_imz, s); goto d_nan; @@ -611,7 +611,7 @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b, } g_assert(ab_mask & float_cmask_zero); - if (c->cls == float_class_normal) { + if (is_anynorm(c->cls)) { *a = *c; goto return_normal; } @@ -692,7 +692,7 @@ static FloatPartsN *partsN(div)(FloatPartsN *a, FloatPartsN *b, int ab_mask = float_cmask(a->cls) | float_cmask(b->cls); bool sign = a->sign ^ b->sign; - if (likely(ab_mask == float_cmask_normal)) { + if (likely(cmask_is_only_normals(ab_mask))) { a->sign = sign; a->exp -= b->exp + frac_div(a, b); return a; @@ -750,7 +750,7 @@ static FloatPartsN *partsN(modrem)(FloatPartsN *a, FloatPartsN *b, { int ab_mask = float_cmask(a->cls) | float_cmask(b->cls); - if (likely(ab_mask == float_cmask_normal)) { + if (likely(cmask_is_only_normals(ab_mask))) { frac_modrem(a, b, mod_quot); return a; } @@ -800,6 +800,8 @@ static void partsN(sqrt)(FloatPartsN *a, float_status *status, if (unlikely(a->cls != float_class_normal)) { switch (a->cls) { + case float_class_denormal: + break; case 
float_class_snan: case float_class_qnan: parts_return_nan(a, status); @@ -1130,6 +1132,7 @@ static void partsN(round_to_int)(FloatPartsN *a, FloatRoundMode rmode, case float_class_inf: break; case float_class_normal: + case float_class_denormal: if (parts_round_to_int_normal(a, rmode, scale, fmt->frac_size)) { float_raise(float_flag_inexact, s); } @@ -1174,6 +1177,7 @@ static int64_t partsN(float_to_sint)(FloatPartsN *p, FloatRoundMode rmode, return 0; case float_class_normal: + case float_class_denormal: /* TODO: N - 2 is frac_size for rounding; could use input fmt. */ if (parts_round_to_int_normal(p, rmode, scale, N - 2)) { flags = float_flag_inexact; @@ -1241,6 +1245,7 @@ static uint64_t partsN(float_to_uint)(FloatPartsN *p, FloatRoundMode rmode, return 0; case float_class_normal: + case float_class_denormal: /* TODO: N - 2 is frac_size for rounding; could use input fmt. */ if (parts_round_to_int_normal(p, rmode, scale, N - 2)) { flags = float_flag_inexact; @@ -1304,6 +1309,7 @@ static int64_t partsN(float_to_sint_modulo)(FloatPartsN *p, return 0; case float_class_normal: + case float_class_denormal: /* TODO: N - 2 is frac_size for rounding; could use input fmt. */ if (parts_round_to_int_normal(p, rmode, 0, N - 2)) { flags = float_flag_inexact; @@ -1452,9 +1458,10 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b, a_exp = a->exp; b_exp = b->exp; - if (unlikely(ab_mask != float_cmask_normal)) { + if (unlikely(!cmask_is_only_normals(ab_mask))) { switch (a->cls) { case float_class_normal: + case float_class_denormal: break; case float_class_inf: a_exp = INT16_MAX; @@ -1467,6 +1474,7 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b, } switch (b->cls) { case float_class_normal: + case float_class_denormal: break; case float_class_inf: b_exp = INT16_MAX; @@ -1513,7 +1521,7 @@ static FloatRelation partsN(compare)(FloatPartsN *a, FloatPartsN *b, { int ab_mask = float_cmask(a->cls) | float_cmask(b->cls); - if (likely(ab_mask == float_cmask_normal)) { + if (likely(cmask_is_only_normals(ab_mask))) { FloatRelation cmp; if (a->sign != b->sign) { @@ -1581,6 +1589,7 @@ static void partsN(scalbn)(FloatPartsN *a, int n, float_status *s) case float_class_inf: break; case float_class_normal: + case float_class_denormal: a->exp += MIN(MAX(n, -0x10000), 0x10000); break; default: @@ -1599,6 +1608,8 @@ static void partsN(log2)(FloatPartsN *a, float_status *s, const FloatFmt *fmt) if (unlikely(a->cls != float_class_normal)) { switch (a->cls) { + case float_class_denormal: + break; case float_class_snan: case float_class_qnan: parts_return_nan(a, s); @@ -1615,9 +1626,8 @@ static void partsN(log2)(FloatPartsN *a, float_status *s, const FloatFmt *fmt) } return; default: - break; + g_assert_not_reached(); } - g_assert_not_reached(); } if (unlikely(a->sign)) { goto d_nan; diff --git a/fpu/softfloat.c b/fpu/softfloat.c index 26f3a8dc87..03a604c38e 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -404,12 +404,16 @@ float64_gen2(float64 xa, float64 xb, float_status *s, /* * Classify a floating point number. Everything above float_class_qnan * is a NaN so cls >= float_class_qnan is any NaN. + * + * Note that we canonicalize denormals, so most code should treat + * class_normal and class_denormal identically. 
*/ typedef enum __attribute__ ((__packed__)) { float_class_unclassified, float_class_zero, float_class_normal, + float_class_denormal, /* input was a non-squashed denormal */ float_class_inf, float_class_qnan, /* all NaNs from here */ float_class_snan, @@ -420,12 +424,14 @@ typedef enum __attribute__ ((__packed__)) { enum { float_cmask_zero = float_cmask(float_class_zero), float_cmask_normal = float_cmask(float_class_normal), + float_cmask_denormal = float_cmask(float_class_denormal), float_cmask_inf = float_cmask(float_class_inf), float_cmask_qnan = float_cmask(float_class_qnan), float_cmask_snan = float_cmask(float_class_snan), float_cmask_infzero = float_cmask_zero | float_cmask_inf, float_cmask_anynan = float_cmask_qnan | float_cmask_snan, + float_cmask_anynorm = float_cmask_normal | float_cmask_denormal, }; /* Flags for parts_minmax. */ @@ -459,6 +465,20 @@ static inline __attribute__((unused)) bool is_qnan(FloatClass c) return c == float_class_qnan; } +/* + * Return true if the float_cmask has only normals in it + * (including input denormals that were canonicalized) + */ +static inline bool cmask_is_only_normals(int cmask) +{ + return !(cmask & ~float_cmask_anynorm); +} + +static inline bool is_anynorm(FloatClass c) +{ + return float_cmask(c) & float_cmask_anynorm; +} + /* * Structure holding all of the decomposed parts of a float. * The exponent is unbiased and the fraction is normalized. @@ -1729,6 +1749,7 @@ static float64 float64r32_round_pack_canonical(FloatParts64 *p, */ switch (p->cls) { case float_class_normal: + case float_class_denormal: if (unlikely(p->exp == 0)) { /* * The result is denormal for float32, but can be represented @@ -1817,6 +1838,7 @@ static floatx80 floatx80_round_pack_canonical(FloatParts128 *p, switch (p->cls) { case float_class_normal: + case float_class_denormal: if (s->floatx80_rounding_precision == floatx80_precision_x) { parts_uncanon_normal(p, s, fmt); frac = p->frac_hi; @@ -2697,6 +2719,7 @@ static void parts_float_to_ahp(FloatParts64 *a, float_status *s) break; case float_class_normal: + case float_class_denormal: case float_class_zero: break; @@ -2729,7 +2752,7 @@ static void parts_float_to_float_narrow(FloatParts64 *a, FloatParts128 *b, a->sign = b->sign; a->exp = b->exp; - if (a->cls == float_class_normal) { + if (is_anynorm(a->cls)) { frac_truncjam(a, b); } else if (is_nan(a->cls)) { /* Discard the low bits of the NaN. 
*/ @@ -3218,6 +3241,7 @@ static Int128 float128_to_int128_scalbn(float128 a, FloatRoundMode rmode, return int128_zero(); case float_class_normal: + case float_class_denormal: if (parts_round_to_int_normal(&p, rmode, scale, 128 - 2)) { flags = float_flag_inexact; } @@ -3645,6 +3669,7 @@ static Int128 float128_to_uint128_scalbn(float128 a, FloatRoundMode rmode, return int128_zero(); case float_class_normal: + case float_class_denormal: if (parts_round_to_int_normal(&p, rmode, scale, 128 - 2)) { flags = float_flag_inexact; if (p.cls == float_class_zero) { @@ -5231,6 +5256,8 @@ float32 float32_exp2(float32 a, float_status *status) float32_unpack_canonical(&xp, a, status); if (unlikely(xp.cls != float_class_normal)) { switch (xp.cls) { + case float_class_denormal: + break; case float_class_snan: case float_class_qnan: parts_return_nan(&xp, status); @@ -5240,9 +5267,8 @@ float32 float32_exp2(float32 a, float_status *status) case float_class_zero: return float32_one; default: - break; + g_assert_not_reached(); } - g_assert_not_reached(); } float_raise(float_flag_inexact, status); From 029a2083a293f55b7b3d56e16b1546c882d9299c Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:07 +0000 Subject: [PATCH 03/68] fpu: Implement float_flag_input_denormal_used For the x86 and the Arm FEAT_AFP semantics, we need to be able to tell the target code that the FPU operation has used an input denormal. Implement this; when it happens we set the new float_flag_input_denormal_used. Note that we only set this when an input denormal is actually used by the operation: if the operation results in Invalid Operation or Divide By Zero or the result is a NaN because some other input was a NaN then we never needed to look at the input denormal and do not set input_denormal_used. We mostly do not need to adjust the hardfloat codepaths to deal with this flag, because almost all hardfloat operations are already gated on the input not being a denormal, and will fall back to softfloat for a denormal input. The only exception is the comparison operations, where we need to add the check for input denormals, which must now fall back to softfloat where they did not before. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- fpu/softfloat-parts.c.inc | 68 ++++++++++++++++++++++++++++++++++- fpu/softfloat.c | 38 +++++++++++++++++--- include/fpu/softfloat-types.h | 7 ++++ 3 files changed, 107 insertions(+), 6 deletions(-) diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc index 8621cb8718..0122b35008 100644 --- a/fpu/softfloat-parts.c.inc +++ b/fpu/softfloat-parts.c.inc @@ -433,6 +433,15 @@ static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b, bool b_sign = b->sign ^ subtract; int ab_mask = float_cmask(a->cls) | float_cmask(b->cls); + /* + * For addition and subtraction, we will consume an + * input denormal unless the other input is a NaN. 
+ */ + if ((ab_mask & (float_cmask_denormal | float_cmask_anynan)) == + float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + if (a->sign != b_sign) { /* Subtraction */ if (likely(cmask_is_only_normals(ab_mask))) { @@ -516,6 +525,10 @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b, if (likely(cmask_is_only_normals(ab_mask))) { FloatPartsW tmp; + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + frac_mulw(&tmp, a, b); frac_truncjam(a, &tmp); @@ -541,6 +554,10 @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b, } /* Multiply by 0 or Inf */ + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + if (ab_mask & float_cmask_inf) { a->cls = float_class_inf; a->sign = sign; @@ -664,6 +681,16 @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b, if (flags & float_muladd_negate_result) { a->sign ^= 1; } + + /* + * All result types except for "return the default NaN + * because this is an Invalid Operation" go through here; + * this matches the set of cases where we consumed a + * denormal input. + */ + if (abc_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } return a; return_sub_zero: @@ -693,6 +720,9 @@ static FloatPartsN *partsN(div)(FloatPartsN *a, FloatPartsN *b, bool sign = a->sign ^ b->sign; if (likely(cmask_is_only_normals(ab_mask))) { + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } a->sign = sign; a->exp -= b->exp + frac_div(a, b); return a; @@ -713,6 +743,10 @@ static FloatPartsN *partsN(div)(FloatPartsN *a, FloatPartsN *b, return parts_pick_nan(a, b, s); } + if ((ab_mask & float_cmask_denormal) && b->cls != float_class_zero) { + float_raise(float_flag_input_denormal_used, s); + } + a->sign = sign; /* Inf / X */ @@ -751,6 +785,9 @@ static FloatPartsN *partsN(modrem)(FloatPartsN *a, FloatPartsN *b, int ab_mask = float_cmask(a->cls) | float_cmask(b->cls); if (likely(cmask_is_only_normals(ab_mask))) { + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } frac_modrem(a, b, mod_quot); return a; } @@ -771,6 +808,10 @@ static FloatPartsN *partsN(modrem)(FloatPartsN *a, FloatPartsN *b, return a; } + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + /* N % Inf; 0 % N */ g_assert(b->cls == float_class_inf || a->cls == float_class_zero); return a; @@ -801,6 +842,10 @@ static void partsN(sqrt)(FloatPartsN *a, float_status *status, if (unlikely(a->cls != float_class_normal)) { switch (a->cls) { case float_class_denormal: + if (!a->sign) { + /* -ve denormal will be InvalidOperation */ + float_raise(float_flag_input_denormal_used, status); + } break; case float_class_snan: case float_class_qnan: @@ -1431,6 +1476,9 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b, if ((flags & (minmax_isnum | minmax_isnumber)) && !(ab_mask & float_cmask_snan) && (ab_mask & ~float_cmask_qnan)) { + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } return is_nan(a->cls) ? 
b : a; } @@ -1455,6 +1503,10 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b, return parts_pick_nan(a, b, s); } + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + a_exp = a->exp; b_exp = b->exp; @@ -1524,6 +1576,10 @@ static FloatRelation partsN(compare)(FloatPartsN *a, FloatPartsN *b, if (likely(cmask_is_only_normals(ab_mask))) { FloatRelation cmp; + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + if (a->sign != b->sign) { goto a_sign; } @@ -1549,6 +1605,10 @@ static FloatRelation partsN(compare)(FloatPartsN *a, FloatPartsN *b, return float_relation_unordered; } + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + if (ab_mask & float_cmask_zero) { if (ab_mask == float_cmask_zero) { return float_relation_equal; @@ -1588,8 +1648,10 @@ static void partsN(scalbn)(FloatPartsN *a, int n, float_status *s) case float_class_zero: case float_class_inf: break; - case float_class_normal: case float_class_denormal: + float_raise(float_flag_input_denormal_used, s); + /* fall through */ + case float_class_normal: a->exp += MIN(MAX(n, -0x10000), 0x10000); break; default: @@ -1609,6 +1671,10 @@ static void partsN(log2)(FloatPartsN *a, float_status *s, const FloatFmt *fmt) if (unlikely(a->cls != float_class_normal)) { switch (a->cls) { case float_class_denormal: + if (!a->sign) { + /* -ve denormal will be InvalidOperation */ + float_raise(float_flag_input_denormal_used, s); + } break; case float_class_snan: case float_class_qnan: diff --git a/fpu/softfloat.c b/fpu/softfloat.c index 03a604c38e..f4fed9bfda 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -2718,8 +2718,10 @@ static void parts_float_to_ahp(FloatParts64 *a, float_status *s) float16_params_ahp.frac_size + 1); break; - case float_class_normal: case float_class_denormal: + float_raise(float_flag_input_denormal_used, s); + break; + case float_class_normal: case float_class_zero: break; @@ -2733,6 +2735,9 @@ static void parts64_float_to_float(FloatParts64 *a, float_status *s) if (is_nan(a->cls)) { parts_return_nan(a, s); } + if (a->cls == float_class_denormal) { + float_raise(float_flag_input_denormal_used, s); + } } static void parts128_float_to_float(FloatParts128 *a, float_status *s) @@ -2740,6 +2745,9 @@ static void parts128_float_to_float(FloatParts128 *a, float_status *s) if (is_nan(a->cls)) { parts_return_nan(a, s); } + if (a->cls == float_class_denormal) { + float_raise(float_flag_input_denormal_used, s); + } } #define parts_float_to_float(P, S) \ @@ -2752,12 +2760,21 @@ static void parts_float_to_float_narrow(FloatParts64 *a, FloatParts128 *b, a->sign = b->sign; a->exp = b->exp; - if (is_anynorm(a->cls)) { + switch (a->cls) { + case float_class_denormal: + float_raise(float_flag_input_denormal_used, s); + /* fall through */ + case float_class_normal: frac_truncjam(a, b); - } else if (is_nan(a->cls)) { + break; + case float_class_snan: + case float_class_qnan: /* Discard the low bits of the NaN. 
*/ a->frac = b->frac_hi; parts_return_nan(a, s); + break; + default: + break; } } @@ -2772,6 +2789,9 @@ static void parts_float_to_float_widen(FloatParts128 *a, FloatParts64 *b, if (is_nan(a->cls)) { parts_return_nan(a, s); } + if (a->cls == float_class_denormal) { + float_raise(float_flag_input_denormal_used, s); + } } float32 float16_to_float32(float16 a, bool ieee, float_status *s) @@ -4411,7 +4431,11 @@ float32_hs_compare(float32 xa, float32 xb, float_status *s, bool is_quiet) goto soft; } - float32_input_flush2(&ua.s, &ub.s, s); + if (unlikely(float32_is_denormal(ua.s) || float32_is_denormal(ub.s))) { + /* We may need to set the input_denormal_used flag */ + goto soft; + } + if (isgreaterequal(ua.h, ub.h)) { if (isgreater(ua.h, ub.h)) { return float_relation_greater; @@ -4461,7 +4485,11 @@ float64_hs_compare(float64 xa, float64 xb, float_status *s, bool is_quiet) goto soft; } - float64_input_flush2(&ua.s, &ub.s, s); + if (unlikely(float64_is_denormal(ua.s) || float64_is_denormal(ub.s))) { + /* We may need to set the input_denormal_used flag */ + goto soft; + } + if (isgreaterequal(ua.h, ub.h)) { if (isgreater(ua.h, ub.h)) { return float_relation_greater; diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h index 2e43d1dd9e..bba1c397bb 100644 --- a/include/fpu/softfloat-types.h +++ b/include/fpu/softfloat-types.h @@ -165,6 +165,13 @@ enum { float_flag_invalid_sqrt = 0x0800, /* sqrt(-x) */ float_flag_invalid_cvti = 0x1000, /* non-nan to integer */ float_flag_invalid_snan = 0x2000, /* any operand was snan */ + /* + * An input was denormal and we used it (without flushing it to zero). + * Not set if we do not actually use the denormal input (e.g. + * because some other input was a NaN, or because the operation + * wasn't actually carried out (divide-by-zero; invalid)) + */ + float_flag_input_denormal_used = 0x4000, }; /* From 28f13bccbe6a100f53519d1f32cbb78e407e2b83 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:08 +0000 Subject: [PATCH 04/68] fpu: allow flushing of output denormals to be after rounding Currently we handle flushing of output denormals in uncanon_normal always before we deal with rounding. This works for architectures that detect tininess before rounding, but is usually not the right place when the architecture detects tininess after rounding. For example, for x86 the SDM states that the MXCSR FTZ control bit causes outputs to be flushed to zero "when it detects a floating-point underflow condition". This means that we mustn't flush to zero if the input is such that after rounding it is no longer tiny. At least one of our guest architectures does underflow detection after rounding but flushing of denormals before rounding (MIPS MSA); this means we need to have a config knob for this that is separate from our existing tininess_before_rounding setting. Add an ftz_detection flag. For consistency with tininess_before_rounding, we make it default to "detect ftz after rounding"; this means that we need to explicitly set the flag to "detect ftz before rounding" on every existing architecture that sets flush_to_zero, so that this commit has no behaviour change. (This means more code change here but for the long term a less confusing API.) For several architectures the current behaviour is either definitely or possibly wrong; annotate those with TODO comments. 
These architectures are definitely wrong (and should detect ftz after rounding): * x86 * Alpha For these architectures the spec is unclear: * MIPS (for non-MSA) * RX * SH4 PA-RISC makes ftz detection IMPDEF, but we aren't setting the "tininess before rounding" setting that we ought to. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- fpu/softfloat-parts.c.inc | 21 +++++++++++++++------ include/fpu/softfloat-helpers.h | 11 +++++++++++ include/fpu/softfloat-types.h | 18 ++++++++++++++++++ target/alpha/cpu.c | 7 +++++++ target/arm/cpu.c | 1 + target/hppa/fpu_helper.c | 11 +++++++++++ target/i386/tcg/fpu_helper.c | 8 ++++++++ target/mips/fpu_helper.h | 6 ++++++ target/mips/msa.c | 9 +++++++++ target/ppc/cpu_init.c | 3 +++ target/rx/cpu.c | 8 ++++++++ target/sh4/cpu.c | 8 ++++++++ target/tricore/helper.c | 1 + tests/fp/fp-bench.c | 1 + 14 files changed, 107 insertions(+), 6 deletions(-) diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc index 0122b35008..1d09f066c5 100644 --- a/fpu/softfloat-parts.c.inc +++ b/fpu/softfloat-parts.c.inc @@ -334,7 +334,8 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s, p->frac_lo &= ~round_mask; } frac_shr(p, frac_shift); - } else if (s->flush_to_zero) { + } else if (s->flush_to_zero && + s->ftz_detection == float_ftz_before_rounding) { flags |= float_flag_output_denormal_flushed; p->cls = float_class_zero; exp = 0; @@ -381,11 +382,19 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s, exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) && !fmt->m68k_denormal; frac_shr(p, frac_shift); - if (is_tiny && (flags & float_flag_inexact)) { - flags |= float_flag_underflow; - } - if (exp == 0 && frac_eqz(p)) { - p->cls = float_class_zero; + if (is_tiny) { + if (s->flush_to_zero) { + assert(s->ftz_detection == float_ftz_after_rounding); + flags |= float_flag_output_denormal_flushed; + p->cls = float_class_zero; + exp = 0; + frac_clear(p); + } else if (flags & float_flag_inexact) { + flags |= float_flag_underflow; + } + if (exp == 0 && frac_eqz(p)) { + p->cls = float_class_zero; + } } } p->exp = exp; diff --git a/include/fpu/softfloat-helpers.h b/include/fpu/softfloat-helpers.h index 4cb30a4822..8983c2748e 100644 --- a/include/fpu/softfloat-helpers.h +++ b/include/fpu/softfloat-helpers.h @@ -109,6 +109,12 @@ static inline void set_flush_inputs_to_zero(bool val, float_status *status) status->flush_inputs_to_zero = val; } +static inline void set_float_ftz_detection(FloatFTZDetection d, + float_status *status) +{ + status->ftz_detection = d; +} + static inline void set_default_nan_mode(bool val, float_status *status) { status->default_nan_mode = val; @@ -183,4 +189,9 @@ static inline bool get_default_nan_mode(const float_status *status) return status->default_nan_mode; } +static inline FloatFTZDetection get_float_ftz_detection(const float_status *status) +{ + return status->ftz_detection; +} + #endif /* SOFTFLOAT_HELPERS_H */ diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h index bba1c397bb..53d5eb8521 100644 --- a/include/fpu/softfloat-types.h +++ b/include/fpu/softfloat-types.h @@ -304,6 +304,22 @@ typedef enum __attribute__((__packed__)) { float_infzeronan_suppress_invalid = (1 << 7), } FloatInfZeroNaNRule; +/* + * When flush_to_zero is set, should we detect denormal results to + * be flushed before or after rounding? For most architectures this + * should be set to match the tininess_before_rounding setting, + * but a few architectures, e.g. 
MIPS MSA, detect FTZ before + * rounding but tininess after rounding. + * + * This enum is arranged so that the default if the target doesn't + * configure it matches the default for tininess_before_rounding + * (i.e. "after rounding"). + */ +typedef enum __attribute__((__packed__)) { + float_ftz_after_rounding = 0, + float_ftz_before_rounding = 1, +} FloatFTZDetection; + /* * Floating Point Status. Individual architectures may maintain * several versions of float_status for different functions. The @@ -321,6 +337,8 @@ typedef struct float_status { bool tininess_before_rounding; /* should denormalised results go to zero and set output_denormal_flushed? */ bool flush_to_zero; + /* do we detect and flush denormal results before or after rounding? */ + FloatFTZDetection ftz_detection; /* should denormalised inputs go to zero and set input_denormal_flushed? */ bool flush_inputs_to_zero; bool default_nan_mode; diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c index e1b898e575..f5dd744987 100644 --- a/target/alpha/cpu.c +++ b/target/alpha/cpu.c @@ -202,6 +202,13 @@ static void alpha_cpu_initfn(Object *obj) set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status); /* Default NaN: sign bit clear, msb frac bit set */ set_float_default_nan_pattern(0b01000000, &env->fp_status); + /* + * TODO: this is incorrect. The Alpha Architecture Handbook version 4 + * section 4.7.7.11 says that we flush to zero for underflow cases, so + * this should be float_ftz_after_rounding to match the + * tininess_after_rounding (which is specified in section 4.7.5). + */ + set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status); #if defined(CONFIG_USER_ONLY) env->flags = ENV_FLAG_PS_USER | ENV_FLAG_FEN; cpu_alpha_store_fpcr(env, (uint64_t)(FPCR_INVD | FPCR_DZED | FPCR_OVFD diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 32dc7c1e69..8037744300 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -185,6 +185,7 @@ void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook, static void arm_set_default_fp_behaviours(float_status *s) { set_float_detect_tininess(float_tininess_before_rounding, s); + set_float_ftz_detection(float_ftz_before_rounding, s); set_float_2nan_prop_rule(float_2nan_prop_s_ab, s); set_float_3nan_prop_rule(float_3nan_prop_s_cab, s); set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, s); diff --git a/target/hppa/fpu_helper.c b/target/hppa/fpu_helper.c index 239c027ec5..8ff4b44804 100644 --- a/target/hppa/fpu_helper.c +++ b/target/hppa/fpu_helper.c @@ -67,6 +67,17 @@ void HELPER(loaded_fr0)(CPUHPPAState *env) set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status); /* Default NaN: sign bit clear, msb-1 frac bit set */ set_float_default_nan_pattern(0b00100000, &env->fp_status); + /* + * "PA-RISC 2.0 Architecture" says it is IMPDEF whether the flushing + * enabled by FPSR.D happens before or after rounding. We pick "before" + * for consistency with tininess detection. + */ + set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status); + /* + * TODO: "PA-RISC 2.0 Architecture" chapter 10 says that we should + * detect tininess before rounding, but we don't set that here so we + * get the default tininess after rounding. 
+ */ } void cpu_hppa_loaded_fr0(CPUHPPAState *env) diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c index de6d0b252e..f112c6c673 100644 --- a/target/i386/tcg/fpu_helper.c +++ b/target/i386/tcg/fpu_helper.c @@ -188,6 +188,14 @@ void cpu_init_fp_statuses(CPUX86State *env) set_float_default_nan_pattern(0b11000000, &env->fp_status); set_float_default_nan_pattern(0b11000000, &env->mmx_status); set_float_default_nan_pattern(0b11000000, &env->sse_status); + /* + * TODO: x86 does flush-to-zero detection after rounding (the SDM + * section 10.2.3.3 on the FTZ bit of MXCSR says that we flush + * when we detect underflow, which x86 does after rounding). + */ + set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status); + set_float_ftz_detection(float_ftz_before_rounding, &env->mmx_status); + set_float_ftz_detection(float_ftz_before_rounding, &env->sse_status); } static inline uint8_t save_exception_flags(CPUX86State *env) diff --git a/target/mips/fpu_helper.h b/target/mips/fpu_helper.h index 6ad1e466cf..08fb409390 100644 --- a/target/mips/fpu_helper.h +++ b/target/mips/fpu_helper.h @@ -84,6 +84,12 @@ static inline void fp_reset(CPUMIPSState *env) */ set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->active_fpu.fp_status); + /* + * TODO: the spec does't say clearly whether FTZ happens before + * or after rounding for normal FPU operations. + */ + set_float_ftz_detection(float_ftz_before_rounding, + &env->active_fpu.fp_status); } /* MSA */ diff --git a/target/mips/msa.c b/target/mips/msa.c index fc77bfc7b9..32c6acbcc5 100644 --- a/target/mips/msa.c +++ b/target/mips/msa.c @@ -48,6 +48,15 @@ void msa_reset(CPUMIPSState *env) /* tininess detected after rounding.*/ set_float_detect_tininess(float_tininess_after_rounding, &env->active_tc.msa_fp_status); + /* + * MSACSR.FS detects tiny results to flush to zero before rounding + * (per "MIPS Architecture for Programmers Volume IV-j: The MIPS64 SIMD + * Architecture Module, Revision 1.1" section 3.5.4), even though it + * detects tininess after rounding for underflow purposes (section 3.4.2 + * table 3.3). + */ + set_float_ftz_detection(float_ftz_before_rounding, + &env->active_tc.msa_fp_status); /* * According to MIPS specifications, if one of the two operands is diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index 8e49051254..062a6e85fb 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -7262,6 +7262,9 @@ static void ppc_cpu_reset_hold(Object *obj, ResetType type) /* tininess for underflow is detected before rounding */ set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status); + /* Similarly for flush-to-zero */ + set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status); + /* * PowerPC propagation rules: * 1. A if it sNaN or qNaN diff --git a/target/rx/cpu.c b/target/rx/cpu.c index 8c50c7a1bc..37a6fdd569 100644 --- a/target/rx/cpu.c +++ b/target/rx/cpu.c @@ -103,6 +103,14 @@ static void rx_cpu_reset_hold(Object *obj, ResetType type) set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status); /* Default NaN value: sign bit clear, set frac msb */ set_float_default_nan_pattern(0b01000000, &env->fp_status); + /* + * TODO: "RX Family RXv1 Instruction Set Architecture" is not 100% clear + * on whether flush-to-zero should happen before or after rounding, but + * section 1.3.2 says that it happens when underflow is detected, and + * implies that underflow is detected after rounding. So this may not + * be the correct setting. 
+ */ + set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status); } static ObjectClass *rx_cpu_class_by_name(const char *cpu_model) diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index 24a22724c6..4ac693d99b 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -130,6 +130,14 @@ static void superh_cpu_reset_hold(Object *obj, ResetType type) set_default_nan_mode(1, &env->fp_status); /* sign bit clear, set all frac bits other than msb */ set_float_default_nan_pattern(0b00111111, &env->fp_status); + /* + * TODO: "SH-4 CPU Core Architecture ADCS 7182230F" doesn't say whether + * it detects tininess before or after rounding. Section 6.4 is clear + * that flush-to-zero happens when the result underflows, though, so + * either this should be "detect ftz after rounding" or else we should + * be setting "detect tininess before rounding". + */ + set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status); } static void superh_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) diff --git a/target/tricore/helper.c b/target/tricore/helper.c index e8b0ec5161..9898752eb0 100644 --- a/target/tricore/helper.c +++ b/target/tricore/helper.c @@ -116,6 +116,7 @@ void fpu_set_state(CPUTriCoreState *env) set_flush_inputs_to_zero(1, &env->fp_status); set_flush_to_zero(1, &env->fp_status); set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status); + set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status); set_default_nan_mode(1, &env->fp_status); /* Default NaN pattern: sign bit clear, frac msb set */ set_float_default_nan_pattern(0b01000000, &env->fp_status); diff --git a/tests/fp/fp-bench.c b/tests/fp/fp-bench.c index eacb39b99c..d90f542ea2 100644 --- a/tests/fp/fp-bench.c +++ b/tests/fp/fp-bench.c @@ -496,6 +496,7 @@ static void run_bench(void) set_float_3nan_prop_rule(float_3nan_prop_s_cab, &soft_status); set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, &soft_status); set_float_default_nan_pattern(0b01000000, &soft_status); + set_float_ftz_detection(float_ftz_before_rounding, &soft_status); f = bench_funcs[operation][precision]; g_assert(f); From 7ea2b119f939ec408cc3fc28cb190343d28af427 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:09 +0000 Subject: [PATCH 05/68] target/arm: Define FPCR AH, FIZ, NEP bits The Armv8.7 FEAT_AFP feature defines three new control bits in the FPCR: * FPCR.AH: "alternate floating point mode"; this changes floating point behaviour in a variety of ways, including: - the sign of a default NaN is 1, not 0 - if FPCR.FZ is also 1, denormals detected after rounding with an unbounded exponent has been applied are flushed to zero - FPCR.FZ does not cause denormalized inputs to be flushed to zero - miscellaneous other corner-case behaviour changes * FPCR.FIZ: flush denormalized numbers to zero on input for most instructions * FPCR.NEP: makes scalar SIMD operations merge the result with higher vector elements in one of the source registers, instead of zeroing the higher elements of the destination This commit defines the new bits in the FPCR, and allows them to be read or written when FEAT_AFP is implemented. Actual behaviour changes will be implemented in subsequent commits. Note that these are the first FPCR bits which don't appear in the AArch32 FPSCR view of the register, and which share bit positions with FPSR bits. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/cpu-features.h | 5 +++++ target/arm/cpu.h | 3 +++ target/arm/vfp_helper.c | 11 ++++++++--- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h index 30302d6c5b..7bf24c506b 100644 --- a/target/arm/cpu-features.h +++ b/target/arm/cpu-features.h @@ -802,6 +802,11 @@ static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0; } +static inline bool isar_feature_aa64_afp(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, AFP) != 0; +} + static inline bool isar_feature_aa64_tidcp1(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, TIDCP1) != 0; diff --git a/target/arm/cpu.h b/target/arm/cpu.h index c2d2d99b46..1c91b1f50f 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -1714,6 +1714,9 @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val); */ /* FPCR bits */ +#define FPCR_FIZ (1 << 0) /* Flush Inputs to Zero (FEAT_AFP) */ +#define FPCR_AH (1 << 1) /* Alternate Handling (FEAT_AFP) */ +#define FPCR_NEP (1 << 2) /* SIMD scalar ops preserve elts (FEAT_AFP) */ #define FPCR_IOE (1 << 8) /* Invalid Operation exception trap enable */ #define FPCR_DZE (1 << 9) /* Divide by Zero exception trap enable */ #define FPCR_OFE (1 << 10) /* Overflow exception trap enable */ diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 3c8f3e6588..8c79ab4fc8 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -242,6 +242,9 @@ static void vfp_set_fpcr_masked(CPUARMState *env, uint32_t val, uint32_t mask) if (!cpu_isar_feature(any_fp16, cpu)) { val &= ~FPCR_FZ16; } + if (!cpu_isar_feature(aa64_afp, cpu)) { + val &= ~(FPCR_FIZ | FPCR_AH | FPCR_NEP); + } if (!cpu_isar_feature(aa64_ebf16, cpu)) { val &= ~FPCR_EBF; @@ -271,12 +274,14 @@ static void vfp_set_fpcr_masked(CPUARMState *env, uint32_t val, uint32_t mask) * We don't implement trapped exception handling, so the * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!) * - * The FPCR bits we keep in vfp.fpcr are AHP, DN, FZ, RMode, EBF - * and FZ16. Len, Stride and LTPSIZE we just handled. Store those bits + * The FPCR bits we keep in vfp.fpcr are AHP, DN, FZ, RMode, EBF, FZ16, + * FIZ, AH, and NEP. + * Len, Stride and LTPSIZE we just handled. Store those bits * there, and zero any of the other FPCR bits and the RES0 and RAZ/WI * bits. */ - val &= FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK | FPCR_FZ16 | FPCR_EBF; + val &= FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK | FPCR_FZ16 | + FPCR_EBF | FPCR_FIZ | FPCR_AH | FPCR_NEP; env->vfp.fpcr &= ~mask; env->vfp.fpcr |= val; } From c2e60204779616daf35ade1bc5ef31f1388e9892 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:10 +0000 Subject: [PATCH 06/68] target/arm: Implement FPCR.FIZ handling Part of FEAT_AFP is the new control bit FPCR.FIZ. This bit affects flushing of single and double precision denormal inputs to zero for AArch64 floating point instructions. (For half-precision, the existing FPCR.FZ16 control remains the only one.) FPCR.FIZ differs from FPCR.FZ in that if we flush an input denormal only because of FPCR.FIZ then we should *not* set the cumulative exception bit FPSR.IDC. FEAT_AFP also defines that in AArch64 the existing FPCR.FZ only applies when FPCR.AH is 0. 
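Expressed as a sketch (this mirrors the condition the diff below adds to vfp_set_fpcr_to_host(); 'fpcr' here stands for the FPCR value being written), the effective A64 input-flushing control becomes:

    /* flush A64 input denormals if FIZ is set, or if FZ is set while AH is clear */
    bool fitz_enabled = (fpcr & FPCR_FIZ) ||
                        (fpcr & (FPCR_FZ | FPCR_AH)) == FPCR_FZ;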
We can implement this by setting the "flush inputs to zero" state appropriately when FPCR is written, and by not reflecting the float_flag_input_denormal_flushed status flag into FPSR reads when it is the result only of FPCR.FIZ. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/vfp_helper.c | 60 ++++++++++++++++++++++++++++++++++------- 1 file changed, 50 insertions(+), 10 deletions(-) diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 8c79ab4fc8..30c170ecee 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -61,19 +61,31 @@ static inline uint32_t vfp_exceptbits_from_host(int host_bits) static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) { - uint32_t i = 0; + uint32_t a32_flags = 0, a64_flags = 0; - i |= get_float_exception_flags(&env->vfp.fp_status_a32); - i |= get_float_exception_flags(&env->vfp.fp_status_a64); - i |= get_float_exception_flags(&env->vfp.standard_fp_status); + a32_flags |= get_float_exception_flags(&env->vfp.fp_status_a32); + a32_flags |= get_float_exception_flags(&env->vfp.standard_fp_status); /* FZ16 does not generate an input denormal exception. */ - i |= (get_float_exception_flags(&env->vfp.fp_status_f16_a32) + a32_flags |= (get_float_exception_flags(&env->vfp.fp_status_f16_a32) & ~float_flag_input_denormal_flushed); - i |= (get_float_exception_flags(&env->vfp.fp_status_f16_a64) + a32_flags |= (get_float_exception_flags(&env->vfp.standard_fp_status_f16) & ~float_flag_input_denormal_flushed); - i |= (get_float_exception_flags(&env->vfp.standard_fp_status_f16) + + a64_flags |= get_float_exception_flags(&env->vfp.fp_status_a64); + a64_flags |= (get_float_exception_flags(&env->vfp.fp_status_f16_a64) & ~float_flag_input_denormal_flushed); - return vfp_exceptbits_from_host(i); + /* + * Flushing an input denormal *only* because FPCR.FIZ == 1 does + * not set FPSR.IDC; if FPCR.FZ is also set then this takes + * precedence and IDC is set (see the FPUnpackBase pseudocode). + * So squash it unless (FPCR.AH == 0 && FPCR.FZ == 1). + * We only do this for the a64 flags because FIZ has no effect + * on AArch32 even if it is set. + */ + if ((env->vfp.fpcr & (FPCR_FZ | FPCR_AH)) != FPCR_FZ) { + a64_flags &= ~float_flag_input_denormal_flushed; + } + return vfp_exceptbits_from_host(a32_flags | a64_flags); } static void vfp_clear_float_status_exc_flags(CPUARMState *env) @@ -91,6 +103,17 @@ static void vfp_clear_float_status_exc_flags(CPUARMState *env) set_float_exception_flags(0, &env->vfp.standard_fp_status_f16); } +static void vfp_sync_and_clear_float_status_exc_flags(CPUARMState *env) +{ + /* + * Synchronize any pending exception-flag information in the + * float_status values into env->vfp.fpsr, and then clear out + * the float_status data. 
+ */ + env->vfp.fpsr |= vfp_get_fpsr_from_host(env); + vfp_clear_float_status_exc_flags(env); +} + static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) { uint64_t changed = env->vfp.fpcr; @@ -130,9 +153,18 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) if (changed & FPCR_FZ) { bool ftz_enabled = val & FPCR_FZ; set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_a32); - set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_a32); set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_a64); - set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_a64); + /* FIZ is A64 only so FZ always makes A32 code flush inputs to zero */ + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_a32); + } + if (changed & (FPCR_FZ | FPCR_AH | FPCR_FIZ)) { + /* + * A64: Flush denormalized inputs to zero if FPCR.FIZ = 1, or + * both FPCR.AH = 0 and FPCR.FZ = 1. + */ + bool fitz_enabled = (val & FPCR_FIZ) || + (val & (FPCR_FZ | FPCR_AH)) == FPCR_FZ; + set_flush_inputs_to_zero(fitz_enabled, &env->vfp.fp_status_a64); } if (changed & FPCR_DN) { bool dnan_enabled = val & FPCR_DN; @@ -141,6 +173,14 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16_a32); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16_a64); } + /* + * If any bits changed that we look at in vfp_get_fpsr_from_host(), + * we must sync the float_status flags into vfp.fpsr now (under the + * old regime) before we update vfp.fpcr. + */ + if (changed & (FPCR_FZ | FPCR_AH | FPCR_FIZ)) { + vfp_sync_and_clear_float_status_exc_flags(env); + } } #else From b3d00d0a448b6ea5750da1a163aeead0287f6c05 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:11 +0000 Subject: [PATCH 07/68] target/arm: Adjust FP behaviour for FPCR.AH = 1 When FPCR.AH is set, various behaviours of AArch64 floating point operations which are controlled by softfloat config settings change: * tininess and ftz detection before/after rounding * NaN propagation order * result of 0 * Inf + NaN * default NaN value When the guest changes the value of the AH bit, switch these config settings on the fp_status_a64 and fp_status_f16_a64 float_status fields. This requires us to make the arm_set_default_fp_behaviours() function global, since we now need to call it from cpu.c and vfp_helper.c; we move it to vfp_helper.c so it can be next to the new arm_set_ah_fp_behaviours(). Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/cpu.c | 23 ---------------- target/arm/internals.h | 4 +++ target/arm/vfp_helper.c | 58 ++++++++++++++++++++++++++++++++++++++++- 3 files changed, 61 insertions(+), 24 deletions(-) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 8037744300..f95e6cf09e 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -169,29 +169,6 @@ void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook, QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node); } -/* - * Set the float_status behaviour to match the Arm defaults: - * * tininess-before-rounding - * * 2-input NaN propagation prefers SNaN over QNaN, and then - * operand A over operand B (see FPProcessNaNs() pseudocode) - * * 3-input NaN propagation prefers SNaN over QNaN, and then - * operand C over A over B (see FPProcessNaNs3() pseudocode, - * but note that for QEMU muladd is a * b + c, whereas for - * the pseudocode function the arguments are in the order c, a, b. 
- * * 0 * Inf + NaN returns the default NaN if the input NaN is quiet, - * and the input NaN if it is signalling - * * Default NaN has sign bit clear, msb frac bit set - */ -static void arm_set_default_fp_behaviours(float_status *s) -{ - set_float_detect_tininess(float_tininess_before_rounding, s); - set_float_ftz_detection(float_ftz_before_rounding, s); - set_float_2nan_prop_rule(float_2nan_prop_s_ab, s); - set_float_3nan_prop_rule(float_3nan_prop_s_cab, s); - set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, s); - set_float_default_nan_pattern(0b01000000, s); -} - static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque) { /* Reset a single ARMCPRegInfo register */ diff --git a/target/arm/internals.h b/target/arm/internals.h index 863a84edf8..98073acc27 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -1828,4 +1828,8 @@ uint64_t gt_virt_cnt_offset(CPUARMState *env); * all EL1" scope; this covers stage 1 and stage 2. */ int alle1_tlbmask(CPUARMState *env); + +/* Set the float_status behaviour to match the Arm defaults */ +void arm_set_default_fp_behaviours(float_status *s); + #endif diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 30c170ecee..ef85c186f1 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -22,15 +22,59 @@ #include "exec/helper-proto.h" #include "internals.h" #include "cpu-features.h" +#include "fpu/softfloat.h" #ifdef CONFIG_TCG #include "qemu/log.h" -#include "fpu/softfloat.h" #endif /* VFP support. We follow the convention used for VFP instructions: Single precision routines have a "s" suffix, double precision a "d" suffix. */ +/* + * Set the float_status behaviour to match the Arm defaults: + * * tininess-before-rounding + * * 2-input NaN propagation prefers SNaN over QNaN, and then + * operand A over operand B (see FPProcessNaNs() pseudocode) + * * 3-input NaN propagation prefers SNaN over QNaN, and then + * operand C over A over B (see FPProcessNaNs3() pseudocode, + * but note that for QEMU muladd is a * b + c, whereas for + * the pseudocode function the arguments are in the order c, a, b. 
+ * * 0 * Inf + NaN returns the default NaN if the input NaN is quiet, + * and the input NaN if it is signalling + * * Default NaN has sign bit clear, msb frac bit set + */ +void arm_set_default_fp_behaviours(float_status *s) +{ + set_float_detect_tininess(float_tininess_before_rounding, s); + set_float_ftz_detection(float_ftz_before_rounding, s); + set_float_2nan_prop_rule(float_2nan_prop_s_ab, s); + set_float_3nan_prop_rule(float_3nan_prop_s_cab, s); + set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, s); + set_float_default_nan_pattern(0b01000000, s); +} + +/* + * Set the float_status behaviour to match the FEAT_AFP + * FPCR.AH=1 requirements: + * * tininess-after-rounding + * * 2-input NaN propagation prefers the first NaN + * * 3-input NaN propagation prefers a over b over c + * * 0 * Inf + NaN always returns the input NaN and doesn't + * set Invalid for a QNaN + * * default NaN has sign bit set, msb frac bit set + */ +static void arm_set_ah_fp_behaviours(float_status *s) +{ + set_float_detect_tininess(float_tininess_after_rounding, s); + set_float_ftz_detection(float_ftz_after_rounding, s); + set_float_2nan_prop_rule(float_2nan_prop_ab, s); + set_float_3nan_prop_rule(float_3nan_prop_abc, s); + set_float_infzeronan_rule(float_infzeronan_dnan_never | + float_infzeronan_suppress_invalid, s); + set_float_default_nan_pattern(0b11000000, s); +} + #ifdef CONFIG_TCG /* Convert host exception flags to vfp form. */ @@ -173,6 +217,18 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16_a32); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16_a64); } + if (changed & FPCR_AH) { + bool ah_enabled = val & FPCR_AH; + + if (ah_enabled) { + /* Change behaviours for A64 FP operations */ + arm_set_ah_fp_behaviours(&env->vfp.fp_status_a64); + arm_set_ah_fp_behaviours(&env->vfp.fp_status_f16_a64); + } else { + arm_set_default_fp_behaviours(&env->vfp.fp_status_a64); + arm_set_default_fp_behaviours(&env->vfp.fp_status_f16_a64); + } + } /* * If any bits changed that we look at in vfp_get_fpsr_from_host(), * we must sync the float_status flags into vfp.fpsr now (under the From a4550b7be90ccbda06b58b2c965bc87d697ec130 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:12 +0000 Subject: [PATCH 08/68] target/arm: Adjust exception flag handling for AH = 1 When FPCR.AH = 1, some of the cumulative exception flags in the FPSR behave slightly differently for A64 operations: * IDC is set when a denormal input is used without flushing * IXC (Inexact) is set when an output denormal is flushed to zero Update vfp_get_fpsr_from_host() to do this. Note that because half-precision operations never set IDC, we now need to add float_flag_input_denormal_used to the set we mask out of fp_status_f16_a64. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/vfp_helper.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index ef85c186f1..62b86d56b8 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -78,7 +78,7 @@ static void arm_set_ah_fp_behaviours(float_status *s) #ifdef CONFIG_TCG /* Convert host exception flags to vfp form. 
*/ -static inline uint32_t vfp_exceptbits_from_host(int host_bits) +static inline uint32_t vfp_exceptbits_from_host(int host_bits, bool ah) { uint32_t target_bits = 0; @@ -100,6 +100,16 @@ static inline uint32_t vfp_exceptbits_from_host(int host_bits) if (host_bits & float_flag_input_denormal_flushed) { target_bits |= FPSR_IDC; } + /* + * With FPCR.AH, IDC is set when an input denormal is used, + * and flushing an output denormal to zero sets both IXC and UFC. + */ + if (ah && (host_bits & float_flag_input_denormal_used)) { + target_bits |= FPSR_IDC; + } + if (ah && (host_bits & float_flag_output_denormal_flushed)) { + target_bits |= FPSR_IXC; + } return target_bits; } @@ -117,7 +127,7 @@ static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) a64_flags |= get_float_exception_flags(&env->vfp.fp_status_a64); a64_flags |= (get_float_exception_flags(&env->vfp.fp_status_f16_a64) - & ~float_flag_input_denormal_flushed); + & ~(float_flag_input_denormal_flushed | float_flag_input_denormal_used)); /* * Flushing an input denormal *only* because FPCR.FIZ == 1 does * not set FPSR.IDC; if FPCR.FZ is also set then this takes @@ -129,7 +139,8 @@ static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) if ((env->vfp.fpcr & (FPCR_FZ | FPCR_AH)) != FPCR_FZ) { a64_flags &= ~float_flag_input_denormal_flushed; } - return vfp_exceptbits_from_host(a32_flags | a64_flags); + return vfp_exceptbits_from_host(a64_flags, env->vfp.fpcr & FPCR_AH) | + vfp_exceptbits_from_host(a32_flags, false); } static void vfp_clear_float_status_exc_flags(CPUARMState *env) From 731528d35e681eb7e70207f58b8dcce419c8d567 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:13 +0000 Subject: [PATCH 09/68] target/arm: Add FPCR.AH to tbflags We are going to need to generate different code in some cases when FPCR.AH is 1. For example: * Floating point neg and abs must not flip the sign bit of NaNs * some insns (FRECPE, FRECPS, FRECPX, FRSQRTE, FRSQRTS, and various BFCVT and BFM bfloat16 ops) need to use a different float_status to the usual one Encode FPCR.AH into the A64 tbflags, so we can refer to it at translate time. Because we now have a bit in FPCR that affects codegen, we can't mark the AArch64 FPCR register as being SUPPRESS_TB_END any more; writes to it will now end the TB and trigger a regeneration of hflags. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/cpu.h | 1 + target/arm/helper.c | 2 +- target/arm/tcg/hflags.c | 4 ++++ target/arm/tcg/translate-a64.c | 1 + target/arm/tcg/translate.h | 2 ++ 5 files changed, 9 insertions(+), 1 deletion(-) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 1c91b1f50f..a71f848cc5 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -3198,6 +3198,7 @@ FIELD(TBFLAG_A64, NV2, 34, 1) FIELD(TBFLAG_A64, NV2_MEM_E20, 35, 1) /* Set if FEAT_NV2 RAM accesses are big-endian */ FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1) +FIELD(TBFLAG_A64, AH, 37, 1) /* FPCR.AH */ /* * Helpers for using the above. 
Note that only the A64 accessors use diff --git a/target/arm/helper.c b/target/arm/helper.c index 40bdfc851a..7d95eae997 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -4848,7 +4848,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, { .name = "FPCR", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, - .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, + .access = PL0_RW, .type = ARM_CP_FPU, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, { .name = "FPSR", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c index f03977b4b0..b3a78564ec 100644 --- a/target/arm/tcg/hflags.c +++ b/target/arm/tcg/hflags.c @@ -404,6 +404,10 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx)); } + if (env->vfp.fpcr & FPCR_AH) { + DP_TBFLAG_A64(flags, AH, 1); + } + return rebuild_hflags_common(env, fp_el, mmu_idx, flags); } diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index d6ac2ed418..be3f4489e5 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -9655,6 +9655,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, dc->nv2 = EX_TBFLAG_A64(tb_flags, NV2); dc->nv2_mem_e20 = EX_TBFLAG_A64(tb_flags, NV2_MEM_E20); dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE); + dc->fpcr_ah = EX_TBFLAG_A64(tb_flags, AH); dc->vec_len = 0; dc->vec_stride = 0; dc->cp_regs = arm_cpu->cp_regs; diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index 084ee63d99..1fc4fdd779 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -155,6 +155,8 @@ typedef struct DisasContext { bool nv2_mem_e20; /* True if NV2 enabled and NV2 RAM accesses are big-endian */ bool nv2_mem_be; + /* True if FPCR.AH is 1 (alternate floating point handling) */ + bool fpcr_ah; /* * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. * < 0, set by the current instruction. From 1828053990e3666a51ec08b6e23d1d70a82a68f9 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:14 +0000 Subject: [PATCH 10/68] target/arm: Set up float_status to use for FPCR.AH=1 behaviour When FPCR.AH is 1, the behaviour of some instructions changes: * AdvSIMD BFCVT, BFCVTN, BFCVTN2, BFMLALB, BFMLALT * SVE BFCVT, BFCVTNT, BFMLALB, BFMLALT, BFMLSLB, BFMLSLT * SME BFCVT, BFCVTN, BFMLAL, BFMLSL (these are all in SME2 which QEMU does not yet implement) * FRECPE, FRECPS, FRECPX, FRSQRTE, FRSQRTS The behaviour change is: * the instructions do not update the FPSR cumulative exception flags * trapped floating point exceptions are disabled (a no-op for QEMU, which doesn't implement FPCR.{IDE,IXE,UFE,OFE,DZE,IOE}) * rounding is always round-to-nearest-even regardless of FPCR.RMode * denormalized inputs and outputs are always flushed to zero, as if FPCR.{FZ,FIZ} is {1,1} * FPCR.FZ16 is still honoured for half-precision inputs (See the Arm ARM DDI0487L.a section A1.5.9.) We can provide all these behaviours with another pair of float_status fields which we use only for these insns, when FPCR.AH is 1. These float_status fields will always have: * flush_to_zero and flush_inputs_to_zero set for the non-F16 field * rounding mode set to round-to-nearest-even and so the only FPCR fields they need to honour are DN and FZ16. 
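As a rough sketch (the real code is in the CPU reset path in the diff below; the set_* functions are existing softfloat helpers), setting up the new non-F16 status amounts to:

    /* fixed behaviour for the FPCR.AH=1 float_status (sketch) */
    arm_set_ah_fp_behaviours(&env->vfp.ah_fp_status);
    set_flush_to_zero(1, &env->vfp.ah_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.ah_fp_status);
    /* the F16 variant is not forced to flush, so that FPCR.FZ16 can still be honoured */
    arm_set_ah_fp_behaviours(&env->vfp.ah_fp_status_f16);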
In this commit we only define the new fp_status fields and give them the required behaviour when FPSR is updated. In subsequent commits we will arrange to use this new fp_status field for the instructions that should be affected by FPCR.AH in this way. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/cpu.c | 4 ++++ target/arm/cpu.h | 15 +++++++++++++++ target/arm/internals.h | 2 ++ target/arm/tcg/translate.h | 14 ++++++++++++++ target/arm/vfp_helper.c | 13 ++++++++++++- 5 files changed, 47 insertions(+), 1 deletion(-) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index f95e6cf09e..1307ee34ce 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -556,6 +556,10 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) arm_set_default_fp_behaviours(&env->vfp.fp_status_f16_a32); arm_set_default_fp_behaviours(&env->vfp.fp_status_f16_a64); arm_set_default_fp_behaviours(&env->vfp.standard_fp_status_f16); + arm_set_ah_fp_behaviours(&env->vfp.ah_fp_status); + set_flush_to_zero(1, &env->vfp.ah_fp_status); + set_flush_inputs_to_zero(1, &env->vfp.ah_fp_status); + arm_set_ah_fp_behaviours(&env->vfp.ah_fp_status_f16); #ifndef CONFIG_USER_ONLY if (kvm_enabled()) { diff --git a/target/arm/cpu.h b/target/arm/cpu.h index a71f848cc5..51fc50980b 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -640,6 +640,13 @@ typedef struct CPUArchState { * standard_fp_status : the ARM "Standard FPSCR Value" * standard_fp_status_fp16 : used for half-precision * calculations with the ARM "Standard FPSCR Value" + * ah_fp_status: used for the A64 insns which change behaviour + * when FPCR.AH == 1 (bfloat16 conversions and multiplies, + * and the reciprocal and square root estimate/step insns) + * ah_fp_status_f16: used for the A64 insns which change behaviour + * when FPCR.AH == 1 (bfloat16 conversions and multiplies, + * and the reciprocal and square root estimate/step insns); + * for half-precision * * Half-precision operations are governed by a separate * flush-to-zero control bit in FPSCR:FZ16. We pass a separate @@ -654,6 +661,12 @@ typedef struct CPUArchState { * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than * using a fixed value for it. * + * The ah_fp_status is needed because some insns have different + * behaviour when FPCR.AH == 1: they don't update cumulative + * exception flags, they act like FPCR.{FZ,FIZ} = {1,1} and + * they ignore FPCR.RMode. But they don't ignore FPCR.FZ16, + * which means we need an ah_fp_status_f16 as well. + * * To avoid having to transfer exception bits around, we simply * say that the FPSCR cumulative exception flags are the logical * OR of the flags in the four fp statuses. 
This relies on the @@ -666,6 +679,8 @@ typedef struct CPUArchState { float_status fp_status_f16_a64; float_status standard_fp_status; float_status standard_fp_status_f16; + float_status ah_fp_status; + float_status ah_fp_status_f16; uint64_t zcr_el[4]; /* ZCR_EL[1-3] */ uint64_t smcr_el[4]; /* SMCR_EL[1-3] */ diff --git a/target/arm/internals.h b/target/arm/internals.h index 98073acc27..b318734145 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -1831,5 +1831,7 @@ int alle1_tlbmask(CPUARMState *env); /* Set the float_status behaviour to match the Arm defaults */ void arm_set_default_fp_behaviours(float_status *s); +/* Set the float_status behaviour to match Arm FPCR.AH=1 behaviour */ +void arm_set_ah_fp_behaviours(float_status *s); #endif diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index 1fc4fdd779..3be3fcbe72 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -676,6 +676,8 @@ typedef enum ARMFPStatusFlavour { FPST_A64, FPST_A32_F16, FPST_A64_F16, + FPST_AH, + FPST_AH_F16, FPST_STD, FPST_STD_F16, } ARMFPStatusFlavour; @@ -696,6 +698,12 @@ typedef enum ARMFPStatusFlavour { * for AArch32 operations controlled by the FPCR where FPCR.FZ16 is to be used * FPST_A64_F16 * for AArch64 operations controlled by the FPCR where FPCR.FZ16 is to be used + * FPST_AH: + * for AArch64 operations which change behaviour when AH=1 (specifically, + * bfloat16 conversions and multiplies, and the reciprocal and square root + * estimate/step insns) + * FPST_AH_F16: + * ditto, but for half-precision operations * FPST_STD * for A32/T32 Neon operations using the "standard FPSCR value" * FPST_STD_F16 @@ -719,6 +727,12 @@ static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour) case FPST_A64_F16: offset = offsetof(CPUARMState, vfp.fp_status_f16_a64); break; + case FPST_AH: + offset = offsetof(CPUARMState, vfp.ah_fp_status); + break; + case FPST_AH_F16: + offset = offsetof(CPUARMState, vfp.ah_fp_status_f16); + break; case FPST_STD: offset = offsetof(CPUARMState, vfp.standard_fp_status); break; diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 62b86d56b8..5c370e263c 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -64,7 +64,7 @@ void arm_set_default_fp_behaviours(float_status *s) * set Invalid for a QNaN * * default NaN has sign bit set, msb frac bit set */ -static void arm_set_ah_fp_behaviours(float_status *s) +void arm_set_ah_fp_behaviours(float_status *s) { set_float_detect_tininess(float_tininess_after_rounding, s); set_float_ftz_detection(float_ftz_after_rounding, s); @@ -128,6 +128,11 @@ static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) a64_flags |= get_float_exception_flags(&env->vfp.fp_status_a64); a64_flags |= (get_float_exception_flags(&env->vfp.fp_status_f16_a64) & ~(float_flag_input_denormal_flushed | float_flag_input_denormal_used)); + /* + * We do not merge in flags from ah_fp_status or ah_fp_status_f16, because + * they are used for insns that must not set the cumulative exception bits. 
+ */ + /* * Flushing an input denormal *only* because FPCR.FIZ == 1 does * not set FPSR.IDC; if FPCR.FZ is also set then this takes @@ -156,6 +161,8 @@ static void vfp_clear_float_status_exc_flags(CPUARMState *env) set_float_exception_flags(0, &env->vfp.fp_status_f16_a64); set_float_exception_flags(0, &env->vfp.standard_fp_status); set_float_exception_flags(0, &env->vfp.standard_fp_status_f16); + set_float_exception_flags(0, &env->vfp.ah_fp_status); + set_float_exception_flags(0, &env->vfp.ah_fp_status_f16); } static void vfp_sync_and_clear_float_status_exc_flags(CPUARMState *env) @@ -201,9 +208,11 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a32); set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a64); set_flush_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16); + set_flush_to_zero(ftz_enabled, &env->vfp.ah_fp_status_f16); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a32); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a64); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16); + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.ah_fp_status_f16); } if (changed & FPCR_FZ) { bool ftz_enabled = val & FPCR_FZ; @@ -227,6 +236,8 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_a64); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16_a32); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16_a64); + set_default_nan_mode(dnan_enabled, &env->vfp.ah_fp_status); + set_default_nan_mode(dnan_enabled, &env->vfp.ah_fp_status_f16); } if (changed & FPCR_AH) { bool ah_enabled = val & FPCR_AH; From a8559e0e5306c7faa76de8e571d1edf92f436df3 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:15 +0000 Subject: [PATCH 11/68] target/arm: Use FPST_FPCR_AH for FRECPE, FRECPS, FRECPX, FRSQRTE, FRSQRTS For the instructions FRECPE, FRECPS, FRECPX, FRSQRTE, FRSQRTS, use FPST_FPCR_AH or FPST_FPCR_AH_F16 when FPCR.AH is 1, so that they get the required behaviour changes. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 119 +++++++++++++++++++++++++-------- target/arm/tcg/translate-a64.h | 13 ++++ target/arm/tcg/translate-sve.c | 30 ++++++--- 3 files changed, 127 insertions(+), 35 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index be3f4489e5..1556980c76 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -712,10 +712,10 @@ static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd, * an out-of-line helper. */ static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn, - int rm, bool is_fp16, int data, + int rm, ARMFPStatusFlavour fpsttype, int data, gen_helper_gvec_3_ptr *fn) { - TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? 
FPST_A64_F16 : FPST_A64); + TCGv_ptr fpst = fpstatus_ptr(fpsttype); tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), fpst, @@ -5025,14 +5025,16 @@ typedef struct FPScalar { void (*gen_d)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr); } FPScalar; -static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f) +static bool do_fp3_scalar_with_fpsttype(DisasContext *s, arg_rrr_e *a, + const FPScalar *f, + ARMFPStatusFlavour fpsttype) { switch (a->esz) { case MO_64: if (fp_access_check(s)) { TCGv_i64 t0 = read_fp_dreg(s, a->rn); TCGv_i64 t1 = read_fp_dreg(s, a->rm); - f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_A64)); + f->gen_d(t0, t0, t1, fpstatus_ptr(fpsttype)); write_fp_dreg(s, a->rd, t0); } break; @@ -5040,7 +5042,7 @@ static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f) if (fp_access_check(s)) { TCGv_i32 t0 = read_fp_sreg(s, a->rn); TCGv_i32 t1 = read_fp_sreg(s, a->rm); - f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_A64)); + f->gen_s(t0, t0, t1, fpstatus_ptr(fpsttype)); write_fp_sreg(s, a->rd, t0); } break; @@ -5051,7 +5053,7 @@ static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f) if (fp_access_check(s)) { TCGv_i32 t0 = read_fp_hreg(s, a->rn); TCGv_i32 t1 = read_fp_hreg(s, a->rm); - f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_A64_F16)); + f->gen_h(t0, t0, t1, fpstatus_ptr(fpsttype)); write_fp_sreg(s, a->rd, t0); } break; @@ -5061,6 +5063,18 @@ static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f) return true; } +static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f) +{ + return do_fp3_scalar_with_fpsttype(s, a, f, + a->esz == MO_16 ? + FPST_A64_F16 : FPST_A64); +} + +static bool do_fp3_scalar_ah(DisasContext *s, arg_rrr_e *a, const FPScalar *f) +{ + return do_fp3_scalar_with_fpsttype(s, a, f, select_ah_fpst(s, a->esz)); +} + static const FPScalar f_scalar_fadd = { gen_helper_vfp_addh, gen_helper_vfp_adds, @@ -5214,14 +5228,14 @@ static const FPScalar f_scalar_frecps = { gen_helper_recpsf_f32, gen_helper_recpsf_f64, }; -TRANS(FRECPS_s, do_fp3_scalar, a, &f_scalar_frecps) +TRANS(FRECPS_s, do_fp3_scalar_ah, a, &f_scalar_frecps) static const FPScalar f_scalar_frsqrts = { gen_helper_rsqrtsf_f16, gen_helper_rsqrtsf_f32, gen_helper_rsqrtsf_f64, }; -TRANS(FRSQRTS_s, do_fp3_scalar, a, &f_scalar_frsqrts) +TRANS(FRSQRTS_s, do_fp3_scalar_ah, a, &f_scalar_frsqrts) static bool do_fcmp0_s(DisasContext *s, arg_rr_e *a, const FPScalar *f, bool swap) @@ -5472,8 +5486,10 @@ TRANS(CMHS_s, do_cmop_d, a, TCG_COND_GEU) TRANS(CMEQ_s, do_cmop_d, a, TCG_COND_EQ) TRANS(CMTST_s, do_cmop_d, a, TCG_COND_TSTNE) -static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a, int data, - gen_helper_gvec_3_ptr * const fns[3]) +static bool do_fp3_vector_with_fpsttype(DisasContext *s, arg_qrrr_e *a, + int data, + gen_helper_gvec_3_ptr * const fns[3], + ARMFPStatusFlavour fpsttype) { MemOp esz = a->esz; int check = fp_access_check_vector_hsd(s, a->q, esz); @@ -5482,11 +5498,26 @@ static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a, int data, return check == 0; } - gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm, - esz == MO_16, data, fns[esz - 1]); + gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm, fpsttype, + data, fns[esz - 1]); return true; } +static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a, int data, + gen_helper_gvec_3_ptr * const fns[3]) +{ + return do_fp3_vector_with_fpsttype(s, a, data, fns, + a->esz == MO_16 ? 
+ FPST_A64_F16 : FPST_A64); +} + +static bool do_fp3_vector_ah(DisasContext *s, arg_qrrr_e *a, int data, + gen_helper_gvec_3_ptr * const f[3]) +{ + return do_fp3_vector_with_fpsttype(s, a, data, f, + select_ah_fpst(s, a->esz)); +} + static gen_helper_gvec_3_ptr * const f_vector_fadd[3] = { gen_helper_gvec_fadd_h, gen_helper_gvec_fadd_s, @@ -5611,14 +5642,14 @@ static gen_helper_gvec_3_ptr * const f_vector_frecps[3] = { gen_helper_gvec_recps_s, gen_helper_gvec_recps_d, }; -TRANS(FRECPS_v, do_fp3_vector, a, 0, f_vector_frecps) +TRANS(FRECPS_v, do_fp3_vector_ah, a, 0, f_vector_frecps) static gen_helper_gvec_3_ptr * const f_vector_frsqrts[3] = { gen_helper_gvec_rsqrts_h, gen_helper_gvec_rsqrts_s, gen_helper_gvec_rsqrts_d, }; -TRANS(FRSQRTS_v, do_fp3_vector, a, 0, f_vector_frsqrts) +TRANS(FRSQRTS_v, do_fp3_vector_ah, a, 0, f_vector_frsqrts) static gen_helper_gvec_3_ptr * const f_vector_faddp[3] = { gen_helper_gvec_faddp_h, @@ -6374,7 +6405,8 @@ static bool do_fp3_vector_idx(DisasContext *s, arg_qrrx_e *a, } gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm, - esz == MO_16, a->idx, fns[esz - 1]); + esz == MO_16 ? FPST_A64_F16 : FPST_A64, + a->idx, fns[esz - 1]); return true; } @@ -8383,8 +8415,9 @@ typedef struct FPScalar1 { void (*gen_d)(TCGv_i64, TCGv_i64, TCGv_ptr); } FPScalar1; -static bool do_fp1_scalar(DisasContext *s, arg_rr_e *a, - const FPScalar1 *f, int rmode) +static bool do_fp1_scalar_with_fpsttype(DisasContext *s, arg_rr_e *a, + const FPScalar1 *f, int rmode, + ARMFPStatusFlavour fpsttype) { TCGv_i32 tcg_rmode = NULL; TCGv_ptr fpst; @@ -8396,7 +8429,7 @@ static bool do_fp1_scalar(DisasContext *s, arg_rr_e *a, return check == 0; } - fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); + fpst = fpstatus_ptr(fpsttype); if (rmode >= 0) { tcg_rmode = gen_set_rmode(rmode, fpst); } @@ -8427,6 +8460,20 @@ static bool do_fp1_scalar(DisasContext *s, arg_rr_e *a, return true; } +static bool do_fp1_scalar(DisasContext *s, arg_rr_e *a, + const FPScalar1 *f, int rmode) +{ + return do_fp1_scalar_with_fpsttype(s, a, f, rmode, + a->esz == MO_16 ? 
+ FPST_A64_F16 : FPST_A64); +} + +static bool do_fp1_scalar_ah(DisasContext *s, arg_rr_e *a, + const FPScalar1 *f, int rmode) +{ + return do_fp1_scalar_with_fpsttype(s, a, f, rmode, select_ah_fpst(s, a->esz)); +} + static const FPScalar1 f_scalar_fsqrt = { gen_helper_vfp_sqrth, gen_helper_vfp_sqrts, @@ -8481,21 +8528,21 @@ static const FPScalar1 f_scalar_frecpe = { gen_helper_recpe_f32, gen_helper_recpe_f64, }; -TRANS(FRECPE_s, do_fp1_scalar, a, &f_scalar_frecpe, -1) +TRANS(FRECPE_s, do_fp1_scalar_ah, a, &f_scalar_frecpe, -1) static const FPScalar1 f_scalar_frecpx = { gen_helper_frecpx_f16, gen_helper_frecpx_f32, gen_helper_frecpx_f64, }; -TRANS(FRECPX_s, do_fp1_scalar, a, &f_scalar_frecpx, -1) +TRANS(FRECPX_s, do_fp1_scalar_ah, a, &f_scalar_frecpx, -1) static const FPScalar1 f_scalar_frsqrte = { gen_helper_rsqrte_f16, gen_helper_rsqrte_f32, gen_helper_rsqrte_f64, }; -TRANS(FRSQRTE_s, do_fp1_scalar, a, &f_scalar_frsqrte, -1) +TRANS(FRSQRTE_s, do_fp1_scalar_ah, a, &f_scalar_frsqrte, -1) static bool trans_FCVT_s_ds(DisasContext *s, arg_rr *a) { @@ -9350,9 +9397,10 @@ TRANS_FEAT(FRINT64Z_v, aa64_frint, do_fp1_vector, a, &f_scalar_frint64, FPROUNDING_ZERO) TRANS_FEAT(FRINT64X_v, aa64_frint, do_fp1_vector, a, &f_scalar_frint64, -1) -static bool do_gvec_op2_fpst(DisasContext *s, MemOp esz, bool is_q, - int rd, int rn, int data, - gen_helper_gvec_2_ptr * const fns[3]) +static bool do_gvec_op2_fpst_with_fpsttype(DisasContext *s, MemOp esz, + bool is_q, int rd, int rn, int data, + gen_helper_gvec_2_ptr * const fns[3], + ARMFPStatusFlavour fpsttype) { int check = fp_access_check_vector_hsd(s, is_q, esz); TCGv_ptr fpst; @@ -9361,7 +9409,7 @@ static bool do_gvec_op2_fpst(DisasContext *s, MemOp esz, bool is_q, return check == 0; } - fpst = fpstatus_ptr(esz == MO_16 ? FPST_A64_F16 : FPST_A64); + fpst = fpstatus_ptr(fpsttype); tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), fpst, is_q ? 16 : 8, vec_full_reg_size(s), @@ -9369,6 +9417,23 @@ static bool do_gvec_op2_fpst(DisasContext *s, MemOp esz, bool is_q, return true; } +static bool do_gvec_op2_fpst(DisasContext *s, MemOp esz, bool is_q, + int rd, int rn, int data, + gen_helper_gvec_2_ptr * const fns[3]) +{ + return do_gvec_op2_fpst_with_fpsttype(s, esz, is_q, rd, rn, data, fns, + esz == MO_16 ? 
FPST_A64_F16 : + FPST_A64); +} + +static bool do_gvec_op2_ah_fpst(DisasContext *s, MemOp esz, bool is_q, + int rd, int rn, int data, + gen_helper_gvec_2_ptr * const fns[3]) +{ + return do_gvec_op2_fpst_with_fpsttype(s, esz, is_q, rd, rn, data, + fns, select_ah_fpst(s, esz)); +} + static gen_helper_gvec_2_ptr * const f_scvtf_v[] = { gen_helper_gvec_vcvt_sh, gen_helper_gvec_vcvt_sf, @@ -9478,14 +9543,14 @@ static gen_helper_gvec_2_ptr * const f_frecpe[] = { gen_helper_gvec_frecpe_s, gen_helper_gvec_frecpe_d, }; -TRANS(FRECPE_v, do_gvec_op2_fpst, a->esz, a->q, a->rd, a->rn, 0, f_frecpe) +TRANS(FRECPE_v, do_gvec_op2_ah_fpst, a->esz, a->q, a->rd, a->rn, 0, f_frecpe) static gen_helper_gvec_2_ptr * const f_frsqrte[] = { gen_helper_gvec_frsqrte_h, gen_helper_gvec_frsqrte_s, gen_helper_gvec_frsqrte_d, }; -TRANS(FRSQRTE_v, do_gvec_op2_fpst, a->esz, a->q, a->rd, a->rn, 0, f_frsqrte) +TRANS(FRSQRTE_v, do_gvec_op2_ah_fpst, a->esz, a->q, a->rd, a->rn, 0, f_frsqrte) static bool trans_FCVTL_v(DisasContext *s, arg_qrr_e *a) { diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h index 0fcf7cb63a..7d3b59ccd9 100644 --- a/target/arm/tcg/translate-a64.h +++ b/target/arm/tcg/translate-a64.h @@ -185,6 +185,19 @@ static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno) return ret; } +/* + * Return the ARMFPStatusFlavour to use based on element size and + * whether FPCR.AH is set. + */ +static inline ARMFPStatusFlavour select_ah_fpst(DisasContext *s, MemOp esz) +{ + if (s->fpcr_ah) { + return esz == MO_16 ? FPST_AH_F16 : FPST_AH; + } else { + return esz == MO_16 ? FPST_A64_F16 : FPST_A64; + } +} + bool disas_sve(DisasContext *, uint32_t); bool disas_sme(DisasContext *, uint32_t); diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index e1788330aa..c084bb58e7 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -137,11 +137,11 @@ static bool gen_gvec_fpst_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, return true; } -static bool gen_gvec_fpst_arg_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, - arg_rr_esz *a, int data) +static bool gen_gvec_fpst_ah_arg_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, + arg_rr_esz *a, int data) { return gen_gvec_fpst_zz(s, fn, a->rd, a->rn, data, - a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); + select_ah_fpst(s, a->esz)); } /* Invoke an out-of-line helper on 3 Zregs. */ @@ -194,6 +194,13 @@ static bool gen_gvec_fpst_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); } +static bool gen_gvec_fpst_ah_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, + arg_rrr_esz *a, int data) +{ + return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data, + select_ah_fpst(s, a->esz)); +} + /* Invoke an out-of-line helper on 4 Zregs. 
*/ static bool gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn, int rd, int rn, int rm, int ra, int data) @@ -3597,13 +3604,13 @@ static gen_helper_gvec_2_ptr * const frecpe_fns[] = { NULL, gen_helper_gvec_frecpe_h, gen_helper_gvec_frecpe_s, gen_helper_gvec_frecpe_d, }; -TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_arg_zz, frecpe_fns[a->esz], a, 0) +TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_ah_arg_zz, frecpe_fns[a->esz], a, 0) static gen_helper_gvec_2_ptr * const frsqrte_fns[] = { NULL, gen_helper_gvec_frsqrte_h, gen_helper_gvec_frsqrte_s, gen_helper_gvec_frsqrte_d, }; -TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_arg_zz, frsqrte_fns[a->esz], a, 0) +TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_ah_arg_zz, frsqrte_fns[a->esz], a, 0) /* *** SVE Floating Point Compare with Zero Group @@ -3707,11 +3714,18 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) }; \ TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_arg_zzz, name##_fns[a->esz], a, 0) +#define DO_FP3_AH(NAME, name) \ + static gen_helper_gvec_3_ptr * const name##_fns[4] = { \ + NULL, gen_helper_gvec_##name##_h, \ + gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \ + }; \ + TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_ah_arg_zzz, name##_fns[a->esz], a, 0) + DO_FP3(FADD_zzz, fadd) DO_FP3(FSUB_zzz, fsub) DO_FP3(FMUL_zzz, fmul) -DO_FP3(FRECPS, recps) -DO_FP3(FRSQRTS, rsqrts) +DO_FP3_AH(FRECPS, recps) +DO_FP3_AH(FRSQRTS, rsqrts) #undef DO_FP3 @@ -3993,7 +4007,7 @@ static gen_helper_gvec_3_ptr * const frecpx_fns[] = { gen_helper_sve_frecpx_s, gen_helper_sve_frecpx_d, }; TRANS_FEAT(FRECPX, aa64_sve, gen_gvec_fpst_arg_zpz, frecpx_fns[a->esz], - a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) + a, 0, select_ah_fpst(s, a->esz)) static gen_helper_gvec_3_ptr * const fsqrt_fns[] = { NULL, gen_helper_sve_fsqrt_h, From b6295046e6b4bc5cb070df702ddb10e3672ac6ff Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:16 +0000 Subject: [PATCH 12/68] target/arm: Use FPST_FPCR_AH for BFCVT* insns When FPCR.AH is 1, use FPST_FPCR_AH for: * AdvSIMD BFCVT, BFCVTN, BFCVTN2 * SVE BFCVT, BFCVTNT so that they get the required behaviour changes. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 27 +++++++++++++++++++++------ target/arm/tcg/translate-sve.c | 6 ++++-- 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 1556980c76..f3028463e3 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -8503,7 +8503,7 @@ TRANS(FRINTX_s, do_fp1_scalar, a, &f_scalar_frintx, -1) static const FPScalar1 f_scalar_bfcvt = { .gen_s = gen_helper_bfcvt, }; -TRANS_FEAT(BFCVT_s, aa64_bf16, do_fp1_scalar, a, &f_scalar_bfcvt, -1) +TRANS_FEAT(BFCVT_s, aa64_bf16, do_fp1_scalar_ah, a, &f_scalar_bfcvt, -1) static const FPScalar1 f_scalar_frint32 = { NULL, @@ -9279,12 +9279,27 @@ static void gen_bfcvtn_hs(TCGv_i64 d, TCGv_i64 n) tcg_gen_extu_i32_i64(d, tmp); } -static ArithOneOp * const f_vector_bfcvtn[] = { - NULL, - gen_bfcvtn_hs, - NULL, +static void gen_bfcvtn_ah_hs(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_ptr fpst = fpstatus_ptr(FPST_AH); + TCGv_i32 tmp = tcg_temp_new_i32(); + gen_helper_bfcvt_pair(tmp, n, fpst); + tcg_gen_extu_i32_i64(d, tmp); +} + +static ArithOneOp * const f_vector_bfcvtn[2][3] = { + { + NULL, + gen_bfcvtn_hs, + NULL, + }, { + NULL, + gen_bfcvtn_ah_hs, + NULL, + } }; -TRANS_FEAT(BFCVTN_v, aa64_bf16, do_2misc_narrow_vector, a, f_vector_bfcvtn) +TRANS_FEAT(BFCVTN_v, aa64_bf16, do_2misc_narrow_vector, a, + f_vector_bfcvtn[s->fpcr_ah]) static bool trans_SHLL_v(DisasContext *s, arg_qrr_e *a) { diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index c084bb58e7..be2b5528ba 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -3904,7 +3904,8 @@ TRANS_FEAT(FCVT_hs, aa64_sve, gen_gvec_fpst_arg_zpz, gen_helper_sve_fcvt_hs, a, 0, FPST_A64_F16) TRANS_FEAT(BFCVT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz, - gen_helper_sve_bfcvt, a, 0, FPST_A64) + gen_helper_sve_bfcvt, a, 0, + s->fpcr_ah ? FPST_AH : FPST_A64) TRANS_FEAT(FCVT_dh, aa64_sve, gen_gvec_fpst_arg_zpz, gen_helper_sve_fcvt_dh, a, 0, FPST_A64) @@ -7054,7 +7055,8 @@ TRANS_FEAT(FCVTNT_ds, aa64_sve2, gen_gvec_fpst_arg_zpz, gen_helper_sve2_fcvtnt_ds, a, 0, FPST_A64) TRANS_FEAT(BFCVTNT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz, - gen_helper_sve_bfcvtnt, a, 0, FPST_A64) + gen_helper_sve_bfcvtnt, a, 0, + s->fpcr_ah ? FPST_AH : FPST_A64) TRANS_FEAT(FCVTLT_hs, aa64_sve2, gen_gvec_fpst_arg_zpz, gen_helper_sve2_fcvtlt_hs, a, 0, FPST_A64) From 8a5a2b943e6a96487d5cf000b39aff8a4faf0821 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:17 +0000 Subject: [PATCH 13/68] target/arm: Use FPST_FPCR_AH for BFMLAL*, BFMLSL* insns When FPCR.AH is 1, use FPST_FPCR_AH for: * AdvSIMD BFMLALB, BFMLALT * SVE BFMLALB, BFMLALT, BFMLSLB, BFMLSLT so that they get the required behaviour changes. We do this by making gen_gvec_op4_fpst() take an ARMFPStatusFlavour rather than a bool is_fp16; existing callsites now select FPST_FPCR_F16_A64 vs FPST_FPCR_A64 themselves rather than passing in the boolean. 
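To illustrate the mechanical change at the callsites (this mirrors the FCMLA hunk below, it is not new code), a caller that used to pass the is_fp16 boolean now passes the flavour directly:

    /* before */
    gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd,
                      a->esz == MO_16, a->rot, fn[a->esz]);
    /* after */
    gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd,
                      a->esz == MO_16 ? FPST_A64_F16 : FPST_A64,
                      a->rot, fn[a->esz]);

The BFMLAL callsites instead pass s->fpcr_ah ? FPST_AH : FPST_A64, which is the point of the change.
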
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 20 +++++++++++++------- target/arm/tcg/translate-sve.c | 6 ++++-- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index f3028463e3..f9b62a2c4f 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -754,10 +754,11 @@ static void gen_gvec_op4_env(DisasContext *s, bool is_q, int rd, int rn, * an out-of-line helper. */ static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn, - int rm, int ra, bool is_fp16, int data, + int rm, int ra, ARMFPStatusFlavour fpsttype, + int data, gen_helper_gvec_4_ptr *fn) { - TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_A64_F16 : FPST_A64); + TCGv_ptr fpst = fpstatus_ptr(fpsttype); tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), @@ -5826,7 +5827,8 @@ static bool trans_BFMLAL_v(DisasContext *s, arg_qrrr_e *a) } if (fp_access_check(s)) { /* Q bit selects BFMLALB vs BFMLALT. */ - gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd, false, a->q, + gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd, + s->fpcr_ah ? FPST_AH : FPST_A64, a->q, gen_helper_gvec_bfmlal); } return true; @@ -5859,7 +5861,8 @@ static bool trans_FCMLA_v(DisasContext *s, arg_FCMLA_v *a) } gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd, - a->esz == MO_16, a->rot, fn[a->esz]); + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64, + a->rot, fn[a->esz]); return true; } @@ -6439,7 +6442,8 @@ static bool do_fmla_vector_idx(DisasContext *s, arg_qrrx_e *a, bool neg) } gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd, - esz == MO_16, (a->idx << 1) | neg, + esz == MO_16 ? FPST_A64_F16 : FPST_A64, + (a->idx << 1) | neg, fns[esz - 1]); return true; } @@ -6574,7 +6578,8 @@ static bool trans_BFMLAL_vi(DisasContext *s, arg_qrrx_e *a) } if (fp_access_check(s)) { /* Q bit selects BFMLALB vs BFMLALT. */ - gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd, 0, + gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd, + s->fpcr_ah ? FPST_AH : FPST_A64, (a->idx << 1) | a->q, gen_helper_gvec_bfmlal_idx); } @@ -6603,7 +6608,8 @@ static bool trans_FCMLA_vi(DisasContext *s, arg_FCMLA_vi *a) } if (fp_access_check(s)) { gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd, - a->esz == MO_16, (a->idx << 2) | a->rot, fn); + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64, + (a->idx << 2) | a->rot, fn); } return true; } diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index be2b5528ba..e38a49dd31 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -7117,7 +7117,8 @@ TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_env_arg_zzzz, static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) { return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal, - a->rd, a->rn, a->rm, a->ra, sel, FPST_A64); + a->rd, a->rn, a->rm, a->ra, sel, + s->fpcr_ah ? FPST_AH : FPST_A64); } TRANS_FEAT(BFMLALB_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, false) @@ -7127,7 +7128,8 @@ static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel) { return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal_idx, a->rd, a->rn, a->rm, a->ra, - (a->index << 1) | sel, FPST_A64); + (a->index << 1) | sel, + s->fpcr_ah ? 
FPST_AH : FPST_A64); } TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false) From 7025fa996f57f3f767a1e7fe150df21da198118b Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:18 +0000 Subject: [PATCH 14/68] target/arm: Add FPCR.NEP to TBFLAGS For FEAT_AFP, we want to emit different code when FPCR.NEP is set, so that instead of zeroing the high elements of a vector register when we write the output of a scalar operation to it, we instead merge in those elements from one of the source registers. Since this affects the generated code, we need to put FPCR.NEP into the TBFLAGS. FPCR.NEP is treated as 0 when in streaming SVE mode and FEAT_SME_FA64 is not implemented or not enabled; we can implement this logic in rebuild_hflags_a64(). Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/cpu.h | 1 + target/arm/tcg/hflags.c | 9 +++++++++ target/arm/tcg/translate-a64.c | 1 + target/arm/tcg/translate.h | 2 ++ 4 files changed, 13 insertions(+) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 51fc50980b..24b0a5fcb9 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -3214,6 +3214,7 @@ FIELD(TBFLAG_A64, NV2_MEM_E20, 35, 1) /* Set if FEAT_NV2 RAM accesses are big-endian */ FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1) FIELD(TBFLAG_A64, AH, 37, 1) /* FPCR.AH */ +FIELD(TBFLAG_A64, NEP, 38, 1) /* FPCR.NEP */ /* * Helpers for using the above. Note that only the A64 accessors use diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c index b3a78564ec..9e6a1869f9 100644 --- a/target/arm/tcg/hflags.c +++ b/target/arm/tcg/hflags.c @@ -407,6 +407,15 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, if (env->vfp.fpcr & FPCR_AH) { DP_TBFLAG_A64(flags, AH, 1); } + if (env->vfp.fpcr & FPCR_NEP) { + /* + * In streaming-SVE without FA64, NEP behaves as if zero; + * compare pseudocode IsMerging() + */ + if (!(EX_TBFLAG_A64(flags, PSTATE_SM) && !sme_fa64(env, el))) { + DP_TBFLAG_A64(flags, NEP, 1); + } + } return rebuild_hflags_common(env, fp_el, mmu_idx, flags); } diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index f9b62a2c4f..d94a0022e4 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -9742,6 +9742,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, dc->nv2_mem_e20 = EX_TBFLAG_A64(tb_flags, NV2_MEM_E20); dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE); dc->fpcr_ah = EX_TBFLAG_A64(tb_flags, AH); + dc->fpcr_nep = EX_TBFLAG_A64(tb_flags, NEP); dc->vec_len = 0; dc->vec_stride = 0; dc->cp_regs = arm_cpu->cp_regs; diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index 3be3fcbe72..0dff00015e 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -157,6 +157,8 @@ typedef struct DisasContext { bool nv2_mem_be; /* True if FPCR.AH is 1 (alternate floating point handling) */ bool fpcr_ah; + /* True if FPCR.NEP is 1 (FEAT_AFP scalar upper-element result handling) */ + bool fpcr_nep; /* * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. * < 0, set by the current instruction. 
From 78dcfe20e92a928375ff18c13b868ce2778c571c Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:19 +0000 Subject: [PATCH 15/68] target/arm: Define and use new write_fp_*reg_merging() functions For FEAT_AFP's FPCR.NEP bit, we need to programmatically change the behaviour of the writeback of the result for most SIMD scalar operations, so that instead of zeroing the upper part of the result register it merges the upper elements from one of the input registers. Provide new functions write_fp_*reg_merging() which can be used instead of the existing write_fp_*reg() functions when we want this "merge the result with one of the input registers if FPCR.NEP is enabled" handling, and use them in do_fp3_scalar_with_fpsttype(). Note that (as documented in the description of the FPCR.NEP bit) which input register to use as the merge source varies by instruction: for these 2-input scalar operations, the comparison instructions take from Rm, not Rn. We'll extend this to also provide the merging behaviour for the remaining scalar insns in subsequent commits. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 117 +++++++++++++++++++++++++-------- 1 file changed, 91 insertions(+), 26 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index d94a0022e4..67c85db477 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -654,6 +654,68 @@ static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v) write_fp_dreg(s, reg, tmp); } +/* + * Write a double result to 128 bit vector register reg, honouring FPCR.NEP: + * - if FPCR.NEP == 0, clear the high elements of reg + * - if FPCR.NEP == 1, set the high elements of reg from mergereg + * (i.e. merge the result with those high elements) + * In either case, SVE register bits above 128 are zeroed (per R_WKYLB). + */ +static void write_fp_dreg_merging(DisasContext *s, int reg, int mergereg, + TCGv_i64 v) +{ + if (!s->fpcr_nep) { + write_fp_dreg(s, reg, v); + return; + } + + /* + * Move from mergereg to reg; this sets the high elements and + * clears the bits above 128 as a side effect. + */ + tcg_gen_gvec_mov(MO_64, vec_full_reg_offset(s, reg), + vec_full_reg_offset(s, mergereg), + 16, vec_full_reg_size(s)); + tcg_gen_st_i64(v, tcg_env, vec_full_reg_offset(s, reg)); +} + +/* + * Write a single-prec result, but only clear the higher elements + * of the destination register if FPCR.NEP is 0; otherwise preserve them. + */ +static void write_fp_sreg_merging(DisasContext *s, int reg, int mergereg, + TCGv_i32 v) +{ + if (!s->fpcr_nep) { + write_fp_sreg(s, reg, v); + return; + } + + tcg_gen_gvec_mov(MO_64, vec_full_reg_offset(s, reg), + vec_full_reg_offset(s, mergereg), + 16, vec_full_reg_size(s)); + tcg_gen_st_i32(v, tcg_env, fp_reg_offset(s, reg, MO_32)); +} + +/* + * Write a half-prec result, but only clear the higher elements + * of the destination register if FPCR.NEP is 0; otherwise preserve them. + * The caller must ensure that the top 16 bits of v are zero. + */ +static void write_fp_hreg_merging(DisasContext *s, int reg, int mergereg, + TCGv_i32 v) +{ + if (!s->fpcr_nep) { + write_fp_sreg(s, reg, v); + return; + } + + tcg_gen_gvec_mov(MO_64, vec_full_reg_offset(s, reg), + vec_full_reg_offset(s, mergereg), + 16, vec_full_reg_size(s)); + tcg_gen_st16_i32(v, tcg_env, fp_reg_offset(s, reg, MO_16)); +} + /* Expand a 2-operand AdvSIMD vector operation using an expander function. 
*/ static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn, GVecGen2Fn *gvec_fn, int vece) @@ -5027,7 +5089,7 @@ typedef struct FPScalar { } FPScalar; static bool do_fp3_scalar_with_fpsttype(DisasContext *s, arg_rrr_e *a, - const FPScalar *f, + const FPScalar *f, int mergereg, ARMFPStatusFlavour fpsttype) { switch (a->esz) { @@ -5036,7 +5098,7 @@ static bool do_fp3_scalar_with_fpsttype(DisasContext *s, arg_rrr_e *a, TCGv_i64 t0 = read_fp_dreg(s, a->rn); TCGv_i64 t1 = read_fp_dreg(s, a->rm); f->gen_d(t0, t0, t1, fpstatus_ptr(fpsttype)); - write_fp_dreg(s, a->rd, t0); + write_fp_dreg_merging(s, a->rd, mergereg, t0); } break; case MO_32: @@ -5044,7 +5106,7 @@ static bool do_fp3_scalar_with_fpsttype(DisasContext *s, arg_rrr_e *a, TCGv_i32 t0 = read_fp_sreg(s, a->rn); TCGv_i32 t1 = read_fp_sreg(s, a->rm); f->gen_s(t0, t0, t1, fpstatus_ptr(fpsttype)); - write_fp_sreg(s, a->rd, t0); + write_fp_sreg_merging(s, a->rd, mergereg, t0); } break; case MO_16: @@ -5055,7 +5117,7 @@ static bool do_fp3_scalar_with_fpsttype(DisasContext *s, arg_rrr_e *a, TCGv_i32 t0 = read_fp_hreg(s, a->rn); TCGv_i32 t1 = read_fp_hreg(s, a->rm); f->gen_h(t0, t0, t1, fpstatus_ptr(fpsttype)); - write_fp_sreg(s, a->rd, t0); + write_fp_hreg_merging(s, a->rd, mergereg, t0); } break; default: @@ -5064,16 +5126,19 @@ static bool do_fp3_scalar_with_fpsttype(DisasContext *s, arg_rrr_e *a, return true; } -static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f) +static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f, + int mergereg) { - return do_fp3_scalar_with_fpsttype(s, a, f, + return do_fp3_scalar_with_fpsttype(s, a, f, mergereg, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); } -static bool do_fp3_scalar_ah(DisasContext *s, arg_rrr_e *a, const FPScalar *f) +static bool do_fp3_scalar_ah(DisasContext *s, arg_rrr_e *a, const FPScalar *f, + int mergereg) { - return do_fp3_scalar_with_fpsttype(s, a, f, select_ah_fpst(s, a->esz)); + return do_fp3_scalar_with_fpsttype(s, a, f, mergereg, + select_ah_fpst(s, a->esz)); } static const FPScalar f_scalar_fadd = { @@ -5081,63 +5146,63 @@ static const FPScalar f_scalar_fadd = { gen_helper_vfp_adds, gen_helper_vfp_addd, }; -TRANS(FADD_s, do_fp3_scalar, a, &f_scalar_fadd) +TRANS(FADD_s, do_fp3_scalar, a, &f_scalar_fadd, a->rn) static const FPScalar f_scalar_fsub = { gen_helper_vfp_subh, gen_helper_vfp_subs, gen_helper_vfp_subd, }; -TRANS(FSUB_s, do_fp3_scalar, a, &f_scalar_fsub) +TRANS(FSUB_s, do_fp3_scalar, a, &f_scalar_fsub, a->rn) static const FPScalar f_scalar_fdiv = { gen_helper_vfp_divh, gen_helper_vfp_divs, gen_helper_vfp_divd, }; -TRANS(FDIV_s, do_fp3_scalar, a, &f_scalar_fdiv) +TRANS(FDIV_s, do_fp3_scalar, a, &f_scalar_fdiv, a->rn) static const FPScalar f_scalar_fmul = { gen_helper_vfp_mulh, gen_helper_vfp_muls, gen_helper_vfp_muld, }; -TRANS(FMUL_s, do_fp3_scalar, a, &f_scalar_fmul) +TRANS(FMUL_s, do_fp3_scalar, a, &f_scalar_fmul, a->rn) static const FPScalar f_scalar_fmax = { gen_helper_vfp_maxh, gen_helper_vfp_maxs, gen_helper_vfp_maxd, }; -TRANS(FMAX_s, do_fp3_scalar, a, &f_scalar_fmax) +TRANS(FMAX_s, do_fp3_scalar, a, &f_scalar_fmax, a->rn) static const FPScalar f_scalar_fmin = { gen_helper_vfp_minh, gen_helper_vfp_mins, gen_helper_vfp_mind, }; -TRANS(FMIN_s, do_fp3_scalar, a, &f_scalar_fmin) +TRANS(FMIN_s, do_fp3_scalar, a, &f_scalar_fmin, a->rn) static const FPScalar f_scalar_fmaxnm = { gen_helper_vfp_maxnumh, gen_helper_vfp_maxnums, gen_helper_vfp_maxnumd, }; -TRANS(FMAXNM_s, do_fp3_scalar, a, &f_scalar_fmaxnm) +TRANS(FMAXNM_s, 
do_fp3_scalar, a, &f_scalar_fmaxnm, a->rn) static const FPScalar f_scalar_fminnm = { gen_helper_vfp_minnumh, gen_helper_vfp_minnums, gen_helper_vfp_minnumd, }; -TRANS(FMINNM_s, do_fp3_scalar, a, &f_scalar_fminnm) +TRANS(FMINNM_s, do_fp3_scalar, a, &f_scalar_fminnm, a->rn) static const FPScalar f_scalar_fmulx = { gen_helper_advsimd_mulxh, gen_helper_vfp_mulxs, gen_helper_vfp_mulxd, }; -TRANS(FMULX_s, do_fp3_scalar, a, &f_scalar_fmulx) +TRANS(FMULX_s, do_fp3_scalar, a, &f_scalar_fmulx, a->rn) static void gen_fnmul_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) { @@ -5162,42 +5227,42 @@ static const FPScalar f_scalar_fnmul = { gen_fnmul_s, gen_fnmul_d, }; -TRANS(FNMUL_s, do_fp3_scalar, a, &f_scalar_fnmul) +TRANS(FNMUL_s, do_fp3_scalar, a, &f_scalar_fnmul, a->rn) static const FPScalar f_scalar_fcmeq = { gen_helper_advsimd_ceq_f16, gen_helper_neon_ceq_f32, gen_helper_neon_ceq_f64, }; -TRANS(FCMEQ_s, do_fp3_scalar, a, &f_scalar_fcmeq) +TRANS(FCMEQ_s, do_fp3_scalar, a, &f_scalar_fcmeq, a->rm) static const FPScalar f_scalar_fcmge = { gen_helper_advsimd_cge_f16, gen_helper_neon_cge_f32, gen_helper_neon_cge_f64, }; -TRANS(FCMGE_s, do_fp3_scalar, a, &f_scalar_fcmge) +TRANS(FCMGE_s, do_fp3_scalar, a, &f_scalar_fcmge, a->rm) static const FPScalar f_scalar_fcmgt = { gen_helper_advsimd_cgt_f16, gen_helper_neon_cgt_f32, gen_helper_neon_cgt_f64, }; -TRANS(FCMGT_s, do_fp3_scalar, a, &f_scalar_fcmgt) +TRANS(FCMGT_s, do_fp3_scalar, a, &f_scalar_fcmgt, a->rm) static const FPScalar f_scalar_facge = { gen_helper_advsimd_acge_f16, gen_helper_neon_acge_f32, gen_helper_neon_acge_f64, }; -TRANS(FACGE_s, do_fp3_scalar, a, &f_scalar_facge) +TRANS(FACGE_s, do_fp3_scalar, a, &f_scalar_facge, a->rm) static const FPScalar f_scalar_facgt = { gen_helper_advsimd_acgt_f16, gen_helper_neon_acgt_f32, gen_helper_neon_acgt_f64, }; -TRANS(FACGT_s, do_fp3_scalar, a, &f_scalar_facgt) +TRANS(FACGT_s, do_fp3_scalar, a, &f_scalar_facgt, a->rm) static void gen_fabd_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) { @@ -5222,21 +5287,21 @@ static const FPScalar f_scalar_fabd = { gen_fabd_s, gen_fabd_d, }; -TRANS(FABD_s, do_fp3_scalar, a, &f_scalar_fabd) +TRANS(FABD_s, do_fp3_scalar, a, &f_scalar_fabd, a->rn) static const FPScalar f_scalar_frecps = { gen_helper_recpsf_f16, gen_helper_recpsf_f32, gen_helper_recpsf_f64, }; -TRANS(FRECPS_s, do_fp3_scalar_ah, a, &f_scalar_frecps) +TRANS(FRECPS_s, do_fp3_scalar_ah, a, &f_scalar_frecps, a->rn) static const FPScalar f_scalar_frsqrts = { gen_helper_rsqrtsf_f16, gen_helper_rsqrtsf_f32, gen_helper_rsqrtsf_f64, }; -TRANS(FRSQRTS_s, do_fp3_scalar_ah, a, &f_scalar_frsqrts) +TRANS(FRSQRTS_s, do_fp3_scalar_ah, a, &f_scalar_frsqrts, a->rn) static bool do_fcmp0_s(DisasContext *s, arg_rr_e *a, const FPScalar *f, bool swap) From 0378199dc2fdf90550f9a819701b0c1709b34eab Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:20 +0000 Subject: [PATCH 16/68] target/arm: Handle FPCR.NEP for 3-input scalar operations Handle FPCR.NEP for the 3-input scalar operations which use do_fmla_scalar_idx() and do_fmadd(), by making them call the appropriate write_fp_*reg_merging() functions. 
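Note which register supplies the untouched high elements in each case (this only restates what the hunks below do): the by-element FMLA/FMLS forms merge from Rd itself, while the four-register FMADD/FMSUB family merges from Ra, e.g. for the double-precision paths:

    gen_helper_vfp_muladdd(t0, t1, t2, t0, fpstatus_ptr(FPST_A64));
    write_fp_dreg_merging(s, a->rd, a->rd, t0);   /* FMLA (by element) */

    gen_helper_vfp_muladdd(ta, tn, tm, ta, fpst);
    write_fp_dreg_merging(s, a->rd, a->ra, ta);   /* FMADD */
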
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 67c85db477..3f6e31d36a 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -6345,7 +6345,7 @@ static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg) gen_vfp_negd(t1, t1); } gen_helper_vfp_muladdd(t0, t1, t2, t0, fpstatus_ptr(FPST_A64)); - write_fp_dreg(s, a->rd, t0); + write_fp_dreg_merging(s, a->rd, a->rd, t0); } break; case MO_32: @@ -6359,7 +6359,7 @@ static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg) gen_vfp_negs(t1, t1); } gen_helper_vfp_muladds(t0, t1, t2, t0, fpstatus_ptr(FPST_A64)); - write_fp_sreg(s, a->rd, t0); + write_fp_sreg_merging(s, a->rd, a->rd, t0); } break; case MO_16: @@ -6377,7 +6377,7 @@ static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg) } gen_helper_advsimd_muladdh(t0, t1, t2, t0, fpstatus_ptr(FPST_A64_F16)); - write_fp_sreg(s, a->rd, t0); + write_fp_hreg_merging(s, a->rd, a->rd, t0); } break; default: @@ -6856,7 +6856,7 @@ static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n) } fpst = fpstatus_ptr(FPST_A64); gen_helper_vfp_muladdd(ta, tn, tm, ta, fpst); - write_fp_dreg(s, a->rd, ta); + write_fp_dreg_merging(s, a->rd, a->ra, ta); } break; @@ -6874,7 +6874,7 @@ static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n) } fpst = fpstatus_ptr(FPST_A64); gen_helper_vfp_muladds(ta, tn, tm, ta, fpst); - write_fp_sreg(s, a->rd, ta); + write_fp_sreg_merging(s, a->rd, a->ra, ta); } break; @@ -6895,7 +6895,7 @@ static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n) } fpst = fpstatus_ptr(FPST_A64_F16); gen_helper_advsimd_muladdh(ta, tn, tm, ta, fpst); - write_fp_sreg(s, a->rd, ta); + write_fp_hreg_merging(s, a->rd, a->ra, ta); } break; From 053b39aad9a94784d5f1926e50ec7199978525a8 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:21 +0000 Subject: [PATCH 17/68] target/arm: Handle FPCR.NEP for BFCVT scalar Currently we implement BFCVT scalar via do_fp1_scalar(). This works even though BFCVT is a narrowing operation from 32 to 16 bits, because we can use write_fp_sreg() for float16. However, FPCR.NEP support requires that we use write_fp_hreg_merging() for float16 outputs, so we can't continue to borrow the non-narrowing do_fp1_scalar() function for this. Split out trans_BFCVT_s() into its own implementation that honours FPCR.NEP. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 3f6e31d36a..61ea2bb9f7 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -8571,10 +8571,27 @@ static const FPScalar1 f_scalar_frintx = { }; TRANS(FRINTX_s, do_fp1_scalar, a, &f_scalar_frintx, -1) -static const FPScalar1 f_scalar_bfcvt = { - .gen_s = gen_helper_bfcvt, -}; -TRANS_FEAT(BFCVT_s, aa64_bf16, do_fp1_scalar_ah, a, &f_scalar_bfcvt, -1) +static bool trans_BFCVT_s(DisasContext *s, arg_rr_e *a) +{ + ARMFPStatusFlavour fpsttype = s->fpcr_ah ? 
FPST_AH : FPST_A64; + TCGv_i32 t32; + int check; + + if (!dc_isar_feature(aa64_bf16, s)) { + return false; + } + + check = fp_access_check_scalar_hsd(s, a->esz); + + if (check <= 0) { + return check == 0; + } + + t32 = read_fp_sreg(s, a->rn); + gen_helper_bfcvt(t32, t32, fpstatus_ptr(fpsttype)); + write_fp_hreg_merging(s, a->rd, a->rd, t32); + return true; +} static const FPScalar1 f_scalar_frint32 = { NULL, From 3f88032ce23db57e64efb6ce24b760b201106a87 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:22 +0000 Subject: [PATCH 18/68] target/arm: Handle FPCR.NEP for 1-input scalar operations Handle FPCR.NEP for the 1-input scalar operations. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 61ea2bb9f7..a5f59a94ec 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -8509,17 +8509,17 @@ static bool do_fp1_scalar_with_fpsttype(DisasContext *s, arg_rr_e *a, case MO_64: t64 = read_fp_dreg(s, a->rn); f->gen_d(t64, t64, fpst); - write_fp_dreg(s, a->rd, t64); + write_fp_dreg_merging(s, a->rd, a->rd, t64); break; case MO_32: t32 = read_fp_sreg(s, a->rn); f->gen_s(t32, t32, fpst); - write_fp_sreg(s, a->rd, t32); + write_fp_sreg_merging(s, a->rd, a->rd, t32); break; case MO_16: t32 = read_fp_hreg(s, a->rn); f->gen_h(t32, t32, fpst); - write_fp_sreg(s, a->rd, t32); + write_fp_hreg_merging(s, a->rd, a->rd, t32); break; default: g_assert_not_reached(); @@ -8640,7 +8640,7 @@ static bool trans_FCVT_s_ds(DisasContext *s, arg_rr *a) TCGv_ptr fpst = fpstatus_ptr(FPST_A64); gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, fpst); - write_fp_dreg(s, a->rd, tcg_rd); + write_fp_dreg_merging(s, a->rd, a->rd, tcg_rd); } return true; } @@ -8653,8 +8653,8 @@ static bool trans_FCVT_s_hs(DisasContext *s, arg_rr *a) TCGv_ptr fpst = fpstatus_ptr(FPST_A64); gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp); - /* write_fp_sreg is OK here because top half of result is zero */ - write_fp_sreg(s, a->rd, tmp); + /* write_fp_hreg_merging is OK here because top half of result is zero */ + write_fp_hreg_merging(s, a->rd, a->rd, tmp); } return true; } @@ -8667,7 +8667,7 @@ static bool trans_FCVT_s_sd(DisasContext *s, arg_rr *a) TCGv_ptr fpst = fpstatus_ptr(FPST_A64); gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, fpst); - write_fp_sreg(s, a->rd, tcg_rd); + write_fp_sreg_merging(s, a->rd, a->rd, tcg_rd); } return true; } @@ -8681,8 +8681,8 @@ static bool trans_FCVT_s_hd(DisasContext *s, arg_rr *a) TCGv_ptr fpst = fpstatus_ptr(FPST_A64); gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp); - /* write_fp_sreg is OK here because top half of tcg_rd is zero */ - write_fp_sreg(s, a->rd, tcg_rd); + /* write_fp_hreg_merging is OK here because top half of tcg_rd is zero */ + write_fp_hreg_merging(s, a->rd, a->rd, tcg_rd); } return true; } @@ -8696,7 +8696,7 @@ static bool trans_FCVT_s_sh(DisasContext *s, arg_rr *a) TCGv_i32 tcg_ahp = get_ahp_flag(); gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); - write_fp_sreg(s, a->rd, tcg_rd); + write_fp_sreg_merging(s, a->rd, a->rd, tcg_rd); } return true; } @@ -8710,7 +8710,7 @@ static bool trans_FCVT_s_dh(DisasContext *s, arg_rr *a) TCGv_i32 tcg_ahp = get_ahp_flag(); gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); - write_fp_dreg(s, a->rd, tcg_rd); + write_fp_dreg_merging(s, a->rd, a->rd, tcg_rd); } return true; } 
@@ -8958,7 +8958,9 @@ static bool do_fcvt_f(DisasContext *s, arg_fcvt *a, do_fcvt_scalar(s, a->esz | (is_signed ? MO_SIGN : 0), a->esz, tcg_int, a->shift, a->rn, rmode); - clear_vec(s, a->rd); + if (!s->fpcr_nep) { + clear_vec(s, a->rd); + } write_vec_element(s, tcg_int, a->rd, 0, a->esz); return true; } From 13ab4eb19088f78851c69faaa8b5c6f3a3821150 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:23 +0000 Subject: [PATCH 19/68] target/arm: Handle FPCR.NEP in do_cvtf_scalar() Handle FPCR.NEP in the operations handled by do_cvtf_scalar(). Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index a5f59a94ec..804b6b5b67 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -8733,7 +8733,7 @@ static bool do_cvtf_scalar(DisasContext *s, MemOp esz, int rd, int shift, } else { gen_helper_vfp_uqtod(tcg_double, tcg_int, tcg_shift, tcg_fpstatus); } - write_fp_dreg(s, rd, tcg_double); + write_fp_dreg_merging(s, rd, rd, tcg_double); break; case MO_32: @@ -8743,7 +8743,7 @@ static bool do_cvtf_scalar(DisasContext *s, MemOp esz, int rd, int shift, } else { gen_helper_vfp_uqtos(tcg_single, tcg_int, tcg_shift, tcg_fpstatus); } - write_fp_sreg(s, rd, tcg_single); + write_fp_sreg_merging(s, rd, rd, tcg_single); break; case MO_16: @@ -8753,7 +8753,7 @@ static bool do_cvtf_scalar(DisasContext *s, MemOp esz, int rd, int shift, } else { gen_helper_vfp_uqtoh(tcg_single, tcg_int, tcg_shift, tcg_fpstatus); } - write_fp_sreg(s, rd, tcg_single); + write_fp_hreg_merging(s, rd, rd, tcg_single); break; default: From 64339259a9c2c5d504ea5179fc42091e3e4a7b22 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:24 +0000 Subject: [PATCH 20/68] target/arm: Handle FPCR.NEP for scalar FABS and FNEG Handle FPCR.NEP merging for scalar FABS and FNEG; this requires an extra parameter to do_fp1_scalar_int(), since FMOV scalar does not have the merging behaviour. 
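Concretely (matching the hunks below), the new flag is false only for the FMOV register move and true for the operations that must merge:

    TRANS(FMOV_s, do_fp1_scalar_int, a, &f_scalar_fmov, false)
    TRANS(FABS_s, do_fp1_scalar_int, a, &f_scalar_fabs, true)
    TRANS(FNEG_s, do_fp1_scalar_int, a, &f_scalar_fneg, true)
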
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 804b6b5b67..3195cb5fef 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -8426,21 +8426,30 @@ typedef struct FPScalar1Int { } FPScalar1Int; static bool do_fp1_scalar_int(DisasContext *s, arg_rr_e *a, - const FPScalar1Int *f) + const FPScalar1Int *f, + bool merging) { switch (a->esz) { case MO_64: if (fp_access_check(s)) { TCGv_i64 t = read_fp_dreg(s, a->rn); f->gen_d(t, t); - write_fp_dreg(s, a->rd, t); + if (merging) { + write_fp_dreg_merging(s, a->rd, a->rd, t); + } else { + write_fp_dreg(s, a->rd, t); + } } break; case MO_32: if (fp_access_check(s)) { TCGv_i32 t = read_fp_sreg(s, a->rn); f->gen_s(t, t); - write_fp_sreg(s, a->rd, t); + if (merging) { + write_fp_sreg_merging(s, a->rd, a->rd, t); + } else { + write_fp_sreg(s, a->rd, t); + } } break; case MO_16: @@ -8450,7 +8459,11 @@ static bool do_fp1_scalar_int(DisasContext *s, arg_rr_e *a, if (fp_access_check(s)) { TCGv_i32 t = read_fp_hreg(s, a->rn); f->gen_h(t, t); - write_fp_sreg(s, a->rd, t); + if (merging) { + write_fp_hreg_merging(s, a->rd, a->rd, t); + } else { + write_fp_sreg(s, a->rd, t); + } } break; default: @@ -8464,21 +8477,21 @@ static const FPScalar1Int f_scalar_fmov = { tcg_gen_mov_i32, tcg_gen_mov_i64, }; -TRANS(FMOV_s, do_fp1_scalar_int, a, &f_scalar_fmov) +TRANS(FMOV_s, do_fp1_scalar_int, a, &f_scalar_fmov, false) static const FPScalar1Int f_scalar_fabs = { gen_vfp_absh, gen_vfp_abss, gen_vfp_absd, }; -TRANS(FABS_s, do_fp1_scalar_int, a, &f_scalar_fabs) +TRANS(FABS_s, do_fp1_scalar_int, a, &f_scalar_fabs, true) static const FPScalar1Int f_scalar_fneg = { gen_vfp_negh, gen_vfp_negs, gen_vfp_negd, }; -TRANS(FNEG_s, do_fp1_scalar_int, a, &f_scalar_fneg) +TRANS(FNEG_s, do_fp1_scalar_int, a, &f_scalar_fneg, true) typedef struct FPScalar1 { void (*gen_h)(TCGv_i32, TCGv_i32, TCGv_ptr); From 555639065d5b5310e8e67af383539309784d37dd Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:25 +0000 Subject: [PATCH 21/68] target/arm: Handle FPCR.NEP for FCVTXN (scalar) Unlike the other users of do_2misc_narrow_scalar(), FCVTXN (scalar) is always double-to-single and must honour FPCR.NEP. Implement this directly in a trans function rather than using do_2misc_narrow_scalar(). We still need gen_fcvtxn_sd() and the f_scalar_fcvtxn[] array for the FCVTXN (vector) insn, so we move those down in the file to where they are used. 
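For reference, "von Neumann rounding" (round to odd) just truncates and then sets the low bit of the result if anything non-zero was discarded. A conceptual sketch of the fraction handling only (a hypothetical helper, not the softfloat code, which also deals with exponents, NaNs and exception flags):

    /* hypothetical helper, for illustration only */
    static inline uint32_t round_frac_to_odd(uint64_t frac, unsigned shift)
    {
        uint64_t discarded = frac & ((1ull << shift) - 1);
        return (uint32_t)(frac >> shift) | (discarded != 0);
    }

The point of the mode is that the intermediate single-precision result can later be rounded again (for instance on to half precision) without introducing a double-rounding error.
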
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 43 ++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 3195cb5fef..ac8196136c 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -9247,24 +9247,21 @@ static ArithOneOp * const f_scalar_uqxtn[] = { }; TRANS(UQXTN_s, do_2misc_narrow_scalar, a, f_scalar_uqxtn) -static void gen_fcvtxn_sd(TCGv_i64 d, TCGv_i64 n) +static bool trans_FCVTXN_s(DisasContext *s, arg_rr_e *a) { - /* - * 64 bit to 32 bit float conversion - * with von Neumann rounding (round to odd) - */ - TCGv_i32 tmp = tcg_temp_new_i32(); - gen_helper_fcvtx_f64_to_f32(tmp, n, fpstatus_ptr(FPST_A64)); - tcg_gen_extu_i32_i64(d, tmp); + if (fp_access_check(s)) { + /* + * 64 bit to 32 bit float conversion + * with von Neumann rounding (round to odd) + */ + TCGv_i64 src = read_fp_dreg(s, a->rn); + TCGv_i32 dst = tcg_temp_new_i32(); + gen_helper_fcvtx_f64_to_f32(dst, src, fpstatus_ptr(FPST_A64)); + write_fp_sreg_merging(s, a->rd, a->rd, dst); + } + return true; } -static ArithOneOp * const f_scalar_fcvtxn[] = { - NULL, - NULL, - gen_fcvtxn_sd, -}; -TRANS(FCVTXN_s, do_2misc_narrow_scalar, a, f_scalar_fcvtxn) - #undef WRAP_ENV static bool do_gvec_fn2(DisasContext *s, arg_qrr_e *a, GVecGen2Fn *fn) @@ -9366,11 +9363,27 @@ static void gen_fcvtn_sd(TCGv_i64 d, TCGv_i64 n) tcg_gen_extu_i32_i64(d, tmp); } +static void gen_fcvtxn_sd(TCGv_i64 d, TCGv_i64 n) +{ + /* + * 64 bit to 32 bit float conversion + * with von Neumann rounding (round to odd) + */ + TCGv_i32 tmp = tcg_temp_new_i32(); + gen_helper_fcvtx_f64_to_f32(tmp, n, fpstatus_ptr(FPST_A64)); + tcg_gen_extu_i32_i64(d, tmp); +} + static ArithOneOp * const f_vector_fcvtn[] = { NULL, gen_fcvtn_hs, gen_fcvtn_sd, }; +static ArithOneOp * const f_scalar_fcvtxn[] = { + NULL, + NULL, + gen_fcvtxn_sd, +}; TRANS(FCVTN_v, do_2misc_narrow_vector, a, f_vector_fcvtn) TRANS(FCVTXN_v, do_2misc_narrow_vector, a, f_scalar_fcvtxn) From 7bcde0c6dd9e7d150fb530b85f9218270db98638 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:26 +0000 Subject: [PATCH 22/68] target/arm: Handle FPCR.NEP for NEP for FMUL, FMULX scalar by element do_fp3_scalar_idx() is used only for the FMUL and FMULX scalar by element instructions; these both need to merge the result with the Rn register when FPCR.NEP is set. 
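In other words, the merge source passed to write_fp_*reg_merging() is a->rn here (not a->rd as for the 1-input and fused multiply-add cases), for example in the double-precision path below:

    f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_A64));
    write_fp_dreg_merging(s, a->rd, a->rn, t0);
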
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index ac8196136c..f31acee30a 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -6296,7 +6296,7 @@ static bool do_fp3_scalar_idx(DisasContext *s, arg_rrx_e *a, const FPScalar *f) read_vec_element(s, t1, a->rm, a->idx, MO_64); f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_A64)); - write_fp_dreg(s, a->rd, t0); + write_fp_dreg_merging(s, a->rd, a->rn, t0); } break; case MO_32: @@ -6306,7 +6306,7 @@ static bool do_fp3_scalar_idx(DisasContext *s, arg_rrx_e *a, const FPScalar *f) read_vec_element_i32(s, t1, a->rm, a->idx, MO_32); f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_A64)); - write_fp_sreg(s, a->rd, t0); + write_fp_sreg_merging(s, a->rd, a->rn, t0); } break; case MO_16: @@ -6319,7 +6319,7 @@ static bool do_fp3_scalar_idx(DisasContext *s, arg_rrx_e *a, const FPScalar *f) read_vec_element_i32(s, t1, a->rm, a->idx, MO_16); f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_A64_F16)); - write_fp_sreg(s, a->rd, t0); + write_fp_hreg_merging(s, a->rd, a->rn, t0); } break; default: From 2a734f3fe3cf528f3cf2f69600e88797b937201a Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:27 +0000 Subject: [PATCH 23/68] target/arm: Implement FPCR.AH semantics for scalar FMIN/FMAX When FPCR.AH == 1, floating point FMIN and FMAX have some odd special cases: * comparing two zeroes (even of different sign) or comparing a NaN with anything always returns the second argument (possibly squashed to zero) * denormal outputs are not squashed to zero regardless of FZ or FZ16 Implement these semantics in new helper functions and select them at translate time if FPCR.AH is 1 for the scalar FMAX and FMIN insns. (We will convert the other FMAX and FMIN insns in subsequent commits.) Note that FMINNM and FMAXNM are not affected. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-a64.c | 36 ++++++++++++++++++++++++++++++++++ target/arm/tcg/helper-a64.h | 7 +++++++ target/arm/tcg/translate-a64.c | 23 ++++++++++++++++++++-- 3 files changed, 64 insertions(+), 2 deletions(-) diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c index 05036089dd..ed5e4a4599 100644 --- a/target/arm/tcg/helper-a64.c +++ b/target/arm/tcg/helper-a64.c @@ -399,6 +399,42 @@ float32 HELPER(fcvtx_f64_to_f32)(float64 a, float_status *fpst) return r; } +/* + * AH=1 min/max have some odd special cases: + * comparing two zeroes (regardless of sign), (NaN, anything), + * or (anything, NaN) should return the second argument (possibly + * squashed to zero). + * Also, denormal outputs are not squashed to zero regardless of FZ or FZ16. 
+ */ +#define AH_MINMAX_HELPER(NAME, CTYPE, FLOATTYPE, MINMAX) \ + CTYPE HELPER(NAME)(CTYPE a, CTYPE b, float_status *fpst) \ + { \ + bool save; \ + CTYPE r; \ + a = FLOATTYPE ## _squash_input_denormal(a, fpst); \ + b = FLOATTYPE ## _squash_input_denormal(b, fpst); \ + if (FLOATTYPE ## _is_zero(a) && FLOATTYPE ## _is_zero(b)) { \ + return b; \ + } \ + if (FLOATTYPE ## _is_any_nan(a) || \ + FLOATTYPE ## _is_any_nan(b)) { \ + float_raise(float_flag_invalid, fpst); \ + return b; \ + } \ + save = get_flush_to_zero(fpst); \ + set_flush_to_zero(false, fpst); \ + r = FLOATTYPE ## _ ## MINMAX(a, b, fpst); \ + set_flush_to_zero(save, fpst); \ + return r; \ + } + +AH_MINMAX_HELPER(vfp_ah_minh, dh_ctype_f16, float16, min) +AH_MINMAX_HELPER(vfp_ah_mins, float32, float32, min) +AH_MINMAX_HELPER(vfp_ah_mind, float64, float64, min) +AH_MINMAX_HELPER(vfp_ah_maxh, dh_ctype_f16, float16, max) +AH_MINMAX_HELPER(vfp_ah_maxs, float32, float32, max) +AH_MINMAX_HELPER(vfp_ah_maxd, float64, float64, max) + /* 64-bit versions of the CRC helpers. Note that although the operation * (and the prototypes of crc32c() and crc32() mean that only the bottom * 32 bits of the accumulator and result are used, we pass and return diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h index bac12fbe55..ae0424f6de 100644 --- a/target/arm/tcg/helper-a64.h +++ b/target/arm/tcg/helper-a64.h @@ -67,6 +67,13 @@ DEF_HELPER_4(advsimd_muladd2h, i32, i32, i32, i32, fpst) DEF_HELPER_2(advsimd_rinth_exact, f16, f16, fpst) DEF_HELPER_2(advsimd_rinth, f16, f16, fpst) +DEF_HELPER_3(vfp_ah_minh, f16, f16, f16, fpst) +DEF_HELPER_3(vfp_ah_mins, f32, f32, f32, fpst) +DEF_HELPER_3(vfp_ah_mind, f64, f64, f64, fpst) +DEF_HELPER_3(vfp_ah_maxh, f16, f16, f16, fpst) +DEF_HELPER_3(vfp_ah_maxs, f32, f32, f32, fpst) +DEF_HELPER_3(vfp_ah_maxd, f64, f64, f64, fpst) + DEF_HELPER_2(exception_return, void, env, i64) DEF_HELPER_FLAGS_2(dc_zva, TCG_CALL_NO_WG, void, env, i64) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index f31acee30a..89f061d20b 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -5141,6 +5141,15 @@ static bool do_fp3_scalar_ah(DisasContext *s, arg_rrr_e *a, const FPScalar *f, select_ah_fpst(s, a->esz)); } +/* Some insns need to call different helpers when FPCR.AH == 1 */ +static bool do_fp3_scalar_2fn(DisasContext *s, arg_rrr_e *a, + const FPScalar *fnormal, + const FPScalar *fah, + int mergereg) +{ + return do_fp3_scalar(s, a, s->fpcr_ah ? 
fah : fnormal, mergereg); +} + static const FPScalar f_scalar_fadd = { gen_helper_vfp_addh, gen_helper_vfp_adds, @@ -5174,14 +5183,24 @@ static const FPScalar f_scalar_fmax = { gen_helper_vfp_maxs, gen_helper_vfp_maxd, }; -TRANS(FMAX_s, do_fp3_scalar, a, &f_scalar_fmax, a->rn) +static const FPScalar f_scalar_fmax_ah = { + gen_helper_vfp_ah_maxh, + gen_helper_vfp_ah_maxs, + gen_helper_vfp_ah_maxd, +}; +TRANS(FMAX_s, do_fp3_scalar_2fn, a, &f_scalar_fmax, &f_scalar_fmax_ah, a->rn) static const FPScalar f_scalar_fmin = { gen_helper_vfp_minh, gen_helper_vfp_mins, gen_helper_vfp_mind, }; -TRANS(FMIN_s, do_fp3_scalar, a, &f_scalar_fmin, a->rn) +static const FPScalar f_scalar_fmin_ah = { + gen_helper_vfp_ah_minh, + gen_helper_vfp_ah_mins, + gen_helper_vfp_ah_mind, +}; +TRANS(FMIN_s, do_fp3_scalar_2fn, a, &f_scalar_fmin, &f_scalar_fmin_ah, a->rn) static const FPScalar f_scalar_fmaxnm = { gen_helper_vfp_maxnumh, From d613e44010658ced19ac9add6a9587bef1d63c67 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:28 +0000 Subject: [PATCH 24/68] target/arm: Implement FPCR.AH semantics for vector FMIN/FMAX Implement the FPCR.AH == 1 semantics for vector FMIN/FMAX, by creating new _ah_ versions of the gvec helpers which invoke the scalar fmin_ah and fmax_ah helpers on each element. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-sve.h | 14 ++++++++++++++ target/arm/tcg/translate-a64.c | 21 +++++++++++++++++++-- target/arm/tcg/vec_helper.c | 8 ++++++++ 3 files changed, 41 insertions(+), 2 deletions(-) diff --git a/target/arm/tcg/helper-sve.h b/target/arm/tcg/helper-sve.h index fea43b319c..f1b4606f76 100644 --- a/target/arm/tcg/helper-sve.h +++ b/target/arm/tcg/helper-sve.h @@ -972,6 +972,20 @@ DEF_HELPER_FLAGS_5(gvec_rsqrts_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_5(gvec_rsqrts_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmax_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmax_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmax_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_ah_fmin_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmin_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmin_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + DEF_HELPER_FLAGS_4(sve_faddv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_faddv_s, TCG_CALL_NO_RWG, diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 89f061d20b..24695ab55b 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -5596,6 +5596,13 @@ static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a, int data, FPST_A64_F16 : FPST_A64); } +static bool do_fp3_vector_2fn(DisasContext *s, arg_qrrr_e *a, int data, + gen_helper_gvec_3_ptr * const fnormal[3], + gen_helper_gvec_3_ptr * const fah[3]) +{ + return do_fp3_vector(s, a, data, s->fpcr_ah ? 
fah : fnormal); +} + static bool do_fp3_vector_ah(DisasContext *s, arg_qrrr_e *a, int data, gen_helper_gvec_3_ptr * const f[3]) { @@ -5636,14 +5643,24 @@ static gen_helper_gvec_3_ptr * const f_vector_fmax[3] = { gen_helper_gvec_fmax_s, gen_helper_gvec_fmax_d, }; -TRANS(FMAX_v, do_fp3_vector, a, 0, f_vector_fmax) +static gen_helper_gvec_3_ptr * const f_vector_fmax_ah[3] = { + gen_helper_gvec_ah_fmax_h, + gen_helper_gvec_ah_fmax_s, + gen_helper_gvec_ah_fmax_d, +}; +TRANS(FMAX_v, do_fp3_vector_2fn, a, 0, f_vector_fmax, f_vector_fmax_ah) static gen_helper_gvec_3_ptr * const f_vector_fmin[3] = { gen_helper_gvec_fmin_h, gen_helper_gvec_fmin_s, gen_helper_gvec_fmin_d, }; -TRANS(FMIN_v, do_fp3_vector, a, 0, f_vector_fmin) +static gen_helper_gvec_3_ptr * const f_vector_fmin_ah[3] = { + gen_helper_gvec_ah_fmin_h, + gen_helper_gvec_ah_fmin_s, + gen_helper_gvec_ah_fmin_d, +}; +TRANS(FMIN_v, do_fp3_vector_2fn, a, 0, f_vector_fmin, f_vector_fmin_ah) static gen_helper_gvec_3_ptr * const f_vector_fmaxnm[3] = { gen_helper_gvec_fmaxnum_h, diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index 7330b373c3..9f77aa6b91 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -1448,6 +1448,14 @@ DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16) DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32) DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64) +DO_3OP(gvec_ah_fmax_h, helper_vfp_ah_maxh, float16) +DO_3OP(gvec_ah_fmax_s, helper_vfp_ah_maxs, float32) +DO_3OP(gvec_ah_fmax_d, helper_vfp_ah_maxd, float64) + +DO_3OP(gvec_ah_fmin_h, helper_vfp_ah_minh, float16) +DO_3OP(gvec_ah_fmin_s, helper_vfp_ah_mins, float32) +DO_3OP(gvec_ah_fmin_d, helper_vfp_ah_mind, float64) + #endif #undef DO_3OP From bb9330b662c62c5e84620f7028859c9b178642d6 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:29 +0000 Subject: [PATCH 25/68] target/arm: Implement FPCR.AH semantics for FMAXV and FMINV Implement the FPCR.AH semantics for FMAXV and FMINV. These are the "recursively reduce all lanes of a vector to a scalar result" insns; we just need to use the _ah_ helper for the reduction step when FPCR.AH == 1. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 24695ab55b..4ae0426bf7 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -7018,27 +7018,35 @@ static TCGv_i32 do_reduction_op(DisasContext *s, int rn, MemOp esz, } static bool do_fp_reduction(DisasContext *s, arg_qrr_e *a, - NeonGenTwoSingleOpFn *fn) + NeonGenTwoSingleOpFn *fnormal, + NeonGenTwoSingleOpFn *fah) { if (fp_access_check(s)) { MemOp esz = a->esz; int elts = (a->q ? 16 : 8) >> esz; TCGv_ptr fpst = fpstatus_ptr(esz == MO_16 ? FPST_A64_F16 : FPST_A64); - TCGv_i32 res = do_reduction_op(s, a->rn, esz, 0, elts, fpst, fn); + TCGv_i32 res = do_reduction_op(s, a->rn, esz, 0, elts, fpst, + s->fpcr_ah ? 
fah : fnormal); write_fp_sreg(s, a->rd, res); } return true; } -TRANS_FEAT(FMAXNMV_h, aa64_fp16, do_fp_reduction, a, gen_helper_vfp_maxnumh) -TRANS_FEAT(FMINNMV_h, aa64_fp16, do_fp_reduction, a, gen_helper_vfp_minnumh) -TRANS_FEAT(FMAXV_h, aa64_fp16, do_fp_reduction, a, gen_helper_vfp_maxh) -TRANS_FEAT(FMINV_h, aa64_fp16, do_fp_reduction, a, gen_helper_vfp_minh) +TRANS_FEAT(FMAXNMV_h, aa64_fp16, do_fp_reduction, a, + gen_helper_vfp_maxnumh, gen_helper_vfp_maxnumh) +TRANS_FEAT(FMINNMV_h, aa64_fp16, do_fp_reduction, a, + gen_helper_vfp_minnumh, gen_helper_vfp_minnumh) +TRANS_FEAT(FMAXV_h, aa64_fp16, do_fp_reduction, a, + gen_helper_vfp_maxh, gen_helper_vfp_ah_maxh) +TRANS_FEAT(FMINV_h, aa64_fp16, do_fp_reduction, a, + gen_helper_vfp_minh, gen_helper_vfp_ah_minh) -TRANS(FMAXNMV_s, do_fp_reduction, a, gen_helper_vfp_maxnums) -TRANS(FMINNMV_s, do_fp_reduction, a, gen_helper_vfp_minnums) -TRANS(FMAXV_s, do_fp_reduction, a, gen_helper_vfp_maxs) -TRANS(FMINV_s, do_fp_reduction, a, gen_helper_vfp_mins) +TRANS(FMAXNMV_s, do_fp_reduction, a, + gen_helper_vfp_maxnums, gen_helper_vfp_maxnums) +TRANS(FMINNMV_s, do_fp_reduction, a, + gen_helper_vfp_minnums, gen_helper_vfp_minnums) +TRANS(FMAXV_s, do_fp_reduction, a, gen_helper_vfp_maxs, gen_helper_vfp_ah_maxs) +TRANS(FMINV_s, do_fp_reduction, a, gen_helper_vfp_mins, gen_helper_vfp_ah_mins) /* * Floating-point Immediate From 384433e709836209ae34bedda8a2a57992be8e18 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:30 +0000 Subject: [PATCH 26/68] target/arm: Implement FPCR.AH semantics for FMINP and FMAXP Implement the FPCR.AH semantics for the pairwise floating point minimum/maximum insns FMINP and FMAXP. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-sve.h | 14 ++++++++++++++ target/arm/tcg/translate-a64.c | 25 +++++++++++++++++++++---- target/arm/tcg/vec_helper.c | 10 ++++++++++ 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/target/arm/tcg/helper-sve.h b/target/arm/tcg/helper-sve.h index f1b4606f76..8349752e99 100644 --- a/target/arm/tcg/helper-sve.h +++ b/target/arm/tcg/helper-sve.h @@ -986,6 +986,20 @@ DEF_HELPER_FLAGS_5(gvec_ah_fmin_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_5(gvec_ah_fmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmaxp_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmaxp_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmaxp_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_ah_fminp_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fminp_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fminp_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + DEF_HELPER_FLAGS_4(sve_faddv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_faddv_s, TCG_CALL_NO_RWG, diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 4ae0426bf7..02701b10c6 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -5765,14 +5765,24 @@ static gen_helper_gvec_3_ptr * const f_vector_fmaxp[3] = { gen_helper_gvec_fmaxp_s, gen_helper_gvec_fmaxp_d, }; -TRANS(FMAXP_v, do_fp3_vector, a, 0, f_vector_fmaxp) +static gen_helper_gvec_3_ptr * const f_vector_ah_fmaxp[3] = { + gen_helper_gvec_ah_fmaxp_h, + gen_helper_gvec_ah_fmaxp_s, + gen_helper_gvec_ah_fmaxp_d, +}; +TRANS(FMAXP_v, do_fp3_vector_2fn, a, 0, f_vector_fmaxp, 
f_vector_ah_fmaxp) static gen_helper_gvec_3_ptr * const f_vector_fminp[3] = { gen_helper_gvec_fminp_h, gen_helper_gvec_fminp_s, gen_helper_gvec_fminp_d, }; -TRANS(FMINP_v, do_fp3_vector, a, 0, f_vector_fminp) +static gen_helper_gvec_3_ptr * const f_vector_ah_fminp[3] = { + gen_helper_gvec_ah_fminp_h, + gen_helper_gvec_ah_fminp_s, + gen_helper_gvec_ah_fminp_d, +}; +TRANS(FMINP_v, do_fp3_vector_2fn, a, 0, f_vector_fminp, f_vector_ah_fminp) static gen_helper_gvec_3_ptr * const f_vector_fmaxnmp[3] = { gen_helper_gvec_fmaxnump_h, @@ -6764,9 +6774,16 @@ static bool do_fp3_scalar_pair(DisasContext *s, arg_rr_e *a, const FPScalar *f) return true; } +static bool do_fp3_scalar_pair_2fn(DisasContext *s, arg_rr_e *a, + const FPScalar *fnormal, + const FPScalar *fah) +{ + return do_fp3_scalar_pair(s, a, s->fpcr_ah ? fah : fnormal); +} + TRANS(FADDP_s, do_fp3_scalar_pair, a, &f_scalar_fadd) -TRANS(FMAXP_s, do_fp3_scalar_pair, a, &f_scalar_fmax) -TRANS(FMINP_s, do_fp3_scalar_pair, a, &f_scalar_fmin) +TRANS(FMAXP_s, do_fp3_scalar_pair_2fn, a, &f_scalar_fmax, &f_scalar_fmax_ah) +TRANS(FMINP_s, do_fp3_scalar_pair_2fn, a, &f_scalar_fmin, &f_scalar_fmin_ah) TRANS(FMAXNMP_s, do_fp3_scalar_pair, a, &f_scalar_fmaxnm) TRANS(FMINNMP_s, do_fp3_scalar_pair, a, &f_scalar_fminnm) diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index 9f77aa6b91..61e6e54570 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -2444,6 +2444,16 @@ DO_3OP_PAIR(gvec_fminnump_h, float16_minnum, float16, H2) DO_3OP_PAIR(gvec_fminnump_s, float32_minnum, float32, H4) DO_3OP_PAIR(gvec_fminnump_d, float64_minnum, float64, ) +#ifdef TARGET_AARCH64 +DO_3OP_PAIR(gvec_ah_fmaxp_h, helper_vfp_ah_maxh, float16, H2) +DO_3OP_PAIR(gvec_ah_fmaxp_s, helper_vfp_ah_maxs, float32, H4) +DO_3OP_PAIR(gvec_ah_fmaxp_d, helper_vfp_ah_maxd, float64, ) + +DO_3OP_PAIR(gvec_ah_fminp_h, helper_vfp_ah_minh, float16, H2) +DO_3OP_PAIR(gvec_ah_fminp_s, helper_vfp_ah_mins, float32, H4) +DO_3OP_PAIR(gvec_ah_fminp_d, helper_vfp_ah_mind, float64, ) +#endif + #undef DO_3OP_PAIR #define DO_3OP_PAIR(NAME, FUNC, TYPE, H) \ From dac3a42f39ef1fae52b5b7b15662f3277f9ce7f2 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:31 +0000 Subject: [PATCH 27/68] target/arm: Implement FPCR.AH semantics for SVE FMAXV and FMINV Implement the FPCR.AH semantics for the SVE FMAXV and FMINV vector-reduction-to-scalar max/min operations. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-sve.h | 14 +++++++++++ target/arm/tcg/sve_helper.c | 43 +++++++++++++++++++++------------- target/arm/tcg/translate-sve.c | 16 +++++++++++-- 3 files changed, 55 insertions(+), 18 deletions(-) diff --git a/target/arm/tcg/helper-sve.h b/target/arm/tcg/helper-sve.h index 8349752e99..7ca95b8fa9 100644 --- a/target/arm/tcg/helper-sve.h +++ b/target/arm/tcg/helper-sve.h @@ -1035,6 +1035,20 @@ DEF_HELPER_FLAGS_4(sve_fminv_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_4(sve_fminv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sve_ah_fmaxv_h, TCG_CALL_NO_RWG, + i64, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sve_ah_fmaxv_s, TCG_CALL_NO_RWG, + i64, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sve_ah_fmaxv_d, TCG_CALL_NO_RWG, + i64, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(sve_ah_fminv_h, TCG_CALL_NO_RWG, + i64, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sve_ah_fminv_s, TCG_CALL_NO_RWG, + i64, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sve_ah_fminv_d, TCG_CALL_NO_RWG, + i64, ptr, ptr, fpst, i32) + DEF_HELPER_FLAGS_5(sve_fadda_h, TCG_CALL_NO_RWG, i64, i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fadda_s, TCG_CALL_NO_RWG, diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index 9837c5bc7a..3631d85f23 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -4190,7 +4190,7 @@ static TYPE NAME##_reduce(TYPE *data, float_status *status, uintptr_t n) \ uintptr_t half = n / 2; \ TYPE lo = NAME##_reduce(data, status, half); \ TYPE hi = NAME##_reduce(data + half, status, half); \ - return TYPE##_##FUNC(lo, hi, status); \ + return FUNC(lo, hi, status); \ } \ } \ uint64_t HELPER(NAME)(void *vn, void *vg, float_status *s, uint32_t desc) \ @@ -4211,26 +4211,37 @@ uint64_t HELPER(NAME)(void *vn, void *vg, float_status *s, uint32_t desc) \ return NAME##_reduce(data, s, maxsz / sizeof(TYPE)); \ } -DO_REDUCE(sve_faddv_h, float16, H1_2, add, float16_zero) -DO_REDUCE(sve_faddv_s, float32, H1_4, add, float32_zero) -DO_REDUCE(sve_faddv_d, float64, H1_8, add, float64_zero) +DO_REDUCE(sve_faddv_h, float16, H1_2, float16_add, float16_zero) +DO_REDUCE(sve_faddv_s, float32, H1_4, float32_add, float32_zero) +DO_REDUCE(sve_faddv_d, float64, H1_8, float64_add, float64_zero) /* Identity is floatN_default_nan, without the function call. 
*/ -DO_REDUCE(sve_fminnmv_h, float16, H1_2, minnum, 0x7E00) -DO_REDUCE(sve_fminnmv_s, float32, H1_4, minnum, 0x7FC00000) -DO_REDUCE(sve_fminnmv_d, float64, H1_8, minnum, 0x7FF8000000000000ULL) +DO_REDUCE(sve_fminnmv_h, float16, H1_2, float16_minnum, 0x7E00) +DO_REDUCE(sve_fminnmv_s, float32, H1_4, float32_minnum, 0x7FC00000) +DO_REDUCE(sve_fminnmv_d, float64, H1_8, float64_minnum, 0x7FF8000000000000ULL) -DO_REDUCE(sve_fmaxnmv_h, float16, H1_2, maxnum, 0x7E00) -DO_REDUCE(sve_fmaxnmv_s, float32, H1_4, maxnum, 0x7FC00000) -DO_REDUCE(sve_fmaxnmv_d, float64, H1_8, maxnum, 0x7FF8000000000000ULL) +DO_REDUCE(sve_fmaxnmv_h, float16, H1_2, float16_maxnum, 0x7E00) +DO_REDUCE(sve_fmaxnmv_s, float32, H1_4, float32_maxnum, 0x7FC00000) +DO_REDUCE(sve_fmaxnmv_d, float64, H1_8, float64_maxnum, 0x7FF8000000000000ULL) -DO_REDUCE(sve_fminv_h, float16, H1_2, min, float16_infinity) -DO_REDUCE(sve_fminv_s, float32, H1_4, min, float32_infinity) -DO_REDUCE(sve_fminv_d, float64, H1_8, min, float64_infinity) +DO_REDUCE(sve_fminv_h, float16, H1_2, float16_min, float16_infinity) +DO_REDUCE(sve_fminv_s, float32, H1_4, float32_min, float32_infinity) +DO_REDUCE(sve_fminv_d, float64, H1_8, float64_min, float64_infinity) -DO_REDUCE(sve_fmaxv_h, float16, H1_2, max, float16_chs(float16_infinity)) -DO_REDUCE(sve_fmaxv_s, float32, H1_4, max, float32_chs(float32_infinity)) -DO_REDUCE(sve_fmaxv_d, float64, H1_8, max, float64_chs(float64_infinity)) +DO_REDUCE(sve_fmaxv_h, float16, H1_2, float16_max, float16_chs(float16_infinity)) +DO_REDUCE(sve_fmaxv_s, float32, H1_4, float32_max, float32_chs(float32_infinity)) +DO_REDUCE(sve_fmaxv_d, float64, H1_8, float64_max, float64_chs(float64_infinity)) + +DO_REDUCE(sve_ah_fminv_h, float16, H1_2, helper_vfp_ah_minh, float16_infinity) +DO_REDUCE(sve_ah_fminv_s, float32, H1_4, helper_vfp_ah_mins, float32_infinity) +DO_REDUCE(sve_ah_fminv_d, float64, H1_8, helper_vfp_ah_mind, float64_infinity) + +DO_REDUCE(sve_ah_fmaxv_h, float16, H1_2, helper_vfp_ah_maxh, + float16_chs(float16_infinity)) +DO_REDUCE(sve_ah_fmaxv_s, float32, H1_4, helper_vfp_ah_maxs, + float32_chs(float32_infinity)) +DO_REDUCE(sve_ah_fmaxv_d, float64, H1_8, helper_vfp_ah_maxd, + float64_chs(float64_infinity)) #undef DO_REDUCE diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index e38a49dd31..35f6d78a0e 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -3588,11 +3588,23 @@ static bool do_reduce(DisasContext *s, arg_rpr_esz *a, }; \ TRANS_FEAT(NAME, aa64_sve, do_reduce, a, name##_fns[a->esz]) +#define DO_VPZ_AH(NAME, name) \ + static gen_helper_fp_reduce * const name##_fns[4] = { \ + NULL, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ + }; \ + static gen_helper_fp_reduce * const name##_ah_fns[4] = { \ + NULL, gen_helper_sve_ah_##name##_h, \ + gen_helper_sve_ah_##name##_s, gen_helper_sve_ah_##name##_d, \ + }; \ + TRANS_FEAT(NAME, aa64_sve, do_reduce, a, \ + s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz]) + DO_VPZ(FADDV, faddv) DO_VPZ(FMINNMV, fminnmv) DO_VPZ(FMAXNMV, fmaxnmv) -DO_VPZ(FMINV, fminv) -DO_VPZ(FMAXV, fmaxv) +DO_VPZ_AH(FMINV, fminv) +DO_VPZ_AH(FMAXV, fmaxv) #undef DO_VPZ From bf92725bb26e6bce2b2267f384d8070595c46daa Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:32 +0000 Subject: [PATCH 28/68] target/arm: Implement FPCR.AH semantics for SVE FMIN/FMAX immediate Implement the FPCR.AH semantics for the SVE FMAX and FMIN operations that take an immediate as the second operand. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-sve.h | 14 ++++++++++++++ target/arm/tcg/sve_helper.c | 8 ++++++++ target/arm/tcg/translate-sve.c | 25 +++++++++++++++++++++++-- 3 files changed, 45 insertions(+), 2 deletions(-) diff --git a/target/arm/tcg/helper-sve.h b/target/arm/tcg/helper-sve.h index 7ca95b8fa9..3c1d2624ed 100644 --- a/target/arm/tcg/helper-sve.h +++ b/target/arm/tcg/helper-sve.h @@ -1231,6 +1231,20 @@ DEF_HELPER_FLAGS_6(sve_fmins_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(sve_fmins_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmaxs_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmaxs_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmaxs_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, fpst, i32) + +DEF_HELPER_FLAGS_6(sve_ah_fmins_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmins_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmins_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, fpst, i32) + DEF_HELPER_FLAGS_5(sve_fcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvt_dh, TCG_CALL_NO_RWG, diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index 3631d85f23..2f6fc82ee4 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -4459,6 +4459,14 @@ DO_ZPZS_FP(sve_fmins_h, float16, H1_2, float16_min) DO_ZPZS_FP(sve_fmins_s, float32, H1_4, float32_min) DO_ZPZS_FP(sve_fmins_d, float64, H1_8, float64_min) +DO_ZPZS_FP(sve_ah_fmaxs_h, float16, H1_2, helper_vfp_ah_maxh) +DO_ZPZS_FP(sve_ah_fmaxs_s, float32, H1_4, helper_vfp_ah_maxs) +DO_ZPZS_FP(sve_ah_fmaxs_d, float64, H1_8, helper_vfp_ah_maxd) + +DO_ZPZS_FP(sve_ah_fmins_h, float16, H1_2, helper_vfp_ah_minh) +DO_ZPZS_FP(sve_ah_fmins_s, float32, H1_4, helper_vfp_ah_mins) +DO_ZPZS_FP(sve_ah_fmins_d, float64, H1_8, helper_vfp_ah_mind) + /* Fully general two-operand expander, controlled by a predicate, * With the extra float_status parameter. */ diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 35f6d78a0e..187bd647c2 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -3821,14 +3821,35 @@ static bool do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm, TRANS_FEAT(NAME##_zpzi, aa64_sve, do_fp_imm, a, \ name##_const[a->esz][a->imm], name##_fns[a->esz]) +#define DO_FP_AH_IMM(NAME, name, const0, const1) \ + static gen_helper_sve_fp2scalar * const name##_fns[4] = { \ + NULL, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, \ + gen_helper_sve_##name##_d \ + }; \ + static gen_helper_sve_fp2scalar * const name##_ah_fns[4] = { \ + NULL, gen_helper_sve_ah_##name##_h, \ + gen_helper_sve_ah_##name##_s, \ + gen_helper_sve_ah_##name##_d \ + }; \ + static uint64_t const name##_const[4][2] = { \ + { -1, -1 }, \ + { float16_##const0, float16_##const1 }, \ + { float32_##const0, float32_##const1 }, \ + { float64_##const0, float64_##const1 }, \ + }; \ + TRANS_FEAT(NAME##_zpzi, aa64_sve, do_fp_imm, a, \ + name##_const[a->esz][a->imm], \ + s->fpcr_ah ? 
name##_ah_fns[a->esz] : name##_fns[a->esz]) + DO_FP_IMM(FADD, fadds, half, one) DO_FP_IMM(FSUB, fsubs, half, one) DO_FP_IMM(FMUL, fmuls, half, two) DO_FP_IMM(FSUBR, fsubrs, half, one) DO_FP_IMM(FMAXNM, fmaxnms, zero, one) DO_FP_IMM(FMINNM, fminnms, zero, one) -DO_FP_IMM(FMAX, fmaxs, zero, one) -DO_FP_IMM(FMIN, fmins, zero, one) +DO_FP_AH_IMM(FMAX, fmaxs, zero, one) +DO_FP_AH_IMM(FMIN, fmins, zero, one) #undef DO_FP_IMM From 4ba5383bc50d8ef18aed0d6aaa203c331dc0abf5 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:33 +0000 Subject: [PATCH 29/68] target/arm: Implement FPCR.AH semantics for SVE FMIN/FMAX vector Implement the FPCR.AH semantics for the SVE FMAX and FMIN operations that take two vector operands. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-sve.h | 14 ++++++++++++++ target/arm/tcg/sve_helper.c | 8 ++++++++ target/arm/tcg/translate-sve.c | 17 +++++++++++++++-- 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/target/arm/tcg/helper-sve.h b/target/arm/tcg/helper-sve.h index 3c1d2624ed..918f2e61b7 100644 --- a/target/arm/tcg/helper-sve.h +++ b/target/arm/tcg/helper-sve.h @@ -1140,6 +1140,20 @@ DEF_HELPER_FLAGS_6(sve_fmax_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(sve_fmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmin_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmin_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmin_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_6(sve_ah_fmax_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmax_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmax_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) + DEF_HELPER_FLAGS_6(sve_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fminnum_s, TCG_CALL_NO_RWG, diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index 2f6fc82ee4..a688b98d28 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -4347,6 +4347,14 @@ DO_ZPZZ_FP(sve_fmax_h, uint16_t, H1_2, float16_max) DO_ZPZZ_FP(sve_fmax_s, uint32_t, H1_4, float32_max) DO_ZPZZ_FP(sve_fmax_d, uint64_t, H1_8, float64_max) +DO_ZPZZ_FP(sve_ah_fmin_h, uint16_t, H1_2, helper_vfp_ah_minh) +DO_ZPZZ_FP(sve_ah_fmin_s, uint32_t, H1_4, helper_vfp_ah_mins) +DO_ZPZZ_FP(sve_ah_fmin_d, uint64_t, H1_8, helper_vfp_ah_mind) + +DO_ZPZZ_FP(sve_ah_fmax_h, uint16_t, H1_2, helper_vfp_ah_maxh) +DO_ZPZZ_FP(sve_ah_fmax_s, uint32_t, H1_4, helper_vfp_ah_maxs) +DO_ZPZZ_FP(sve_ah_fmax_d, uint64_t, H1_8, helper_vfp_ah_maxd) + DO_ZPZZ_FP(sve_fminnum_h, uint16_t, H1_2, float16_minnum) DO_ZPZZ_FP(sve_fminnum_s, uint32_t, H1_4, float32_minnum) DO_ZPZZ_FP(sve_fminnum_d, uint64_t, H1_8, float64_minnum) diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 187bd647c2..2813e5f487 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -3759,11 +3759,24 @@ TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz, }; \ TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, name##_zpzz_fns[a->esz], a) +#define DO_ZPZZ_AH_FP(NAME, FEAT, name, ah_name) \ + static gen_helper_gvec_4_ptr * const name##_zpzz_fns[4] = { \ + NULL, gen_helper_##name##_h, \ + gen_helper_##name##_s, gen_helper_##name##_d \ + }; \ + static gen_helper_gvec_4_ptr * const name##_ah_zpzz_fns[4] = { \ 
+ NULL, gen_helper_##ah_name##_h, \ + gen_helper_##ah_name##_s, gen_helper_##ah_name##_d \ + }; \ + TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, \ + s->fpcr_ah ? name##_ah_zpzz_fns[a->esz] : \ + name##_zpzz_fns[a->esz], a) + DO_ZPZZ_FP(FADD_zpzz, aa64_sve, sve_fadd) DO_ZPZZ_FP(FSUB_zpzz, aa64_sve, sve_fsub) DO_ZPZZ_FP(FMUL_zpzz, aa64_sve, sve_fmul) -DO_ZPZZ_FP(FMIN_zpzz, aa64_sve, sve_fmin) -DO_ZPZZ_FP(FMAX_zpzz, aa64_sve, sve_fmax) +DO_ZPZZ_AH_FP(FMIN_zpzz, aa64_sve, sve_fmin, sve_ah_fmin) +DO_ZPZZ_AH_FP(FMAX_zpzz, aa64_sve, sve_fmax, sve_ah_fmax) DO_ZPZZ_FP(FMINNM_zpzz, aa64_sve, sve_fminnum) DO_ZPZZ_FP(FMAXNM_zpzz, aa64_sve, sve_fmaxnum) DO_ZPZZ_FP(FABD, aa64_sve, sve_fabd) From e76df44d2dd06018016f35aff41258ce356d6e40 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:34 +0000 Subject: [PATCH 30/68] target/arm: Implement FPCR.AH handling of negation of NaN FPCR.AH == 1 mandates that negation of a NaN value should not flip its sign bit. This means we can no longer use gen_vfp_neg*() everywhere but must instead generate slightly more complex code when FPCR.AH is set. Make this change for the scalar FNEG and for those places in translate-a64.c which were previously directly calling gen_vfp_neg*(). This change in semantics also affects any other instruction whose pseudocode calls FPNeg(); in following commits we extend this change to the other affected instructions. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 125 ++++++++++++++++++++++++++++++--- 1 file changed, 114 insertions(+), 11 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 02701b10c6..abd0d916b1 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -828,6 +828,74 @@ static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn, is_q ? 16 : 8, vec_full_reg_size(s), data, fn); } +/* + * When FPCR.AH == 1, NEG and ABS do not flip the sign bit of a NaN. + * These functions implement + * d = floatN_is_any_nan(s) ? s : floatN_chs(s) + * which for float32 is + * d = (s & ~(1 << 31)) > 0x7f800000UL) ? s : (s ^ (1 << 31)) + * and similarly for the other float sizes. 
+ */ +static void gen_vfp_ah_negh(TCGv_i32 d, TCGv_i32 s) +{ + TCGv_i32 abs_s = tcg_temp_new_i32(), chs_s = tcg_temp_new_i32(); + + gen_vfp_negh(chs_s, s); + gen_vfp_absh(abs_s, s); + tcg_gen_movcond_i32(TCG_COND_GTU, d, + abs_s, tcg_constant_i32(0x7c00), + s, chs_s); +} + +static void gen_vfp_ah_negs(TCGv_i32 d, TCGv_i32 s) +{ + TCGv_i32 abs_s = tcg_temp_new_i32(), chs_s = tcg_temp_new_i32(); + + gen_vfp_negs(chs_s, s); + gen_vfp_abss(abs_s, s); + tcg_gen_movcond_i32(TCG_COND_GTU, d, + abs_s, tcg_constant_i32(0x7f800000UL), + s, chs_s); +} + +static void gen_vfp_ah_negd(TCGv_i64 d, TCGv_i64 s) +{ + TCGv_i64 abs_s = tcg_temp_new_i64(), chs_s = tcg_temp_new_i64(); + + gen_vfp_negd(chs_s, s); + gen_vfp_absd(abs_s, s); + tcg_gen_movcond_i64(TCG_COND_GTU, d, + abs_s, tcg_constant_i64(0x7ff0000000000000ULL), + s, chs_s); +} + +static void gen_vfp_maybe_ah_negh(DisasContext *dc, TCGv_i32 d, TCGv_i32 s) +{ + if (dc->fpcr_ah) { + gen_vfp_ah_negh(d, s); + } else { + gen_vfp_negh(d, s); + } +} + +static void gen_vfp_maybe_ah_negs(DisasContext *dc, TCGv_i32 d, TCGv_i32 s) +{ + if (dc->fpcr_ah) { + gen_vfp_ah_negs(d, s); + } else { + gen_vfp_negs(d, s); + } +} + +static void gen_vfp_maybe_ah_negd(DisasContext *dc, TCGv_i64 d, TCGv_i64 s) +{ + if (dc->fpcr_ah) { + gen_vfp_ah_negd(d, s); + } else { + gen_vfp_negd(d, s); + } +} + /* Set ZF and NF based on a 64 bit result. This is alas fiddlier * than the 32 bit equivalent. */ @@ -5241,12 +5309,35 @@ static void gen_fnmul_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s) gen_vfp_negd(d, d); } +static void gen_fnmul_ah_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) +{ + gen_helper_vfp_mulh(d, n, m, s); + gen_vfp_ah_negh(d, d); +} + +static void gen_fnmul_ah_s(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) +{ + gen_helper_vfp_muls(d, n, m, s); + gen_vfp_ah_negs(d, d); +} + +static void gen_fnmul_ah_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s) +{ + gen_helper_vfp_muld(d, n, m, s); + gen_vfp_ah_negd(d, d); +} + static const FPScalar f_scalar_fnmul = { gen_fnmul_h, gen_fnmul_s, gen_fnmul_d, }; -TRANS(FNMUL_s, do_fp3_scalar, a, &f_scalar_fnmul, a->rn) +static const FPScalar f_scalar_ah_fnmul = { + gen_fnmul_ah_h, + gen_fnmul_ah_s, + gen_fnmul_ah_d, +}; +TRANS(FNMUL_s, do_fp3_scalar_2fn, a, &f_scalar_fnmul, &f_scalar_ah_fnmul, a->rn) static const FPScalar f_scalar_fcmeq = { gen_helper_advsimd_ceq_f16, @@ -6388,7 +6479,7 @@ static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg) read_vec_element(s, t2, a->rm, a->idx, MO_64); if (neg) { - gen_vfp_negd(t1, t1); + gen_vfp_maybe_ah_negd(s, t1, t1); } gen_helper_vfp_muladdd(t0, t1, t2, t0, fpstatus_ptr(FPST_A64)); write_fp_dreg_merging(s, a->rd, a->rd, t0); @@ -6402,7 +6493,7 @@ static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg) read_vec_element_i32(s, t2, a->rm, a->idx, MO_32); if (neg) { - gen_vfp_negs(t1, t1); + gen_vfp_maybe_ah_negs(s, t1, t1); } gen_helper_vfp_muladds(t0, t1, t2, t0, fpstatus_ptr(FPST_A64)); write_fp_sreg_merging(s, a->rd, a->rd, t0); @@ -6419,7 +6510,7 @@ static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg) read_vec_element_i32(s, t2, a->rm, a->idx, MO_16); if (neg) { - gen_vfp_negh(t1, t1); + gen_vfp_maybe_ah_negh(s, t1, t1); } gen_helper_advsimd_muladdh(t0, t1, t2, t0, fpstatus_ptr(FPST_A64_F16)); @@ -6902,10 +6993,10 @@ static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n) TCGv_i64 ta = read_fp_dreg(s, a->ra); if (neg_a) { - gen_vfp_negd(ta, ta); + gen_vfp_maybe_ah_negd(s, ta, ta); } if (neg_n) { - 
gen_vfp_negd(tn, tn); + gen_vfp_maybe_ah_negd(s, tn, tn); } fpst = fpstatus_ptr(FPST_A64); gen_helper_vfp_muladdd(ta, tn, tm, ta, fpst); @@ -6920,10 +7011,10 @@ static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n) TCGv_i32 ta = read_fp_sreg(s, a->ra); if (neg_a) { - gen_vfp_negs(ta, ta); + gen_vfp_maybe_ah_negs(s, ta, ta); } if (neg_n) { - gen_vfp_negs(tn, tn); + gen_vfp_maybe_ah_negs(s, tn, tn); } fpst = fpstatus_ptr(FPST_A64); gen_helper_vfp_muladds(ta, tn, tm, ta, fpst); @@ -6941,10 +7032,10 @@ static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n) TCGv_i32 ta = read_fp_hreg(s, a->ra); if (neg_a) { - gen_vfp_negh(ta, ta); + gen_vfp_maybe_ah_negh(s, ta, ta); } if (neg_n) { - gen_vfp_negh(tn, tn); + gen_vfp_maybe_ah_negh(s, tn, tn); } fpst = fpstatus_ptr(FPST_A64_F16); gen_helper_advsimd_muladdh(ta, tn, tm, ta, fpst); @@ -8533,6 +8624,13 @@ static bool do_fp1_scalar_int(DisasContext *s, arg_rr_e *a, return true; } +static bool do_fp1_scalar_int_2fn(DisasContext *s, arg_rr_e *a, + const FPScalar1Int *fnormal, + const FPScalar1Int *fah) +{ + return do_fp1_scalar_int(s, a, s->fpcr_ah ? fah : fnormal, true); +} + static const FPScalar1Int f_scalar_fmov = { tcg_gen_mov_i32, tcg_gen_mov_i32, @@ -8552,7 +8650,12 @@ static const FPScalar1Int f_scalar_fneg = { gen_vfp_negs, gen_vfp_negd, }; -TRANS(FNEG_s, do_fp1_scalar_int, a, &f_scalar_fneg, true) +static const FPScalar1Int f_scalar_ah_fneg = { + gen_vfp_ah_negh, + gen_vfp_ah_negs, + gen_vfp_ah_negd, +}; +TRANS(FNEG_s, do_fp1_scalar_int_2fn, a, &f_scalar_fneg, &f_scalar_ah_fneg) typedef struct FPScalar1 { void (*gen_h)(TCGv_i32, TCGv_i32, TCGv_ptr); From d091ce2d3f5bad6df30c686558807bcffb0c5a96 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:35 +0000 Subject: [PATCH 31/68] target/arm: Implement FPCR.AH handling for scalar FABS and FABD FPCR.AH == 1 mandates that taking the absolute value of a NaN should not change its sign bit. This means we can no longer use gen_vfp_abs*() everywhere but must instead generate slightly more complex code when FPCR.AH is set. Implement these semantics for scalar FABS and FABD. This change also affects all other instructions whose pseudocode calls FPAbs(); we will extend the change to those instructions in following commits. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 69 +++++++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 2 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index abd0d916b1..83cfcdc43b 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -869,6 +869,43 @@ static void gen_vfp_ah_negd(TCGv_i64 d, TCGv_i64 s) s, chs_s); } +/* + * These functions implement + * d = floatN_is_any_nan(s) ? s : floatN_abs(s) + * which for float32 is + * d = (s & ~(1 << 31)) > 0x7f800000UL) ? s : (s & ~(1 << 31)) + * and similarly for the other float sizes.
+ */ +static void gen_vfp_ah_absh(TCGv_i32 d, TCGv_i32 s) +{ + TCGv_i32 abs_s = tcg_temp_new_i32(); + + gen_vfp_absh(abs_s, s); + tcg_gen_movcond_i32(TCG_COND_GTU, d, + abs_s, tcg_constant_i32(0x7c00), + s, abs_s); +} + +static void gen_vfp_ah_abss(TCGv_i32 d, TCGv_i32 s) +{ + TCGv_i32 abs_s = tcg_temp_new_i32(); + + gen_vfp_abss(abs_s, s); + tcg_gen_movcond_i32(TCG_COND_GTU, d, + abs_s, tcg_constant_i32(0x7f800000UL), + s, abs_s); +} + +static void gen_vfp_ah_absd(TCGv_i64 d, TCGv_i64 s) +{ + TCGv_i64 abs_s = tcg_temp_new_i64(); + + gen_vfp_absd(abs_s, s); + tcg_gen_movcond_i64(TCG_COND_GTU, d, + abs_s, tcg_constant_i64(0x7ff0000000000000ULL), + s, abs_s); +} + static void gen_vfp_maybe_ah_negh(DisasContext *dc, TCGv_i32 d, TCGv_i32 s) { if (dc->fpcr_ah) { @@ -5392,12 +5429,35 @@ static void gen_fabd_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s) gen_vfp_absd(d, d); } +static void gen_fabd_ah_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) +{ + gen_helper_vfp_subh(d, n, m, s); + gen_vfp_ah_absh(d, d); +} + +static void gen_fabd_ah_s(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) +{ + gen_helper_vfp_subs(d, n, m, s); + gen_vfp_ah_abss(d, d); +} + +static void gen_fabd_ah_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s) +{ + gen_helper_vfp_subd(d, n, m, s); + gen_vfp_ah_absd(d, d); +} + static const FPScalar f_scalar_fabd = { gen_fabd_h, gen_fabd_s, gen_fabd_d, }; -TRANS(FABD_s, do_fp3_scalar, a, &f_scalar_fabd, a->rn) +static const FPScalar f_scalar_ah_fabd = { + gen_fabd_ah_h, + gen_fabd_ah_s, + gen_fabd_ah_d, +}; +TRANS(FABD_s, do_fp3_scalar_2fn, a, &f_scalar_fabd, &f_scalar_ah_fabd, a->rn) static const FPScalar f_scalar_frecps = { gen_helper_recpsf_f16, @@ -8643,7 +8703,12 @@ static const FPScalar1Int f_scalar_fabs = { gen_vfp_abss, gen_vfp_absd, }; -TRANS(FABS_s, do_fp1_scalar_int, a, &f_scalar_fabs, true) +static const FPScalar1Int f_scalar_ah_fabs = { + gen_vfp_ah_absh, + gen_vfp_ah_abss, + gen_vfp_ah_absd, +}; +TRANS(FABS_s, do_fp1_scalar_int_2fn, a, &f_scalar_fabs, &f_scalar_ah_fabs) static const FPScalar1Int f_scalar_fneg = { gen_vfp_negh, From 538deec62339594ecac4434e278b43b9807085e2 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:36 +0000 Subject: [PATCH 32/68] target/arm: Handle FPCR.AH in vector FABD Split the handling of vector FABD so that it calls a different set of helpers when FPCR.AH is 1, which implement the "no negation of the sign of a NaN" semantics. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/helper.h | 4 ++++ target/arm/tcg/translate-a64.c | 7 ++++++- target/arm/tcg/vec_helper.c | 23 +++++++++++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/target/arm/helper.h b/target/arm/helper.h index 15bad0773c..43505d5fed 100644 --- a/target/arm/helper.h +++ b/target/arm/helper.h @@ -722,6 +722,10 @@ DEF_HELPER_FLAGS_5(gvec_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + DEF_HELPER_FLAGS_5(gvec_fceq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fceq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fceq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 83cfcdc43b..76c41b9bdb 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -5888,7 +5888,12 @@ static gen_helper_gvec_3_ptr * const f_vector_fabd[3] = { gen_helper_gvec_fabd_s, gen_helper_gvec_fabd_d, }; -TRANS(FABD_v, do_fp3_vector, a, 0, f_vector_fabd) +static gen_helper_gvec_3_ptr * const f_vector_ah_fabd[3] = { + gen_helper_gvec_ah_fabd_h, + gen_helper_gvec_ah_fabd_s, + gen_helper_gvec_ah_fabd_d, +}; +TRANS(FABD_v, do_fp3_vector_2fn, a, 0, f_vector_fabd, f_vector_ah_fabd) static gen_helper_gvec_3_ptr * const f_vector_frecps[3] = { gen_helper_gvec_recps_h, diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index 61e6e54570..0b84a562c0 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -1302,6 +1302,25 @@ static float64 float64_abd(float64 op1, float64 op2, float_status *stat) return float64_abs(float64_sub(op1, op2, stat)); } +/* ABD when FPCR.AH = 1: avoid flipping sign bit of a NaN result */ +static float16 float16_ah_abd(float16 op1, float16 op2, float_status *stat) +{ + float16 r = float16_sub(op1, op2, stat); + return float16_is_any_nan(r) ? r : float16_abs(r); +} + +static float32 float32_ah_abd(float32 op1, float32 op2, float_status *stat) +{ + float32 r = float32_sub(op1, op2, stat); + return float32_is_any_nan(r) ? r : float32_abs(r); +} + +static float64 float64_ah_abd(float64 op1, float64 op2, float_status *stat) +{ + float64 r = float64_sub(op1, op2, stat); + return float64_is_any_nan(r) ? r : float64_abs(r); +} + /* * Reciprocal step. These are the AArch32 version which uses a * non-fused multiply-and-subtract. @@ -1389,6 +1408,10 @@ DO_3OP(gvec_fabd_h, float16_abd, float16) DO_3OP(gvec_fabd_s, float32_abd, float32) DO_3OP(gvec_fabd_d, float64_abd, float64) +DO_3OP(gvec_ah_fabd_h, float16_ah_abd, float16) +DO_3OP(gvec_ah_fabd_s, float32_ah_abd, float32) +DO_3OP(gvec_ah_fabd_d, float64_ah_abd, float64) + DO_3OP(gvec_fceq_h, float16_ceq, float16) DO_3OP(gvec_fceq_s, float32_ceq, float32) DO_3OP(gvec_fceq_d, float64_ceq, float64) From 47d3216240f642102821467809d6719509eebc12 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:37 +0000 Subject: [PATCH 33/68] target/arm: Handle FPCR.AH in SVE FNEG Make SVE FNEG honour the FPCR.AH "don't negate the sign of a NaN" semantics. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-sve.h | 4 ++++ target/arm/tcg/sve_helper.c | 8 ++++++++ target/arm/tcg/translate-sve.c | 7 ++++++- 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/target/arm/tcg/helper-sve.h b/target/arm/tcg/helper-sve.h index 918f2e61b7..867a6d96e0 100644 --- a/target/arm/tcg/helper-sve.h +++ b/target/arm/tcg/helper-sve.h @@ -545,6 +545,10 @@ DEF_HELPER_FLAGS_4(sve_fneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ah_fneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ah_fneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ah_fneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + DEF_HELPER_FLAGS_4(sve_not_zpz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_not_zpz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_not_zpz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index a688b98d28..976f3be44e 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -885,6 +885,14 @@ DO_ZPZ(sve_fneg_h, uint16_t, H1_2, DO_FNEG) DO_ZPZ(sve_fneg_s, uint32_t, H1_4, DO_FNEG) DO_ZPZ_D(sve_fneg_d, uint64_t, DO_FNEG) +#define DO_AH_FNEG_H(N) (float16_is_any_nan(N) ? (N) : DO_FNEG(N)) +#define DO_AH_FNEG_S(N) (float32_is_any_nan(N) ? (N) : DO_FNEG(N)) +#define DO_AH_FNEG_D(N) (float64_is_any_nan(N) ? (N) : DO_FNEG(N)) + +DO_ZPZ(sve_ah_fneg_h, uint16_t, H1_2, DO_AH_FNEG_H) +DO_ZPZ(sve_ah_fneg_s, uint32_t, H1_4, DO_AH_FNEG_S) +DO_ZPZ_D(sve_ah_fneg_d, uint64_t, DO_AH_FNEG_D) + #define DO_NOT(N) (~N) DO_ZPZ(sve_not_zpz_b, uint8_t, H1, DO_NOT) diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 2813e5f487..4d5de2004f 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -789,7 +789,12 @@ static gen_helper_gvec_3 * const fneg_fns[4] = { NULL, gen_helper_sve_fneg_h, gen_helper_sve_fneg_s, gen_helper_sve_fneg_d, }; -TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz, fneg_fns[a->esz], a, 0) +static gen_helper_gvec_3 * const fneg_ah_fns[4] = { + NULL, gen_helper_sve_ah_fneg_h, + gen_helper_sve_ah_fneg_s, gen_helper_sve_ah_fneg_d, +}; +TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz, + s->fpcr_ah ? fneg_ah_fns[a->esz] : fneg_fns[a->esz], a, 0) static gen_helper_gvec_3 * const sxtb_fns[4] = { NULL, gen_helper_sve_sxtb_h, From 4de25ed588dd29387af78829197bc16cfa00aa2e Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:38 +0000 Subject: [PATCH 34/68] target/arm: Handle FPCR.AH in SVE FABS Make SVE FABS honour the FPCR.AH "don't negate the sign of a NaN" semantics. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-sve.h | 4 ++++ target/arm/tcg/sve_helper.c | 8 ++++++++ target/arm/tcg/translate-sve.c | 7 ++++++- 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/target/arm/tcg/helper-sve.h b/target/arm/tcg/helper-sve.h index 867a6d96e0..ff12f650c8 100644 --- a/target/arm/tcg/helper-sve.h +++ b/target/arm/tcg/helper-sve.h @@ -541,6 +541,10 @@ DEF_HELPER_FLAGS_4(sve_fabs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fabs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fabs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ah_fabs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ah_fabs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ah_fabs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + DEF_HELPER_FLAGS_4(sve_fneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index 976f3be44e..5ce7d73647 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -879,6 +879,14 @@ DO_ZPZ(sve_fabs_h, uint16_t, H1_2, DO_FABS) DO_ZPZ(sve_fabs_s, uint32_t, H1_4, DO_FABS) DO_ZPZ_D(sve_fabs_d, uint64_t, DO_FABS) +#define DO_AH_FABS_H(N) (float16_is_any_nan(N) ? (N) : DO_FABS(N)) +#define DO_AH_FABS_S(N) (float32_is_any_nan(N) ? (N) : DO_FABS(N)) +#define DO_AH_FABS_D(N) (float64_is_any_nan(N) ? (N) : DO_FABS(N)) + +DO_ZPZ(sve_ah_fabs_h, uint16_t, H1_2, DO_AH_FABS_H) +DO_ZPZ(sve_ah_fabs_s, uint32_t, H1_4, DO_AH_FABS_S) +DO_ZPZ_D(sve_ah_fabs_d, uint64_t, DO_AH_FABS_D) + #define DO_FNEG(N) (N ^ ~((__typeof(N))-1 >> 1)) DO_ZPZ(sve_fneg_h, uint16_t, H1_2, DO_FNEG) diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 4d5de2004f..646d0171d9 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -783,7 +783,12 @@ static gen_helper_gvec_3 * const fabs_fns[4] = { NULL, gen_helper_sve_fabs_h, gen_helper_sve_fabs_s, gen_helper_sve_fabs_d, }; -TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz, fabs_fns[a->esz], a, 0) +static gen_helper_gvec_3 * const fabs_ah_fns[4] = { + NULL, gen_helper_sve_ah_fabs_h, + gen_helper_sve_ah_fabs_s, gen_helper_sve_ah_fabs_d, +}; +TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz, + s->fpcr_ah ? fabs_ah_fns[a->esz] : fabs_fns[a->esz], a, 0) static gen_helper_gvec_3 * const fneg_fns[4] = { NULL, gen_helper_sve_fneg_h, From 60dd5806608ea9be3fb9b542dadf2909bd0acb76 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:39 +0000 Subject: [PATCH 35/68] target/arm: Handle FPCR.AH in SVE FABD Make the SVE FABD insn honour the FPCR.AH "don't negate the sign of a NaN" semantics. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-sve.h | 7 +++++++ target/arm/tcg/sve_helper.c | 22 ++++++++++++++++++++++ target/arm/tcg/translate-sve.c | 2 +- 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/target/arm/tcg/helper-sve.h b/target/arm/tcg/helper-sve.h index ff12f650c8..29c70f054a 100644 --- a/target/arm/tcg/helper-sve.h +++ b/target/arm/tcg/helper-sve.h @@ -1183,6 +1183,13 @@ DEF_HELPER_FLAGS_6(sve_fabd_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(sve_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fabd_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fabd_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fabd_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) + DEF_HELPER_FLAGS_6(sve_fscalbn_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fscalbn_s, TCG_CALL_NO_RWG, diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index 5ce7d73647..8527a7495a 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -4394,9 +4394,31 @@ static inline float64 abd_d(float64 a, float64 b, float_status *s) return float64_abs(float64_sub(a, b, s)); } +/* ABD when FPCR.AH = 1: avoid flipping sign bit of a NaN result */ +static float16 ah_abd_h(float16 op1, float16 op2, float_status *stat) +{ + float16 r = float16_sub(op1, op2, stat); + return float16_is_any_nan(r) ? r : float16_abs(r); +} + +static float32 ah_abd_s(float32 op1, float32 op2, float_status *stat) +{ + float32 r = float32_sub(op1, op2, stat); + return float32_is_any_nan(r) ? r : float32_abs(r); +} + +static float64 ah_abd_d(float64 op1, float64 op2, float_status *stat) +{ + float64 r = float64_sub(op1, op2, stat); + return float64_is_any_nan(r) ? r : float64_abs(r); +} + DO_ZPZZ_FP(sve_fabd_h, uint16_t, H1_2, abd_h) DO_ZPZZ_FP(sve_fabd_s, uint32_t, H1_4, abd_s) DO_ZPZZ_FP(sve_fabd_d, uint64_t, H1_8, abd_d) +DO_ZPZZ_FP(sve_ah_fabd_h, uint16_t, H1_2, ah_abd_h) +DO_ZPZZ_FP(sve_ah_fabd_s, uint32_t, H1_4, ah_abd_s) +DO_ZPZZ_FP(sve_ah_fabd_d, uint64_t, H1_8, ah_abd_d) static inline float64 scalbn_d(float64 a, int64_t b, float_status *s) { diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 646d0171d9..0d8bd1a49c 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -3789,7 +3789,7 @@ DO_ZPZZ_AH_FP(FMIN_zpzz, aa64_sve, sve_fmin, sve_ah_fmin) DO_ZPZZ_AH_FP(FMAX_zpzz, aa64_sve, sve_fmax, sve_ah_fmax) DO_ZPZZ_FP(FMINNM_zpzz, aa64_sve, sve_fminnum) DO_ZPZZ_FP(FMAXNM_zpzz, aa64_sve, sve_fmaxnum) -DO_ZPZZ_FP(FABD, aa64_sve, sve_fabd) +DO_ZPZZ_AH_FP(FABD, aa64_sve, sve_fabd, sve_ah_fabd) DO_ZPZZ_FP(FSCALE, aa64_sve, sve_fscalbn) DO_ZPZZ_FP(FDIV, aa64_sve, sve_fdiv) DO_ZPZZ_FP(FMULX, aa64_sve, sve_fmulx) From 416650ac2ba1f30ddb41bea92fe7b8b7d1f6ec73 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:40 +0000 Subject: [PATCH 36/68] target/arm: Handle FPCR.AH in negation steps in SVE FCADD The negation steps in FCADD must honour FPCR.AH's "don't change the sign of a NaN" semantics. Implement this in the same way we did for the base ASIMD FCADD, by encoding FPCR.AH into the SIMD data field passed to the helper and using that to decide whether to negate the values. The construction of neg_imag and neg_real were done to make it easy to apply both in parallel with two simple logical operations. 
This changed with FPCR.AH, which is more complex than that. Switch to an approach that follows the pseudocode more closely, by extracting the 'rot=1' parameter from the SIMD data field and changing the sign of the appropriate input value. Note that there was a naming issue with neg_imag and neg_real. They were named backward, with neg_imag being non-zero for rot=1, and vice versa. This was combined with reversed usage within the loop, so that the negation in the end turned out correct. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/sve_helper.c | 42 ++++++++++++++++++++++++---------- target/arm/tcg/translate-sve.c | 2 +- target/arm/tcg/vec_internal.h | 17 ++++++++++++++ 3 files changed, 48 insertions(+), 13 deletions(-) diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index 8527a7495a..770945a2c6 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -5131,8 +5131,8 @@ void HELPER(sve_fcadd_h)(void *vd, void *vn, void *vm, void *vg, { intptr_t j, i = simd_oprsz(desc); uint64_t *g = vg; - float16 neg_imag = float16_set_sign(0, simd_data(desc)); - float16 neg_real = float16_chs(neg_imag); + bool rot = extract32(desc, SIMD_DATA_SHIFT, 1); + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 1, 1); do { uint64_t pg = g[(i - 1) >> 6]; @@ -5144,9 +5144,15 @@ void HELPER(sve_fcadd_h)(void *vd, void *vn, void *vm, void *vg, i -= 2 * sizeof(float16); e0 = *(float16 *)(vn + H1_2(i)); - e1 = *(float16 *)(vm + H1_2(j)) ^ neg_real; + e1 = *(float16 *)(vm + H1_2(j)); e2 = *(float16 *)(vn + H1_2(j)); - e3 = *(float16 *)(vm + H1_2(i)) ^ neg_imag; + e3 = *(float16 *)(vm + H1_2(i)); + + if (rot) { + e3 = float16_maybe_ah_chs(e3, fpcr_ah); + } else { + e1 = float16_maybe_ah_chs(e1, fpcr_ah); + } if (likely((pg >> (i & 63)) & 1)) { *(float16 *)(vd + H1_2(i)) = float16_add(e0, e1, s); @@ -5163,8 +5169,8 @@ void HELPER(sve_fcadd_s)(void *vd, void *vn, void *vm, void *vg, { intptr_t j, i = simd_oprsz(desc); uint64_t *g = vg; - float32 neg_imag = float32_set_sign(0, simd_data(desc)); - float32 neg_real = float32_chs(neg_imag); + bool rot = extract32(desc, SIMD_DATA_SHIFT, 1); + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 1, 1); do { uint64_t pg = g[(i - 1) >> 6]; @@ -5176,9 +5182,15 @@ void HELPER(sve_fcadd_s)(void *vd, void *vn, void *vm, void *vg, i -= 2 * sizeof(float32); e0 = *(float32 *)(vn + H1_2(i)); - e1 = *(float32 *)(vm + H1_2(j)) ^ neg_real; + e1 = *(float32 *)(vm + H1_2(j)); e2 = *(float32 *)(vn + H1_2(j)); - e3 = *(float32 *)(vm + H1_2(i)) ^ neg_imag; + e3 = *(float32 *)(vm + H1_2(i)); + + if (rot) { + e3 = float32_maybe_ah_chs(e3, fpcr_ah); + } else { + e1 = float32_maybe_ah_chs(e1, fpcr_ah); + } if (likely((pg >> (i & 63)) & 1)) { *(float32 *)(vd + H1_2(i)) = float32_add(e0, e1, s); @@ -5195,8 +5207,8 @@ void HELPER(sve_fcadd_d)(void *vd, void *vn, void *vm, void *vg, { intptr_t j, i = simd_oprsz(desc); uint64_t *g = vg; - float64 neg_imag = float64_set_sign(0, simd_data(desc)); - float64 neg_real = float64_chs(neg_imag); + bool rot = extract32(desc, SIMD_DATA_SHIFT, 1); + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 1, 1); do { uint64_t pg = g[(i - 1) >> 6]; @@ -5208,9 +5220,15 @@ void HELPER(sve_fcadd_d)(void *vd, void *vn, void *vm, void *vg, i -= 2 * sizeof(float64); e0 = *(float64 *)(vn + H1_2(i)); - e1 = *(float64 *)(vm + H1_2(j)) ^ neg_real; + e1 = *(float64 *)(vm + H1_2(j)); e2 = *(float64 *)(vn + H1_2(j)); - e3 = *(float64 *)(vm + H1_2(i)) ^ neg_imag; + e3 = *(float64 *)(vm + H1_2(i)); + + if (rot) { + e3 = 
float64_maybe_ah_chs(e3, fpcr_ah); + } else { + e1 = float64_maybe_ah_chs(e1, fpcr_ah); + } if (likely((pg >> (i & 63)) & 1)) { *(float64 *)(vd + H1_2(i)) = float64_add(e0, e1, s); diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 0d8bd1a49c..7816b5801a 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -3916,7 +3916,7 @@ static gen_helper_gvec_4_ptr * const fcadd_fns[] = { gen_helper_sve_fcadd_s, gen_helper_sve_fcadd_d, }; TRANS_FEAT(FCADD, aa64_sve, gen_gvec_fpst_zzzp, fcadd_fns[a->esz], - a->rd, a->rn, a->rm, a->pg, a->rot, + a->rd, a->rn, a->rm, a->pg, a->rot | (s->fpcr_ah << 1), a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) #define DO_FMLA(NAME, name) \ diff --git a/target/arm/tcg/vec_internal.h b/target/arm/tcg/vec_internal.h index 094f5c169c..826791523a 100644 --- a/target/arm/tcg/vec_internal.h +++ b/target/arm/tcg/vec_internal.h @@ -20,6 +20,8 @@ #ifndef TARGET_ARM_VEC_INTERNAL_H #define TARGET_ARM_VEC_INTERNAL_H +#include "fpu/softfloat.h" + /* * Note that vector data is stored in host-endian 64-bit chunks, * so addressing units smaller than that needs a host-endian fixup. @@ -265,4 +267,19 @@ float32 bfdotadd_ebf(float32 sum, uint32_t e1, uint32_t e2, */ bool is_ebf(CPUARMState *env, float_status *statusp, float_status *oddstatusp); +static inline float16 float16_maybe_ah_chs(float16 a, bool fpcr_ah) +{ + return fpcr_ah && float16_is_any_nan(a) ? a : float16_chs(a); +} + +static inline float32 float32_maybe_ah_chs(float32 a, bool fpcr_ah) +{ + return fpcr_ah && float32_is_any_nan(a) ? a : float32_chs(a); +} + +static inline float64 float64_maybe_ah_chs(float64 a, bool fpcr_ah) +{ + return fpcr_ah && float64_is_any_nan(a) ? a : float64_chs(a); +} + #endif /* TARGET_ARM_VEC_INTERNAL_H */ From 72203eefab04a6903328807b0e3c635210031262 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:41 +0000 Subject: [PATCH 37/68] target/arm: Handle FPCR.AH in negation steps in FCADD The negation steps in FCADD must honour FPCR.AH's "don't change the sign of a NaN" semantics. Implement this by encoding FPCR.AH into the SIMD data field passed to the helper and using that to decide whether to negate the values. The construction of neg_imag and neg_real were done to make it easy to apply both in parallel with two simple logical operations. This changed with FPCR.AH, which is more complex than that. Switch to an approach closer to the pseudocode, where we extract the rot parameter from the SIMD data word and negate the appropriate input value. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/translate-a64.c | 10 +++++-- target/arm/tcg/vec_helper.c | 54 +++++++++++++++++++--------------- 2 files changed, 38 insertions(+), 26 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 76c41b9bdb..e8db9f39b1 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -6106,8 +6106,14 @@ static gen_helper_gvec_3_ptr * const f_vector_fcadd[3] = { gen_helper_gvec_fcadds, gen_helper_gvec_fcaddd, }; -TRANS_FEAT(FCADD_90, aa64_fcma, do_fp3_vector, a, 0, f_vector_fcadd) -TRANS_FEAT(FCADD_270, aa64_fcma, do_fp3_vector, a, 1, f_vector_fcadd) +/* + * Encode FPCR.AH into the data so the helper knows whether the + * negations it does should avoid flipping the sign bit on a NaN + */ +TRANS_FEAT(FCADD_90, aa64_fcma, do_fp3_vector, a, 0 | (s->fpcr_ah << 1), + f_vector_fcadd) +TRANS_FEAT(FCADD_270, aa64_fcma, do_fp3_vector, a, 1 | (s->fpcr_ah << 1), + f_vector_fcadd) static bool trans_FCMLA_v(DisasContext *s, arg_FCMLA_v *a) { diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index 0b84a562c0..b181b9734d 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -879,19 +879,21 @@ void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm, float16 *d = vd; float16 *n = vn; float16 *m = vm; - uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1); - uint32_t neg_imag = neg_real ^ 1; + bool rot = extract32(desc, SIMD_DATA_SHIFT, 1); + bool fpcr_ah = extract64(desc, SIMD_DATA_SHIFT + 1, 1); uintptr_t i; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 15; - neg_imag <<= 15; - for (i = 0; i < opr_sz / 2; i += 2) { float16 e0 = n[H2(i)]; - float16 e1 = m[H2(i + 1)] ^ neg_imag; + float16 e1 = m[H2(i + 1)]; float16 e2 = n[H2(i + 1)]; - float16 e3 = m[H2(i)] ^ neg_real; + float16 e3 = m[H2(i)]; + + if (rot) { + e3 = float16_maybe_ah_chs(e3, fpcr_ah); + } else { + e1 = float16_maybe_ah_chs(e1, fpcr_ah); + } d[H2(i)] = float16_add(e0, e1, fpst); d[H2(i + 1)] = float16_add(e2, e3, fpst); @@ -906,19 +908,21 @@ void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm, float32 *d = vd; float32 *n = vn; float32 *m = vm; - uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1); - uint32_t neg_imag = neg_real ^ 1; + bool rot = extract32(desc, SIMD_DATA_SHIFT, 1); + bool fpcr_ah = extract64(desc, SIMD_DATA_SHIFT + 1, 1); uintptr_t i; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 31; - neg_imag <<= 31; - for (i = 0; i < opr_sz / 4; i += 2) { float32 e0 = n[H4(i)]; - float32 e1 = m[H4(i + 1)] ^ neg_imag; + float32 e1 = m[H4(i + 1)]; float32 e2 = n[H4(i + 1)]; - float32 e3 = m[H4(i)] ^ neg_real; + float32 e3 = m[H4(i)]; + + if (rot) { + e3 = float32_maybe_ah_chs(e3, fpcr_ah); + } else { + e1 = float32_maybe_ah_chs(e1, fpcr_ah); + } d[H4(i)] = float32_add(e0, e1, fpst); d[H4(i + 1)] = float32_add(e2, e3, fpst); @@ -933,19 +937,21 @@ void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm, float64 *d = vd; float64 *n = vn; float64 *m = vm; - uint64_t neg_real = extract64(desc, SIMD_DATA_SHIFT, 1); - uint64_t neg_imag = neg_real ^ 1; + bool rot = extract32(desc, SIMD_DATA_SHIFT, 1); + bool fpcr_ah = extract64(desc, SIMD_DATA_SHIFT + 1, 1); uintptr_t i; - /* Shift boolean to the sign bit so we can xor to negate. 
*/ - neg_real <<= 63; - neg_imag <<= 63; - for (i = 0; i < opr_sz / 8; i += 2) { float64 e0 = n[i]; - float64 e1 = m[i + 1] ^ neg_imag; + float64 e1 = m[i + 1]; float64 e2 = n[i + 1]; - float64 e3 = m[i] ^ neg_real; + float64 e3 = m[i]; + + if (rot) { + e3 = float64_maybe_ah_chs(e3, fpcr_ah); + } else { + e1 = float64_maybe_ah_chs(e1, fpcr_ah); + } d[i] = float64_add(e0, e1, fpst); d[i + 1] = float64_add(e2, e3, fpst); From 28048a3d180b3893912344869dfa64c1e6a7d40f Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:42 +0000 Subject: [PATCH 38/68] target/arm: Handle FPCR.AH in FRECPS and FRSQRTS scalar insns Handle the FPCR.AH semantics that we do not change the sign of an input NaN in the FRECPS and FRSQRTS scalar insns, by providing new helper functions that do the CHS part of the operation differently. Since the extra helper functions would be very repetitive if written out longhand, we condense them and the existing non-AH helpers into being emitted via macros. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-a64.c | 115 ++++++++++++--------------------- target/arm/tcg/helper-a64.h | 6 ++ target/arm/tcg/translate-a64.c | 25 +++++-- target/arm/tcg/vec_internal.h | 18 ++++++ 4 files changed, 83 insertions(+), 81 deletions(-) diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c index ed5e4a4599..32f0647ca4 100644 --- a/target/arm/tcg/helper-a64.c +++ b/target/arm/tcg/helper-a64.c @@ -38,6 +38,7 @@ #ifdef CONFIG_USER_ONLY #include "user/page-protection.h" #endif +#include "vec_internal.h" /* C2.4.7 Multiply and divide */ /* special cases for 0 and LLONG_MIN are mandated by the standard */ @@ -208,88 +209,52 @@ uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, float_status *fpst) return -float64_lt(b, a, fpst); } -/* Reciprocal step and sqrt step. Note that unlike the A32/T32 +/* + * Reciprocal step and sqrt step. Note that unlike the A32/T32 * versions, these do a fully fused multiply-add or * multiply-add-and-halve. + * The FPCR.AH == 1 versions need to avoid flipping the sign of NaN. 
*/ - -uint32_t HELPER(recpsf_f16)(uint32_t a, uint32_t b, float_status *fpst) -{ - a = float16_squash_input_denormal(a, fpst); - b = float16_squash_input_denormal(b, fpst); - - a = float16_chs(a); - if ((float16_is_infinity(a) && float16_is_zero(b)) || - (float16_is_infinity(b) && float16_is_zero(a))) { - return float16_two; +#define DO_RECPS(NAME, CTYPE, FLOATTYPE, CHSFN) \ + CTYPE HELPER(NAME)(CTYPE a, CTYPE b, float_status *fpst) \ + { \ + a = FLOATTYPE ## _squash_input_denormal(a, fpst); \ + b = FLOATTYPE ## _squash_input_denormal(b, fpst); \ + a = FLOATTYPE ## _ ## CHSFN(a); \ + if ((FLOATTYPE ## _is_infinity(a) && FLOATTYPE ## _is_zero(b)) || \ + (FLOATTYPE ## _is_infinity(b) && FLOATTYPE ## _is_zero(a))) { \ + return FLOATTYPE ## _two; \ + } \ + return FLOATTYPE ## _muladd(a, b, FLOATTYPE ## _two, 0, fpst); \ } - return float16_muladd(a, b, float16_two, 0, fpst); -} -float32 HELPER(recpsf_f32)(float32 a, float32 b, float_status *fpst) -{ - a = float32_squash_input_denormal(a, fpst); - b = float32_squash_input_denormal(b, fpst); +DO_RECPS(recpsf_f16, uint32_t, float16, chs) +DO_RECPS(recpsf_f32, float32, float32, chs) +DO_RECPS(recpsf_f64, float64, float64, chs) +DO_RECPS(recpsf_ah_f16, uint32_t, float16, ah_chs) +DO_RECPS(recpsf_ah_f32, float32, float32, ah_chs) +DO_RECPS(recpsf_ah_f64, float64, float64, ah_chs) - a = float32_chs(a); - if ((float32_is_infinity(a) && float32_is_zero(b)) || - (float32_is_infinity(b) && float32_is_zero(a))) { - return float32_two; - } - return float32_muladd(a, b, float32_two, 0, fpst); -} +#define DO_RSQRTSF(NAME, CTYPE, FLOATTYPE, CHSFN) \ + CTYPE HELPER(NAME)(CTYPE a, CTYPE b, float_status *fpst) \ + { \ + a = FLOATTYPE ## _squash_input_denormal(a, fpst); \ + b = FLOATTYPE ## _squash_input_denormal(b, fpst); \ + a = FLOATTYPE ## _ ## CHSFN(a); \ + if ((FLOATTYPE ## _is_infinity(a) && FLOATTYPE ## _is_zero(b)) || \ + (FLOATTYPE ## _is_infinity(b) && FLOATTYPE ## _is_zero(a))) { \ + return FLOATTYPE ## _one_point_five; \ + } \ + return FLOATTYPE ## _muladd_scalbn(a, b, FLOATTYPE ## _three, \ + -1, 0, fpst); \ + } \ -float64 HELPER(recpsf_f64)(float64 a, float64 b, float_status *fpst) -{ - a = float64_squash_input_denormal(a, fpst); - b = float64_squash_input_denormal(b, fpst); - - a = float64_chs(a); - if ((float64_is_infinity(a) && float64_is_zero(b)) || - (float64_is_infinity(b) && float64_is_zero(a))) { - return float64_two; - } - return float64_muladd(a, b, float64_two, 0, fpst); -} - -uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst) -{ - a = float16_squash_input_denormal(a, fpst); - b = float16_squash_input_denormal(b, fpst); - - a = float16_chs(a); - if ((float16_is_infinity(a) && float16_is_zero(b)) || - (float16_is_infinity(b) && float16_is_zero(a))) { - return float16_one_point_five; - } - return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst); -} - -float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst) -{ - a = float32_squash_input_denormal(a, fpst); - b = float32_squash_input_denormal(b, fpst); - - a = float32_chs(a); - if ((float32_is_infinity(a) && float32_is_zero(b)) || - (float32_is_infinity(b) && float32_is_zero(a))) { - return float32_one_point_five; - } - return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst); -} - -float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst) -{ - a = float64_squash_input_denormal(a, fpst); - b = float64_squash_input_denormal(b, fpst); - - a = float64_chs(a); - if ((float64_is_infinity(a) && float64_is_zero(b)) || - 
(float64_is_infinity(b) && float64_is_zero(a))) { - return float64_one_point_five; - } - return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst); -} +DO_RSQRTSF(rsqrtsf_f16, uint32_t, float16, chs) +DO_RSQRTSF(rsqrtsf_f32, float32, float32, chs) +DO_RSQRTSF(rsqrtsf_f64, float64, float64, chs) +DO_RSQRTSF(rsqrtsf_ah_f16, uint32_t, float16, ah_chs) +DO_RSQRTSF(rsqrtsf_ah_f32, float32, float32, ah_chs) +DO_RSQRTSF(rsqrtsf_ah_f64, float64, float64, ah_chs) /* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */ uint32_t HELPER(frecpx_f16)(uint32_t a, float_status *fpst) diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h index ae0424f6de..85023465b7 100644 --- a/target/arm/tcg/helper-a64.h +++ b/target/arm/tcg/helper-a64.h @@ -38,9 +38,15 @@ DEF_HELPER_FLAGS_3(neon_cgt_f64, TCG_CALL_NO_RWG, i64, i64, i64, fpst) DEF_HELPER_FLAGS_3(recpsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, fpst) DEF_HELPER_FLAGS_3(recpsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, fpst) DEF_HELPER_FLAGS_3(recpsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, fpst) +DEF_HELPER_FLAGS_3(recpsf_ah_f16, TCG_CALL_NO_RWG, f16, f16, f16, fpst) +DEF_HELPER_FLAGS_3(recpsf_ah_f32, TCG_CALL_NO_RWG, f32, f32, f32, fpst) +DEF_HELPER_FLAGS_3(recpsf_ah_f64, TCG_CALL_NO_RWG, f64, f64, f64, fpst) DEF_HELPER_FLAGS_3(rsqrtsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, fpst) DEF_HELPER_FLAGS_3(rsqrtsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, fpst) DEF_HELPER_FLAGS_3(rsqrtsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, fpst) +DEF_HELPER_FLAGS_3(rsqrtsf_ah_f16, TCG_CALL_NO_RWG, f16, f16, f16, fpst) +DEF_HELPER_FLAGS_3(rsqrtsf_ah_f32, TCG_CALL_NO_RWG, f32, f32, f32, fpst) +DEF_HELPER_FLAGS_3(rsqrtsf_ah_f64, TCG_CALL_NO_RWG, f64, f64, f64, fpst) DEF_HELPER_FLAGS_2(frecpx_f64, TCG_CALL_NO_RWG, f64, f64, fpst) DEF_HELPER_FLAGS_2(frecpx_f32, TCG_CALL_NO_RWG, f32, f32, fpst) DEF_HELPER_FLAGS_2(frecpx_f16, TCG_CALL_NO_RWG, f16, f16, fpst) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index e8db9f39b1..1b6d8598b8 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -5239,11 +5239,12 @@ static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f, FPST_A64_F16 : FPST_A64); } -static bool do_fp3_scalar_ah(DisasContext *s, arg_rrr_e *a, const FPScalar *f, - int mergereg) +static bool do_fp3_scalar_ah_2fn(DisasContext *s, arg_rrr_e *a, + const FPScalar *fnormal, const FPScalar *fah, + int mergereg) { - return do_fp3_scalar_with_fpsttype(s, a, f, mergereg, - select_ah_fpst(s, a->esz)); + return do_fp3_scalar_with_fpsttype(s, a, s->fpcr_ah ? 
fah : fnormal, + mergereg, select_ah_fpst(s, a->esz)); } /* Some insns need to call different helpers when FPCR.AH == 1 */ @@ -5464,14 +5465,26 @@ static const FPScalar f_scalar_frecps = { gen_helper_recpsf_f32, gen_helper_recpsf_f64, }; -TRANS(FRECPS_s, do_fp3_scalar_ah, a, &f_scalar_frecps, a->rn) +static const FPScalar f_scalar_ah_frecps = { + gen_helper_recpsf_ah_f16, + gen_helper_recpsf_ah_f32, + gen_helper_recpsf_ah_f64, +}; +TRANS(FRECPS_s, do_fp3_scalar_ah_2fn, a, + &f_scalar_frecps, &f_scalar_ah_frecps, a->rn) static const FPScalar f_scalar_frsqrts = { gen_helper_rsqrtsf_f16, gen_helper_rsqrtsf_f32, gen_helper_rsqrtsf_f64, }; -TRANS(FRSQRTS_s, do_fp3_scalar_ah, a, &f_scalar_frsqrts, a->rn) +static const FPScalar f_scalar_ah_frsqrts = { + gen_helper_rsqrtsf_ah_f16, + gen_helper_rsqrtsf_ah_f32, + gen_helper_rsqrtsf_ah_f64, +}; +TRANS(FRSQRTS_s, do_fp3_scalar_ah_2fn, a, + &f_scalar_frsqrts, &f_scalar_ah_frsqrts, a->rn) static bool do_fcmp0_s(DisasContext *s, arg_rr_e *a, const FPScalar *f, bool swap) diff --git a/target/arm/tcg/vec_internal.h b/target/arm/tcg/vec_internal.h index 826791523a..6b93b5aeb9 100644 --- a/target/arm/tcg/vec_internal.h +++ b/target/arm/tcg/vec_internal.h @@ -267,6 +267,24 @@ float32 bfdotadd_ebf(float32 sum, uint32_t e1, uint32_t e2, */ bool is_ebf(CPUARMState *env, float_status *statusp, float_status *oddstatusp); +/* + * Negate as for FPCR.AH=1 -- do not negate NaNs. + */ +static inline float16 float16_ah_chs(float16 a) +{ + return float16_is_any_nan(a) ? a : float16_chs(a); +} + +static inline float32 float32_ah_chs(float32 a) +{ + return float32_is_any_nan(a) ? a : float32_chs(a); +} + +static inline float64 float64_ah_chs(float64 a) +{ + return float64_is_any_nan(a) ? a : float64_chs(a); +} + static inline float16 float16_maybe_ah_chs(float16 a, bool fpcr_ah) { return fpcr_ah && float16_is_any_nan(a) ? a : float16_chs(a); From fdf89638dc1ca42222685d06dc0c465e4735cf44 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:43 +0000 Subject: [PATCH 39/68] target/arm: Handle FPCR.AH in FRECPS and FRSQRTS vector insns Handle the FPCR.AH "don't negate the sign of a NaN" semantics in the vector versions of FRECPS and FRSQRTS, by implementing new vector wrappers that call the _ah_ scalar helpers. 
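For orientation, the per-element operation that those scalar helpers implement is roughly the following behavioural sketch on host doubles. It is only meant to show the control flow: it is not bit-exact, it ignores denormal squashing and exception flags, and the name frecps_sketch is invented for this illustration.

  #include <math.h>
  #include <stdbool.h>

  /* FRECPS element: 2 - a*b computed as a fused multiply-add, with 2.0
   * returned for the infinity * zero special case.  With FPCR.AH=1 the
   * initial negation of 'a' must not flip the sign of a NaN input. */
  static double frecps_sketch(double a, double b, bool fpcr_ah)
  {
      if (!(fpcr_ah && isnan(a))) {
          a = -a;
      }
      if ((isinf(a) && b == 0.0) || (isinf(b) && a == 0.0)) {
          return 2.0;
      }
      return fma(a, b, 2.0);
  }

The FRSQRTS helpers have the same shape, but use 3 instead of 2, halve the fused result, and return 1.5 in the special case.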
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-sve.h | 14 ++++++++++++++ target/arm/tcg/translate-a64.c | 21 ++++++++++++++++----- target/arm/tcg/translate-sve.c | 7 ++++++- target/arm/tcg/vec_helper.c | 8 ++++++++ 4 files changed, 44 insertions(+), 6 deletions(-) diff --git a/target/arm/tcg/helper-sve.h b/target/arm/tcg/helper-sve.h index 29c70f054a..a2e96a498d 100644 --- a/target/arm/tcg/helper-sve.h +++ b/target/arm/tcg/helper-sve.h @@ -980,6 +980,20 @@ DEF_HELPER_FLAGS_5(gvec_rsqrts_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_5(gvec_rsqrts_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_recps_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_recps_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_recps_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_ah_rsqrts_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_rsqrts_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_rsqrts_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + DEF_HELPER_FLAGS_5(gvec_ah_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_ah_fmax_s, TCG_CALL_NO_RWG, diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 1b6d8598b8..50c207c9a1 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -5767,10 +5767,11 @@ static bool do_fp3_vector_2fn(DisasContext *s, arg_qrrr_e *a, int data, return do_fp3_vector(s, a, data, s->fpcr_ah ? fah : fnormal); } -static bool do_fp3_vector_ah(DisasContext *s, arg_qrrr_e *a, int data, - gen_helper_gvec_3_ptr * const f[3]) +static bool do_fp3_vector_ah_2fn(DisasContext *s, arg_qrrr_e *a, int data, + gen_helper_gvec_3_ptr * const fnormal[3], + gen_helper_gvec_3_ptr * const fah[3]) { - return do_fp3_vector_with_fpsttype(s, a, data, f, + return do_fp3_vector_with_fpsttype(s, a, data, s->fpcr_ah ? 
fah : fnormal, select_ah_fpst(s, a->esz)); } @@ -5913,14 +5914,24 @@ static gen_helper_gvec_3_ptr * const f_vector_frecps[3] = { gen_helper_gvec_recps_s, gen_helper_gvec_recps_d, }; -TRANS(FRECPS_v, do_fp3_vector_ah, a, 0, f_vector_frecps) +static gen_helper_gvec_3_ptr * const f_vector_ah_frecps[3] = { + gen_helper_gvec_ah_recps_h, + gen_helper_gvec_ah_recps_s, + gen_helper_gvec_ah_recps_d, +}; +TRANS(FRECPS_v, do_fp3_vector_ah_2fn, a, 0, f_vector_frecps, f_vector_ah_frecps) static gen_helper_gvec_3_ptr * const f_vector_frsqrts[3] = { gen_helper_gvec_rsqrts_h, gen_helper_gvec_rsqrts_s, gen_helper_gvec_rsqrts_d, }; -TRANS(FRSQRTS_v, do_fp3_vector_ah, a, 0, f_vector_frsqrts) +static gen_helper_gvec_3_ptr * const f_vector_ah_frsqrts[3] = { + gen_helper_gvec_ah_rsqrts_h, + gen_helper_gvec_ah_rsqrts_s, + gen_helper_gvec_ah_rsqrts_d, +}; +TRANS(FRSQRTS_v, do_fp3_vector_ah_2fn, a, 0, f_vector_frsqrts, f_vector_ah_frsqrts) static gen_helper_gvec_3_ptr * const f_vector_faddp[3] = { gen_helper_gvec_faddp_h, diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 7816b5801a..50f16d5aff 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -3741,7 +3741,12 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) NULL, gen_helper_gvec_##name##_h, \ gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \ }; \ - TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_ah_arg_zzz, name##_fns[a->esz], a, 0) + static gen_helper_gvec_3_ptr * const name##_ah_fns[4] = { \ + NULL, gen_helper_gvec_ah_##name##_h, \ + gen_helper_gvec_ah_##name##_s, gen_helper_gvec_ah_##name##_d \ + }; \ + TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_ah_arg_zzz, \ + s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz], a, 0) DO_FP3(FADD_zzz, fadd) DO_FP3(FSUB_zzz, fsub) diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index b181b9734d..e4c519f9e3 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -1477,6 +1477,14 @@ DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16) DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32) DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64) +DO_3OP(gvec_ah_recps_h, helper_recpsf_ah_f16, float16) +DO_3OP(gvec_ah_recps_s, helper_recpsf_ah_f32, float32) +DO_3OP(gvec_ah_recps_d, helper_recpsf_ah_f64, float64) + +DO_3OP(gvec_ah_rsqrts_h, helper_rsqrtsf_ah_f16, float16) +DO_3OP(gvec_ah_rsqrts_s, helper_rsqrtsf_ah_f32, float32) +DO_3OP(gvec_ah_rsqrts_d, helper_rsqrtsf_ah_f64, float64) + DO_3OP(gvec_ah_fmax_h, helper_vfp_ah_maxh, float16) DO_3OP(gvec_ah_fmax_s, helper_vfp_ah_maxs, float32) DO_3OP(gvec_ah_fmax_d, helper_vfp_ah_maxd, float64) From b85d8684c5f6be7d52355e1c8fc7444ee46a8237 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:44 +0000 Subject: [PATCH 40/68] target/arm: Handle FPCR.AH in negation step in FMLS (indexed) Handle the FPCR.AH "don't negate the sign of a NaN" semantics in FMLS (indexed). We do this by creating 6 new helpers, which allow us to do the negation either by XOR (for AH=0) or by muladd flags (for AH=1). 
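To make the contrast concrete, the two strategies amount to the following sketch for a single-precision element. NEGATE_PRODUCT_FLAG is only a stand-in symbol for softfloat's float_muladd_negate_product; the function itself is illustrative and not code from this patch.

  #include <stdint.h>
  #include <stdbool.h>

  #define NEGATE_PRODUCT_FLAG 1   /* stand-in for float_muladd_negate_product */

  /*
   * For one single-precision FMLS element, decide how the negation of the
   * multiplicand is expressed: as an up-front XOR of the sign bit (AH=0,
   * which would also flip a NaN's sign) or as a muladd flag (AH=1, applied
   * only after NaN handling, so NaN signs survive).
   */
  static void fmls_negation(bool fpcr_ah, uint32_t *negx, int *negf)
  {
      *negx = fpcr_ah ? 0 : UINT32_C(1) << 31;
      *negf = fpcr_ah ? NEGATE_PRODUCT_FLAG : 0;
  }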
Signed-off-by: Peter Maydell [PMM: Mostly from RTH's patch; error in index order into fns[][] fixed] Reviewed-by: Richard Henderson --- target/arm/helper.h | 14 ++++++++++++++ target/arm/tcg/translate-a64.c | 17 +++++++++++------ target/arm/tcg/translate-sve.c | 31 +++++++++++++++++-------------- target/arm/tcg/vec_helper.c | 24 +++++++++++++++--------- 4 files changed, 57 insertions(+), 29 deletions(-) diff --git a/target/arm/helper.h b/target/arm/helper.h index 43505d5fed..be47edff89 100644 --- a/target/arm/helper.h +++ b/target/arm/helper.h @@ -813,6 +813,20 @@ DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(gvec_fmla_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_fmls_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_fmls_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_fmls_idx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) + DEF_HELPER_FLAGS_5(gvec_uqadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_uqadd_h, TCG_CALL_NO_RWG, diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 50c207c9a1..dc35e5d896 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -6726,10 +6726,16 @@ TRANS(FMULX_vi, do_fp3_vector_idx, a, f_vector_idx_fmulx) static bool do_fmla_vector_idx(DisasContext *s, arg_qrrx_e *a, bool neg) { - static gen_helper_gvec_4_ptr * const fns[3] = { - gen_helper_gvec_fmla_idx_h, - gen_helper_gvec_fmla_idx_s, - gen_helper_gvec_fmla_idx_d, + static gen_helper_gvec_4_ptr * const fns[3][3] = { + { gen_helper_gvec_fmla_idx_h, + gen_helper_gvec_fmla_idx_s, + gen_helper_gvec_fmla_idx_d }, + { gen_helper_gvec_fmls_idx_h, + gen_helper_gvec_fmls_idx_s, + gen_helper_gvec_fmls_idx_d }, + { gen_helper_gvec_ah_fmls_idx_h, + gen_helper_gvec_ah_fmls_idx_s, + gen_helper_gvec_ah_fmls_idx_d }, }; MemOp esz = a->esz; int check = fp_access_check_vector_hsd(s, a->q, esz); @@ -6740,8 +6746,7 @@ static bool do_fmla_vector_idx(DisasContext *s, arg_qrrx_e *a, bool neg) gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd, esz == MO_16 ? FPST_A64_F16 : FPST_A64, - (a->idx << 1) | neg, - fns[esz - 1]); + a->idx, fns[neg ? 1 + s->fpcr_ah : 0][esz - 1]); return true; } diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 50f16d5aff..e81e996c56 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -3524,21 +3524,24 @@ DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d) *** SVE Floating Point Multiply-Add Indexed Group */ -static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub) -{ - static gen_helper_gvec_4_ptr * const fns[4] = { - NULL, - gen_helper_gvec_fmla_idx_h, - gen_helper_gvec_fmla_idx_s, - gen_helper_gvec_fmla_idx_d, - }; - return gen_gvec_fpst_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, - (a->index << 1) | sub, - a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64); -} +static gen_helper_gvec_4_ptr * const fmla_idx_fns[4] = { + NULL, gen_helper_gvec_fmla_idx_h, + gen_helper_gvec_fmla_idx_s, gen_helper_gvec_fmla_idx_d +}; +TRANS_FEAT(FMLA_zzxz, aa64_sve, gen_gvec_fpst_zzzz, + fmla_idx_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->index, + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) -TRANS_FEAT(FMLA_zzxz, aa64_sve, do_FMLA_zzxz, a, false) -TRANS_FEAT(FMLS_zzxz, aa64_sve, do_FMLA_zzxz, a, true) +static gen_helper_gvec_4_ptr * const fmls_idx_fns[4][2] = { + { NULL, NULL }, + { gen_helper_gvec_fmls_idx_h, gen_helper_gvec_ah_fmls_idx_h }, + { gen_helper_gvec_fmls_idx_s, gen_helper_gvec_ah_fmls_idx_s }, + { gen_helper_gvec_fmls_idx_d, gen_helper_gvec_ah_fmls_idx_d }, +}; +TRANS_FEAT(FMLS_zzxz, aa64_sve, gen_gvec_fpst_zzzz, + fmls_idx_fns[a->esz][s->fpcr_ah], + a->rd, a->rn, a->rm, a->ra, a->index, + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) /* *** SVE Floating Point Multiply Indexed Group diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index e4c519f9e3..ae3cb50fa2 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -1680,29 +1680,35 @@ DO_FMUL_IDX(gvec_fmls_nf_idx_s, float32_sub, float32_mul, float32, H4) #undef DO_FMUL_IDX -#define DO_FMLA_IDX(NAME, TYPE, H) \ +#define DO_FMLA_IDX(NAME, TYPE, H, NEGX, NEGF) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, \ float_status *stat, uint32_t desc) \ { \ intptr_t i, j, oprsz = simd_oprsz(desc); \ intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \ - TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1); \ - intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1); \ + intptr_t idx = simd_data(desc); \ TYPE *d = vd, *n = vn, *m = vm, *a = va; \ - op1_neg <<= (8 * sizeof(TYPE) - 1); \ for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \ TYPE mm = m[H(i + idx)]; \ for (j = 0; j < segment; j++) { \ - d[i + j] = TYPE##_muladd(n[i + j] ^ op1_neg, \ - mm, a[i + j], 0, stat); \ + d[i + j] = TYPE##_muladd(n[i + j] ^ NEGX, mm, \ + a[i + j], NEGF, stat); \ } \ } \ clear_tail(d, oprsz, simd_maxsz(desc)); \ } -DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2) -DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4) -DO_FMLA_IDX(gvec_fmla_idx_d, float64, H8) +DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2, 0, 0) +DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4, 0, 0) +DO_FMLA_IDX(gvec_fmla_idx_d, float64, H8, 0, 0) + +DO_FMLA_IDX(gvec_fmls_idx_h, float16, H2, INT16_MIN, 0) +DO_FMLA_IDX(gvec_fmls_idx_s, float32, H4, INT32_MIN, 0) +DO_FMLA_IDX(gvec_fmls_idx_d, float64, H8, INT64_MIN, 0) + +DO_FMLA_IDX(gvec_ah_fmls_idx_h, float16, H2, 0, float_muladd_negate_product) +DO_FMLA_IDX(gvec_ah_fmls_idx_s, float32, H4, 0, float_muladd_negate_product) +DO_FMLA_IDX(gvec_ah_fmls_idx_d, float64, H8, 0, float_muladd_negate_product) #undef DO_FMLA_IDX From 1fae4f5e9f48f1620c20f33eb1c673276a301548 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:45 +0000 Subject: [PATCH 41/68] target/arm: Handle FPCR.AH in negation in FMLS (vector) Handle the FPCR.AH "don't negate the sign of a NaN" semantics in FMLS (vector), by implementing a new set of helpers for the AH=1 case. The float_muladd_negate_product flag produces the same result as negating either of the multiplication operands, assuming neither of the operands are NaNs. But since FEAT_AFP does not negate NaNs, this behaviour is exactly what we need. 
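The equivalence being relied on can be sanity-checked with host doubles; the check below only demonstrates that negating either multiplicand of a fused multiply-add gives the same numeric result for non-NaN inputs, and says nothing about the softfloat flag behaviour.

  #include <assert.h>
  #include <math.h>
  #include <stdio.h>

  int main(void)
  {
      double a = 1.5, b = -2.25, c = 0.125;

      /* fma(-a, b, c) and fma(a, -b, c) agree when neither multiplicand
       * is a NaN. */
      assert(fma(-a, b, c) == fma(a, -b, c));
      printf("fma(-a, b, c) = %g\n", fma(-a, b, c));
      return 0;
  }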
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/helper.h | 4 ++++ target/arm/tcg/translate-a64.c | 7 ++++++- target/arm/tcg/vec_helper.c | 22 ++++++++++++++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/target/arm/helper.h b/target/arm/helper.h index be47edff89..f0a783b708 100644 --- a/target/arm/helper.h +++ b/target/arm/helper.h @@ -782,6 +782,10 @@ DEF_HELPER_FLAGS_5(gvec_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG, diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index dc35e5d896..3ab84611a6 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -5860,7 +5860,12 @@ static gen_helper_gvec_3_ptr * const f_vector_fmls[3] = { gen_helper_gvec_vfms_s, gen_helper_gvec_vfms_d, }; -TRANS(FMLS_v, do_fp3_vector, a, 0, f_vector_fmls) +static gen_helper_gvec_3_ptr * const f_vector_fmls_ah[3] = { + gen_helper_gvec_ah_vfms_h, + gen_helper_gvec_ah_vfms_s, + gen_helper_gvec_ah_vfms_d, +}; +TRANS(FMLS_v, do_fp3_vector_2fn, a, 0, f_vector_fmls, f_vector_fmls_ah) static gen_helper_gvec_3_ptr * const f_vector_fcmeq[3] = { gen_helper_gvec_fceq_h, diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index ae3cb50fa2..fc3e6587b8 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -1558,6 +1558,24 @@ static float64 float64_mulsub_f(float64 dest, float64 op1, float64 op2, return float64_muladd(float64_chs(op1), op2, dest, 0, stat); } +static float16 float16_ah_mulsub_f(float16 dest, float16 op1, float16 op2, + float_status *stat) +{ + return float16_muladd(op1, op2, dest, float_muladd_negate_product, stat); +} + +static float32 float32_ah_mulsub_f(float32 dest, float32 op1, float32 op2, + float_status *stat) +{ + return float32_muladd(op1, op2, dest, float_muladd_negate_product, stat); +} + +static float64 float64_ah_mulsub_f(float64 dest, float64 op1, float64 op2, + float_status *stat) +{ + return float64_muladd(op1, op2, dest, float_muladd_negate_product, stat); +} + #define DO_MULADD(NAME, FUNC, TYPE) \ void HELPER(NAME)(void *vd, void *vn, void *vm, \ float_status *stat, uint32_t desc) \ @@ -1584,6 +1602,10 @@ DO_MULADD(gvec_vfms_h, float16_mulsub_f, float16) DO_MULADD(gvec_vfms_s, float32_mulsub_f, float32) DO_MULADD(gvec_vfms_d, float64_mulsub_f, float64) +DO_MULADD(gvec_ah_vfms_h, float16_ah_mulsub_f, float16) +DO_MULADD(gvec_ah_vfms_s, float32_ah_mulsub_f, float32) +DO_MULADD(gvec_ah_vfms_d, float64_ah_mulsub_f, float64) + /* For the indexed ops, SVE applies the index per 128-bit vector segment. * For AdvSIMD, there is of course only one such vector segment. 
*/ From 51330e58480b05cd39a9b913be9f83a0530471cb Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:46 +0000 Subject: [PATCH 42/68] target/arm: Handle FPCR.AH in negation step in SVE FMLS (vector) Handle the FPCR.AH "don't negate the sign of a NaN" semantics fro the SVE FMLS (vector) insns, by providing new helpers for the AH=1 case which end up passing fpcr_ah = true to the do_fmla_zpzzz_* functions that do the work. The float*_muladd functions have a flags argument that can perform optional negation of various operand. We don't use that for "normal" arm fmla, because the muladd flags are not applied when an input is a NaN. But since FEAT_AFP does not negate NaNs, this behaviour is exactly what we need. The non-AH helpers pass in a zero flags argument and control the negation via the neg1 and neg3 arguments; the AH helpers always pass in neg1 and neg3 as zero and control the negation via the flags argument. This allows us to avoid conditional branches within the inner loop. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-sve.h | 21 ++++++++ target/arm/tcg/sve_helper.c | 99 +++++++++++++++++++++++++++------- target/arm/tcg/translate-sve.c | 18 ++++--- 3 files changed, 114 insertions(+), 24 deletions(-) diff --git a/target/arm/tcg/helper-sve.h b/target/arm/tcg/helper-sve.h index a2e96a498d..0b1b588783 100644 --- a/target/arm/tcg/helper-sve.h +++ b/target/arm/tcg/helper-sve.h @@ -1475,6 +1475,27 @@ DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_7(sve_ah_fnmls_zpzzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fnmls_zpzzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fnmls_zpzzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) + DEF_HELPER_FLAGS_7(sve_fcmla_zpzzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fcmla_zpzzz_s, TCG_CALL_NO_RWG, diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index 770945a2c6..90d4defc0d 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -4802,7 +4802,7 @@ DO_ZPZ_FP(flogb_d, float64, H1_8, do_float64_logb_as_int) static void do_fmla_zpzzz_h(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc, - uint16_t neg1, uint16_t neg3) + uint16_t neg1, uint16_t neg3, int flags) { intptr_t i = simd_oprsz(desc); uint64_t *g = vg; @@ -4817,7 +4817,7 @@ static void do_fmla_zpzzz_h(void *vd, void *vn, void *vm, void *va, void *vg, e1 = *(uint16_t *)(vn + H1_2(i)) ^ neg1; e2 = *(uint16_t *)(vm + H1_2(i)); e3 = *(uint16_t *)(va + H1_2(i)) ^ neg3; - r = float16_muladd(e1, e2, e3, 0, status); + r = float16_muladd(e1, e2, e3, flags, status); *(uint16_t *)(vd + H1_2(i)) = r; } 
} while (i & 63); @@ -4827,30 +4827,51 @@ static void do_fmla_zpzzz_h(void *vd, void *vn, void *vm, void *va, void *vg, void HELPER(sve_fmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0); + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0, 0); } void HELPER(sve_fmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0); + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0, 0); } void HELPER(sve_fnmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0x8000); + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0x8000, 0); } void HELPER(sve_fnmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0x8000); + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0x8000, 0); +} + +void HELPER(sve_ah_fmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product); +} + +void HELPER(sve_ah_fnmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product | float_muladd_negate_c); +} + +void HELPER(sve_ah_fnmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_c); } static void do_fmla_zpzzz_s(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc, - uint32_t neg1, uint32_t neg3) + uint32_t neg1, uint32_t neg3, int flags) { intptr_t i = simd_oprsz(desc); uint64_t *g = vg; @@ -4865,7 +4886,7 @@ static void do_fmla_zpzzz_s(void *vd, void *vn, void *vm, void *va, void *vg, e1 = *(uint32_t *)(vn + H1_4(i)) ^ neg1; e2 = *(uint32_t *)(vm + H1_4(i)); e3 = *(uint32_t *)(va + H1_4(i)) ^ neg3; - r = float32_muladd(e1, e2, e3, 0, status); + r = float32_muladd(e1, e2, e3, flags, status); *(uint32_t *)(vd + H1_4(i)) = r; } } while (i & 63); @@ -4875,30 +4896,51 @@ static void do_fmla_zpzzz_s(void *vd, void *vn, void *vm, void *va, void *vg, void HELPER(sve_fmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0); + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0, 0); } void HELPER(sve_fmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0); + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0, 0); } void HELPER(sve_fnmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0x80000000); + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0x80000000, 0); } void HELPER(sve_fnmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0x80000000); + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0x80000000, 
0); +} + +void HELPER(sve_ah_fmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product); +} + +void HELPER(sve_ah_fnmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product | float_muladd_negate_c); +} + +void HELPER(sve_ah_fnmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_c); } static void do_fmla_zpzzz_d(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc, - uint64_t neg1, uint64_t neg3) + uint64_t neg1, uint64_t neg3, int flags) { intptr_t i = simd_oprsz(desc); uint64_t *g = vg; @@ -4913,7 +4955,7 @@ static void do_fmla_zpzzz_d(void *vd, void *vn, void *vm, void *va, void *vg, e1 = *(uint64_t *)(vn + i) ^ neg1; e2 = *(uint64_t *)(vm + i); e3 = *(uint64_t *)(va + i) ^ neg3; - r = float64_muladd(e1, e2, e3, 0, status); + r = float64_muladd(e1, e2, e3, flags, status); *(uint64_t *)(vd + i) = r; } } while (i & 63); @@ -4923,25 +4965,46 @@ static void do_fmla_zpzzz_d(void *vd, void *vn, void *vm, void *va, void *vg, void HELPER(sve_fmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0); + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0, 0); } void HELPER(sve_fmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, 0); + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, 0, 0); } void HELPER(sve_fnmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, INT64_MIN); + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, INT64_MIN, 0); } void HELPER(sve_fnmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, INT64_MIN); + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, INT64_MIN, 0); +} + +void HELPER(sve_ah_fmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product); +} + +void HELPER(sve_ah_fnmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product | float_muladd_negate_c); +} + +void HELPER(sve_ah_fnmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_c); } /* Two operand floating-point comparison controlled by a predicate. diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index e81e996c56..17016854d8 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -3927,19 +3927,25 @@ TRANS_FEAT(FCADD, aa64_sve, gen_gvec_fpst_zzzp, fcadd_fns[a->esz], a->rd, a->rn, a->rm, a->pg, a->rot | (s->fpcr_ah << 1), a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64) -#define DO_FMLA(NAME, name) \ +#define DO_FMLA(NAME, name, ah_name) \ static gen_helper_gvec_5_ptr * const name##_fns[4] = { \ NULL, gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ }; \ - TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_zzzzp, name##_fns[a->esz], \ + static gen_helper_gvec_5_ptr * const name##_ah_fns[4] = { \ + NULL, gen_helper_sve_##ah_name##_h, \ + gen_helper_sve_##ah_name##_s, gen_helper_sve_##ah_name##_d \ + }; \ + TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_zzzzp, \ + s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz], \ a->rd, a->rn, a->rm, a->ra, a->pg, 0, \ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) -DO_FMLA(FMLA_zpzzz, fmla_zpzzz) -DO_FMLA(FMLS_zpzzz, fmls_zpzzz) -DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz) -DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz) +/* We don't need an ah_fmla_zpzzz because fmla doesn't negate anything */ +DO_FMLA(FMLA_zpzzz, fmla_zpzzz, fmla_zpzzz) +DO_FMLA(FMLS_zpzzz, fmls_zpzzz, ah_fmls_zpzzz) +DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz, ah_fnmla_zpzzz) +DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz, ah_fnmls_zpzzz) #undef DO_FMLA From 6dcd51ccf6815578bd34ea64a33a1eda9cc324e5 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:47 +0000 Subject: [PATCH 43/68] target/arm: Handle FPCR.AH in SVE FTSSEL The negation step in the SVE FTSSEL insn mustn't negate a NaN when FPCR.AH is set. Pass FPCR.AH to the helper via the SIMD data field and use that to determine whether to do the negation. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/sve_helper.c | 18 +++++++++++++++--- target/arm/tcg/translate-sve.c | 4 ++-- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index 90d4defc0d..bf88bde8a3 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -2555,6 +2555,7 @@ void HELPER(sve_fexpa_d)(void *vd, void *vn, uint32_t desc) void HELPER(sve_ftssel_h)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 2; + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT, 1); uint16_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i += 1) { uint16_t nn = n[i]; @@ -2562,13 +2563,17 @@ void HELPER(sve_ftssel_h)(void *vd, void *vn, void *vm, uint32_t desc) if (mm & 1) { nn = float16_one; } - d[i] = nn ^ (mm & 2) << 14; + if (mm & 2) { + nn = float16_maybe_ah_chs(nn, fpcr_ah); + } + d[i] = nn; } } void HELPER(sve_ftssel_s)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 4; + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT, 1); uint32_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i += 1) { uint32_t nn = n[i]; @@ -2576,13 +2581,17 @@ void HELPER(sve_ftssel_s)(void *vd, void *vn, void *vm, uint32_t desc) if (mm & 1) { nn = float32_one; } - d[i] = nn ^ (mm & 2) << 30; + if (mm & 2) { + nn = float32_maybe_ah_chs(nn, fpcr_ah); + } + d[i] = nn; } } void HELPER(sve_ftssel_d)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT, 1); uint64_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i += 1) { uint64_t nn = n[i]; @@ -2590,7 +2599,10 @@ void HELPER(sve_ftssel_d)(void *vd, void *vn, void *vm, uint32_t desc) if (mm & 1) { nn = float64_one; } - d[i] = nn ^ (mm & 2) << 62; + if (mm & 2) { + nn = float64_maybe_ah_chs(nn, fpcr_ah); + } + d[i] = nn; } } diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 17016854d8..2dd4605bb2 
100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -1238,14 +1238,14 @@ static gen_helper_gvec_2 * const fexpa_fns[4] = { gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d, }; TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz, - fexpa_fns[a->esz], a->rd, a->rn, 0) + fexpa_fns[a->esz], a->rd, a->rn, s->fpcr_ah) static gen_helper_gvec_3 * const ftssel_fns[4] = { NULL, gen_helper_sve_ftssel_h, gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d, }; TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, - ftssel_fns[a->esz], a, 0) + ftssel_fns[a->esz], a, s->fpcr_ah) /* *** SVE Predicate Logical Operations Group From 07e6b8d7526380a8a30081354f12f48f39d3464e Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:48 +0000 Subject: [PATCH 44/68] target/arm: Handle FPCR.AH in SVE FTMAD The negation step in the SVE FTMAD insn mustn't negate a NaN when FPCR.AH is set. Pass FPCR.AH to the helper via the SIMD data field, so we can select the correct behaviour. Because the operand is known to be negative, negating the operand is the same as taking the absolute value. Defer this to the muladd operation via flags, so that it happens after NaN detection, which is correct for FPCR.AH. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/sve_helper.c | 42 ++++++++++++++++++++++++++-------- target/arm/tcg/translate-sve.c | 3 ++- 2 files changed, 35 insertions(+), 10 deletions(-) diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index bf88bde8a3..c12b2600bd 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -5134,16 +5134,24 @@ void HELPER(sve_ftmad_h)(void *vd, void *vn, void *vm, 0x3c00, 0xb800, 0x293a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float16); - intptr_t x = simd_data(desc); + intptr_t x = extract32(desc, SIMD_DATA_SHIFT, 3); + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 3, 1); float16 *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i++) { float16 mm = m[i]; intptr_t xx = x; + int flags = 0; + if (float16_is_neg(mm)) { - mm = float16_abs(mm); + if (fpcr_ah) { + flags = float_muladd_negate_product; + } else { + mm = float16_abs(mm); + } xx += 8; } - d[i] = float16_muladd(n[i], mm, coeff[xx], 0, s); + d[i] = float16_muladd(n[i], mm, coeff[xx], flags, s); } } @@ -5157,16 +5165,24 @@ void HELPER(sve_ftmad_s)(void *vd, void *vn, void *vm, 0x37cd37cc, 0x00000000, 0x00000000, 0x00000000, }; intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float32); - intptr_t x = simd_data(desc); + intptr_t x = extract32(desc, SIMD_DATA_SHIFT, 3); + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 3, 1); float32 *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i++) { float32 mm = m[i]; intptr_t xx = x; + int flags = 0; + if (float32_is_neg(mm)) { - mm = float32_abs(mm); + if (fpcr_ah) { + flags = float_muladd_negate_product; + } else { + mm = float32_abs(mm); + } xx += 8; } - d[i] = float32_muladd(n[i], mm, coeff[xx], 0, s); + d[i] = float32_muladd(n[i], mm, coeff[xx], flags, s); } } @@ -5184,16 +5200,24 @@ void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm, 0x3e21ee96d2641b13ull, 0xbda8f76380fbb401ull, }; intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float64); - intptr_t x = simd_data(desc); + intptr_t x = extract32(desc, SIMD_DATA_SHIFT, 3); + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 3, 1); float64 *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i++) { float64 mm = m[i]; intptr_t xx = x; + int flags = 0; + if 
(float64_is_neg(mm)) { - mm = float64_abs(mm); + if (fpcr_ah) { + flags = float_muladd_negate_product; + } else { + mm = float64_abs(mm); + } xx += 8; } - d[i] = float64_muladd(n[i], mm, coeff[xx], 0, s); + d[i] = float64_muladd(n[i], mm, coeff[xx], flags, s); } } diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 2dd4605bb2..410087c3fb 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -3685,7 +3685,8 @@ static gen_helper_gvec_3_ptr * const ftmad_fns[4] = { gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d, }; TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz, - ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm, + ftmad_fns[a->esz], a->rd, a->rn, a->rm, + a->imm | (s->fpcr_ah << 3), a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) /* From a7868aaa30e4df6e6040fd96217e1febe247714a Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:39:49 +0000 Subject: [PATCH 45/68] target/arm: Handle FPCR.AH in vector FCMLA The negation step in FCMLA mustn't negate a NaN when FPCR.AH is set. Handle this by passing FPCR.AH to the helper via the SIMD data field, and use this to select whether to do the negation via XOR or via the muladd negate_product flag. Signed-off-by: Richard Henderson Message-id: 20250129013857.135256-26-richard.henderson@linaro.org [PMM: Expanded commit message] Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/tcg/translate-a64.c | 2 +- target/arm/tcg/vec_helper.c | 66 ++++++++++++++++++++-------------- 2 files changed, 40 insertions(+), 28 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 3ab84611a6..ca06982d31 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -6164,7 +6164,7 @@ static bool trans_FCMLA_v(DisasContext *s, arg_FCMLA_v *a) gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64, - a->rot, fn[a->esz]); + a->rot | (s->fpcr_ah << 2), fn[a->esz]); return true; } diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index fc3e6587b8..630513f00b 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -965,22 +965,26 @@ void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, void *va, uintptr_t opr_sz = simd_oprsz(desc); float16 *d = vd, *n = vn, *m = vm, *a = va; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); - uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); - uint32_t neg_real = flip ^ neg_imag; + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_real = flip ^ negf_imag; + float16 negx_imag, negx_real; uintptr_t i; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 15; - neg_imag <<= 15; + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (negf_real & ~fpcr_ah) << 15; + negx_imag = (negf_imag & ~fpcr_ah) << 15; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? 
float_muladd_negate_product : 0); for (i = 0; i < opr_sz / 2; i += 2) { float16 e2 = n[H2(i + flip)]; - float16 e1 = m[H2(i + flip)] ^ neg_real; + float16 e1 = m[H2(i + flip)] ^ negx_real; float16 e4 = e2; - float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag; + float16 e3 = m[H2(i + 1 - flip)] ^ negx_imag; - d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], 0, fpst); - d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], 0, fpst); + d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], negf_real, fpst); + d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], negf_imag, fpst); } clear_tail(d, opr_sz, simd_maxsz(desc)); } @@ -1025,22 +1029,26 @@ void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, void *va, uintptr_t opr_sz = simd_oprsz(desc); float32 *d = vd, *n = vn, *m = vm, *a = va; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); - uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); - uint32_t neg_real = flip ^ neg_imag; + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_real = flip ^ negf_imag; + float32 negx_imag, negx_real; uintptr_t i; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 31; - neg_imag <<= 31; + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (negf_real & ~fpcr_ah) << 31; + negx_imag = (negf_imag & ~fpcr_ah) << 31; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0); for (i = 0; i < opr_sz / 4; i += 2) { float32 e2 = n[H4(i + flip)]; - float32 e1 = m[H4(i + flip)] ^ neg_real; + float32 e1 = m[H4(i + flip)] ^ negx_real; float32 e4 = e2; - float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag; + float32 e3 = m[H4(i + 1 - flip)] ^ negx_imag; - d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], 0, fpst); - d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], 0, fpst); + d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], negf_real, fpst); + d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], negf_imag, fpst); } clear_tail(d, opr_sz, simd_maxsz(desc)); } @@ -1085,22 +1093,26 @@ void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, void *va, uintptr_t opr_sz = simd_oprsz(desc); float64 *d = vd, *n = vn, *m = vm, *a = va; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); - uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); - uint64_t neg_real = flip ^ neg_imag; + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_real = flip ^ negf_imag; + float64 negx_real, negx_imag; uintptr_t i; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 63; - neg_imag <<= 63; + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (uint64_t)(negf_real & ~fpcr_ah) << 63; + negx_imag = (uint64_t)(negf_imag & ~fpcr_ah) << 63; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? 
float_muladd_negate_product : 0); for (i = 0; i < opr_sz / 8; i += 2) { float64 e2 = n[i + flip]; - float64 e1 = m[i + flip] ^ neg_real; + float64 e1 = m[i + flip] ^ negx_real; float64 e4 = e2; - float64 e3 = m[i + 1 - flip] ^ neg_imag; + float64 e3 = m[i + 1 - flip] ^ negx_imag; - d[i] = float64_muladd(e2, e1, a[i], 0, fpst); - d[i + 1] = float64_muladd(e4, e3, a[i + 1], 0, fpst); + d[i] = float64_muladd(e2, e1, a[i], negf_real, fpst); + d[i + 1] = float64_muladd(e4, e3, a[i + 1], negf_imag, fpst); } clear_tail(d, opr_sz, simd_maxsz(desc)); } From 6d5ccfd44fdcf606ceadaab7c90d44ea7d5f4fe5 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:39:50 +0000 Subject: [PATCH 46/68] target/arm: Handle FPCR.AH in FCMLA by index The negation step in FCMLA by index mustn't negate a NaN when FPCR.AH is set. Use the same approach as vector FCMLA of passing in FPCR.AH and using it to select whether to negate by XOR or by the muladd negate_product flag. Signed-off-by: Richard Henderson Message-id: 20250129013857.135256-27-richard.henderson@linaro.org [PMM: Expanded commit message] Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/tcg/translate-a64.c | 2 +- target/arm/tcg/vec_helper.c | 44 ++++++++++++++++++++-------------- 2 files changed, 27 insertions(+), 19 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index ca06982d31..1846f81bf5 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -6916,7 +6916,7 @@ static bool trans_FCMLA_vi(DisasContext *s, arg_FCMLA_vi *a) if (fp_access_check(s)) { gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64, - (a->idx << 2) | a->rot, fn); + (s->fpcr_ah << 4) | (a->idx << 2) | a->rot, fn); } return true; } diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index 630513f00b..c2f98a5c67 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -995,29 +995,33 @@ void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, void *va, uintptr_t opr_sz = simd_oprsz(desc); float16 *d = vd, *n = vn, *m = vm, *a = va; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); - uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2); - uint32_t neg_real = flip ^ neg_imag; + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 4, 1); + uint32_t negf_real = flip ^ negf_imag; intptr_t elements = opr_sz / sizeof(float16); intptr_t eltspersegment = MIN(16 / sizeof(float16), elements); + float16 negx_imag, negx_real; intptr_t i, j; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 15; - neg_imag <<= 15; + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (negf_real & ~fpcr_ah) << 15; + negx_imag = (negf_imag & ~fpcr_ah) << 15; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0); for (i = 0; i < elements; i += eltspersegment) { float16 mr = m[H2(i + 2 * index + 0)]; float16 mi = m[H2(i + 2 * index + 1)]; - float16 e1 = neg_real ^ (flip ? mi : mr); - float16 e3 = neg_imag ^ (flip ? mr : mi); + float16 e1 = negx_real ^ (flip ? mi : mr); + float16 e3 = negx_imag ^ (flip ? 
mr : mi); for (j = i; j < i + eltspersegment; j += 2) { float16 e2 = n[H2(j + flip)]; float16 e4 = e2; - d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], 0, fpst); - d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], 0, fpst); + d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], negf_real, fpst); + d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], negf_imag, fpst); } } clear_tail(d, opr_sz, simd_maxsz(desc)); @@ -1059,29 +1063,33 @@ void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, void *va, uintptr_t opr_sz = simd_oprsz(desc); float32 *d = vd, *n = vn, *m = vm, *a = va; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); - uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2); - uint32_t neg_real = flip ^ neg_imag; + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 4, 1); + uint32_t negf_real = flip ^ negf_imag; intptr_t elements = opr_sz / sizeof(float32); intptr_t eltspersegment = MIN(16 / sizeof(float32), elements); + float32 negx_imag, negx_real; intptr_t i, j; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 31; - neg_imag <<= 31; + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (negf_real & ~fpcr_ah) << 31; + negx_imag = (negf_imag & ~fpcr_ah) << 31; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0); for (i = 0; i < elements; i += eltspersegment) { float32 mr = m[H4(i + 2 * index + 0)]; float32 mi = m[H4(i + 2 * index + 1)]; - float32 e1 = neg_real ^ (flip ? mi : mr); - float32 e3 = neg_imag ^ (flip ? mr : mi); + float32 e1 = negx_real ^ (flip ? mi : mr); + float32 e3 = negx_imag ^ (flip ? mr : mi); for (j = i; j < i + eltspersegment; j += 2) { float32 e2 = n[H4(j + flip)]; float32 e4 = e2; - d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], 0, fpst); - d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], 0, fpst); + d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], negf_real, fpst); + d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], negf_imag, fpst); } } clear_tail(d, opr_sz, simd_maxsz(desc)); From 0b5ca769cfae0039d6a71a4dd2d28845f96c4b81 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:39:51 +0000 Subject: [PATCH 47/68] target/arm: Handle FPCR.AH in SVE FCMLA The negation step in SVE FCMLA mustn't negate a NaN when FPCR.AH is set. Use the same approach as we did for A64 FCMLA of passing in FPCR.AH and using it to select whether to negate by XOR or by the muladd negate_product flag. 
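The selection between the two mechanisms reduces to a few lines of bit manipulation per element size; pulled out as a standalone sketch for the single-precision case it looks like this (NEGATE_PRODUCT_FLAG is a stand-in symbol for float_muladd_negate_product; the real helpers compute the same masks and flags inline).

  #include <stdint.h>

  #define NEGATE_PRODUCT_FLAG 1   /* stand-in for float_muladd_negate_product */

  /*
   * rot bit 0 selects the element swap ("flip"), rot bit 1 the negation of
   * the imaginary part.  negx_* are XOR masks used when FPCR.AH=0; negf_*
   * are muladd flags used when FPCR.AH=1 so that a NaN keeps its sign.
   */
  static void fcmla_negation(uint32_t rot, uint32_t fpcr_ah,
                             uint32_t *negx_real, uint32_t *negx_imag,
                             int *negf_real, int *negf_imag)
  {
      uint32_t flip = rot & 1;
      uint32_t imag = (rot >> 1) & 1;
      uint32_t real = flip ^ imag;

      *negx_real = (real & ~fpcr_ah) << 31;
      *negx_imag = (imag & ~fpcr_ah) << 31;
      *negf_real = (real & fpcr_ah) ? NEGATE_PRODUCT_FLAG : 0;
      *negf_imag = (imag & fpcr_ah) ? NEGATE_PRODUCT_FLAG : 0;
  }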
Signed-off-by: Richard Henderson Message-id: 20250129013857.135256-28-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/tcg/sve_helper.c | 69 +++++++++++++++++++++------------- target/arm/tcg/translate-sve.c | 2 +- 2 files changed, 43 insertions(+), 28 deletions(-) diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index c12b2600bd..c206ca65ce 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -5347,13 +5347,18 @@ void HELPER(sve_fcmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); - unsigned rot = simd_data(desc); - bool flip = rot & 1; - float16 neg_imag, neg_real; + bool flip = extract32(desc, SIMD_DATA_SHIFT, 1); + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_real = flip ^ negf_imag; + float16 negx_imag, negx_real; uint64_t *g = vg; - neg_imag = float16_set_sign(0, (rot & 2) != 0); - neg_real = float16_set_sign(0, rot == 1 || rot == 2); + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (negf_real & ~fpcr_ah) << 15; + negx_imag = (negf_imag & ~fpcr_ah) << 15; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0); do { uint64_t pg = g[(i - 1) >> 6]; @@ -5370,18 +5375,18 @@ void HELPER(sve_fcmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va, mi = *(float16 *)(vm + H1_2(j)); e2 = (flip ? ni : nr); - e1 = (flip ? mi : mr) ^ neg_real; + e1 = (flip ? mi : mr) ^ negx_real; e4 = e2; - e3 = (flip ? mr : mi) ^ neg_imag; + e3 = (flip ? mr : mi) ^ negx_imag; if (likely((pg >> (i & 63)) & 1)) { d = *(float16 *)(va + H1_2(i)); - d = float16_muladd(e2, e1, d, 0, status); + d = float16_muladd(e2, e1, d, negf_real, status); *(float16 *)(vd + H1_2(i)) = d; } if (likely((pg >> (j & 63)) & 1)) { d = *(float16 *)(va + H1_2(j)); - d = float16_muladd(e4, e3, d, 0, status); + d = float16_muladd(e4, e3, d, negf_imag, status); *(float16 *)(vd + H1_2(j)) = d; } } while (i & 63); @@ -5392,13 +5397,18 @@ void HELPER(sve_fcmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); - unsigned rot = simd_data(desc); - bool flip = rot & 1; - float32 neg_imag, neg_real; + bool flip = extract32(desc, SIMD_DATA_SHIFT, 1); + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_real = flip ^ negf_imag; + float32 negx_imag, negx_real; uint64_t *g = vg; - neg_imag = float32_set_sign(0, (rot & 2) != 0); - neg_real = float32_set_sign(0, rot == 1 || rot == 2); + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (negf_real & ~fpcr_ah) << 31; + negx_imag = (negf_imag & ~fpcr_ah) << 31; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0); do { uint64_t pg = g[(i - 1) >> 6]; @@ -5415,18 +5425,18 @@ void HELPER(sve_fcmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va, mi = *(float32 *)(vm + H1_2(j)); e2 = (flip ? ni : nr); - e1 = (flip ? mi : mr) ^ neg_real; + e1 = (flip ? mi : mr) ^ negx_real; e4 = e2; - e3 = (flip ? mr : mi) ^ neg_imag; + e3 = (flip ? 
mr : mi) ^ negx_imag; if (likely((pg >> (i & 63)) & 1)) { d = *(float32 *)(va + H1_2(i)); - d = float32_muladd(e2, e1, d, 0, status); + d = float32_muladd(e2, e1, d, negf_real, status); *(float32 *)(vd + H1_2(i)) = d; } if (likely((pg >> (j & 63)) & 1)) { d = *(float32 *)(va + H1_2(j)); - d = float32_muladd(e4, e3, d, 0, status); + d = float32_muladd(e4, e3, d, negf_imag, status); *(float32 *)(vd + H1_2(j)) = d; } } while (i & 63); @@ -5437,13 +5447,18 @@ void HELPER(sve_fcmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); - unsigned rot = simd_data(desc); - bool flip = rot & 1; - float64 neg_imag, neg_real; + bool flip = extract32(desc, SIMD_DATA_SHIFT, 1); + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_real = flip ^ negf_imag; + float64 negx_imag, negx_real; uint64_t *g = vg; - neg_imag = float64_set_sign(0, (rot & 2) != 0); - neg_real = float64_set_sign(0, rot == 1 || rot == 2); + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (uint64_t)(negf_real & ~fpcr_ah) << 63; + negx_imag = (uint64_t)(negf_imag & ~fpcr_ah) << 63; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0); do { uint64_t pg = g[(i - 1) >> 6]; @@ -5460,18 +5475,18 @@ void HELPER(sve_fcmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va, mi = *(float64 *)(vm + H1_2(j)); e2 = (flip ? ni : nr); - e1 = (flip ? mi : mr) ^ neg_real; + e1 = (flip ? mi : mr) ^ negx_real; e4 = e2; - e3 = (flip ? mr : mi) ^ neg_imag; + e3 = (flip ? mr : mi) ^ negx_imag; if (likely((pg >> (i & 63)) & 1)) { d = *(float64 *)(va + H1_2(i)); - d = float64_muladd(e2, e1, d, 0, status); + d = float64_muladd(e2, e1, d, negf_real, status); *(float64 *)(vd + H1_2(i)) = d; } if (likely((pg >> (j & 63)) & 1)) { d = *(float64 *)(va + H1_2(j)); - d = float64_muladd(e4, e3, d, 0, status); + d = float64_muladd(e4, e3, d, negf_imag, status); *(float64 *)(vd + H1_2(j)) = d; } } while (i & 63); diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 410087c3fb..6af94fedd0 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -3955,7 +3955,7 @@ static gen_helper_gvec_5_ptr * const fcmla_fns[4] = { gen_helper_sve_fcmla_zpzzz_s, gen_helper_sve_fcmla_zpzzz_d, }; TRANS_FEAT(FCMLA_zpzzz, aa64_sve, gen_gvec_fpst_zzzzp, fcmla_fns[a->esz], - a->rd, a->rn, a->rm, a->ra, a->pg, a->rot, + a->rd, a->rn, a->rm, a->ra, a->pg, a->rot | (s->fpcr_ah << 2), a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) static gen_helper_gvec_4_ptr * const fcmla_idx_fns[4] = { From 0fa4b7afd4f78ed6ec0dedffba7dc93c00ca3d83 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:39:52 +0000 Subject: [PATCH 48/68] target/arm: Handle FPCR.AH in FMLSL (by element and vector) Handle FPCR.AH's requirement to not negate the sign of a NaN in FMLSL by element and vector, using the usual trick of negating by XOR when AH=0 and by muladd flags when AH=1. Since we have the CPUARMState* in the helper anyway, we can look directly at env->vfp.fpcr and don't need to pass in the FPCR.AH value via the SIMD data word.
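For the AH=0 case the helpers flip all of the packed fp16 inputs in one go; schematically (a sketch using names from the patch below):

    /*
     * 0x8000 is the fp16 sign bit; replicating it into each 16-bit lane
     * of the 64-bit load negates all four half-precision inputs at once,
     * before they are widened to single precision.
     */
    uint64_t negx = is_s ? 0x8000800080008000ull : 0;
    n_4 = load4_f16(vn, is_q, is_2) ^ negx;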
Signed-off-by: Richard Henderson Message-id: 20250129013857.135256-31-richard.henderson@linaro.org [PMM: commit message tweaked] Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/tcg/vec_helper.c | 71 ++++++++++++++++++++++++------------- 1 file changed, 46 insertions(+), 25 deletions(-) diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index c2f98a5c67..5f0656f34c 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -2124,27 +2124,24 @@ static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2) */ static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst, - uint32_t desc, bool fz16) + uint64_t negx, int negf, uint32_t desc, bool fz16) { intptr_t i, oprsz = simd_oprsz(desc); - int is_s = extract32(desc, SIMD_DATA_SHIFT, 1); int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1); int is_q = oprsz == 16; uint64_t n_4, m_4; - /* Pre-load all of the f16 data, avoiding overlap issues. */ - n_4 = load4_f16(vn, is_q, is_2); + /* + * Pre-load all of the f16 data, avoiding overlap issues. + * Negate all inputs for AH=0 FMLSL at once. + */ + n_4 = load4_f16(vn, is_q, is_2) ^ negx; m_4 = load4_f16(vm, is_q, is_2); - /* Negate all inputs for FMLSL at once. */ - if (is_s) { - n_4 ^= 0x8000800080008000ull; - } - for (i = 0; i < oprsz / 4; i++) { float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16); float32 m_1 = float16_to_float32_by_bits(m_4 >> (i * 16), fz16); - d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst); + d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], negf, fpst); } clear_tail(d, oprsz, simd_maxsz(desc)); } @@ -2152,14 +2149,28 @@ static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst, void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm, CPUARMState *env, uint32_t desc) { - do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, desc, + bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); + uint64_t negx = is_s ? 0x8000800080008000ull : 0; + + do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, negx, 0, desc, get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a32)); } void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm, CPUARMState *env, uint32_t desc) { - do_fmlal(vd, vn, vm, &env->vfp.fp_status_a64, desc, + bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); + uint64_t negx = 0; + int negf = 0; + + if (is_s) { + if (env->vfp.fpcr & FPCR_AH) { + negf = float_muladd_negate_product; + } else { + negx = 0x8000800080008000ull; + } + } + do_fmlal(vd, vn, vm, &env->vfp.fp_status_a64, negx, negf, desc, get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a64)); } @@ -2184,29 +2195,25 @@ void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va, } static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst, - uint32_t desc, bool fz16) + uint64_t negx, int negf, uint32_t desc, bool fz16) { intptr_t i, oprsz = simd_oprsz(desc); - int is_s = extract32(desc, SIMD_DATA_SHIFT, 1); int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1); int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3); int is_q = oprsz == 16; uint64_t n_4; float32 m_1; - /* Pre-load all of the f16 data, avoiding overlap issues. */ - n_4 = load4_f16(vn, is_q, is_2); - - /* Negate all inputs for FMLSL at once. */ - if (is_s) { - n_4 ^= 0x8000800080008000ull; - } - + /* + * Pre-load all of the f16 data, avoiding overlap issues. + * Negate all inputs for AH=0 FMLSL at once. 
+ */ n_4 = load4_f16(vn, is_q, is_2) ^ negx; m_1 = float16_to_float32_by_bits(((float16 *)vm)[H2(index)], fz16); for (i = 0; i < oprsz / 4; i++) { float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16); - d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst); + d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], negf, fpst); } clear_tail(d, oprsz, simd_maxsz(desc)); } @@ -2214,14 +2221,28 @@ static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst, void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm, CPUARMState *env, uint32_t desc) { - do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, desc, + bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); + uint64_t negx = is_s ? 0x8000800080008000ull : 0; + + do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, negx, 0, desc, get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a32)); } void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm, CPUARMState *env, uint32_t desc) { - do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status_a64, desc, + bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); + uint64_t negx = 0; + int negf = 0; + + if (is_s) { + if (env->vfp.fpcr & FPCR_AH) { + negf = float_muladd_negate_product; + } else { + negx = 0x8000800080008000ull; + } + } + do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status_a64, negx, negf, desc, get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a64)); } From f67a16e7d754cde9a55dd1e26ea4db48701b6fb9 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:39:53 +0000 Subject: [PATCH 49/68] target/arm: Handle FPCR.AH in SVE FMLSL (indexed) Handle FPCR.AH's requirement to not negate the sign of a NaN in SVE FMLSL (indexed), using the usual trick of negating by XOR when AH=0 and by muladd flags when AH=1. Since we have the CPUARMState* in the helper anyway, we can look directly at env->vfp.fpcr and don't need to pass in the FPCR.AH value via the SIMD data word.
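In the indexed form a single fp16 element of Zm, chosen by the immediate, is broadcast against the selected elements of Zn; schematically (sketch only, using names from the helper):

    /*
     * One fp16 element per 128-bit segment is picked via 'idx' and
     * reused for every product in that segment.
     */
    float16 mm_16 = *(float16 *)(vm + i + idx);
    float32 mm = float16_to_float32_by_bits(mm_16, fz16);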
Signed-off-by: Richard Henderson Message-id: 20250129013857.135256-32-richard.henderson@linaro.org [PMM: commit message tweaked] Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/tcg/vec_helper.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index 42bb43acd7..aefcd07ef0 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -2250,23 +2250,32 @@ void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va, CPUARMState *env, uint32_t desc) { intptr_t i, j, oprsz = simd_oprsz(desc); - uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15; + bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16); intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 3) * sizeof(float16); float_status *status = &env->vfp.fp_status_a64; bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a64); + int negx = 0, negf = 0; + + if (is_s) { + if (env->vfp.fpcr & FPCR_AH) { + negf = float_muladd_negate_product; + } else { + negx = 0x8000; + } + } for (i = 0; i < oprsz; i += 16) { float16 mm_16 = *(float16 *)(vm + i + idx); float32 mm = float16_to_float32_by_bits(mm_16, fz16); for (j = 0; j < 16; j += sizeof(float32)) { - float16 nn_16 = *(float16 *)(vn + H1_2(i + j + sel)) ^ negn; + float16 nn_16 = *(float16 *)(vn + H1_2(i + j + sel)) ^ negx; float32 nn = float16_to_float32_by_bits(nn_16, fz16); float32 aa = *(float32 *)(va + H1_4(i + j)); *(float32 *)(vd + H1_4(i + j)) = - float32_muladd(nn, mm, aa, 0, status); + float32_muladd(nn, mm, aa, negf, status); } } } From a66c4585fff70ffc4a61e0f5f5528320a55cd9cd Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:39:54 +0000 Subject: [PATCH 50/68] target/arm: Handle FPCR.AH in SVE FMLSLB, FMLSLT (vectors) Handle FPCR.AH's requirement to not negate the sign of a NaN in SVE FMLSLB, FMLSLT (vectors), using the usual trick of negating by XOR when AH=0 and by muladd flags when AH=1. Since we have the CPUARMState* in the helper anyway, we can look directly at env->vfp.fpcr and don't need to pass in the FPCR.AH value via the SIMD data word.
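FMLSLB operates on the bottom (even-numbered) fp16 element of each 32-bit container and FMLSLT on the top (odd-numbered) one; in the helper that choice is just a byte offset folded into the load address (sketch, using names from the patch below):

    /*
     * sel is 0 for the bottom/even elements (FMLSLB) and sizeof(float16)
     * for the top/odd elements (FMLSLT).
     */
    intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
    float16 nn_16 = *(float16 *)(vn + H1_2(i + sel)) ^ negx;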
Signed-off-by: Richard Henderson Message-id: 20250129013857.135256-33-richard.henderson@linaro.org [PMM: tweaked commit message] Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/tcg/vec_helper.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index 42bb43acd7..aefcd07ef0 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -2178,19 +2178,28 @@ void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va, CPUARMState *env, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); - uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15; + bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16); float_status *status = &env->vfp.fp_status_a64; bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a64); + int negx = 0, negf = 0; + + if (is_s) { + if (env->vfp.fpcr & FPCR_AH) { + negf = float_muladd_negate_product; + } else { + negx = 0x8000; + } + } for (i = 0; i < oprsz; i += sizeof(float32)) { - float16 nn_16 = *(float16 *)(vn + H1_2(i + sel)) ^ negn; + float16 nn_16 = *(float16 *)(vn + H1_2(i + sel)) ^ negx; float16 mm_16 = *(float16 *)(vm + H1_2(i + sel)); float32 nn = float16_to_float32_by_bits(nn_16, fz16); float32 mm = float16_to_float32_by_bits(mm_16, fz16); float32 aa = *(float32 *)(va + H1_4(i)); - *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, 0, status); + *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, negf, status); } } From d38a57a3f1ea66c4338a10d70c032741e8786c51 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:55 +0000 Subject: [PATCH 51/68] target/arm: Enable FEAT_AFP for '-cpu max' Now that we have completed the handling for FPCR.{AH,FIZ,NEP}, we can enable FEAT_AFP for '-cpu max', and document that we support it. 
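As a quick smoke test (assuming a guest kernel new enough to define HWCAP2_AFP), the feature can be checked from guest userspace when running with '-cpu max':

    #include <stdbool.h>
    #include <sys/auxv.h>
    #include <asm/hwcap.h>

    /* True if the (emulated) CPU advertises FEAT_AFP to userspace. */
    static bool have_afp(void)
    {
        return (getauxval(AT_HWCAP2) & HWCAP2_AFP) != 0;
    }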
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- docs/system/arm/emulation.rst | 1 + target/arm/tcg/cpu64.c | 1 + 2 files changed, 2 insertions(+) diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst index 60176d0859..63b4cdf5fb 100644 --- a/docs/system/arm/emulation.rst +++ b/docs/system/arm/emulation.rst @@ -20,6 +20,7 @@ the following architecture extensions: - FEAT_AA64EL3 (Support for AArch64 at EL3) - FEAT_AdvSIMD (Advanced SIMD Extension) - FEAT_AES (AESD and AESE instructions) +- FEAT_AFP (Alternate floating-point behavior) - FEAT_Armv9_Crypto (Armv9 Cryptographic Extension) - FEAT_ASID16 (16 bit ASID) - FEAT_BBM at level 2 (Translation table break-before-make levels) diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c index 93573ceeb1..0bc68aac17 100644 --- a/target/arm/tcg/cpu64.c +++ b/target/arm/tcg/cpu64.c @@ -1218,6 +1218,7 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */ t = FIELD_DP64(t, ID_AA64MMFR1, ETS, 2); /* FEAT_ETS2 */ t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */ + t = FIELD_DP64(t, ID_AA64MMFR1, AFP, 1); /* FEAT_AFP */ t = FIELD_DP64(t, ID_AA64MMFR1, TIDCP1, 1); /* FEAT_TIDCP1 */ t = FIELD_DP64(t, ID_AA64MMFR1, CMOW, 1); /* FEAT_CMOW */ cpu->isar.id_aa64mmfr1 = t; From 0ff5c021f04d7dbaec174451e7a0dab3822b1b0d Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:56 +0000 Subject: [PATCH 52/68] target/arm: Plumb FEAT_RPRES frecpe and frsqrte through to new helper FEAT_RPRES implements an "increased precision" variant of the single precision FRECPE and FRSQRTE instructions from an 8 bit to a 12 bit mantissa. This applies only when FPCR.AH == 1. Note that the halfprec and double versions of these insns retain the 8 bit precision regardless. In this commit we add all the plumbing to make these instructions call a new helper function when the increased-precision is in effect. In the following commit we will provide the actual change in behaviour in the helpers. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/cpu-features.h | 5 +++++ target/arm/helper.h | 4 ++++ target/arm/tcg/translate-a64.c | 34 ++++++++++++++++++++++++++++++---- target/arm/tcg/translate-sve.c | 16 ++++++++++++++-- target/arm/tcg/vec_helper.c | 2 ++ target/arm/vfp_helper.c | 32 ++++++++++++++++++++++++++++++-- 6 files changed, 85 insertions(+), 8 deletions(-) diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h index 7bf24c506b..525e4cee12 100644 --- a/target/arm/cpu-features.h +++ b/target/arm/cpu-features.h @@ -597,6 +597,11 @@ static inline bool isar_feature_aa64_mops(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, MOPS); } +static inline bool isar_feature_aa64_rpres(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, RPRES); +} + static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id) { /* We always set the AdvSIMD and FP fields identically. 
*/ diff --git a/target/arm/helper.h b/target/arm/helper.h index f0a783b708..0907505839 100644 --- a/target/arm/helper.h +++ b/target/arm/helper.h @@ -245,9 +245,11 @@ DEF_HELPER_4(vfp_muladdh, f16, f16, f16, f16, fpst) DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, fpst) DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, fpst) +DEF_HELPER_FLAGS_2(recpe_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst) DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, fpst) DEF_HELPER_FLAGS_2(rsqrte_f16, TCG_CALL_NO_RWG, f16, f16, fpst) DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, fpst) +DEF_HELPER_FLAGS_2(rsqrte_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst) DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, fpst) DEF_HELPER_FLAGS_1(recpe_u32, TCG_CALL_NO_RWG, i32, i32) DEF_HELPER_FLAGS_1(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32) @@ -680,10 +682,12 @@ DEF_HELPER_FLAGS_4(gvec_vrintx_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_frecpe_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(gvec_frsqrte_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(gvec_frsqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_frsqrte_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(gvec_frsqrte_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(gvec_fcgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 1846f81bf5..8bef391bb0 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -8903,7 +8903,14 @@ static const FPScalar1 f_scalar_frecpe = { gen_helper_recpe_f32, gen_helper_recpe_f64, }; -TRANS(FRECPE_s, do_fp1_scalar_ah, a, &f_scalar_frecpe, -1) +static const FPScalar1 f_scalar_frecpe_rpres = { + gen_helper_recpe_f16, + gen_helper_recpe_rpres_f32, + gen_helper_recpe_f64, +}; +TRANS(FRECPE_s, do_fp1_scalar_ah, a, + s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? + &f_scalar_frecpe_rpres : &f_scalar_frecpe, -1) static const FPScalar1 f_scalar_frecpx = { gen_helper_frecpx_f16, @@ -8917,7 +8924,14 @@ static const FPScalar1 f_scalar_frsqrte = { gen_helper_rsqrte_f32, gen_helper_rsqrte_f64, }; -TRANS(FRSQRTE_s, do_fp1_scalar_ah, a, &f_scalar_frsqrte, -1) +static const FPScalar1 f_scalar_frsqrte_rpres = { + gen_helper_rsqrte_f16, + gen_helper_rsqrte_rpres_f32, + gen_helper_rsqrte_f64, +}; +TRANS(FRSQRTE_s, do_fp1_scalar_ah, a, + s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? + &f_scalar_frsqrte_rpres : &f_scalar_frsqrte, -1) static bool trans_FCVT_s_ds(DisasContext *s, arg_rr *a) { @@ -9948,14 +9962,26 @@ static gen_helper_gvec_2_ptr * const f_frecpe[] = { gen_helper_gvec_frecpe_s, gen_helper_gvec_frecpe_d, }; -TRANS(FRECPE_v, do_gvec_op2_ah_fpst, a->esz, a->q, a->rd, a->rn, 0, f_frecpe) +static gen_helper_gvec_2_ptr * const f_frecpe_rpres[] = { + gen_helper_gvec_frecpe_h, + gen_helper_gvec_frecpe_rpres_s, + gen_helper_gvec_frecpe_d, +}; +TRANS(FRECPE_v, do_gvec_op2_ah_fpst, a->esz, a->q, a->rd, a->rn, 0, + s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? 
f_frecpe_rpres : f_frecpe) static gen_helper_gvec_2_ptr * const f_frsqrte[] = { gen_helper_gvec_frsqrte_h, gen_helper_gvec_frsqrte_s, gen_helper_gvec_frsqrte_d, }; -TRANS(FRSQRTE_v, do_gvec_op2_ah_fpst, a->esz, a->q, a->rd, a->rn, 0, f_frsqrte) +static gen_helper_gvec_2_ptr * const f_frsqrte_rpres[] = { + gen_helper_gvec_frsqrte_h, + gen_helper_gvec_frsqrte_rpres_s, + gen_helper_gvec_frsqrte_d, +}; +TRANS(FRSQRTE_v, do_gvec_op2_ah_fpst, a->esz, a->q, a->rd, a->rn, 0, + s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? f_frsqrte_rpres : f_frsqrte) static bool trans_FCVTL_v(DisasContext *s, arg_qrr_e *a) { diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 6af94fedd0..d23be477b4 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -3629,13 +3629,25 @@ static gen_helper_gvec_2_ptr * const frecpe_fns[] = { NULL, gen_helper_gvec_frecpe_h, gen_helper_gvec_frecpe_s, gen_helper_gvec_frecpe_d, }; -TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_ah_arg_zz, frecpe_fns[a->esz], a, 0) +static gen_helper_gvec_2_ptr * const frecpe_rpres_fns[] = { + NULL, gen_helper_gvec_frecpe_h, + gen_helper_gvec_frecpe_rpres_s, gen_helper_gvec_frecpe_d, +}; +TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_ah_arg_zz, + s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? + frecpe_rpres_fns[a->esz] : frecpe_fns[a->esz], a, 0) static gen_helper_gvec_2_ptr * const frsqrte_fns[] = { NULL, gen_helper_gvec_frsqrte_h, gen_helper_gvec_frsqrte_s, gen_helper_gvec_frsqrte_d, }; -TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_ah_arg_zz, frsqrte_fns[a->esz], a, 0) +static gen_helper_gvec_2_ptr * const frsqrte_rpres_fns[] = { + NULL, gen_helper_gvec_frsqrte_h, + gen_helper_gvec_frsqrte_rpres_s, gen_helper_gvec_frsqrte_d, +}; +TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_ah_arg_zz, + s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? + frsqrte_rpres_fns[a->esz] : frsqrte_fns[a->esz], a, 0) /* *** SVE Floating Point Compare with Zero Group diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index aefcd07ef0..ff3f7d8208 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -1236,10 +1236,12 @@ void HELPER(NAME)(void *vd, void *vn, float_status *stat, uint32_t desc) \ DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16) DO_2OP(gvec_frecpe_s, helper_recpe_f32, float32) +DO_2OP(gvec_frecpe_rpres_s, helper_recpe_rpres_f32, float32) DO_2OP(gvec_frecpe_d, helper_recpe_f64, float64) DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16) DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32) +DO_2OP(gvec_frsqrte_rpres_s, helper_rsqrte_rpres_f32, float32) DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64) DO_2OP(gvec_vrintx_h, float16_round_to_int, float16) diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 5c370e263c..b97417e5a1 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -841,7 +841,11 @@ uint32_t HELPER(recpe_f16)(uint32_t input, float_status *fpst) return make_float16(f16_val); } -float32 HELPER(recpe_f32)(float32 input, float_status *fpst) +/* + * FEAT_RPRES means the f32 FRECPE has an "increased precision" variant + * which is used when FPCR.AH == 1. 
+ */ +static float32 do_recpe_f32(float32 input, float_status *fpst, bool rpres) { float32 f32 = float32_squash_input_denormal(input, fpst); uint32_t f32_val = float32_val(f32); @@ -890,6 +894,16 @@ float32 HELPER(recpe_f32)(float32 input, float_status *fpst) return make_float32(f32_val); } +float32 HELPER(recpe_f32)(float32 input, float_status *fpst) +{ + return do_recpe_f32(input, fpst, false); +} + +float32 HELPER(recpe_rpres_f32)(float32 input, float_status *fpst) +{ + return do_recpe_f32(input, fpst, true); +} + float64 HELPER(recpe_f64)(float64 input, float_status *fpst) { float64 f64 = float64_squash_input_denormal(input, fpst); @@ -1035,7 +1049,11 @@ uint32_t HELPER(rsqrte_f16)(uint32_t input, float_status *s) return make_float16(val); } -float32 HELPER(rsqrte_f32)(float32 input, float_status *s) +/* + * FEAT_RPRES means the f32 FRSQRTE has an "increased precision" variant + * which is used when FPCR.AH == 1. + */ +static float32 do_rsqrte_f32(float32 input, float_status *s, bool rpres) { float32 f32 = float32_squash_input_denormal(input, s); uint32_t val = float32_val(f32); @@ -1080,6 +1098,16 @@ float32 HELPER(rsqrte_f32)(float32 input, float_status *s) return make_float32(val); } +float32 HELPER(rsqrte_f32)(float32 input, float_status *s) +{ + return do_rsqrte_f32(input, s, false); +} + +float32 HELPER(rsqrte_rpres_f32)(float32 input, float_status *s) +{ + return do_rsqrte_f32(input, s, true); +} + float64 HELPER(rsqrte_f64)(float64 input, float_status *s) { float64 f64 = float64_squash_input_denormal(input, s); From c1567205e000369aec6c73bfbd60219098b34d3e Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:57 +0000 Subject: [PATCH 53/68] target/arm: Implement increased precision FRECPE Implement the increased precision variation of FRECPE. In the pseudocode this corresponds to the handling of the "increasedprecision" boolean in the FPRecipEstimate() and RecipEstimate() functions. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/vfp_helper.c | 54 +++++++++++++++++++++++++++++++++++------ 1 file changed, 46 insertions(+), 8 deletions(-) diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index b97417e5a1..2df97e128f 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -733,6 +733,33 @@ static int recip_estimate(int input) return r; } +/* + * Increased precision version: + * input is a 13 bit fixed point number + * input range 2048 .. 4095 for a number from 0.5 <= x < 1.0. + * result range 4096 .. 8191 for a number from 1.0 to 2.0 + */ +static int recip_estimate_incprec(int input) +{ + int a, b, r; + assert(2048 <= input && input < 4096); + a = (input * 2) + 1; + /* + * The pseudocode expresses this as an operation on infinite + * precision reals where it calculates 2^25 / a and then looks + * at the error between that and the rounded-down-to-integer + * value to see if it should instead round up. We instead + * follow the same approach as the pseudocode for the 8-bit + * precision version, and calculate (2 * (2^25 / a)) as an + * integer so we can do the "add one and halve" to round it. + * So the 1 << 26 here is correct. + */ + b = (1 << 26) / a; + r = (b + 1) >> 1; + assert(4096 <= r && r < 8192); + return r; +} + /* * Common wrapper to call recip_estimate * @@ -742,7 +769,8 @@ static int recip_estimate(int input) * callee. 
*/ -static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac) +static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac, + bool increasedprecision) { uint32_t scaled, estimate; uint64_t result_frac; @@ -758,12 +786,22 @@ static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac) } } - /* scaled = UInt('1':fraction<51:44>) */ - scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); - estimate = recip_estimate(scaled); + if (increasedprecision) { + /* scaled = UInt('1':fraction<51:41>) */ + scaled = deposit32(1 << 11, 0, 11, extract64(frac, 41, 11)); + estimate = recip_estimate_incprec(scaled); + } else { + /* scaled = UInt('1':fraction<51:44>) */ + scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); + estimate = recip_estimate(scaled); + } result_exp = exp_off - *exp; - result_frac = deposit64(0, 44, 8, estimate); + if (increasedprecision) { + result_frac = deposit64(0, 40, 12, estimate); + } else { + result_frac = deposit64(0, 44, 8, estimate); + } if (result_exp == 0) { result_frac = deposit64(result_frac >> 1, 51, 1, 1); } else if (result_exp == -1) { @@ -832,7 +870,7 @@ uint32_t HELPER(recpe_f16)(uint32_t input, float_status *fpst) } f64_frac = call_recip_estimate(&f16_exp, 29, - ((uint64_t) f16_frac) << (52 - 10)); + ((uint64_t) f16_frac) << (52 - 10), false); /* result = sign : result_exp<4:0> : fraction<51:42> */ f16_val = deposit32(0, 15, 1, f16_sign); @@ -885,7 +923,7 @@ static float32 do_recpe_f32(float32 input, float_status *fpst, bool rpres) } f64_frac = call_recip_estimate(&f32_exp, 253, - ((uint64_t) f32_frac) << (52 - 23)); + ((uint64_t) f32_frac) << (52 - 23), rpres); /* result = sign : result_exp<7:0> : fraction<51:29> */ f32_val = deposit32(0, 31, 1, f32_sign); @@ -943,7 +981,7 @@ float64 HELPER(recpe_f64)(float64 input, float_status *fpst) return float64_set_sign(float64_zero, float64_is_neg(f64)); } - f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac); + f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac, false); /* result = sign : result_exp<10:0> : fraction<51:0>; */ f64_val = deposit64(0, 63, 1, f64_sign); From 22330d2b0f6152d87114fec916f323e6726341d3 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:58 +0000 Subject: [PATCH 54/68] target/arm: Implement increased precision FRSQRTE Implement the increased precision variation of FRSQRTE. In the pseudocode this corresponds to the handling of the "increasedprecision" boolean in the FPRSqrtEstimate() and RecipSqrtEstimate() functions. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/vfp_helper.c | 77 ++++++++++++++++++++++++++++++++++------- 1 file changed, 64 insertions(+), 13 deletions(-) diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 2df97e128f..9d58fa7d6f 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -1015,8 +1015,36 @@ static int do_recip_sqrt_estimate(int a) return estimate; } +static int do_recip_sqrt_estimate_incprec(int a) +{ + /* + * The Arm ARM describes the 12-bit precision version of RecipSqrtEstimate + * in terms of an infinite-precision floating point calculation of a + * square root. We implement this using the same kind of pure integer + * algorithm as the 8-bit mantissa, to get the same bit-for-bit result. 
+ */ + int64_t b, estimate; -static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac) + assert(1024 <= a && a < 4096); + if (a < 2048) { + a = a * 2 + 1; + } else { + a = (a >> 1) << 1; + a = (a + 1) * 2; + } + b = 8192; + while (a * (b + 1) * (b + 1) < (1ULL << 39)) { + b += 1; + } + estimate = (b + 1) / 2; + + assert(4096 <= estimate && estimate < 8192); + + return estimate; +} + +static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac, + bool increasedprecision) { int estimate; uint32_t scaled; @@ -1029,17 +1057,32 @@ static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac) frac = extract64(frac, 0, 51) << 1; } - if (*exp & 1) { - /* scaled = UInt('01':fraction<51:45>) */ - scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7)); + if (increasedprecision) { + if (*exp & 1) { + /* scaled = UInt('01':fraction<51:42>) */ + scaled = deposit32(1 << 10, 0, 10, extract64(frac, 42, 10)); + } else { + /* scaled = UInt('1':fraction<51:41>) */ + scaled = deposit32(1 << 11, 0, 11, extract64(frac, 41, 11)); + } + estimate = do_recip_sqrt_estimate_incprec(scaled); } else { - /* scaled = UInt('1':fraction<51:44>) */ - scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); + if (*exp & 1) { + /* scaled = UInt('01':fraction<51:45>) */ + scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7)); + } else { + /* scaled = UInt('1':fraction<51:44>) */ + scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); + } + estimate = do_recip_sqrt_estimate(scaled); } - estimate = do_recip_sqrt_estimate(scaled); *exp = (exp_off - *exp) / 2; - return extract64(estimate, 0, 8) << 44; + if (increasedprecision) { + return extract64(estimate, 0, 12) << 40; + } else { + return extract64(estimate, 0, 8) << 44; + } } uint32_t HELPER(rsqrte_f16)(uint32_t input, float_status *s) @@ -1078,7 +1121,7 @@ uint32_t HELPER(rsqrte_f16)(uint32_t input, float_status *s) f64_frac = ((uint64_t) f16_frac) << (52 - 10); - f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac); + f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac, false); /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */ val = deposit32(0, 15, 1, f16_sign); @@ -1127,12 +1170,20 @@ static float32 do_rsqrte_f32(float32 input, float_status *s, bool rpres) f64_frac = ((uint64_t) f32_frac) << 29; - f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac); + f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac, rpres); - /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(15) */ + /* + * result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) + * or for increased precision + * result = sign : result_exp<7:0> : estimate<11:0> : Zeros(11) + */ val = deposit32(0, 31, 1, f32_sign); val = deposit32(val, 23, 8, f32_exp); - val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8)); + if (rpres) { + val = deposit32(val, 11, 12, extract64(f64_frac, 52 - 12, 12)); + } else { + val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8)); + } return make_float32(val); } @@ -1176,7 +1227,7 @@ float64 HELPER(rsqrte_f64)(float64 input, float_status *s) return float64_zero; } - f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac); + f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac, false); /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(44) */ val = deposit64(0, 61, 1, f64_sign); From b0bf37746bb9d66496ce8ef076c74388aa7ef899 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Sat, 1 Feb 2025 16:39:59 +0000 Subject: [PATCH 55/68] target/arm: Enable FEAT_RPRES for -cpu max Now the 
emulation is complete, we can enable FEAT_RPRES for the 'max' CPU type. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- docs/system/arm/emulation.rst | 1 + target/arm/tcg/cpu64.c | 1 + 2 files changed, 2 insertions(+) diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst index 63b4cdf5fb..78c2fd2113 100644 --- a/docs/system/arm/emulation.rst +++ b/docs/system/arm/emulation.rst @@ -118,6 +118,7 @@ the following architecture extensions: - FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions) - FEAT_RME (Realm Management Extension) (NB: support status in QEMU is experimental) - FEAT_RNG (Random number generator) +- FEAT_RPRES (Increased precision of FRECPE and FRSQRTE) - FEAT_S2FWB (Stage 2 forced Write-Back) - FEAT_SB (Speculation Barrier) - FEAT_SEL2 (Secure EL2) diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c index 0bc68aac17..29ab0ac79d 100644 --- a/target/arm/tcg/cpu64.c +++ b/target/arm/tcg/cpu64.c @@ -1167,6 +1167,7 @@ void aarch64_max_tcg_initfn(Object *obj) cpu->isar.id_aa64isar1 = t; t = cpu->isar.id_aa64isar2; + t = FIELD_DP64(t, ID_AA64ISAR2, RPRES, 1); /* FEAT_RPRES */ t = FIELD_DP64(t, ID_AA64ISAR2, MOPS, 1); /* FEAT_MOPS */ t = FIELD_DP64(t, ID_AA64ISAR2, BC, 1); /* FEAT_HBC */ t = FIELD_DP64(t, ID_AA64ISAR2, WFXT, 2); /* FEAT_WFxT */ From b902f5c62dbf2e796e39af55ed28294ec0467a56 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:40:00 +0000 Subject: [PATCH 56/68] target/arm: Introduce CPUARMState.vfp.fp_status[] MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move ARMFPStatusFlavour to cpu.h with which to index this array. For now, place the array in an anonymous union with the existing structures. Adjust the order of the existing structures to match the enum. Simplify fpstatus_ptr() using the new array. Signed-off-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-id: 20250129013857.135256-7-richard.henderson@linaro.org Signed-off-by: Peter Maydell --- target/arm/cpu.h | 119 +++++++++++++++++++++---------------- target/arm/tcg/translate.h | 64 +------------------- 2 files changed, 70 insertions(+), 113 deletions(-) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 24b0a5fcb9..bf68e135d7 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -202,6 +202,61 @@ typedef struct ARMMMUFaultInfo ARMMMUFaultInfo; typedef struct NVICState NVICState; +/* + * Enum for indexing vfp.fp_status[]. + * + * FPST_A32: is the "normal" fp status for AArch32 insns + * FPST_A64: is the "normal" fp status for AArch64 insns + * FPST_A32_F16: used for AArch32 half-precision calculations + * FPST_A64_F16: used for AArch64 half-precision calculations + * FPST_STD: the ARM "Standard FPSCR Value" + * FPST_STD_F16: used for half-precision + * calculations with the ARM "Standard FPSCR Value" + * FPST_AH: used for the A64 insns which change behaviour + * when FPCR.AH == 1 (bfloat16 conversions and multiplies, + * and the reciprocal and square root estimate/step insns) + * FPST_AH_F16: used for the A64 insns which change behaviour + * when FPCR.AH == 1 (bfloat16 conversions and multiplies, + * and the reciprocal and square root estimate/step insns); + * for half-precision + * + * Half-precision operations are governed by a separate + * flush-to-zero control bit in FPSCR:FZ16. We pass a separate + * status structure to control this. 
+ * + * The "Standard FPSCR", ie default-NaN, flush-to-zero, + * round-to-nearest and is used by any operations (generally + * Neon) which the architecture defines as controlled by the + * standard FPSCR value rather than the FPSCR. + * + * The "standard FPSCR but for fp16 ops" is needed because + * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than + * using a fixed value for it. + * + * The ah_fp_status is needed because some insns have different + * behaviour when FPCR.AH == 1: they don't update cumulative + * exception flags, they act like FPCR.{FZ,FIZ} = {1,1} and + * they ignore FPCR.RMode. But they don't ignore FPCR.FZ16, + * which means we need an ah_fp_status_f16 as well. + * + * To avoid having to transfer exception bits around, we simply + * say that the FPSCR cumulative exception flags are the logical + * OR of the flags in the four fp statuses. This relies on the + * only thing which needs to read the exception flags being + * an explicit FPSCR read. + */ +typedef enum ARMFPStatusFlavour { + FPST_A32, + FPST_A64, + FPST_A32_F16, + FPST_A64_F16, + FPST_AH, + FPST_AH_F16, + FPST_STD, + FPST_STD_F16, +} ARMFPStatusFlavour; +#define FPST_COUNT 8 + typedef struct CPUArchState { /* Regs for current mode. */ uint32_t regs[16]; @@ -631,56 +686,20 @@ typedef struct CPUArchState { /* Scratch space for aa32 neon expansion. */ uint32_t scratch[8]; - /* There are a number of distinct float control structures: - * - * fp_status_a32: is the "normal" fp status for AArch32 insns - * fp_status_a64: is the "normal" fp status for AArch64 insns - * fp_status_fp16_a32: used for AArch32 half-precision calculations - * fp_status_fp16_a64: used for AArch64 half-precision calculations - * standard_fp_status : the ARM "Standard FPSCR Value" - * standard_fp_status_fp16 : used for half-precision - * calculations with the ARM "Standard FPSCR Value" - * ah_fp_status: used for the A64 insns which change behaviour - * when FPCR.AH == 1 (bfloat16 conversions and multiplies, - * and the reciprocal and square root estimate/step insns) - * ah_fp_status_f16: used for the A64 insns which change behaviour - * when FPCR.AH == 1 (bfloat16 conversions and multiplies, - * and the reciprocal and square root estimate/step insns); - * for half-precision - * - * Half-precision operations are governed by a separate - * flush-to-zero control bit in FPSCR:FZ16. We pass a separate - * status structure to control this. - * - * The "Standard FPSCR", ie default-NaN, flush-to-zero, - * round-to-nearest and is used by any operations (generally - * Neon) which the architecture defines as controlled by the - * standard FPSCR value rather than the FPSCR. - * - * The "standard FPSCR but for fp16 ops" is needed because - * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than - * using a fixed value for it. - * - * The ah_fp_status is needed because some insns have different - * behaviour when FPCR.AH == 1: they don't update cumulative - * exception flags, they act like FPCR.{FZ,FIZ} = {1,1} and - * they ignore FPCR.RMode. But they don't ignore FPCR.FZ16, - * which means we need an ah_fp_status_f16 as well. - * - * To avoid having to transfer exception bits around, we simply - * say that the FPSCR cumulative exception flags are the logical - * OR of the flags in the four fp statuses. This relies on the - * only thing which needs to read the exception flags being - * an explicit FPSCR read. 
- */ - float_status fp_status_a32; - float_status fp_status_a64; - float_status fp_status_f16_a32; - float_status fp_status_f16_a64; - float_status standard_fp_status; - float_status standard_fp_status_f16; - float_status ah_fp_status; - float_status ah_fp_status_f16; + /* There are a number of distinct float control structures. */ + union { + float_status fp_status[FPST_COUNT]; + struct { + float_status fp_status_a32; + float_status fp_status_a64; + float_status fp_status_f16_a32; + float_status fp_status_f16_a64; + float_status ah_fp_status; + float_status ah_fp_status_f16; + float_status standard_fp_status; + float_status standard_fp_status_f16; + }; + }; uint64_t zcr_el[4]; /* ZCR_EL[1-3] */ uint64_t smcr_el[4]; /* SMCR_EL[1-3] */ diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index 0dff00015e..f8dc2f0d4b 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -670,80 +670,18 @@ static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb) return (CPUARMTBFlags){ tb->flags, tb->cs_base }; } -/* - * Enum for argument to fpstatus_ptr(). - */ -typedef enum ARMFPStatusFlavour { - FPST_A32, - FPST_A64, - FPST_A32_F16, - FPST_A64_F16, - FPST_AH, - FPST_AH_F16, - FPST_STD, - FPST_STD_F16, -} ARMFPStatusFlavour; - /** * fpstatus_ptr: return TCGv_ptr to the specified fp_status field * * We have multiple softfloat float_status fields in the Arm CPU state struct * (see the comment in cpu.h for details). Return a TCGv_ptr which has * been set up to point to the requested field in the CPU state struct. - * The options are: - * - * FPST_A32 - * for AArch32 non-FP16 operations controlled by the FPCR - * FPST_A64 - * for AArch64 non-FP16 operations controlled by the FPCR - * FPST_A32_F16 - * for AArch32 operations controlled by the FPCR where FPCR.FZ16 is to be used - * FPST_A64_F16 - * for AArch64 operations controlled by the FPCR where FPCR.FZ16 is to be used - * FPST_AH: - * for AArch64 operations which change behaviour when AH=1 (specifically, - * bfloat16 conversions and multiplies, and the reciprocal and square root - * estimate/step insns) - * FPST_AH_F16: - * ditto, but for half-precision operations - * FPST_STD - * for A32/T32 Neon operations using the "standard FPSCR value" - * FPST_STD_F16 - * as FPST_STD, but where FPCR.FZ16 is to be used */ static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour) { TCGv_ptr statusptr = tcg_temp_new_ptr(); - int offset; + int offset = offsetof(CPUARMState, vfp.fp_status[flavour]); - switch (flavour) { - case FPST_A32: - offset = offsetof(CPUARMState, vfp.fp_status_a32); - break; - case FPST_A64: - offset = offsetof(CPUARMState, vfp.fp_status_a64); - break; - case FPST_A32_F16: - offset = offsetof(CPUARMState, vfp.fp_status_f16_a32); - break; - case FPST_A64_F16: - offset = offsetof(CPUARMState, vfp.fp_status_f16_a64); - break; - case FPST_AH: - offset = offsetof(CPUARMState, vfp.ah_fp_status); - break; - case FPST_AH_F16: - offset = offsetof(CPUARMState, vfp.ah_fp_status_f16); - break; - case FPST_STD: - offset = offsetof(CPUARMState, vfp.standard_fp_status); - break; - case FPST_STD_F16: - offset = offsetof(CPUARMState, vfp.standard_fp_status_f16); - break; - default: - g_assert_not_reached(); - } tcg_gen_addi_ptr(statusptr, tcg_env, offset); return statusptr; } From 7678859152df85c21380e3cf441eaab94f1a04ac Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:40:01 +0000 Subject: [PATCH 57/68] target/arm: Remove standard_fp_status_f16 MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace with fp_status[FPST_STD_F16]. Signed-off-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-id: 20250129013857.135256-8-richard.henderson@linaro.org Signed-off-by: Peter Maydell --- target/arm/cpu.c | 4 ++-- target/arm/cpu.h | 1 - target/arm/tcg/mve_helper.c | 24 ++++++++++++------------ target/arm/vfp_helper.c | 8 ++++---- 4 files changed, 18 insertions(+), 19 deletions(-) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 1307ee34ce..c6d91fd2c8 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -549,13 +549,13 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) set_flush_to_zero(1, &env->vfp.standard_fp_status); set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status); set_default_nan_mode(1, &env->vfp.standard_fp_status); - set_default_nan_mode(1, &env->vfp.standard_fp_status_f16); + set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD_F16]); arm_set_default_fp_behaviours(&env->vfp.fp_status_a32); arm_set_default_fp_behaviours(&env->vfp.fp_status_a64); arm_set_default_fp_behaviours(&env->vfp.standard_fp_status); arm_set_default_fp_behaviours(&env->vfp.fp_status_f16_a32); arm_set_default_fp_behaviours(&env->vfp.fp_status_f16_a64); - arm_set_default_fp_behaviours(&env->vfp.standard_fp_status_f16); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD_F16]); arm_set_ah_fp_behaviours(&env->vfp.ah_fp_status); set_flush_to_zero(1, &env->vfp.ah_fp_status); set_flush_inputs_to_zero(1, &env->vfp.ah_fp_status); diff --git a/target/arm/cpu.h b/target/arm/cpu.h index bf68e135d7..085d5383e0 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -697,7 +697,6 @@ typedef struct CPUArchState { float_status ah_fp_status; float_status ah_fp_status_f16; float_status standard_fp_status; - float_status standard_fp_status_f16; }; }; diff --git a/target/arm/tcg/mve_helper.c b/target/arm/tcg/mve_helper.c index 03ebef5ef2..911a53a23a 100644 --- a/target/arm/tcg/mve_helper.c +++ b/target/arm/tcg/mve_helper.c @@ -2814,7 +2814,7 @@ DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ &env->vfp.standard_fp_status; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ @@ -2888,7 +2888,7 @@ DO_2OP_FP_ALL(vminnma, minnuma) r[e] = 0; \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ &env->vfp.standard_fp_status; \ if (!(tm & 1)) { \ /* We need the result but without updating flags */ \ @@ -2926,7 +2926,7 @@ DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ &env->vfp.standard_fp_status; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ @@ -2964,7 +2964,7 @@ DO_VFMA(vfmss, 4, float32, true) if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) { \ continue; \ } \ - fpst0 = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + fpst0 = (ESIZE == 2) ? 
&env->vfp.fp_status[FPST_STD_F16] : \ &env->vfp.standard_fp_status; \ fpst1 = fpst0; \ if (!(mask & 1)) { \ @@ -3049,7 +3049,7 @@ DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ &env->vfp.standard_fp_status; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ @@ -3084,7 +3084,7 @@ DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ &env->vfp.standard_fp_status; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ @@ -3117,7 +3117,7 @@ DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS) TYPE *m = vm; \ TYPE ra = (TYPE)ra_in; \ float_status *fpst = (ESIZE == 2) ? \ - &env->vfp.standard_fp_status_f16 : \ + &env->vfp.fp_status[FPST_STD_F16] : \ &env->vfp.standard_fp_status; \ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ if (mask & 1) { \ @@ -3168,7 +3168,7 @@ DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum) if ((mask & emask) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ &env->vfp.standard_fp_status; \ if (!(mask & (1 << (e * ESIZE)))) { \ /* We need the result but without updating flags */ \ @@ -3202,7 +3202,7 @@ DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum) if ((mask & emask) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ &env->vfp.standard_fp_status; \ if (!(mask & (1 << (e * ESIZE)))) { \ /* We need the result but without updating flags */ \ @@ -3267,7 +3267,7 @@ DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ &env->vfp.standard_fp_status; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ @@ -3301,7 +3301,7 @@ DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero) float_status *fpst; \ float_status scratch_fpst; \ float_status *base_fpst = (ESIZE == 2) ? \ - &env->vfp.standard_fp_status_f16 : \ + &env->vfp.fp_status[FPST_STD_F16] : \ &env->vfp.standard_fp_status; \ uint32_t prev_rmode = get_float_rounding_mode(base_fpst); \ set_float_rounding_mode(rmode, base_fpst); \ @@ -3427,7 +3427,7 @@ void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ + fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ &env->vfp.standard_fp_status; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 9d58fa7d6f..61e769d430 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -122,7 +122,7 @@ static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) /* FZ16 does not generate an input denormal exception. 
*/ a32_flags |= (get_float_exception_flags(&env->vfp.fp_status_f16_a32) & ~float_flag_input_denormal_flushed); - a32_flags |= (get_float_exception_flags(&env->vfp.standard_fp_status_f16) + a32_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_STD_F16]) & ~float_flag_input_denormal_flushed); a64_flags |= get_float_exception_flags(&env->vfp.fp_status_a64); @@ -160,7 +160,7 @@ static void vfp_clear_float_status_exc_flags(CPUARMState *env) set_float_exception_flags(0, &env->vfp.fp_status_f16_a32); set_float_exception_flags(0, &env->vfp.fp_status_f16_a64); set_float_exception_flags(0, &env->vfp.standard_fp_status); - set_float_exception_flags(0, &env->vfp.standard_fp_status_f16); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD_F16]); set_float_exception_flags(0, &env->vfp.ah_fp_status); set_float_exception_flags(0, &env->vfp.ah_fp_status_f16); } @@ -207,11 +207,11 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) bool ftz_enabled = val & FPCR_FZ16; set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a32); set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a64); - set_flush_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16); + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]); set_flush_to_zero(ftz_enabled, &env->vfp.ah_fp_status_f16); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a32); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a64); - set_flush_inputs_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16); + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.ah_fp_status_f16); } if (changed & FPCR_FZ) { From f069b26b8e6b7d03399fc4fd7540229f1cb45e27 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:40:02 +0000 Subject: [PATCH 58/68] target/arm: Remove standard_fp_status MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace with fp_status[FPST_STD]. 
Signed-off-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-id: 20250129013857.135256-9-richard.henderson@linaro.org Signed-off-by: Peter Maydell --- target/arm/cpu.c | 8 ++++---- target/arm/cpu.h | 1 - target/arm/tcg/mve_helper.c | 28 ++++++++++++++-------------- target/arm/tcg/vec_helper.c | 4 ++-- target/arm/vfp_helper.c | 4 ++-- 5 files changed, 22 insertions(+), 23 deletions(-) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index c6d91fd2c8..75b6b93c7c 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -546,13 +546,13 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) env->sau.ctrl = 0; } - set_flush_to_zero(1, &env->vfp.standard_fp_status); - set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status); - set_default_nan_mode(1, &env->vfp.standard_fp_status); + set_flush_to_zero(1, &env->vfp.fp_status[FPST_STD]); + set_flush_inputs_to_zero(1, &env->vfp.fp_status[FPST_STD]); + set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD]); set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD_F16]); arm_set_default_fp_behaviours(&env->vfp.fp_status_a32); arm_set_default_fp_behaviours(&env->vfp.fp_status_a64); - arm_set_default_fp_behaviours(&env->vfp.standard_fp_status); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD]); arm_set_default_fp_behaviours(&env->vfp.fp_status_f16_a32); arm_set_default_fp_behaviours(&env->vfp.fp_status_f16_a64); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD_F16]); diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 085d5383e0..9f57dfc3e9 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -696,7 +696,6 @@ typedef struct CPUArchState { float_status fp_status_f16_a64; float_status ah_fp_status; float_status ah_fp_status_f16; - float_status standard_fp_status; }; }; diff --git a/target/arm/tcg/mve_helper.c b/target/arm/tcg/mve_helper.c index 911a53a23a..3763d71e20 100644 --- a/target/arm/tcg/mve_helper.c +++ b/target/arm/tcg/mve_helper.c @@ -2815,7 +2815,7 @@ DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN) continue; \ } \ fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.standard_fp_status; \ + &env->vfp.fp_status[FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -2889,7 +2889,7 @@ DO_2OP_FP_ALL(vminnma, minnuma) continue; \ } \ fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.standard_fp_status; \ + &env->vfp.fp_status[FPST_STD]; \ if (!(tm & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -2927,7 +2927,7 @@ DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub) continue; \ } \ fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.standard_fp_status; \ + &env->vfp.fp_status[FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -2965,7 +2965,7 @@ DO_VFMA(vfmss, 4, float32, true) continue; \ } \ fpst0 = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.standard_fp_status; \ + &env->vfp.fp_status[FPST_STD]; \ fpst1 = fpst0; \ if (!(mask & 1)) { \ scratch_fpst = *fpst0; \ @@ -3050,7 +3050,7 @@ DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS) continue; \ } \ fpst = (ESIZE == 2) ? 
&env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.standard_fp_status; \ + &env->vfp.fp_status[FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3085,7 +3085,7 @@ DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul) continue; \ } \ fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.standard_fp_status; \ + &env->vfp.fp_status[FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3118,7 +3118,7 @@ DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS) TYPE ra = (TYPE)ra_in; \ float_status *fpst = (ESIZE == 2) ? \ &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.standard_fp_status; \ + &env->vfp.fp_status[FPST_STD]; \ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ if (mask & 1) { \ TYPE v = m[H##ESIZE(e)]; \ @@ -3169,7 +3169,7 @@ DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum) continue; \ } \ fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.standard_fp_status; \ + &env->vfp.fp_status[FPST_STD]; \ if (!(mask & (1 << (e * ESIZE)))) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3203,7 +3203,7 @@ DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum) continue; \ } \ fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.standard_fp_status; \ + &env->vfp.fp_status[FPST_STD]; \ if (!(mask & (1 << (e * ESIZE)))) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3268,7 +3268,7 @@ DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32) continue; \ } \ fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.standard_fp_status; \ + &env->vfp.fp_status[FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3302,7 +3302,7 @@ DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero) float_status scratch_fpst; \ float_status *base_fpst = (ESIZE == 2) ? \ &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.standard_fp_status; \ + &env->vfp.fp_status[FPST_STD]; \ uint32_t prev_rmode = get_float_rounding_mode(base_fpst); \ set_float_rounding_mode(rmode, base_fpst); \ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ @@ -3347,7 +3347,7 @@ static void do_vcvt_sh(CPUARMState *env, void *vd, void *vm, int top) unsigned e; float_status *fpst; float_status scratch_fpst; - float_status *base_fpst = &env->vfp.standard_fp_status; + float_status *base_fpst = &env->vfp.fp_status[FPST_STD]; bool old_fz = get_flush_to_zero(base_fpst); set_flush_to_zero(false, base_fpst); for (e = 0; e < 16 / 4; e++, mask >>= 4) { @@ -3377,7 +3377,7 @@ static void do_vcvt_hs(CPUARMState *env, void *vd, void *vm, int top) unsigned e; float_status *fpst; float_status scratch_fpst; - float_status *base_fpst = &env->vfp.standard_fp_status; + float_status *base_fpst = &env->vfp.fp_status[FPST_STD]; bool old_fiz = get_flush_inputs_to_zero(base_fpst); set_flush_inputs_to_zero(false, base_fpst); for (e = 0; e < 16 / 4; e++, mask >>= 4) { @@ -3428,7 +3428,7 @@ void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm) continue; \ } \ fpst = (ESIZE == 2) ? 
&env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.standard_fp_status; \ + &env->vfp.fp_status[FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index ff3f7d8208..cffd022260 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -2154,7 +2154,7 @@ void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm, bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); uint64_t negx = is_s ? 0x8000800080008000ull : 0; - do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, negx, 0, desc, + do_fmlal(vd, vn, vm, &env->vfp.fp_status[FPST_STD], negx, 0, desc, get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a32)); } @@ -2235,7 +2235,7 @@ void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm, bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); uint64_t negx = is_s ? 0x8000800080008000ull : 0; - do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, negx, 0, desc, + do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status[FPST_STD], negx, 0, desc, get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a32)); } diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 61e769d430..28b9132be5 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -118,7 +118,7 @@ static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) uint32_t a32_flags = 0, a64_flags = 0; a32_flags |= get_float_exception_flags(&env->vfp.fp_status_a32); - a32_flags |= get_float_exception_flags(&env->vfp.standard_fp_status); + a32_flags |= get_float_exception_flags(&env->vfp.fp_status[FPST_STD]); /* FZ16 does not generate an input denormal exception. */ a32_flags |= (get_float_exception_flags(&env->vfp.fp_status_f16_a32) & ~float_flag_input_denormal_flushed); @@ -159,7 +159,7 @@ static void vfp_clear_float_status_exc_flags(CPUARMState *env) set_float_exception_flags(0, &env->vfp.fp_status_a64); set_float_exception_flags(0, &env->vfp.fp_status_f16_a32); set_float_exception_flags(0, &env->vfp.fp_status_f16_a64); - set_float_exception_flags(0, &env->vfp.standard_fp_status); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD_F16]); set_float_exception_flags(0, &env->vfp.ah_fp_status); set_float_exception_flags(0, &env->vfp.ah_fp_status_f16); From fc25b174d5c514d70138bf2e77b1e32649bcead2 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:40:03 +0000 Subject: [PATCH 59/68] target/arm: Remove ah_fp_status_f16 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace with fp_status[FPST_AH_F16]. 
Signed-off-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-id: 20250129013857.135256-10-richard.henderson@linaro.org Signed-off-by: Peter Maydell --- target/arm/cpu.c | 2 +- target/arm/cpu.h | 3 +-- target/arm/vfp_helper.c | 10 +++++----- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 75b6b93c7c..5a21a6c6eb 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -559,7 +559,7 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) arm_set_ah_fp_behaviours(&env->vfp.ah_fp_status); set_flush_to_zero(1, &env->vfp.ah_fp_status); set_flush_inputs_to_zero(1, &env->vfp.ah_fp_status); - arm_set_ah_fp_behaviours(&env->vfp.ah_fp_status_f16); + arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_AH_F16]); #ifndef CONFIG_USER_ONLY if (kvm_enabled()) { diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 9f57dfc3e9..91e132024e 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -237,7 +237,7 @@ typedef struct NVICState NVICState; * behaviour when FPCR.AH == 1: they don't update cumulative * exception flags, they act like FPCR.{FZ,FIZ} = {1,1} and * they ignore FPCR.RMode. But they don't ignore FPCR.FZ16, - * which means we need an ah_fp_status_f16 as well. + * which means we need an FPST_AH_F16 as well. * * To avoid having to transfer exception bits around, we simply * say that the FPSCR cumulative exception flags are the logical @@ -695,7 +695,6 @@ typedef struct CPUArchState { float_status fp_status_f16_a32; float_status fp_status_f16_a64; float_status ah_fp_status; - float_status ah_fp_status_f16; }; }; diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 28b9132be5..ec7c0d6a98 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -129,7 +129,7 @@ static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) a64_flags |= (get_float_exception_flags(&env->vfp.fp_status_f16_a64) & ~(float_flag_input_denormal_flushed | float_flag_input_denormal_used)); /* - * We do not merge in flags from ah_fp_status or ah_fp_status_f16, because + * We do not merge in flags from ah_fp_status or FPST_AH_F16, because * they are used for insns that must not set the cumulative exception bits. 
*/ @@ -162,7 +162,7 @@ static void vfp_clear_float_status_exc_flags(CPUARMState *env) set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD_F16]); set_float_exception_flags(0, &env->vfp.ah_fp_status); - set_float_exception_flags(0, &env->vfp.ah_fp_status_f16); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_AH_F16]); } static void vfp_sync_and_clear_float_status_exc_flags(CPUARMState *env) @@ -208,11 +208,11 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a32); set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a64); set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]); - set_flush_to_zero(ftz_enabled, &env->vfp.ah_fp_status_f16); + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_AH_F16]); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a32); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a64); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]); - set_flush_inputs_to_zero(ftz_enabled, &env->vfp.ah_fp_status_f16); + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_AH_F16]); } if (changed & FPCR_FZ) { bool ftz_enabled = val & FPCR_FZ; @@ -237,7 +237,7 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16_a32); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16_a64); set_default_nan_mode(dnan_enabled, &env->vfp.ah_fp_status); - set_default_nan_mode(dnan_enabled, &env->vfp.ah_fp_status_f16); + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_AH_F16]); } if (changed & FPCR_AH) { bool ah_enabled = val & FPCR_AH; From ee4316f65b49cfdbec64ec99288d677ad68adb00 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:40:04 +0000 Subject: [PATCH 60/68] target/arm: Remove ah_fp_status MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace with fp_status[FPST_AH]. Signed-off-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-id: 20250129013857.135256-11-richard.henderson@linaro.org Signed-off-by: Peter Maydell --- target/arm/cpu.c | 6 +++--- target/arm/cpu.h | 3 +-- target/arm/vfp_helper.c | 6 +++--- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 5a21a6c6eb..6b4839b5ba 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -556,9 +556,9 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) arm_set_default_fp_behaviours(&env->vfp.fp_status_f16_a32); arm_set_default_fp_behaviours(&env->vfp.fp_status_f16_a64); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD_F16]); - arm_set_ah_fp_behaviours(&env->vfp.ah_fp_status); - set_flush_to_zero(1, &env->vfp.ah_fp_status); - set_flush_inputs_to_zero(1, &env->vfp.ah_fp_status); + arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_AH]); + set_flush_to_zero(1, &env->vfp.fp_status[FPST_AH]); + set_flush_inputs_to_zero(1, &env->vfp.fp_status[FPST_AH]); arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_AH_F16]); #ifndef CONFIG_USER_ONLY diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 91e132024e..577c69dec1 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -233,7 +233,7 @@ typedef struct NVICState NVICState; * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than * using a fixed value for it. 
* - * The ah_fp_status is needed because some insns have different + * FPST_AH is needed because some insns have different * behaviour when FPCR.AH == 1: they don't update cumulative * exception flags, they act like FPCR.{FZ,FIZ} = {1,1} and * they ignore FPCR.RMode. But they don't ignore FPCR.FZ16, @@ -694,7 +694,6 @@ typedef struct CPUArchState { float_status fp_status_a64; float_status fp_status_f16_a32; float_status fp_status_f16_a64; - float_status ah_fp_status; }; }; diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index ec7c0d6a98..2bb98c4ee4 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -129,7 +129,7 @@ static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) a64_flags |= (get_float_exception_flags(&env->vfp.fp_status_f16_a64) & ~(float_flag_input_denormal_flushed | float_flag_input_denormal_used)); /* - * We do not merge in flags from ah_fp_status or FPST_AH_F16, because + * We do not merge in flags from FPST_AH or FPST_AH_F16, because * they are used for insns that must not set the cumulative exception bits. */ @@ -161,7 +161,7 @@ static void vfp_clear_float_status_exc_flags(CPUARMState *env) set_float_exception_flags(0, &env->vfp.fp_status_f16_a64); set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD_F16]); - set_float_exception_flags(0, &env->vfp.ah_fp_status); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_AH]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_AH_F16]); } @@ -236,7 +236,7 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_a64); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16_a32); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16_a64); - set_default_nan_mode(dnan_enabled, &env->vfp.ah_fp_status); + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_AH]); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_AH_F16]); } if (changed & FPCR_AH) { From c71296565c0d3763615885306aa6a9904ea32869 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:40:05 +0000 Subject: [PATCH 61/68] target/arm: Remove fp_status_f16_a64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace with fp_status[FPST_A64_F16]. 
Signed-off-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-id: 20250129013857.135256-12-richard.henderson@linaro.org Signed-off-by: Peter Maydell --- target/arm/cpu.c | 2 +- target/arm/cpu.h | 1 - target/arm/tcg/sme_helper.c | 2 +- target/arm/tcg/vec_helper.c | 9 ++++----- target/arm/vfp_helper.c | 16 ++++++++-------- 5 files changed, 14 insertions(+), 16 deletions(-) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 6b4839b5ba..18e7debe29 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -554,7 +554,7 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) arm_set_default_fp_behaviours(&env->vfp.fp_status_a64); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD]); arm_set_default_fp_behaviours(&env->vfp.fp_status_f16_a32); - arm_set_default_fp_behaviours(&env->vfp.fp_status_f16_a64); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD_F16]); arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_AH]); set_flush_to_zero(1, &env->vfp.fp_status[FPST_AH]); diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 577c69dec1..6cf97ba9c9 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -693,7 +693,6 @@ typedef struct CPUArchState { float_status fp_status_a32; float_status fp_status_a64; float_status fp_status_f16_a32; - float_status fp_status_f16_a64; }; }; diff --git a/target/arm/tcg/sme_helper.c b/target/arm/tcg/sme_helper.c index 727c085f37..6e336e10c6 100644 --- a/target/arm/tcg/sme_helper.c +++ b/target/arm/tcg/sme_helper.c @@ -1043,7 +1043,7 @@ void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn, * produces default NaNs. We also need a second copy of fp_status with * round-to-odd -- see above. */ - fpst_f16 = env->vfp.fp_status_f16_a64; + fpst_f16 = env->vfp.fp_status[FPST_A64_F16]; fpst_std = env->vfp.fp_status_a64; set_default_nan_mode(true, &fpst_std); set_default_nan_mode(true, &fpst_f16); diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index cffd022260..48dbd8bdd2 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -2173,7 +2173,7 @@ void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm, } } do_fmlal(vd, vn, vm, &env->vfp.fp_status_a64, negx, negf, desc, - get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a64)); + get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A64_F16])); } void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va, @@ -2183,7 +2183,7 @@ void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va, bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16); float_status *status = &env->vfp.fp_status_a64; - bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a64); + bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A64_F16]); int negx = 0, negf = 0; if (is_s) { @@ -2254,7 +2254,7 @@ void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm, } } do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status_a64, negx, negf, desc, - get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a64)); + get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A64_F16])); } void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va, @@ -2265,7 +2265,7 @@ void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va, intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16); intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 3) * sizeof(float16); float_status *status = 
&env->vfp.fp_status_a64; - bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a64); + bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A64_F16]); int negx = 0, negf = 0; if (is_s) { @@ -2275,7 +2275,6 @@ void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va, negx = 0x8000; } } - for (i = 0; i < oprsz; i += 16) { float16 mm_16 = *(float16 *)(vm + i + idx); float32 mm = float16_to_float32_by_bits(mm_16, fz16); diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 2bb98c4ee4..ea6018864d 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -126,7 +126,7 @@ static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) & ~float_flag_input_denormal_flushed); a64_flags |= get_float_exception_flags(&env->vfp.fp_status_a64); - a64_flags |= (get_float_exception_flags(&env->vfp.fp_status_f16_a64) + a64_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_A64_F16]) & ~(float_flag_input_denormal_flushed | float_flag_input_denormal_used)); /* * We do not merge in flags from FPST_AH or FPST_AH_F16, because @@ -158,7 +158,7 @@ static void vfp_clear_float_status_exc_flags(CPUARMState *env) set_float_exception_flags(0, &env->vfp.fp_status_a32); set_float_exception_flags(0, &env->vfp.fp_status_a64); set_float_exception_flags(0, &env->vfp.fp_status_f16_a32); - set_float_exception_flags(0, &env->vfp.fp_status_f16_a64); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_A64_F16]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD_F16]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_AH]); @@ -201,16 +201,16 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) set_float_rounding_mode(i, &env->vfp.fp_status_a32); set_float_rounding_mode(i, &env->vfp.fp_status_a64); set_float_rounding_mode(i, &env->vfp.fp_status_f16_a32); - set_float_rounding_mode(i, &env->vfp.fp_status_f16_a64); + set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A64_F16]); } if (changed & FPCR_FZ16) { bool ftz_enabled = val & FPCR_FZ16; set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a32); - set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a64); + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64_F16]); set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]); set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_AH_F16]); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a32); - set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a64); + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64_F16]); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_AH_F16]); } @@ -235,7 +235,7 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_a32); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_a64); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16_a32); - set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16_a64); + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A64_F16]); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_AH]); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_AH_F16]); } @@ -245,10 +245,10 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) if (ah_enabled) { /* Change behaviours for A64 FP operations */ 
arm_set_ah_fp_behaviours(&env->vfp.fp_status_a64); - arm_set_ah_fp_behaviours(&env->vfp.fp_status_f16_a64); + arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]); } else { arm_set_default_fp_behaviours(&env->vfp.fp_status_a64); - arm_set_default_fp_behaviours(&env->vfp.fp_status_f16_a64); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]); } } /* From 828def800e864c0ec781ea3eb5488ab186f0e24b Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:40:06 +0000 Subject: [PATCH 62/68] target/arm: Remove fp_status_f16_a32 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace with fp_status[FPST_A32_F16]. Signed-off-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-id: 20250129013857.135256-13-richard.henderson@linaro.org Signed-off-by: Peter Maydell --- target/arm/cpu.c | 2 +- target/arm/cpu.h | 1 - target/arm/tcg/vec_helper.c | 4 ++-- target/arm/vfp_helper.c | 14 +++++++------- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 18e7debe29..88bff67300 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -553,7 +553,7 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) arm_set_default_fp_behaviours(&env->vfp.fp_status_a32); arm_set_default_fp_behaviours(&env->vfp.fp_status_a64); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD]); - arm_set_default_fp_behaviours(&env->vfp.fp_status_f16_a32); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A32_F16]); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD_F16]); arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_AH]); diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 6cf97ba9c9..1593cc1036 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -692,7 +692,6 @@ typedef struct CPUArchState { struct { float_status fp_status_a32; float_status fp_status_a64; - float_status fp_status_f16_a32; }; }; diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index 48dbd8bdd2..78f14503f4 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -2155,7 +2155,7 @@ void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm, uint64_t negx = is_s ? 0x8000800080008000ull : 0; do_fmlal(vd, vn, vm, &env->vfp.fp_status[FPST_STD], negx, 0, desc, - get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a32)); + get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A32_F16])); } void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm, @@ -2236,7 +2236,7 @@ void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm, uint64_t negx = is_s ? 0x8000800080008000ull : 0; do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status[FPST_STD], negx, 0, desc, - get_flush_inputs_to_zero(&env->vfp.fp_status_f16_a32)); + get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A32_F16])); } void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm, diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index ea6018864d..dae46a099a 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -120,7 +120,7 @@ static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) a32_flags |= get_float_exception_flags(&env->vfp.fp_status_a32); a32_flags |= get_float_exception_flags(&env->vfp.fp_status[FPST_STD]); /* FZ16 does not generate an input denormal exception. 
*/ - a32_flags |= (get_float_exception_flags(&env->vfp.fp_status_f16_a32) + a32_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_A32_F16]) & ~float_flag_input_denormal_flushed); a32_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_STD_F16]) & ~float_flag_input_denormal_flushed); @@ -157,7 +157,7 @@ static void vfp_clear_float_status_exc_flags(CPUARMState *env) */ set_float_exception_flags(0, &env->vfp.fp_status_a32); set_float_exception_flags(0, &env->vfp.fp_status_a64); - set_float_exception_flags(0, &env->vfp.fp_status_f16_a32); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_A32_F16]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_A64_F16]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD_F16]); @@ -200,16 +200,16 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) } set_float_rounding_mode(i, &env->vfp.fp_status_a32); set_float_rounding_mode(i, &env->vfp.fp_status_a64); - set_float_rounding_mode(i, &env->vfp.fp_status_f16_a32); + set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A32_F16]); set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A64_F16]); } if (changed & FPCR_FZ16) { bool ftz_enabled = val & FPCR_FZ16; - set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a32); + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32_F16]); set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64_F16]); set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]); set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_AH_F16]); - set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16_a32); + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32_F16]); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64_F16]); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]); set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_AH_F16]); @@ -234,7 +234,7 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) bool dnan_enabled = val & FPCR_DN; set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_a32); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_a64); - set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16_a32); + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A32_F16]); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A64_F16]); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_AH]); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_AH_F16]); @@ -496,7 +496,7 @@ void VFP_HELPER(cmpe, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \ softfloat_to_vfp_compare(env, \ FLOATTYPE ## _compare(a, b, &env->vfp.FPST)); \ } -DO_VFP_cmp(h, float16, dh_ctype_f16, fp_status_f16_a32) +DO_VFP_cmp(h, float16, dh_ctype_f16, fp_status[FPST_A32_F16]) DO_VFP_cmp(s, float32, float32, fp_status_a32) DO_VFP_cmp(d, float64, float64, fp_status_a32) #undef DO_VFP_cmp From 1c349f43b18608d57a72a3d6d5e95b28a1c14470 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:40:07 +0000 Subject: [PATCH 63/68] target/arm: Remove fp_status_a64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace with fp_status[FPST_A64]. 
Signed-off-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-id: 20250129013857.135256-14-richard.henderson@linaro.org Signed-off-by: Peter Maydell --- target/arm/cpu.c | 2 +- target/arm/cpu.h | 1 - target/arm/tcg/sme_helper.c | 2 +- target/arm/tcg/vec_helper.c | 10 +++++----- target/arm/vfp_helper.c | 16 ++++++++-------- 5 files changed, 15 insertions(+), 16 deletions(-) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 88bff67300..f04f28b681 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -551,7 +551,7 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD]); set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD_F16]); arm_set_default_fp_behaviours(&env->vfp.fp_status_a32); - arm_set_default_fp_behaviours(&env->vfp.fp_status_a64); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64]); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD]); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A32_F16]); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]); diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 1593cc1036..0d8b99bd8a 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -691,7 +691,6 @@ typedef struct CPUArchState { float_status fp_status[FPST_COUNT]; struct { float_status fp_status_a32; - float_status fp_status_a64; }; }; diff --git a/target/arm/tcg/sme_helper.c b/target/arm/tcg/sme_helper.c index 6e336e10c6..dcc48e43db 100644 --- a/target/arm/tcg/sme_helper.c +++ b/target/arm/tcg/sme_helper.c @@ -1044,7 +1044,7 @@ void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn, * round-to-odd -- see above. */ fpst_f16 = env->vfp.fp_status[FPST_A64_F16]; - fpst_std = env->vfp.fp_status_a64; + fpst_std = env->vfp.fp_status[FPST_A64]; set_default_nan_mode(true, &fpst_std); set_default_nan_mode(true, &fpst_f16); fpst_odd = fpst_std; diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index 78f14503f4..215affc271 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -2172,7 +2172,7 @@ void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm, negx = 0x8000800080008000ull; } } - do_fmlal(vd, vn, vm, &env->vfp.fp_status_a64, negx, negf, desc, + do_fmlal(vd, vn, vm, &env->vfp.fp_status[FPST_A64], negx, negf, desc, get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A64_F16])); } @@ -2182,7 +2182,7 @@ void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va, intptr_t i, oprsz = simd_oprsz(desc); bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16); - float_status *status = &env->vfp.fp_status_a64; + float_status *status = &env->vfp.fp_status[FPST_A64]; bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A64_F16]); int negx = 0, negf = 0; @@ -2253,7 +2253,7 @@ void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm, negx = 0x8000800080008000ull; } } - do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status_a64, negx, negf, desc, + do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status[FPST_A64], negx, negf, desc, get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A64_F16])); } @@ -2264,7 +2264,7 @@ void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va, bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16); intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 3) * sizeof(float16); - float_status *status = &env->vfp.fp_status_a64; + float_status *status = 
&env->vfp.fp_status[FPST_A64]; bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A64_F16]); int negx = 0, negf = 0; @@ -2951,7 +2951,7 @@ bool is_ebf(CPUARMState *env, float_status *statusp, float_status *oddstatusp) */ bool ebf = is_a64(env) && env->vfp.fpcr & FPCR_EBF; - *statusp = is_a64(env) ? env->vfp.fp_status_a64 : env->vfp.fp_status_a32; + *statusp = is_a64(env) ? env->vfp.fp_status[FPST_A64] : env->vfp.fp_status_a32; set_default_nan_mode(true, statusp); if (ebf) { diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index dae46a099a..e6b4f63401 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -125,7 +125,7 @@ static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) a32_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_STD_F16]) & ~float_flag_input_denormal_flushed); - a64_flags |= get_float_exception_flags(&env->vfp.fp_status_a64); + a64_flags |= get_float_exception_flags(&env->vfp.fp_status[FPST_A64]); a64_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_A64_F16]) & ~(float_flag_input_denormal_flushed | float_flag_input_denormal_used)); /* @@ -156,7 +156,7 @@ static void vfp_clear_float_status_exc_flags(CPUARMState *env) * be the architecturally up-to-date exception flag information first. */ set_float_exception_flags(0, &env->vfp.fp_status_a32); - set_float_exception_flags(0, &env->vfp.fp_status_a64); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_A64]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_A32_F16]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_A64_F16]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD]); @@ -199,7 +199,7 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) break; } set_float_rounding_mode(i, &env->vfp.fp_status_a32); - set_float_rounding_mode(i, &env->vfp.fp_status_a64); + set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A64]); set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A32_F16]); set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A64_F16]); } @@ -217,7 +217,7 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) if (changed & FPCR_FZ) { bool ftz_enabled = val & FPCR_FZ; set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_a32); - set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_a64); + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64]); /* FIZ is A64 only so FZ always makes A32 code flush inputs to zero */ set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_a32); } @@ -228,12 +228,12 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) */ bool fitz_enabled = (val & FPCR_FIZ) || (val & (FPCR_FZ | FPCR_AH)) == FPCR_FZ; - set_flush_inputs_to_zero(fitz_enabled, &env->vfp.fp_status_a64); + set_flush_inputs_to_zero(fitz_enabled, &env->vfp.fp_status[FPST_A64]); } if (changed & FPCR_DN) { bool dnan_enabled = val & FPCR_DN; set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_a32); - set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_a64); + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A64]); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A32_F16]); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A64_F16]); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_AH]); @@ -244,10 +244,10 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) if (ah_enabled) { /* Change behaviours for A64 FP operations */ - arm_set_ah_fp_behaviours(&env->vfp.fp_status_a64); + 
arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_A64]); arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]); } else { - arm_set_default_fp_behaviours(&env->vfp.fp_status_a64); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64]); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]); } } From 54afbf663234756ebc89a1cfee0a8d4c578b26f7 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:40:08 +0000 Subject: [PATCH 64/68] target/arm: Remove fp_status_a32 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace with fp_status[FPST_A32]. As this was the last of the old structures, we can remove the anonymous union and struct. Signed-off-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-id: 20250129013857.135256-15-richard.henderson@linaro.org [PMM: tweak to account for change to is_ebf()] Signed-off-by: Peter Maydell --- target/arm/cpu.c | 2 +- target/arm/cpu.h | 7 +------ target/arm/tcg/vec_helper.c | 2 +- target/arm/vfp_helper.c | 18 +++++++++--------- 4 files changed, 12 insertions(+), 17 deletions(-) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index f04f28b681..656070afb5 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -550,7 +550,7 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) set_flush_inputs_to_zero(1, &env->vfp.fp_status[FPST_STD]); set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD]); set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD_F16]); - arm_set_default_fp_behaviours(&env->vfp.fp_status_a32); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A32]); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64]); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD]); arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A32_F16]); diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 0d8b99bd8a..6f6cf5c888 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -687,12 +687,7 @@ typedef struct CPUArchState { uint32_t scratch[8]; /* There are a number of distinct float control structures. */ - union { - float_status fp_status[FPST_COUNT]; - struct { - float_status fp_status_a32; - }; - }; + float_status fp_status[FPST_COUNT]; uint64_t zcr_el[4]; /* ZCR_EL[1-3] */ uint64_t smcr_el[4]; /* SMCR_EL[1-3] */ diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index 215affc271..2da44ae710 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -2951,7 +2951,7 @@ bool is_ebf(CPUARMState *env, float_status *statusp, float_status *oddstatusp) */ bool ebf = is_a64(env) && env->vfp.fpcr & FPCR_EBF; - *statusp = is_a64(env) ? env->vfp.fp_status[FPST_A64] : env->vfp.fp_status_a32; + *statusp = env->vfp.fp_status[is_a64(env) ? FPST_A64 : FPST_A32]; set_default_nan_mode(true, statusp); if (ebf) { diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index e6b4f63401..4d1b697a66 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -117,7 +117,7 @@ static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) { uint32_t a32_flags = 0, a64_flags = 0; - a32_flags |= get_float_exception_flags(&env->vfp.fp_status_a32); + a32_flags |= get_float_exception_flags(&env->vfp.fp_status[FPST_A32]); a32_flags |= get_float_exception_flags(&env->vfp.fp_status[FPST_STD]); /* FZ16 does not generate an input denormal exception. */ a32_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_A32_F16]) @@ -155,7 +155,7 @@ static void vfp_clear_float_status_exc_flags(CPUARMState *env) * values. 
The caller should have arranged for env->vfp.fpsr to * be the architecturally up-to-date exception flag information first. */ - set_float_exception_flags(0, &env->vfp.fp_status_a32); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_A32]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_A64]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_A32_F16]); set_float_exception_flags(0, &env->vfp.fp_status[FPST_A64_F16]); @@ -198,7 +198,7 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) i = float_round_to_zero; break; } - set_float_rounding_mode(i, &env->vfp.fp_status_a32); + set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A32]); set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A64]); set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A32_F16]); set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A64_F16]); @@ -216,10 +216,10 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) } if (changed & FPCR_FZ) { bool ftz_enabled = val & FPCR_FZ; - set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_a32); + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32]); set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64]); /* FIZ is A64 only so FZ always makes A32 code flush inputs to zero */ - set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_a32); + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32]); } if (changed & (FPCR_FZ | FPCR_AH | FPCR_FIZ)) { /* @@ -232,7 +232,7 @@ static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) } if (changed & FPCR_DN) { bool dnan_enabled = val & FPCR_DN; - set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_a32); + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A32]); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A64]); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A32_F16]); set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A64_F16]); @@ -497,8 +497,8 @@ void VFP_HELPER(cmpe, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \ FLOATTYPE ## _compare(a, b, &env->vfp.FPST)); \ } DO_VFP_cmp(h, float16, dh_ctype_f16, fp_status[FPST_A32_F16]) -DO_VFP_cmp(s, float32, float32, fp_status_a32) -DO_VFP_cmp(d, float64, float64, fp_status_a32) +DO_VFP_cmp(s, float32, float32, fp_status[FPST_A32]) +DO_VFP_cmp(d, float64, float64, fp_status[FPST_A32]) #undef DO_VFP_cmp /* Integer to float and float to integer conversions */ @@ -1385,7 +1385,7 @@ uint64_t HELPER(fjcvtzs)(float64 value, float_status *status) uint32_t HELPER(vjcvt)(float64 value, CPUARMState *env) { - uint64_t pair = HELPER(fjcvtzs)(value, &env->vfp.fp_status_a32); + uint64_t pair = HELPER(fjcvtzs)(value, &env->vfp.fp_status[FPST_A32]); uint32_t result = pair; uint32_t z = (pair >> 32) == 0; From f81c46987aad633ff8c9a7d0fece6c3839ccc463 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:40:09 +0000 Subject: [PATCH 65/68] target/arm: Simplify fp_status indexing in mve_helper.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Select on index instead of pointer. No functional change. 
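A minimal before/after sketch of the pattern being changed (both forms
appear, with macro line continuations, in the hunks below; ESIZE is the
element-size macro argument these helpers already use):

    /* before: choose between two pointer expressions */
    fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16]
                        : &env->vfp.fp_status[FPST_STD];

    /* after: choose the ARMFPStatusFlavour index, take the address once */
    fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD];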
Signed-off-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-id: 20250129013857.135256-16-richard.henderson@linaro.org Signed-off-by: Peter Maydell --- target/arm/tcg/mve_helper.c | 40 +++++++++++++------------------------ 1 file changed, 14 insertions(+), 26 deletions(-) diff --git a/target/arm/tcg/mve_helper.c b/target/arm/tcg/mve_helper.c index 3763d71e20..274003e2e5 100644 --- a/target/arm/tcg/mve_helper.c +++ b/target/arm/tcg/mve_helper.c @@ -2814,8 +2814,7 @@ DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.fp_status[FPST_STD]; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -2888,8 +2887,7 @@ DO_2OP_FP_ALL(vminnma, minnuma) r[e] = 0; \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.fp_status[FPST_STD]; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(tm & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -2926,8 +2924,7 @@ DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.fp_status[FPST_STD]; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -2964,8 +2961,7 @@ DO_VFMA(vfmss, 4, float32, true) if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) { \ continue; \ } \ - fpst0 = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.fp_status[FPST_STD]; \ + fpst0 = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ fpst1 = fpst0; \ if (!(mask & 1)) { \ scratch_fpst = *fpst0; \ @@ -3049,8 +3045,7 @@ DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.fp_status[FPST_STD]; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3084,8 +3079,7 @@ DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.fp_status[FPST_STD]; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3116,9 +3110,8 @@ DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS) unsigned e; \ TYPE *m = vm; \ TYPE ra = (TYPE)ra_in; \ - float_status *fpst = (ESIZE == 2) ? \ - &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.fp_status[FPST_STD]; \ + float_status *fpst = \ + &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ if (mask & 1) { \ TYPE v = m[H##ESIZE(e)]; \ @@ -3168,8 +3161,7 @@ DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum) if ((mask & emask) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.fp_status[FPST_STD]; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? 
FPST_STD_F16 : FPST_STD]; \ if (!(mask & (1 << (e * ESIZE)))) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3202,8 +3194,7 @@ DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum) if ((mask & emask) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.fp_status[FPST_STD]; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & (1 << (e * ESIZE)))) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3267,8 +3258,7 @@ DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.fp_status[FPST_STD]; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3300,9 +3290,8 @@ DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero) unsigned e; \ float_status *fpst; \ float_status scratch_fpst; \ - float_status *base_fpst = (ESIZE == 2) ? \ - &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.fp_status[FPST_STD]; \ + float_status *base_fpst = \ + &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ uint32_t prev_rmode = get_float_rounding_mode(base_fpst); \ set_float_rounding_mode(rmode, base_fpst); \ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ @@ -3427,8 +3416,7 @@ void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.fp_status[FPST_STD_F16] : \ - &env->vfp.fp_status[FPST_STD]; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ From d75f68047624e5337963690a9a96906fcca3aefe Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:40:10 +0000 Subject: [PATCH 66/68] target/arm: Simplify DO_VFP_cmp in vfp_helper.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass ARMFPStatusFlavour index instead of fp_status[FOO]. 
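That is, the macro argument becomes the enumerator and the array indexing
moves inside the macro body; a sketch using the float32 instance from the
hunk below:

    /* before: the caller passes the full field expression */
    DO_VFP_cmp(s, float32, float32, fp_status[FPST_A32])
        ... FLOATTYPE ## _compare(a, b, &env->vfp.FPST) ...

    /* after: the caller passes only the ARMFPStatusFlavour index */
    DO_VFP_cmp(s, float32, float32, FPST_A32)
        ... FLOATTYPE ## _compare(a, b, &env->vfp.fp_status[FPST]) ...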
Signed-off-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-id: 20250129013857.135256-17-richard.henderson@linaro.org Signed-off-by: Peter Maydell --- target/arm/vfp_helper.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 4d1b697a66..5d424477a2 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -489,16 +489,16 @@ static void softfloat_to_vfp_compare(CPUARMState *env, FloatRelation cmp) void VFP_HELPER(cmp, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \ { \ softfloat_to_vfp_compare(env, \ - FLOATTYPE ## _compare_quiet(a, b, &env->vfp.FPST)); \ + FLOATTYPE ## _compare_quiet(a, b, &env->vfp.fp_status[FPST])); \ } \ void VFP_HELPER(cmpe, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \ { \ softfloat_to_vfp_compare(env, \ - FLOATTYPE ## _compare(a, b, &env->vfp.FPST)); \ + FLOATTYPE ## _compare(a, b, &env->vfp.fp_status[FPST])); \ } -DO_VFP_cmp(h, float16, dh_ctype_f16, fp_status[FPST_A32_F16]) -DO_VFP_cmp(s, float32, float32, fp_status[FPST_A32]) -DO_VFP_cmp(d, float64, float64, fp_status[FPST_A32]) +DO_VFP_cmp(h, float16, dh_ctype_f16, FPST_A32_F16) +DO_VFP_cmp(s, float32, float32, FPST_A32) +DO_VFP_cmp(d, float64, float64, FPST_A32) #undef DO_VFP_cmp /* Integer to float and float to integer conversions */ From bbeed9f06a4e66d444bcde9ef4bdd21e866b7d77 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:40:11 +0000 Subject: [PATCH 67/68] target/arm: Read fz16 from env->vfp.fpcr Read the bit from the source, rather than from the proxy via get_flush_inputs_to_zero. This makes it clear that it does not matter which of the float_status structures is used. Signed-off-by: Richard Henderson Message-id: 20250129013857.135256-34-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/tcg/vec_helper.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index 2da44ae710..cc3586f44a 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -2155,7 +2155,7 @@ void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm, uint64_t negx = is_s ? 0x8000800080008000ull : 0; do_fmlal(vd, vn, vm, &env->vfp.fp_status[FPST_STD], negx, 0, desc, - get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A32_F16])); + env->vfp.fpcr & FPCR_FZ16); } void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm, @@ -2173,7 +2173,7 @@ void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm, } } do_fmlal(vd, vn, vm, &env->vfp.fp_status[FPST_A64], negx, negf, desc, - get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A64_F16])); + env->vfp.fpcr & FPCR_FZ16); } void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va, @@ -2183,7 +2183,7 @@ void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va, bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16); float_status *status = &env->vfp.fp_status[FPST_A64]; - bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A64_F16]); + bool fz16 = env->vfp.fpcr & FPCR_FZ16; int negx = 0, negf = 0; if (is_s) { @@ -2236,7 +2236,7 @@ void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm, uint64_t negx = is_s ? 
0x8000800080008000ull : 0; do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status[FPST_STD], negx, 0, desc, - get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A32_F16])); + env->vfp.fpcr & FPCR_FZ16); } void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm, @@ -2254,7 +2254,7 @@ void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm, } } do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status[FPST_A64], negx, negf, desc, - get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A64_F16])); + env->vfp.fpcr & FPCR_FZ16); } void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va, @@ -2265,7 +2265,7 @@ void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va, intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16); intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 3) * sizeof(float16); float_status *status = &env->vfp.fp_status[FPST_A64]; - bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status[FPST_A64_F16]); + bool fz16 = env->vfp.fpcr & FPCR_FZ16; int negx = 0, negf = 0; if (is_s) { From ca4c34e07d1388df8e396520b5e7d60883cd3690 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 1 Feb 2025 16:40:12 +0000 Subject: [PATCH 68/68] target/arm: Sink fp_status and fpcr access into do_fmlal* Sink common code from the callers into do_fmlal and do_fmlal_idx. Reorder the arguments to minimize the re-sorting from the caller's arguments. Signed-off-by: Richard Henderson Message-id: 20250129013857.135256-35-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/tcg/vec_helper.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index cc3586f44a..986eaf8ffa 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -2125,9 +2125,13 @@ static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2) * as there is not yet SVE versions that might use blocking. */ -static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst, - uint64_t negx, int negf, uint32_t desc, bool fz16) +static void do_fmlal(float32 *d, void *vn, void *vm, + CPUARMState *env, uint32_t desc, + ARMFPStatusFlavour fpst_idx, + uint64_t negx, int negf) { + float_status *fpst = &env->vfp.fp_status[fpst_idx]; + bool fz16 = env->vfp.fpcr & FPCR_FZ16; intptr_t i, oprsz = simd_oprsz(desc); int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1); int is_q = oprsz == 16; @@ -2154,8 +2158,7 @@ void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm, bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); uint64_t negx = is_s ? 
0x8000800080008000ull : 0; - do_fmlal(vd, vn, vm, &env->vfp.fp_status[FPST_STD], negx, 0, desc, - env->vfp.fpcr & FPCR_FZ16); + do_fmlal(vd, vn, vm, env, desc, FPST_STD, negx, 0); } void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm, @@ -2172,8 +2175,7 @@ void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm, negx = 0x8000800080008000ull; } } - do_fmlal(vd, vn, vm, &env->vfp.fp_status[FPST_A64], negx, negf, desc, - env->vfp.fpcr & FPCR_FZ16); + do_fmlal(vd, vn, vm, env, desc, FPST_A64, negx, negf); } void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va, @@ -2205,9 +2207,13 @@ void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va, } } -static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst, - uint64_t negx, int negf, uint32_t desc, bool fz16) +static void do_fmlal_idx(float32 *d, void *vn, void *vm, + CPUARMState *env, uint32_t desc, + ARMFPStatusFlavour fpst_idx, + uint64_t negx, int negf) { + float_status *fpst = &env->vfp.fp_status[fpst_idx]; + bool fz16 = env->vfp.fpcr & FPCR_FZ16; intptr_t i, oprsz = simd_oprsz(desc); int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1); int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3); @@ -2235,8 +2241,7 @@ void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm, bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); uint64_t negx = is_s ? 0x8000800080008000ull : 0; - do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status[FPST_STD], negx, 0, desc, - env->vfp.fpcr & FPCR_FZ16); + do_fmlal_idx(vd, vn, vm, env, desc, FPST_STD, negx, 0); } void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm, @@ -2253,8 +2258,7 @@ void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm, negx = 0x8000800080008000ull; } } - do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status[FPST_A64], negx, negf, desc, - env->vfp.fpcr & FPCR_FZ16); + do_fmlal_idx(vd, vn, vm, env, desc, FPST_A64, negx, negf); } void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va,