target/arm: Simplify do_reduction_op

Use simple shift and add instead of ctpop, ctz, shift and mask.
Unlike SVE, there is no predicate to disable elements.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20240912024114.1097832-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Richard Henderson 2024-09-11 19:40:54 -07:00 committed by Peter Maydell
parent a29e2c7d33
commit d944e04961

View File

@@ -9027,34 +9027,23 @@ static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
  * important for correct NaN propagation that we do these
  * operations in exactly the order specified by the pseudocode.
  *
- * This is a recursive function, TCG temps should be freed by the
- * calling function once it is done with the values.
+ * This is a recursive function.
  */
 static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
-                                int esize, int size, int vmap, TCGv_ptr fpst)
+                                MemOp esz, int ebase, int ecount, TCGv_ptr fpst)
 {
-    if (esize == size) {
-        int element;
-        MemOp msize = esize == 16 ? MO_16 : MO_32;
-        TCGv_i32 tcg_elem;
-
-        /* We should have one register left here */
-        assert(ctpop8(vmap) == 1);
-        element = ctz32(vmap);
-        assert(element < 8);
-
-        tcg_elem = tcg_temp_new_i32();
-        read_vec_element_i32(s, tcg_elem, rn, element, msize);
+    if (ecount == 1) {
+        TCGv_i32 tcg_elem = tcg_temp_new_i32();
+        read_vec_element_i32(s, tcg_elem, rn, ebase, esz);
         return tcg_elem;
     } else {
-        int bits = size / 2;
-        int shift = ctpop8(vmap) / 2;
-        int vmap_lo = (vmap >> shift) & vmap;
-        int vmap_hi = (vmap & ~vmap_lo);
+        int half = ecount >> 1;
         TCGv_i32 tcg_hi, tcg_lo, tcg_res;
 
-        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
-        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
+        tcg_hi = do_reduction_op(s, fpopcode, rn, esz,
+                                 ebase + half, half, fpst);
+        tcg_lo = do_reduction_op(s, fpopcode, rn, esz,
+                                 ebase, half, fpst);
         tcg_res = tcg_temp_new_i32();
 
         switch (fpopcode) {
@@ -9105,7 +9094,6 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
     bool is_u = extract32(insn, 29, 1);
     bool is_fp = false;
     bool is_min = false;
-    int esize;
     int elements;
     int i;
     TCGv_i64 tcg_res, tcg_elt;
@@ -9152,8 +9140,7 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
         return;
     }
 
-    esize = 8 << size;
-    elements = (is_q ? 128 : 64) / esize;
+    elements = (is_q ? 16 : 8) >> size;
 
     tcg_res = tcg_temp_new_i64();
     tcg_elt = tcg_temp_new_i64();
@@ -9208,9 +9195,8 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
          */
         TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
         int fpopcode = opcode | is_min << 4 | is_u << 5;
-        int vmap = (1 << elements) - 1;
-        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
-                                             (is_q ? 128 : 64), vmap, fpst);
+        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, size,
+                                             0, elements, fpst);
         tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
     }