From a7cfd751fb269de4a93bf1658cb13911c7ac77cc Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 16 Oct 2024 17:31:05 +0000 Subject: [PATCH 01/24] tcg: Reset data_gen_ptr correctly This pointer needs to be reset after overflow just like code_buf and code_ptr. Cc: qemu-stable@nongnu.org Fixes: 57a269469db ("tcg: Infrastructure for managing constant pools") Acked-by: Alistair Francis Reviewed-by: Pierrick Bouvier Reviewed-by: LIU Zhiwei Signed-off-by: Richard Henderson --- tcg/tcg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tcg/tcg.c b/tcg/tcg.c index 5decd83cf4..0babae1b88 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -1399,7 +1399,6 @@ TranslationBlock *tcg_tb_alloc(TCGContext *s) goto retry; } qatomic_set(&s->code_gen_ptr, next); - s->data_gen_ptr = NULL; return tb; } @@ -6172,6 +6171,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start) */ s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr); s->code_ptr = s->code_buf; + s->data_gen_ptr = NULL; #ifdef TCG_TARGET_NEED_LDST_LABELS QSIMPLEQ_INIT(&s->ldst_labels); From be46e0bf142d75c1978801d5d2c2394e7dfa304d Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 16 Oct 2024 16:57:15 +0000 Subject: [PATCH 02/24] disas/riscv: Fix vsetivli disassembly The first immediate field is unsigned, whereas operand_vimm extracts a signed value. There is no need to mask the result with 'u'; just print the immediate with 'i'. Fixes: 07f4964d178 ("disas/riscv.c: rvv: Add disas support for vector instructions") Reviewed-by: Alistair Francis Reviewed-by: Pierrick Bouvier Reviewed-by: LIU Zhiwei Signed-off-by: Richard Henderson --- disas/riscv.c | 2 +- disas/riscv.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/disas/riscv.c b/disas/riscv.c index 5965574d87..fc0331b90b 100644 --- a/disas/riscv.c +++ b/disas/riscv.c @@ -4808,7 +4808,7 @@ static void decode_inst_operands(rv_decode *dec, rv_isa isa) break; case rv_codec_vsetivli: dec->rd = operand_rd(inst); - dec->imm = operand_vimm(inst); + dec->imm = extract32(inst, 15, 5); dec->vzimm = operand_vzimm10(inst); break; case rv_codec_zcb_lb: diff --git a/disas/riscv.h b/disas/riscv.h index 16a08e4895..0d1f89ce8a 100644 --- a/disas/riscv.h +++ b/disas/riscv.h @@ -290,7 +290,7 @@ enum { #define rv_fmt_fd_vs2 "O\t3,F" #define rv_fmt_vd_vm "O\tDm" #define rv_fmt_vsetvli "O\t0,1,v" -#define rv_fmt_vsetivli "O\t0,u,v" +#define rv_fmt_vsetivli "O\t0,i,v" #define rv_fmt_rs1_rs2_zce_ldst "O\t2,i(1)" #define rv_fmt_push_rlist "O\tx,-i" #define rv_fmt_pop_rlist "O\tx,i" From f7230e09b1ccfb7055b79dfee981e18d444a118a Mon Sep 17 00:00:00 2001 From: TANG Tiancheng Date: Mon, 7 Oct 2024 10:56:49 +0800 Subject: [PATCH 03/24] util: Add RISC-V vector extension probe in cpuinfo Add support for probing RISC-V vector extension availability in the backend. This information will be used when deciding whether to use vector instructions in code generation. Cache lg2(vlenb) for the backend. The storing of lg2(vlenb) means we can convert all of the division into subtraction. While the compiler doesn't support RISCV_HWPROBE_EXT_ZVE64X, we use RISCV_HWPROBE_IMA_V instead. RISCV_HWPROBE_IMA_V is more strictly constrainted than RISCV_HWPROBE_EXT_ZVE64X. At least in current QEMU implemenation, the V vector extension depends on the zve64d extension. 
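For illustration, keeping lg2(vlenb) rather than vlenb itself means the later LMUL calculation reduces to a subtraction. A minimal sketch, assuming lg2-encoded inputs (this helper is only an example, not code added by this patch):

    /* lg2 of the LMUL needed for a type of 2^lg2_type_bytes bytes.
     * Equivalent to lg2(type_bytes / vlenb), but with no division. */
    static inline int lg2_lmul_for_type(int lg2_type_bytes, int lg2_vlenb)
    {
        return lg2_type_bytes - lg2_vlenb;
    }
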
Signed-off-by: TANG Tiancheng Reviewed-by: Liu Zhiwei Tested-by: Daniel Henrique Barboza Message-ID: <20241007025700.47259-2-zhiwei_liu@linux.alibaba.com> Signed-off-by: Richard Henderson --- host/include/riscv/host/cpuinfo.h | 2 ++ util/cpuinfo-riscv.c | 34 ++++++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/host/include/riscv/host/cpuinfo.h b/host/include/riscv/host/cpuinfo.h index 2b00660e36..cdc784e7b6 100644 --- a/host/include/riscv/host/cpuinfo.h +++ b/host/include/riscv/host/cpuinfo.h @@ -10,9 +10,11 @@ #define CPUINFO_ZBA (1u << 1) #define CPUINFO_ZBB (1u << 2) #define CPUINFO_ZICOND (1u << 3) +#define CPUINFO_ZVE64X (1u << 4) /* Initialized with a constructor. */ extern unsigned cpuinfo; +extern unsigned riscv_lg2_vlenb; /* * We cannot rely on constructor ordering, so other constructors must diff --git a/util/cpuinfo-riscv.c b/util/cpuinfo-riscv.c index 8cacc67645..971c924012 100644 --- a/util/cpuinfo-riscv.c +++ b/util/cpuinfo-riscv.c @@ -4,6 +4,7 @@ */ #include "qemu/osdep.h" +#include "qemu/host-utils.h" #include "host/cpuinfo.h" #ifdef CONFIG_ASM_HWPROBE_H @@ -13,6 +14,7 @@ #endif unsigned cpuinfo; +unsigned riscv_lg2_vlenb; static volatile sig_atomic_t got_sigill; static void sigill_handler(int signo, siginfo_t *si, void *data) @@ -34,7 +36,7 @@ static void sigill_handler(int signo, siginfo_t *si, void *data) /* Called both as constructor and (possibly) via other constructors. */ unsigned __attribute__((constructor)) cpuinfo_init(void) { - unsigned left = CPUINFO_ZBA | CPUINFO_ZBB | CPUINFO_ZICOND; + unsigned left = CPUINFO_ZBA | CPUINFO_ZBB | CPUINFO_ZICOND | CPUINFO_ZVE64X; unsigned info = cpuinfo; if (info) { @@ -50,6 +52,10 @@ unsigned __attribute__((constructor)) cpuinfo_init(void) #endif #if defined(__riscv_arch_test) && defined(__riscv_zicond) info |= CPUINFO_ZICOND; +#endif +#if defined(__riscv_arch_test) && \ + (defined(__riscv_vector) || defined(__riscv_zve64x)) + info |= CPUINFO_ZVE64X; #endif left &= ~info; @@ -69,11 +75,22 @@ unsigned __attribute__((constructor)) cpuinfo_init(void) #ifdef RISCV_HWPROBE_EXT_ZICOND info |= pair.value & RISCV_HWPROBE_EXT_ZICOND ? CPUINFO_ZICOND : 0; left &= ~CPUINFO_ZICOND; +#endif + /* For rv64, V is Zve64d, a superset of Zve64x. */ + info |= pair.value & RISCV_HWPROBE_IMA_V ? CPUINFO_ZVE64X : 0; +#ifdef RISCV_HWPROBE_EXT_ZVE64X + info |= pair.value & RISCV_HWPROBE_EXT_ZVE64X ? CPUINFO_ZVE64X : 0; #endif } } #endif /* CONFIG_ASM_HWPROBE_H */ + /* + * We only detect support for vectors with hwprobe. All kernels with + * support for vectors in userspace also support the hwprobe syscall. + */ + left &= ~CPUINFO_ZVE64X; + if (left) { struct sigaction sa_old, sa_new; @@ -113,6 +130,21 @@ unsigned __attribute__((constructor)) cpuinfo_init(void) assert(left == 0); } + if (info & CPUINFO_ZVE64X) { + /* + * We are guaranteed by RVV-1.0 that VLEN is a power of 2. + * We are guaranteed by Zve64x that VLEN >= 64, and that + * EEW of {8,16,32,64} are supported. + */ + unsigned long vlenb; + /* csrr %0, vlenb */ + asm volatile(".insn i 0x73, 0x2, %0, zero, -990" : "=r"(vlenb)); + assert(vlenb >= 8); + assert(is_power_of_2(vlenb)); + /* Cache VLEN in a convenient form. 
*/ + riscv_lg2_vlenb = ctz32(vlenb); + } + info |= CPUINFO_ALWAYS; cpuinfo = info; return info; From f63e7089b49e0aa031e0247fad7b22b8d650a3fb Mon Sep 17 00:00:00 2001 From: Huang Shiyuan Date: Mon, 7 Oct 2024 10:56:50 +0800 Subject: [PATCH 04/24] tcg/riscv: Add basic support for vector The RISC-V vector instruction set utilizes the LMUL field to group multiple registers, enabling variable-length vector registers. This implementation uses only the first register number of each group while reserving the other register numbers within the group. In TCG, each VEC_IR can have 3 types (TCG_TYPE_V64/128/256), and the host runtime needs to adjust LMUL based on the type to use different register groups. This presents challenges for TCG's register allocation. Currently, we avoid modifying the register allocation part of TCG and only expose the minimum number of vector registers. For example, when the host vlen is 64 bits and type is TCG_TYPE_V256, with LMUL equal to 4, we use 4 vector registers as one register group. We can use a maximum of 8 register groups, but the V0 register number is reserved as a mask register, so we can effectively use at most 7 register groups. Moreover, when type is smaller than TCG_TYPE_V256, only 7 registers are forced to be used. This is because TCG cannot yet dynamically constrain registers with type; likewise, when the host vlen is 128 bits and TCG_TYPE_V256, we can use at most 15 registers. There is not much pressure on vector register allocation in TCG now, so using 7 registers is feasible and will not have a major impact on code generation. This patch: 1. Reserves vector register 0 for use as a mask register. 2. When using register groups, reserves the additional registers within each group. Signed-off-by: Huang Shiyuan Co-authored-by: TANG Tiancheng Signed-off-by: TANG Tiancheng Reviewed-by: Liu Zhiwei Reviewed-by: Richard Henderson Message-ID: <20241007025700.47259-3-zhiwei_liu@linux.alibaba.com> Signed-off-by: Richard Henderson --- include/tcg/tcg.h | 6 + tcg/riscv/tcg-target-con-set.h | 2 + tcg/riscv/tcg-target-con-str.h | 1 + tcg/riscv/tcg-target.c.inc | 414 ++++++++++++++++++++++++++++++--- tcg/riscv/tcg-target.h | 78 ++++--- tcg/riscv/tcg-target.opc.h | 12 + 6 files changed, 442 insertions(+), 71 deletions(-) create mode 100644 tcg/riscv/tcg-target.opc.h diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index 824fb3560d..a77ed12b9d 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -521,6 +521,12 @@ struct TCGContext { struct qemu_plugin_insn *plugin_insn; #endif + /* For host-specific values. 
*/ +#ifdef __riscv + MemOp riscv_cur_vsew; + TCGType riscv_cur_type; +#endif + GHashTable *const_table[TCG_TYPE_COUNT]; TCGTempSet free_temps[TCG_TYPE_COUNT]; TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */ diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h index aac5ceee2b..d73a62b0f2 100644 --- a/tcg/riscv/tcg-target-con-set.h +++ b/tcg/riscv/tcg-target-con-set.h @@ -21,3 +21,5 @@ C_O1_I2(r, rZ, rZ) C_N1_I2(r, r, rM) C_O1_I4(r, r, rI, rM, rM) C_O2_I4(r, r, rZ, rZ, rM, rM) +C_O0_I2(v, r) +C_O1_I1(v, r) diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h index d5c419dff1..b2b3211bcb 100644 --- a/tcg/riscv/tcg-target-con-str.h +++ b/tcg/riscv/tcg-target-con-str.h @@ -9,6 +9,7 @@ * REGS(letter, register_mask) */ REGS('r', ALL_GENERAL_REGS) +REGS('v', ALL_VECTOR_REGS) /* * Define constraint letters for constants: diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index d334857226..38d71111c9 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -32,38 +32,14 @@ #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { - "zero", - "ra", - "sp", - "gp", - "tp", - "t0", - "t1", - "t2", - "s0", - "s1", - "a0", - "a1", - "a2", - "a3", - "a4", - "a5", - "a6", - "a7", - "s2", - "s3", - "s4", - "s5", - "s6", - "s7", - "s8", - "s9", - "s10", - "s11", - "t3", - "t4", - "t5", - "t6" + "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", + "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", + "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", + "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6", + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", }; #endif @@ -100,6 +76,16 @@ static const int tcg_target_reg_alloc_order[] = { TCG_REG_A5, TCG_REG_A6, TCG_REG_A7, + + /* Vector registers and TCG_REG_V0 reserved for mask. 
*/ + TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, TCG_REG_V4, + TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, TCG_REG_V8, + TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, TCG_REG_V12, + TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, TCG_REG_V16, + TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, TCG_REG_V20, + TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, TCG_REG_V24, + TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, TCG_REG_V28, + TCG_REG_V29, TCG_REG_V30, TCG_REG_V31, }; static const int tcg_target_call_iarg_regs[] = { @@ -127,6 +113,9 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) #define TCG_CT_CONST_J12 0x1000 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) +#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) +#define ALL_DVECTOR_REG_GROUPS 0x5555555500000000 +#define ALL_QVECTOR_REG_GROUPS 0x1111111100000000 #define sextreg sextract64 @@ -176,6 +165,31 @@ static bool tcg_target_const_match(int64_t val, int ct, * RISC-V Base ISA opcodes (IM) */ +#define V_OPIVV (0x0 << 12) +#define V_OPFVV (0x1 << 12) +#define V_OPMVV (0x2 << 12) +#define V_OPIVI (0x3 << 12) +#define V_OPIVX (0x4 << 12) +#define V_OPFVF (0x5 << 12) +#define V_OPMVX (0x6 << 12) +#define V_OPCFG (0x7 << 12) + +/* NF <= 7 && NF >= 0 */ +#define V_NF(x) (x << 29) +#define V_UNIT_STRIDE (0x0 << 20) +#define V_UNIT_STRIDE_WHOLE_REG (0x8 << 20) + +typedef enum { + VLMUL_M1 = 0, /* LMUL=1 */ + VLMUL_M2, /* LMUL=2 */ + VLMUL_M4, /* LMUL=4 */ + VLMUL_M8, /* LMUL=8 */ + VLMUL_RESERVED, + VLMUL_MF8, /* LMUL=1/8 */ + VLMUL_MF4, /* LMUL=1/4 */ + VLMUL_MF2, /* LMUL=1/2 */ +} RISCVVlmul; + typedef enum { OPC_ADD = 0x33, OPC_ADDI = 0x13, @@ -271,6 +285,30 @@ typedef enum { /* Zicond: integer conditional operations */ OPC_CZERO_EQZ = 0x0e005033, OPC_CZERO_NEZ = 0x0e007033, + + /* V: Vector extension 1.0 */ + OPC_VSETVLI = 0x57 | V_OPCFG, + OPC_VSETIVLI = 0xc0000057 | V_OPCFG, + OPC_VSETVL = 0x80000057 | V_OPCFG, + + OPC_VLE8_V = 0x7 | V_UNIT_STRIDE, + OPC_VLE16_V = 0x5007 | V_UNIT_STRIDE, + OPC_VLE32_V = 0x6007 | V_UNIT_STRIDE, + OPC_VLE64_V = 0x7007 | V_UNIT_STRIDE, + OPC_VSE8_V = 0x27 | V_UNIT_STRIDE, + OPC_VSE16_V = 0x5027 | V_UNIT_STRIDE, + OPC_VSE32_V = 0x6027 | V_UNIT_STRIDE, + OPC_VSE64_V = 0x7027 | V_UNIT_STRIDE, + + OPC_VL1RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0), + OPC_VL2RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1), + OPC_VL4RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3), + OPC_VL8RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7), + + OPC_VS1R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0), + OPC_VS2R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1), + OPC_VS4R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3), + OPC_VS8R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7), } RISCVInsn; /* @@ -363,6 +401,35 @@ static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm) return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm); } +/* Type-OPIVV/OPMVV/OPIVX/OPMVX, Vector load and store */ + +static int32_t encode_v(RISCVInsn opc, TCGReg d, TCGReg s1, + TCGReg s2, bool vm) +{ + return opc | (d & 0x1f) << 7 | (s1 & 0x1f) << 15 | + (s2 & 0x1f) << 20 | (vm << 25); +} + +/* Vector vtype */ + +static uint32_t encode_vtype(bool vta, bool vma, + MemOp vsew, RISCVVlmul vlmul) +{ + return vma << 7 | vta << 6 | vsew << 3 | vlmul; +} + +static int32_t encode_vset(RISCVInsn opc, TCGReg rd, + TCGArg rs1, uint32_t vtype) +{ + return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (vtype & 0x7ff) << 20; +} + +static int32_t encode_vseti(RISCVInsn opc, TCGReg rd, + uint32_t uimm, uint32_t vtype) +{ + return opc | (rd & 0x1f) << 7 | (uimm & 
0x1f) << 15 | (vtype & 0x3ff) << 20; +} + /* * RISC-V instruction emitters */ @@ -475,6 +542,38 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, } } +/* + * RISC-V vector instruction emitters + */ + +typedef struct VsetCache { + uint32_t movi_insn; + uint32_t vset_insn; +} VsetCache; + +static VsetCache riscv_vset_cache[3][4]; + +static void set_vtype(TCGContext *s, TCGType type, MemOp vsew) +{ + const VsetCache *p = &riscv_vset_cache[type - TCG_TYPE_V64][vsew]; + + s->riscv_cur_type = type; + s->riscv_cur_vsew = vsew; + + if (p->movi_insn) { + tcg_out32(s, p->movi_insn); + } + tcg_out32(s, p->vset_insn); +} + +static MemOp set_vtype_len(TCGContext *s, TCGType type) +{ + if (type != s->riscv_cur_type) { + set_vtype(s, type, MO_64); + } + return s->riscv_cur_vsew; +} + /* * TCG intrinsics */ @@ -681,18 +780,101 @@ static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data, } } +static void tcg_out_vec_ldst(TCGContext *s, RISCVInsn opc, TCGReg data, + TCGReg addr, intptr_t offset) +{ + tcg_debug_assert(data >= TCG_REG_V0); + tcg_debug_assert(addr < TCG_REG_V0); + + if (offset) { + tcg_debug_assert(addr != TCG_REG_ZERO); + if (offset == sextreg(offset, 0, 12)) { + tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, addr, offset); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset); + tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, addr); + } + addr = TCG_REG_TMP0; + } + tcg_out32(s, encode_v(opc, data, addr, 0, true)); +} + static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { - RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD; - tcg_out_ldst(s, insn, arg, arg1, arg2); + RISCVInsn insn; + + switch (type) { + case TCG_TYPE_I32: + tcg_out_ldst(s, OPC_LW, arg, arg1, arg2); + break; + case TCG_TYPE_I64: + tcg_out_ldst(s, OPC_LD, arg, arg1, arg2); + break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + if (type >= riscv_lg2_vlenb) { + static const RISCVInsn whole_reg_ld[] = { + OPC_VL1RE64_V, OPC_VL2RE64_V, OPC_VL4RE64_V, OPC_VL8RE64_V + }; + unsigned idx = type - riscv_lg2_vlenb; + + tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_ld)); + insn = whole_reg_ld[idx]; + } else { + static const RISCVInsn unit_stride_ld[] = { + OPC_VLE8_V, OPC_VLE16_V, OPC_VLE32_V, OPC_VLE64_V + }; + MemOp prev_vsew = set_vtype_len(s, type); + + tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_ld)); + insn = unit_stride_ld[prev_vsew]; + } + tcg_out_vec_ldst(s, insn, arg, arg1, arg2); + break; + default: + g_assert_not_reached(); + } } static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { - RISCVInsn insn = type == TCG_TYPE_I32 ? 
OPC_SW : OPC_SD; - tcg_out_ldst(s, insn, arg, arg1, arg2); + RISCVInsn insn; + + switch (type) { + case TCG_TYPE_I32: + tcg_out_ldst(s, OPC_SW, arg, arg1, arg2); + break; + case TCG_TYPE_I64: + tcg_out_ldst(s, OPC_SD, arg, arg1, arg2); + break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + if (type >= riscv_lg2_vlenb) { + static const RISCVInsn whole_reg_st[] = { + OPC_VS1R_V, OPC_VS2R_V, OPC_VS4R_V, OPC_VS8R_V + }; + unsigned idx = type - riscv_lg2_vlenb; + + tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_st)); + insn = whole_reg_st[idx]; + } else { + static const RISCVInsn unit_stride_st[] = { + OPC_VSE8_V, OPC_VSE16_V, OPC_VSE32_V, OPC_VSE64_V + }; + MemOp prev_vsew = set_vtype_len(s, type); + + tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_st)); + insn = unit_stride_st[prev_vsew]; + } + tcg_out_vec_ldst(s, insn, arg, arg1, arg2); + break; + default: + g_assert_not_reached(); + } } static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, @@ -766,6 +948,23 @@ static void tcg_out_addsub2(TCGContext *s, } } +static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg src) +{ + return false; +} + +static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg base, intptr_t offset) +{ + return false; +} + +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, int64_t arg) +{ +} + static const struct { RISCVInsn op; bool swap; @@ -1104,12 +1303,19 @@ static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn, } } +static void init_setting_vtype(TCGContext *s) +{ + s->riscv_cur_type = TCG_TYPE_COUNT; +} + static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) { TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA; ptrdiff_t offset = tcg_pcrel_diff(s, arg); int ret; + init_setting_vtype(s); + tcg_debug_assert((offset & 1) == 0); if (offset == sextreg(offset, 0, 20)) { /* short jump: -2097150 to 2097152 */ @@ -1247,6 +1453,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, ldst->oi = oi; ldst->addrlo_reg = addr_reg; + init_setting_vtype(s); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); @@ -1308,6 +1516,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, ldst->oi = oi; ldst->addrlo_reg = addr_reg; + init_setting_vtype(s); + /* We are expecting alignment max 7, so we can always use andi. */ tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12)); tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask); @@ -1881,6 +2091,46 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, } } +static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, + unsigned vecl, unsigned vece, + const TCGArg args[TCG_MAX_OP_ARGS], + const int const_args[TCG_MAX_OP_ARGS]) +{ + TCGType type = vecl + TCG_TYPE_V64; + TCGArg a0, a1, a2; + + a0 = args[0]; + a1 = args[1]; + a2 = args[2]; + + switch (opc) { + case INDEX_op_ld_vec: + tcg_out_ld(s, type, a0, a1, a2); + break; + case INDEX_op_st_vec: + tcg_out_st(s, type, a0, a1, a2); + break; + case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ + case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ + default: + g_assert_not_reached(); + } +} + +void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, + TCGArg a0, ...) 
+{ + g_assert_not_reached(); +} + +int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) +{ + switch (opc) { + default: + return 0; + } +} + static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) { switch (op) { @@ -2020,6 +2270,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_qemu_st_a64_i64: return C_O0_I2(rZ, r); + case INDEX_op_st_vec: + return C_O0_I2(v, r); + case INDEX_op_ld_vec: + return C_O1_I1(v, r); default: g_assert_not_reached(); } @@ -2093,7 +2347,65 @@ static void tcg_target_qemu_prologue(TCGContext *s) static void tcg_out_tb_start(TCGContext *s) { - /* nothing to do */ + init_setting_vtype(s); +} + +static bool vtype_check(unsigned vtype) +{ + unsigned long tmp; + + /* vsetvl tmp, zero, vtype */ + asm(".insn r 0x57, 7, 0x40, %0, zero, %1" : "=r"(tmp) : "r"(vtype)); + return tmp != 0; +} + +static void probe_frac_lmul_1(TCGType type, MemOp vsew) +{ + VsetCache *p = &riscv_vset_cache[type - TCG_TYPE_V64][vsew]; + unsigned avl = tcg_type_size(type) >> vsew; + int lmul = type - riscv_lg2_vlenb; + unsigned vtype = encode_vtype(true, true, vsew, lmul & 7); + bool lmul_eq_avl = true; + + /* Guaranteed by Zve64x. */ + assert(lmul < 3); + + /* + * For LMUL < -3, the host vector size is so large that TYPE + * is smaller than the minimum 1/8 fraction. + * + * For other fractional LMUL settings, implementations must + * support SEW settings between SEW_MIN and LMUL * ELEN, inclusive. + * So if ELEN = 64, LMUL = 1/2, then SEW will support e8, e16, e32, + * but e64 may not be supported. In other words, the hardware only + * guarantees SEW_MIN <= SEW <= LMUL * ELEN. Check. + */ + if (lmul < 0 && (lmul < -3 || !vtype_check(vtype))) { + vtype = encode_vtype(true, true, vsew, VLMUL_M1); + lmul_eq_avl = false; + } + + if (avl < 32) { + p->vset_insn = encode_vseti(OPC_VSETIVLI, TCG_REG_ZERO, avl, vtype); + } else if (lmul_eq_avl) { + /* rd != 0 and rs1 == 0 uses vlmax */ + p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_TMP0, TCG_REG_ZERO, vtype); + } else { + p->movi_insn = encode_i(OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, avl); + p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_ZERO, TCG_REG_TMP0, vtype); + } +} + +static void probe_frac_lmul(void) +{ + /* Match riscv_lg2_vlenb to TCG_TYPE_V64. 
*/ + QEMU_BUILD_BUG_ON(TCG_TYPE_V64 != 3); + + for (TCGType t = TCG_TYPE_V64; t <= TCG_TYPE_V256; t++) { + for (MemOp e = MO_8; e <= MO_64; e++) { + probe_frac_lmul_1(t, e); + } + } } static void tcg_target_init(TCGContext *s) @@ -2101,7 +2413,7 @@ static void tcg_target_init(TCGContext *s) tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; - tcg_target_call_clobber_regs = -1u; + tcg_target_call_clobber_regs = -1; tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); @@ -2123,6 +2435,32 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); + + if (cpuinfo & CPUINFO_ZVE64X) { + switch (riscv_lg2_vlenb) { + case TCG_TYPE_V64: + tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; + tcg_target_available_regs[TCG_TYPE_V128] = ALL_DVECTOR_REG_GROUPS; + tcg_target_available_regs[TCG_TYPE_V256] = ALL_QVECTOR_REG_GROUPS; + s->reserved_regs |= (~ALL_QVECTOR_REG_GROUPS & ALL_VECTOR_REGS); + break; + case TCG_TYPE_V128: + tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; + tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; + tcg_target_available_regs[TCG_TYPE_V256] = ALL_DVECTOR_REG_GROUPS; + s->reserved_regs |= (~ALL_DVECTOR_REG_GROUPS & ALL_VECTOR_REGS); + break; + default: + /* Guaranteed by Zve64x. */ + tcg_debug_assert(riscv_lg2_vlenb >= TCG_TYPE_V256); + tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; + tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; + tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS; + break; + } + tcg_regset_set_reg(s->reserved_regs, TCG_REG_V0); + probe_frac_lmul(); + } } typedef struct { diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index 1a347eaf6e..12a7a37aaa 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -28,42 +28,28 @@ #include "host/cpuinfo.h" #define TCG_TARGET_INSN_UNIT_SIZE 4 -#define TCG_TARGET_NB_REGS 32 +#define TCG_TARGET_NB_REGS 64 #define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) typedef enum { - TCG_REG_ZERO, - TCG_REG_RA, - TCG_REG_SP, - TCG_REG_GP, - TCG_REG_TP, - TCG_REG_T0, - TCG_REG_T1, - TCG_REG_T2, - TCG_REG_S0, - TCG_REG_S1, - TCG_REG_A0, - TCG_REG_A1, - TCG_REG_A2, - TCG_REG_A3, - TCG_REG_A4, - TCG_REG_A5, - TCG_REG_A6, - TCG_REG_A7, - TCG_REG_S2, - TCG_REG_S3, - TCG_REG_S4, - TCG_REG_S5, - TCG_REG_S6, - TCG_REG_S7, - TCG_REG_S8, - TCG_REG_S9, - TCG_REG_S10, - TCG_REG_S11, - TCG_REG_T3, - TCG_REG_T4, - TCG_REG_T5, - TCG_REG_T6, + TCG_REG_ZERO, TCG_REG_RA, TCG_REG_SP, TCG_REG_GP, + TCG_REG_TP, TCG_REG_T0, TCG_REG_T1, TCG_REG_T2, + TCG_REG_S0, TCG_REG_S1, TCG_REG_A0, TCG_REG_A1, + TCG_REG_A2, TCG_REG_A3, TCG_REG_A4, TCG_REG_A5, + TCG_REG_A6, TCG_REG_A7, TCG_REG_S2, TCG_REG_S3, + TCG_REG_S4, TCG_REG_S5, TCG_REG_S6, TCG_REG_S7, + TCG_REG_S8, TCG_REG_S9, TCG_REG_S10, TCG_REG_S11, + TCG_REG_T3, TCG_REG_T4, TCG_REG_T5, TCG_REG_T6, + + /* RISC-V V Extension registers */ + TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, + TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, + TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, + TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, + TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, + TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, + TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, + 
TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31, /* aliases */ TCG_AREG0 = TCG_REG_S0, @@ -156,6 +142,32 @@ typedef enum { #define TCG_TARGET_HAS_tst 0 +/* vector instructions */ +#define TCG_TARGET_HAS_v64 0 +#define TCG_TARGET_HAS_v128 0 +#define TCG_TARGET_HAS_v256 0 +#define TCG_TARGET_HAS_andc_vec 0 +#define TCG_TARGET_HAS_orc_vec 0 +#define TCG_TARGET_HAS_nand_vec 0 +#define TCG_TARGET_HAS_nor_vec 0 +#define TCG_TARGET_HAS_eqv_vec 0 +#define TCG_TARGET_HAS_not_vec 0 +#define TCG_TARGET_HAS_neg_vec 0 +#define TCG_TARGET_HAS_abs_vec 0 +#define TCG_TARGET_HAS_roti_vec 0 +#define TCG_TARGET_HAS_rots_vec 0 +#define TCG_TARGET_HAS_rotv_vec 0 +#define TCG_TARGET_HAS_shi_vec 0 +#define TCG_TARGET_HAS_shs_vec 0 +#define TCG_TARGET_HAS_shv_vec 0 +#define TCG_TARGET_HAS_mul_vec 0 +#define TCG_TARGET_HAS_sat_vec 0 +#define TCG_TARGET_HAS_minmax_vec 0 +#define TCG_TARGET_HAS_bitsel_vec 0 +#define TCG_TARGET_HAS_cmpsel_vec 0 + +#define TCG_TARGET_HAS_tst_vec 0 + #define TCG_TARGET_DEFAULT_MO (0) #define TCG_TARGET_NEED_LDST_LABELS diff --git a/tcg/riscv/tcg-target.opc.h b/tcg/riscv/tcg-target.opc.h new file mode 100644 index 0000000000..b80b39e1e5 --- /dev/null +++ b/tcg/riscv/tcg-target.opc.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) C-SKY Microsystems Co., Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. + * + * See the COPYING file in the top-level directory for details. + * + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, + * consider these to be UNSPEC with names. + */ From d4be6ee111118e7f15644bc33ec8995dd610c68e Mon Sep 17 00:00:00 2001 From: TANG Tiancheng Date: Mon, 7 Oct 2024 10:56:52 +0800 Subject: [PATCH 05/24] tcg/riscv: Implement vector mov/dup{m/i} Signed-off-by: TANG Tiancheng Reviewed-by: Liu Zhiwei Reviewed-by: Richard Henderson Message-ID: <20241007025700.47259-5-zhiwei_liu@linux.alibaba.com> Signed-off-by: Richard Henderson --- tcg/riscv/tcg-target.c.inc | 76 +++++++++++++++++++++++++++++++++++++- 1 file changed, 74 insertions(+), 2 deletions(-) diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index 38d71111c9..17fcc21b0e 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -309,6 +309,12 @@ typedef enum { OPC_VS2R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1), OPC_VS4R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3), OPC_VS8R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7), + + OPC_VMV_V_V = 0x5e000057 | V_OPIVV, + OPC_VMV_V_I = 0x5e000057 | V_OPIVI, + OPC_VMV_V_X = 0x5e000057 | V_OPIVX, + + OPC_VMVNR_V = 0x9e000057 | V_OPIVI, } RISCVInsn; /* @@ -401,6 +407,16 @@ static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm) return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm); } + +/* Type-OPIVI */ + +static int32_t encode_vi(RISCVInsn opc, TCGReg rd, int32_t imm, + TCGReg vs2, bool vm) +{ + return opc | (rd & 0x1f) << 7 | (imm & 0x1f) << 15 | + (vs2 & 0x1f) << 20 | (vm << 25); +} + /* Type-OPIVV/OPMVV/OPIVX/OPMVX, Vector load and store */ static int32_t encode_v(RISCVInsn opc, TCGReg d, TCGReg s1, @@ -546,6 +562,24 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, * RISC-V vector instruction emitters */ +/* + * Vector registers uses the same 5 lower bits as GPR registers, + * and vm=0 (vm = false) means vector masking ENABLED. + * With RVV 1.0, vs2 is the first operand, while rs1/imm is the + * second operand. 
+ */ +static void tcg_out_opc_vx(TCGContext *s, RISCVInsn opc, + TCGReg vd, TCGReg vs2, TCGReg rs1) +{ + tcg_out32(s, encode_v(opc, vd, rs1, vs2, true)); +} + +static void tcg_out_opc_vi(TCGContext *s, RISCVInsn opc, + TCGReg vd, TCGReg vs2, int32_t imm) +{ + tcg_out32(s, encode_vi(opc, vd, imm, vs2, true)); +} + typedef struct VsetCache { uint32_t movi_insn; uint32_t vset_insn; @@ -574,6 +608,13 @@ static MemOp set_vtype_len(TCGContext *s, TCGType type) return s->riscv_cur_vsew; } +static void set_vtype_len_sew(TCGContext *s, TCGType type, MemOp vsew) +{ + if (type != s->riscv_cur_type || vsew != s->riscv_cur_vsew) { + set_vtype(s, type, vsew); + } +} + /* * TCG intrinsics */ @@ -588,6 +629,15 @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) case TCG_TYPE_I64: tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0); break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + { + int lmul = type - riscv_lg2_vlenb; + int nf = 1 << MAX(lmul, 0); + tcg_out_opc_vi(s, OPC_VMVNR_V, ret, arg, nf - 1); + } + break; default: g_assert_not_reached(); } @@ -951,18 +1001,35 @@ static void tcg_out_addsub2(TCGContext *s, static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg dst, TCGReg src) { - return false; + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vx(s, OPC_VMV_V_X, dst, 0, src); + return true; } static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg dst, TCGReg base, intptr_t offset) { - return false; + tcg_out_ld(s, TCG_TYPE_REG, TCG_REG_TMP0, base, offset); + return tcg_out_dup_vec(s, type, vece, dst, TCG_REG_TMP0); } static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg dst, int64_t arg) { + /* Arg is replicated by VECE; extract the highest element. 
*/ + arg >>= (-8 << vece) & 63; + + if (arg >= -16 && arg < 16) { + if (arg == 0 || arg == -1) { + set_vtype_len(s, type); + } else { + set_vtype_len_sew(s, type, vece); + } + tcg_out_opc_vi(s, OPC_VMV_V_I, dst, 0, arg); + return; + } + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, arg); + tcg_out_dup_vec(s, type, vece, dst, TCG_REG_TMP0); } static const struct { @@ -2104,6 +2171,9 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, a2 = args[2]; switch (opc) { + case INDEX_op_dupm_vec: + tcg_out_dupm_vec(s, type, vece, a0, a1, a2); + break; case INDEX_op_ld_vec: tcg_out_ld(s, type, a0, a1, a2); break; @@ -2272,6 +2342,8 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_st_vec: return C_O0_I2(v, r); + case INDEX_op_dup_vec: + case INDEX_op_dupm_vec: case INDEX_op_ld_vec: return C_O1_I1(v, r); default: From 5a63f5998791460342ddc1fcd74db5909d00a2b9 Mon Sep 17 00:00:00 2001 From: TANG Tiancheng Date: Mon, 7 Oct 2024 10:56:53 +0800 Subject: [PATCH 06/24] tcg/riscv: Add support for basic vector opcodes Signed-off-by: TANG Tiancheng Reviewed-by: Liu Zhiwei Reviewed-by: Richard Henderson Message-ID: <20241007025700.47259-6-zhiwei_liu@linux.alibaba.com> Signed-off-by: Richard Henderson --- tcg/riscv/tcg-target-con-set.h | 3 ++ tcg/riscv/tcg-target-con-str.h | 1 + tcg/riscv/tcg-target.c.inc | 80 ++++++++++++++++++++++++++++++++++ tcg/riscv/tcg-target.h | 2 +- 4 files changed, 85 insertions(+), 1 deletion(-) diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h index d73a62b0f2..6513cebc4c 100644 --- a/tcg/riscv/tcg-target-con-set.h +++ b/tcg/riscv/tcg-target-con-set.h @@ -23,3 +23,6 @@ C_O1_I4(r, r, rI, rM, rM) C_O2_I4(r, r, rZ, rZ, rM, rM) C_O0_I2(v, r) C_O1_I1(v, r) +C_O1_I1(v, v) +C_O1_I2(v, v, v) +C_O1_I2(v, v, vK) diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h index b2b3211bcb..0aaad7b753 100644 --- a/tcg/riscv/tcg-target-con-str.h +++ b/tcg/riscv/tcg-target-con-str.h @@ -17,6 +17,7 @@ REGS('v', ALL_VECTOR_REGS) */ CONST('I', TCG_CT_CONST_S12) CONST('J', TCG_CT_CONST_J12) +CONST('K', TCG_CT_CONST_S5) CONST('N', TCG_CT_CONST_N12) CONST('M', TCG_CT_CONST_M12) CONST('Z', TCG_CT_CONST_ZERO) diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index 17fcc21b0e..c8540f9a75 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -111,6 +111,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) #define TCG_CT_CONST_N12 0x400 #define TCG_CT_CONST_M12 0x800 #define TCG_CT_CONST_J12 0x1000 +#define TCG_CT_CONST_S5 0x2000 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) #define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) @@ -129,6 +130,10 @@ static bool tcg_target_const_match(int64_t val, int ct, if ((ct & TCG_CT_CONST_ZERO) && val == 0) { return 1; } + if (type >= TCG_TYPE_V64) { + /* Val is replicated by VECE; extract the highest element. */ + val >>= (-8 << vece) & 63; + } /* * Sign extended from 12 bits: [-0x800, 0x7ff]. * Used for most arithmetic, as this is the isa field. @@ -158,6 +163,13 @@ static bool tcg_target_const_match(int64_t val, int ct, if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) { return 1; } + /* + * Sign extended from 5 bits: [-0x10, 0x0f]. + * Used for vector-immediate. 
+ */ + if ((ct & TCG_CT_CONST_S5) && val >= -0x10 && val <= 0x0f) { + return 1; + } return 0; } @@ -310,6 +322,16 @@ typedef enum { OPC_VS4R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3), OPC_VS8R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7), + OPC_VADD_VV = 0x57 | V_OPIVV, + OPC_VADD_VI = 0x57 | V_OPIVI, + OPC_VSUB_VV = 0x8000057 | V_OPIVV, + OPC_VAND_VV = 0x24000057 | V_OPIVV, + OPC_VAND_VI = 0x24000057 | V_OPIVI, + OPC_VOR_VV = 0x28000057 | V_OPIVV, + OPC_VOR_VI = 0x28000057 | V_OPIVI, + OPC_VXOR_VV = 0x2c000057 | V_OPIVV, + OPC_VXOR_VI = 0x2c000057 | V_OPIVI, + OPC_VMV_V_V = 0x5e000057 | V_OPIVV, OPC_VMV_V_I = 0x5e000057 | V_OPIVI, OPC_VMV_V_X = 0x5e000057 | V_OPIVX, @@ -568,6 +590,12 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, * With RVV 1.0, vs2 is the first operand, while rs1/imm is the * second operand. */ +static void tcg_out_opc_vv(TCGContext *s, RISCVInsn opc, + TCGReg vd, TCGReg vs2, TCGReg vs1) +{ + tcg_out32(s, encode_v(opc, vd, vs1, vs2, true)); +} + static void tcg_out_opc_vx(TCGContext *s, RISCVInsn opc, TCGReg vd, TCGReg vs2, TCGReg rs1) { @@ -580,6 +608,16 @@ static void tcg_out_opc_vi(TCGContext *s, RISCVInsn opc, tcg_out32(s, encode_vi(opc, vd, imm, vs2, true)); } +static void tcg_out_opc_vv_vi(TCGContext *s, RISCVInsn o_vv, RISCVInsn o_vi, + TCGReg vd, TCGReg vs2, TCGArg vi1, int c_vi1) +{ + if (c_vi1) { + tcg_out_opc_vi(s, o_vi, vd, vs2, vi1); + } else { + tcg_out_opc_vv(s, o_vv, vd, vs2, vi1); + } +} + typedef struct VsetCache { uint32_t movi_insn; uint32_t vset_insn; @@ -2165,10 +2203,12 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, { TCGType type = vecl + TCG_TYPE_V64; TCGArg a0, a1, a2; + int c2; a0 = args[0]; a1 = args[1]; a2 = args[2]; + c2 = const_args[2]; switch (opc) { case INDEX_op_dupm_vec: @@ -2180,6 +2220,30 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, case INDEX_op_st_vec: tcg_out_st(s, type, a0, a1, a2); break; + case INDEX_op_add_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VADD_VV, OPC_VADD_VI, a0, a1, a2, c2); + break; + case INDEX_op_sub_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv(s, OPC_VSUB_VV, a0, a1, a2); + break; + case INDEX_op_and_vec: + set_vtype_len(s, type); + tcg_out_opc_vv_vi(s, OPC_VAND_VV, OPC_VAND_VI, a0, a1, a2, c2); + break; + case INDEX_op_or_vec: + set_vtype_len(s, type); + tcg_out_opc_vv_vi(s, OPC_VOR_VV, OPC_VOR_VI, a0, a1, a2, c2); + break; + case INDEX_op_xor_vec: + set_vtype_len(s, type); + tcg_out_opc_vv_vi(s, OPC_VXOR_VV, OPC_VXOR_VI, a0, a1, a2, c2); + break; + case INDEX_op_not_vec: + set_vtype_len(s, type); + tcg_out_opc_vi(s, OPC_VXOR_VI, a0, a1, -1); + break; case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. 
*/ default: @@ -2196,6 +2260,13 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) { switch (opc) { + case INDEX_op_add_vec: + case INDEX_op_sub_vec: + case INDEX_op_and_vec: + case INDEX_op_or_vec: + case INDEX_op_xor_vec: + case INDEX_op_not_vec: + return 1; default: return 0; } @@ -2346,6 +2417,15 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_dupm_vec: case INDEX_op_ld_vec: return C_O1_I1(v, r); + case INDEX_op_not_vec: + return C_O1_I1(v, v); + case INDEX_op_add_vec: + case INDEX_op_and_vec: + case INDEX_op_or_vec: + case INDEX_op_xor_vec: + return C_O1_I2(v, v, vK); + case INDEX_op_sub_vec: + return C_O1_I2(v, v, v); default: g_assert_not_reached(); } diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index 12a7a37aaa..acb8dfdf16 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -151,7 +151,7 @@ typedef enum { #define TCG_TARGET_HAS_nand_vec 0 #define TCG_TARGET_HAS_nor_vec 0 #define TCG_TARGET_HAS_eqv_vec 0 -#define TCG_TARGET_HAS_not_vec 0 +#define TCG_TARGET_HAS_not_vec 1 #define TCG_TARGET_HAS_neg_vec 0 #define TCG_TARGET_HAS_abs_vec 0 #define TCG_TARGET_HAS_roti_vec 0 From a31768c0192a6f1f62d07c4985a77814be34a915 Mon Sep 17 00:00:00 2001 From: TANG Tiancheng Date: Mon, 7 Oct 2024 10:56:54 +0800 Subject: [PATCH 07/24] tcg/riscv: Implement vector cmp/cmpsel ops Extend comparison results from mask registers to SEW-width elements, following recommendations in The RISC-V SPEC Volume I (Version 20240411). This aligns with TCG's cmp_vec behavior by expanding compare results to full element width: all 1s for true, all 0s for false. Signed-off-by: TANG Tiancheng Reviewed-by: Liu Zhiwei Reviewed-by: Richard Henderson Message-ID: <20241007025700.47259-7-zhiwei_liu@linux.alibaba.com> Signed-off-by: Richard Henderson --- tcg/riscv/tcg-target-con-set.h | 2 + tcg/riscv/tcg-target-con-str.h | 1 + tcg/riscv/tcg-target.c.inc | 255 +++++++++++++++++++++++++-------- tcg/riscv/tcg-target.h | 2 +- 4 files changed, 200 insertions(+), 60 deletions(-) diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h index 6513cebc4c..97e6ecdb0f 100644 --- a/tcg/riscv/tcg-target-con-set.h +++ b/tcg/riscv/tcg-target-con-set.h @@ -26,3 +26,5 @@ C_O1_I1(v, r) C_O1_I1(v, v) C_O1_I2(v, v, v) C_O1_I2(v, v, vK) +C_O1_I2(v, v, vL) +C_O1_I4(v, v, vL, vK, vK) diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h index 0aaad7b753..089efe96ca 100644 --- a/tcg/riscv/tcg-target-con-str.h +++ b/tcg/riscv/tcg-target-con-str.h @@ -18,6 +18,7 @@ REGS('v', ALL_VECTOR_REGS) CONST('I', TCG_CT_CONST_S12) CONST('J', TCG_CT_CONST_J12) CONST('K', TCG_CT_CONST_S5) +CONST('L', TCG_CT_CONST_CMP_VI) CONST('N', TCG_CT_CONST_N12) CONST('M', TCG_CT_CONST_M12) CONST('Z', TCG_CT_CONST_ZERO) diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index c8540f9a75..1893c419c6 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -106,12 +106,13 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) return TCG_REG_A0 + slot; } -#define TCG_CT_CONST_ZERO 0x100 -#define TCG_CT_CONST_S12 0x200 -#define TCG_CT_CONST_N12 0x400 -#define TCG_CT_CONST_M12 0x800 -#define TCG_CT_CONST_J12 0x1000 -#define TCG_CT_CONST_S5 0x2000 +#define TCG_CT_CONST_ZERO 0x100 +#define TCG_CT_CONST_S12 0x200 +#define TCG_CT_CONST_N12 0x400 +#define TCG_CT_CONST_M12 0x800 +#define TCG_CT_CONST_J12 0x1000 +#define TCG_CT_CONST_S5 0x2000 
+#define TCG_CT_CONST_CMP_VI 0x4000 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) #define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) @@ -120,59 +121,6 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) #define sextreg sextract64 -/* test if a constant matches the constraint */ -static bool tcg_target_const_match(int64_t val, int ct, - TCGType type, TCGCond cond, int vece) -{ - if (ct & TCG_CT_CONST) { - return 1; - } - if ((ct & TCG_CT_CONST_ZERO) && val == 0) { - return 1; - } - if (type >= TCG_TYPE_V64) { - /* Val is replicated by VECE; extract the highest element. */ - val >>= (-8 << vece) & 63; - } - /* - * Sign extended from 12 bits: [-0x800, 0x7ff]. - * Used for most arithmetic, as this is the isa field. - */ - if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) { - return 1; - } - /* - * Sign extended from 12 bits, negated: [-0x7ff, 0x800]. - * Used for subtraction, where a constant must be handled by ADDI. - */ - if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) { - return 1; - } - /* - * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff]. - * Used by addsub2 and movcond, which may need the negative value, - * and requires the modified constant to be representable. - */ - if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) { - return 1; - } - /* - * Inverse of sign extended from 12 bits: ~[-0x800, 0x7ff]. - * Used to map ANDN back to ANDI, etc. - */ - if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) { - return 1; - } - /* - * Sign extended from 5 bits: [-0x10, 0x0f]. - * Used for vector-immediate. - */ - if ((ct & TCG_CT_CONST_S5) && val >= -0x10 && val <= 0x0f) { - return 1; - } - return 0; -} - /* * RISC-V Base ISA opcodes (IM) */ @@ -322,6 +270,9 @@ typedef enum { OPC_VS4R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3), OPC_VS8R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7), + OPC_VMERGE_VIM = 0x5c000057 | V_OPIVI, + OPC_VMERGE_VVM = 0x5c000057 | V_OPIVV, + OPC_VADD_VV = 0x57 | V_OPIVV, OPC_VADD_VI = 0x57 | V_OPIVI, OPC_VSUB_VV = 0x8000057 | V_OPIVV, @@ -332,6 +283,29 @@ typedef enum { OPC_VXOR_VV = 0x2c000057 | V_OPIVV, OPC_VXOR_VI = 0x2c000057 | V_OPIVI, + OPC_VMSEQ_VV = 0x60000057 | V_OPIVV, + OPC_VMSEQ_VI = 0x60000057 | V_OPIVI, + OPC_VMSEQ_VX = 0x60000057 | V_OPIVX, + OPC_VMSNE_VV = 0x64000057 | V_OPIVV, + OPC_VMSNE_VI = 0x64000057 | V_OPIVI, + OPC_VMSNE_VX = 0x64000057 | V_OPIVX, + + OPC_VMSLTU_VV = 0x68000057 | V_OPIVV, + OPC_VMSLTU_VX = 0x68000057 | V_OPIVX, + OPC_VMSLT_VV = 0x6c000057 | V_OPIVV, + OPC_VMSLT_VX = 0x6c000057 | V_OPIVX, + OPC_VMSLEU_VV = 0x70000057 | V_OPIVV, + OPC_VMSLEU_VX = 0x70000057 | V_OPIVX, + OPC_VMSLE_VV = 0x74000057 | V_OPIVV, + OPC_VMSLE_VX = 0x74000057 | V_OPIVX, + + OPC_VMSLEU_VI = 0x70000057 | V_OPIVI, + OPC_VMSLE_VI = 0x74000057 | V_OPIVI, + OPC_VMSGTU_VI = 0x78000057 | V_OPIVI, + OPC_VMSGTU_VX = 0x78000057 | V_OPIVX, + OPC_VMSGT_VI = 0x7c000057 | V_OPIVI, + OPC_VMSGT_VX = 0x7c000057 | V_OPIVX, + OPC_VMV_V_V = 0x5e000057 | V_OPIVV, OPC_VMV_V_I = 0x5e000057 | V_OPIVI, OPC_VMV_V_X = 0x5e000057 | V_OPIVX, @@ -339,6 +313,101 @@ typedef enum { OPC_VMVNR_V = 0x9e000057 | V_OPIVI, } RISCVInsn; +static const struct { + RISCVInsn op; + bool swap; +} tcg_cmpcond_to_rvv_vv[] = { + [TCG_COND_EQ] = { OPC_VMSEQ_VV, false }, + [TCG_COND_NE] = { OPC_VMSNE_VV, false }, + [TCG_COND_LT] = { OPC_VMSLT_VV, false }, + [TCG_COND_GE] = { OPC_VMSLE_VV, true }, + [TCG_COND_GT] = { OPC_VMSLT_VV, true }, + [TCG_COND_LE] = { OPC_VMSLE_VV, false }, + [TCG_COND_LTU] = { OPC_VMSLTU_VV, 
false }, + [TCG_COND_GEU] = { OPC_VMSLEU_VV, true }, + [TCG_COND_GTU] = { OPC_VMSLTU_VV, true }, + [TCG_COND_LEU] = { OPC_VMSLEU_VV, false } +}; + +static const struct { + RISCVInsn op; + int min; + int max; + bool adjust; +} tcg_cmpcond_to_rvv_vi[] = { + [TCG_COND_EQ] = { OPC_VMSEQ_VI, -16, 15, false }, + [TCG_COND_NE] = { OPC_VMSNE_VI, -16, 15, false }, + [TCG_COND_GT] = { OPC_VMSGT_VI, -16, 15, false }, + [TCG_COND_LE] = { OPC_VMSLE_VI, -16, 15, false }, + [TCG_COND_LT] = { OPC_VMSLE_VI, -15, 16, true }, + [TCG_COND_GE] = { OPC_VMSGT_VI, -15, 16, true }, + [TCG_COND_LEU] = { OPC_VMSLEU_VI, 0, 15, false }, + [TCG_COND_GTU] = { OPC_VMSGTU_VI, 0, 15, false }, + [TCG_COND_LTU] = { OPC_VMSLEU_VI, 1, 16, true }, + [TCG_COND_GEU] = { OPC_VMSGTU_VI, 1, 16, true }, +}; + +/* test if a constant matches the constraint */ +static bool tcg_target_const_match(int64_t val, int ct, + TCGType type, TCGCond cond, int vece) +{ + if (ct & TCG_CT_CONST) { + return 1; + } + if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + return 1; + } + if (type >= TCG_TYPE_V64) { + /* Val is replicated by VECE; extract the highest element. */ + val >>= (-8 << vece) & 63; + } + /* + * Sign extended from 12 bits: [-0x800, 0x7ff]. + * Used for most arithmetic, as this is the isa field. + */ + if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) { + return 1; + } + /* + * Sign extended from 12 bits, negated: [-0x7ff, 0x800]. + * Used for subtraction, where a constant must be handled by ADDI. + */ + if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) { + return 1; + } + /* + * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff]. + * Used by addsub2 and movcond, which may need the negative value, + * and requires the modified constant to be representable. + */ + if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) { + return 1; + } + /* + * Inverse of sign extended from 12 bits: ~[-0x800, 0x7ff]. + * Used to map ANDN back to ANDI, etc. + */ + if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) { + return 1; + } + /* + * Sign extended from 5 bits: [-0x10, 0x0f]. + * Used for vector-immediate. + */ + if ((ct & TCG_CT_CONST_S5) && val >= -0x10 && val <= 0x0f) { + return 1; + } + /* + * Used for vector compare OPIVI instructions. + */ + if ((ct & TCG_CT_CONST_CMP_VI) && + val >= tcg_cmpcond_to_rvv_vi[cond].min && + val <= tcg_cmpcond_to_rvv_vi[cond].max) { + return true; + } + return 0; +} + /* * RISC-V immediate and instruction encoders (excludes 16-bit RVC) */ @@ -618,6 +687,18 @@ static void tcg_out_opc_vv_vi(TCGContext *s, RISCVInsn o_vv, RISCVInsn o_vi, } } +static void tcg_out_opc_vim_mask(TCGContext *s, RISCVInsn opc, TCGReg vd, + TCGReg vs2, int32_t imm) +{ + tcg_out32(s, encode_vi(opc, vd, imm, vs2, false)); +} + +static void tcg_out_opc_vvm_mask(TCGContext *s, RISCVInsn opc, TCGReg vd, + TCGReg vs2, TCGReg vs1) +{ + tcg_out32(s, encode_v(opc, vd, vs1, vs2, false)); +} + typedef struct VsetCache { uint32_t movi_insn; uint32_t vset_insn; @@ -1408,6 +1489,48 @@ static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn, } } +static void tcg_out_cmpsel(TCGContext *s, TCGType type, unsigned vece, + TCGCond cond, TCGReg ret, + TCGReg cmp1, TCGReg cmp2, bool c_cmp2, + TCGReg val1, bool c_val1, + TCGReg val2, bool c_val2) +{ + set_vtype_len_sew(s, type, vece); + + /* Use only vmerge_vim if possible, by inverting the test. 
*/ + if (c_val2 && !c_val1) { + TCGArg temp = val1; + cond = tcg_invert_cond(cond); + val1 = val2; + val2 = temp; + c_val1 = true; + c_val2 = false; + } + + /* Perform the comparison into V0 mask. */ + if (c_cmp2) { + tcg_out_opc_vi(s, tcg_cmpcond_to_rvv_vi[cond].op, TCG_REG_V0, cmp1, + cmp2 - tcg_cmpcond_to_rvv_vi[cond].adjust); + } else if (tcg_cmpcond_to_rvv_vv[cond].swap) { + tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op, + TCG_REG_V0, cmp2, cmp1); + } else { + tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op, + TCG_REG_V0, cmp1, cmp2); + } + if (c_val1) { + if (c_val2) { + tcg_out_opc_vi(s, OPC_VMV_V_I, ret, 0, val2); + val2 = ret; + } + /* vd[i] == v0.mask[i] ? imm : vs2[i] */ + tcg_out_opc_vim_mask(s, OPC_VMERGE_VIM, ret, val2, val1); + } else { + /* vd[i] == v0.mask[i] ? vs1[i] : vs2[i] */ + tcg_out_opc_vvm_mask(s, OPC_VMERGE_VVM, ret, val2, val1); + } +} + static void init_setting_vtype(TCGContext *s) { s->riscv_cur_type = TCG_TYPE_COUNT; @@ -2244,6 +2367,14 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, set_vtype_len(s, type); tcg_out_opc_vi(s, OPC_VXOR_VI, a0, a1, -1); break; + case INDEX_op_cmp_vec: + tcg_out_cmpsel(s, type, vece, args[3], a0, a1, a2, c2, + -1, true, 0, true); + break; + case INDEX_op_cmpsel_vec: + tcg_out_cmpsel(s, type, vece, args[5], a0, a1, a2, c2, + args[3], const_args[3], args[4], const_args[4]); + break; case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ default: @@ -2266,6 +2397,8 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_or_vec: case INDEX_op_xor_vec: case INDEX_op_not_vec: + case INDEX_op_cmp_vec: + case INDEX_op_cmpsel_vec: return 1; default: return 0; @@ -2426,6 +2559,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) return C_O1_I2(v, v, vK); case INDEX_op_sub_vec: return C_O1_I2(v, v, v); + case INDEX_op_cmp_vec: + return C_O1_I2(v, v, vL); + case INDEX_op_cmpsel_vec: + return C_O1_I4(v, v, vL, vK, vK); default: g_assert_not_reached(); } diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index acb8dfdf16..94034504b2 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -164,7 +164,7 @@ typedef enum { #define TCG_TARGET_HAS_sat_vec 0 #define TCG_TARGET_HAS_minmax_vec 0 #define TCG_TARGET_HAS_bitsel_vec 0 -#define TCG_TARGET_HAS_cmpsel_vec 0 +#define TCG_TARGET_HAS_cmpsel_vec 1 #define TCG_TARGET_HAS_tst_vec 0 From c283c0748a01ec05f24d5aa24409f42b459e8938 Mon Sep 17 00:00:00 2001 From: TANG Tiancheng Date: Mon, 7 Oct 2024 10:56:55 +0800 Subject: [PATCH 08/24] tcg/riscv: Implement vector neg ops Signed-off-by: TANG Tiancheng Reviewed-by: Liu Zhiwei Reviewed-by: Richard Henderson Message-ID: <20241007025700.47259-8-zhiwei_liu@linux.alibaba.com> Signed-off-by: Richard Henderson --- tcg/riscv/tcg-target.c.inc | 7 +++++++ tcg/riscv/tcg-target.h | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index 1893c419c6..ce8d6d0293 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -276,6 +276,7 @@ typedef enum { OPC_VADD_VV = 0x57 | V_OPIVV, OPC_VADD_VI = 0x57 | V_OPIVI, OPC_VSUB_VV = 0x8000057 | V_OPIVV, + OPC_VRSUB_VI = 0xc000057 | V_OPIVI, OPC_VAND_VV = 0x24000057 | V_OPIVV, OPC_VAND_VI = 0x24000057 | V_OPIVI, OPC_VOR_VV = 0x28000057 | V_OPIVV, @@ -2367,6 +2368,10 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, set_vtype_len(s, type); tcg_out_opc_vi(s, OPC_VXOR_VI, a0, a1, -1); 
break; + case INDEX_op_neg_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vi(s, OPC_VRSUB_VI, a0, a1, 0); + break; case INDEX_op_cmp_vec: tcg_out_cmpsel(s, type, vece, args[3], a0, a1, a2, c2, -1, true, 0, true); @@ -2397,6 +2402,7 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_or_vec: case INDEX_op_xor_vec: case INDEX_op_not_vec: + case INDEX_op_neg_vec: case INDEX_op_cmp_vec: case INDEX_op_cmpsel_vec: return 1; @@ -2550,6 +2556,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_dupm_vec: case INDEX_op_ld_vec: return C_O1_I1(v, r); + case INDEX_op_neg_vec: case INDEX_op_not_vec: return C_O1_I1(v, v); case INDEX_op_add_vec: diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index 94034504b2..ae10381e02 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -152,7 +152,7 @@ typedef enum { #define TCG_TARGET_HAS_nor_vec 0 #define TCG_TARGET_HAS_eqv_vec 0 #define TCG_TARGET_HAS_not_vec 1 -#define TCG_TARGET_HAS_neg_vec 0 +#define TCG_TARGET_HAS_neg_vec 1 #define TCG_TARGET_HAS_abs_vec 0 #define TCG_TARGET_HAS_roti_vec 0 #define TCG_TARGET_HAS_rots_vec 0 From dc9cd4ec12074a762ca9f41a822a27aa702284d4 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Tue, 15 Oct 2024 19:04:55 +0000 Subject: [PATCH 09/24] tcg/riscv: Accept constant first argument to sub_vec Use vrsub.vi to subtract from a constant. Reviewed-by: LIU Zhiwei Signed-off-by: Richard Henderson --- tcg/riscv/tcg-target-con-set.h | 1 + tcg/riscv/tcg-target.c.inc | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h index 97e6ecdb0f..d8ce5414f5 100644 --- a/tcg/riscv/tcg-target-con-set.h +++ b/tcg/riscv/tcg-target-con-set.h @@ -25,6 +25,7 @@ C_O0_I2(v, r) C_O1_I1(v, r) C_O1_I1(v, v) C_O1_I2(v, v, v) +C_O1_I2(v, vK, v) C_O1_I2(v, v, vK) C_O1_I2(v, v, vL) C_O1_I4(v, v, vL, vK, vK) diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index ce8d6d0293..1ce2f291d3 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -2350,7 +2350,11 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_sub_vec: set_vtype_len_sew(s, type, vece); - tcg_out_opc_vv(s, OPC_VSUB_VV, a0, a1, a2); + if (const_args[1]) { + tcg_out_opc_vi(s, OPC_VRSUB_VI, a0, a2, a1); + } else { + tcg_out_opc_vv(s, OPC_VSUB_VV, a0, a1, a2); + } break; case INDEX_op_and_vec: set_vtype_len(s, type); @@ -2565,7 +2569,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_xor_vec: return C_O1_I2(v, v, vK); case INDEX_op_sub_vec: - return C_O1_I2(v, v, v); + return C_O1_I2(v, vK, v); case INDEX_op_cmp_vec: return C_O1_I2(v, v, vL); case INDEX_op_cmpsel_vec: From 101c1ef56221926718eb70d33ac2844d139d55e0 Mon Sep 17 00:00:00 2001 From: TANG Tiancheng Date: Mon, 7 Oct 2024 10:56:56 +0800 Subject: [PATCH 10/24] tcg/riscv: Implement vector sat/mul ops Signed-off-by: TANG Tiancheng Reviewed-by: Liu Zhiwei Reviewed-by: Richard Henderson Message-ID: <20241007025700.47259-9-zhiwei_liu@linux.alibaba.com> Signed-off-by: Richard Henderson --- tcg/riscv/tcg-target.c.inc | 41 ++++++++++++++++++++++++++++++++++++++ tcg/riscv/tcg-target.h | 4 ++-- 2 files changed, 43 insertions(+), 2 deletions(-) diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index 1ce2f291d3..4758555565 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -284,6 +284,16 @@ typedef enum { OPC_VXOR_VV = 0x2c000057 | V_OPIVV, 
OPC_VXOR_VI = 0x2c000057 | V_OPIVI, + OPC_VMUL_VV = 0x94000057 | V_OPMVV, + OPC_VSADD_VV = 0x84000057 | V_OPIVV, + OPC_VSADD_VI = 0x84000057 | V_OPIVI, + OPC_VSSUB_VV = 0x8c000057 | V_OPIVV, + OPC_VSSUB_VI = 0x8c000057 | V_OPIVI, + OPC_VSADDU_VV = 0x80000057 | V_OPIVV, + OPC_VSADDU_VI = 0x80000057 | V_OPIVI, + OPC_VSSUBU_VV = 0x88000057 | V_OPIVV, + OPC_VSSUBU_VI = 0x88000057 | V_OPIVI, + OPC_VMSEQ_VV = 0x60000057 | V_OPIVV, OPC_VMSEQ_VI = 0x60000057 | V_OPIVI, OPC_VMSEQ_VX = 0x60000057 | V_OPIVX, @@ -2376,6 +2386,26 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, set_vtype_len_sew(s, type, vece); tcg_out_opc_vi(s, OPC_VRSUB_VI, a0, a1, 0); break; + case INDEX_op_mul_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv(s, OPC_VMUL_VV, a0, a1, a2); + break; + case INDEX_op_ssadd_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VSADD_VV, OPC_VSADD_VI, a0, a1, a2, c2); + break; + case INDEX_op_sssub_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VSSUB_VV, OPC_VSSUB_VI, a0, a1, a2, c2); + break; + case INDEX_op_usadd_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VSADDU_VV, OPC_VSADDU_VI, a0, a1, a2, c2); + break; + case INDEX_op_ussub_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VSSUBU_VV, OPC_VSSUBU_VI, a0, a1, a2, c2); + break; case INDEX_op_cmp_vec: tcg_out_cmpsel(s, type, vece, args[3], a0, a1, a2, c2, -1, true, 0, true); @@ -2407,6 +2437,11 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_xor_vec: case INDEX_op_not_vec: case INDEX_op_neg_vec: + case INDEX_op_mul_vec: + case INDEX_op_ssadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_usadd_vec: + case INDEX_op_ussub_vec: case INDEX_op_cmp_vec: case INDEX_op_cmpsel_vec: return 1; @@ -2567,9 +2602,15 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_and_vec: case INDEX_op_or_vec: case INDEX_op_xor_vec: + case INDEX_op_ssadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_usadd_vec: + case INDEX_op_ussub_vec: return C_O1_I2(v, v, vK); case INDEX_op_sub_vec: return C_O1_I2(v, vK, v); + case INDEX_op_mul_vec: + return C_O1_I2(v, v, v); case INDEX_op_cmp_vec: return C_O1_I2(v, v, vL); case INDEX_op_cmpsel_vec: diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index ae10381e02..1d4d8878ce 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -160,8 +160,8 @@ typedef enum { #define TCG_TARGET_HAS_shi_vec 0 #define TCG_TARGET_HAS_shs_vec 0 #define TCG_TARGET_HAS_shv_vec 0 -#define TCG_TARGET_HAS_mul_vec 0 -#define TCG_TARGET_HAS_sat_vec 0 +#define TCG_TARGET_HAS_mul_vec 1 +#define TCG_TARGET_HAS_sat_vec 1 #define TCG_TARGET_HAS_minmax_vec 0 #define TCG_TARGET_HAS_bitsel_vec 0 #define TCG_TARGET_HAS_cmpsel_vec 1 From 1631f19b04fe29ba6c3c75a8c6decd3856da4853 Mon Sep 17 00:00:00 2001 From: TANG Tiancheng Date: Mon, 7 Oct 2024 10:56:57 +0800 Subject: [PATCH 11/24] tcg/riscv: Implement vector min/max ops Signed-off-by: TANG Tiancheng Reviewed-by: Liu Zhiwei Reviewed-by: Richard Henderson Message-ID: <20241007025700.47259-10-zhiwei_liu@linux.alibaba.com> Signed-off-by: Richard Henderson --- tcg/riscv/tcg-target.c.inc | 33 +++++++++++++++++++++++++++++++++ tcg/riscv/tcg-target.h | 2 +- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index 4758555565..35b244b7a2 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -294,6 +294,15 @@ typedef enum { OPC_VSSUBU_VV = 0x88000057 | 
V_OPIVV, OPC_VSSUBU_VI = 0x88000057 | V_OPIVI, + OPC_VMAX_VV = 0x1c000057 | V_OPIVV, + OPC_VMAX_VI = 0x1c000057 | V_OPIVI, + OPC_VMAXU_VV = 0x18000057 | V_OPIVV, + OPC_VMAXU_VI = 0x18000057 | V_OPIVI, + OPC_VMIN_VV = 0x14000057 | V_OPIVV, + OPC_VMIN_VI = 0x14000057 | V_OPIVI, + OPC_VMINU_VV = 0x10000057 | V_OPIVV, + OPC_VMINU_VI = 0x10000057 | V_OPIVI, + OPC_VMSEQ_VV = 0x60000057 | V_OPIVV, OPC_VMSEQ_VI = 0x60000057 | V_OPIVI, OPC_VMSEQ_VX = 0x60000057 | V_OPIVX, @@ -2406,6 +2415,22 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, set_vtype_len_sew(s, type, vece); tcg_out_opc_vv_vi(s, OPC_VSSUBU_VV, OPC_VSSUBU_VI, a0, a1, a2, c2); break; + case INDEX_op_smax_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VMAX_VV, OPC_VMAX_VI, a0, a1, a2, c2); + break; + case INDEX_op_smin_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VMIN_VV, OPC_VMIN_VI, a0, a1, a2, c2); + break; + case INDEX_op_umax_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VMAXU_VV, OPC_VMAXU_VI, a0, a1, a2, c2); + break; + case INDEX_op_umin_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VMINU_VV, OPC_VMINU_VI, a0, a1, a2, c2); + break; case INDEX_op_cmp_vec: tcg_out_cmpsel(s, type, vece, args[3], a0, a1, a2, c2, -1, true, 0, true); @@ -2442,6 +2467,10 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_sssub_vec: case INDEX_op_usadd_vec: case INDEX_op_ussub_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: case INDEX_op_cmp_vec: case INDEX_op_cmpsel_vec: return 1; @@ -2606,6 +2635,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_sssub_vec: case INDEX_op_usadd_vec: case INDEX_op_ussub_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: return C_O1_I2(v, v, vK); case INDEX_op_sub_vec: return C_O1_I2(v, vK, v); diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index 1d4d8878ce..7005099810 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -162,7 +162,7 @@ typedef enum { #define TCG_TARGET_HAS_shv_vec 0 #define TCG_TARGET_HAS_mul_vec 1 #define TCG_TARGET_HAS_sat_vec 1 -#define TCG_TARGET_HAS_minmax_vec 0 +#define TCG_TARGET_HAS_minmax_vec 1 #define TCG_TARGET_HAS_bitsel_vec 0 #define TCG_TARGET_HAS_cmpsel_vec 1 From cbde22f18bc7a7c16f5db1dda9030cb4786fb5f6 Mon Sep 17 00:00:00 2001 From: TANG Tiancheng Date: Mon, 7 Oct 2024 10:56:58 +0800 Subject: [PATCH 12/24] tcg/riscv: Implement vector shi/s/v ops Signed-off-by: TANG Tiancheng Reviewed-by: Liu Zhiwei Reviewed-by: Richard Henderson Message-ID: <20241007025700.47259-11-zhiwei_liu@linux.alibaba.com> Signed-off-by: Richard Henderson --- tcg/riscv/tcg-target-con-set.h | 1 + tcg/riscv/tcg-target.c.inc | 76 ++++++++++++++++++++++++++++++++++ tcg/riscv/tcg-target.h | 6 +-- 3 files changed, 80 insertions(+), 3 deletions(-) diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h index d8ce5414f5..3c4ef44eb0 100644 --- a/tcg/riscv/tcg-target-con-set.h +++ b/tcg/riscv/tcg-target-con-set.h @@ -24,6 +24,7 @@ C_O2_I4(r, r, rZ, rZ, rM, rM) C_O0_I2(v, r) C_O1_I1(v, r) C_O1_I1(v, v) +C_O1_I2(v, v, r) C_O1_I2(v, v, v) C_O1_I2(v, vK, v) C_O1_I2(v, v, vK) diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index 35b244b7a2..2c78ea6507 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -326,6 +326,16 @@ typedef enum { OPC_VMSGT_VI = 
0x7c000057 | V_OPIVI, OPC_VMSGT_VX = 0x7c000057 | V_OPIVX, + OPC_VSLL_VV = 0x94000057 | V_OPIVV, + OPC_VSLL_VI = 0x94000057 | V_OPIVI, + OPC_VSLL_VX = 0x94000057 | V_OPIVX, + OPC_VSRL_VV = 0xa0000057 | V_OPIVV, + OPC_VSRL_VI = 0xa0000057 | V_OPIVI, + OPC_VSRL_VX = 0xa0000057 | V_OPIVX, + OPC_VSRA_VV = 0xa4000057 | V_OPIVV, + OPC_VSRA_VI = 0xa4000057 | V_OPIVI, + OPC_VSRA_VX = 0xa4000057 | V_OPIVX, + OPC_VMV_V_V = 0x5e000057 | V_OPIVV, OPC_VMV_V_I = 0x5e000057 | V_OPIVI, OPC_VMV_V_X = 0x5e000057 | V_OPIVX, @@ -1551,6 +1561,17 @@ static void tcg_out_cmpsel(TCGContext *s, TCGType type, unsigned vece, } } +static void tcg_out_vshifti(TCGContext *s, RISCVInsn opc_vi, RISCVInsn opc_vx, + TCGReg dst, TCGReg src, unsigned imm) +{ + if (imm < 32) { + tcg_out_opc_vi(s, opc_vi, dst, src, imm); + } else { + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP0, imm); + tcg_out_opc_vx(s, opc_vx, dst, src, TCG_REG_TMP0); + } +} + static void init_setting_vtype(TCGContext *s) { s->riscv_cur_type = TCG_TYPE_COUNT; @@ -2431,6 +2452,42 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, set_vtype_len_sew(s, type, vece); tcg_out_opc_vv_vi(s, OPC_VMINU_VV, OPC_VMINU_VI, a0, a1, a2, c2); break; + case INDEX_op_shls_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vx(s, OPC_VSLL_VX, a0, a1, a2); + break; + case INDEX_op_shrs_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, a2); + break; + case INDEX_op_sars_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vx(s, OPC_VSRA_VX, a0, a1, a2); + break; + case INDEX_op_shlv_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv(s, OPC_VSLL_VV, a0, a1, a2); + break; + case INDEX_op_shrv_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv(s, OPC_VSRL_VV, a0, a1, a2); + break; + case INDEX_op_sarv_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv(s, OPC_VSRA_VV, a0, a1, a2); + break; + case INDEX_op_shli_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, a0, a1, a2); + break; + case INDEX_op_shri_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1, a2); + break; + case INDEX_op_sari_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_vshifti(s, OPC_VSRA_VI, OPC_VSRA_VX, a0, a1, a2); + break; case INDEX_op_cmp_vec: tcg_out_cmpsel(s, type, vece, args[3], a0, a1, a2, c2, -1, true, 0, true); @@ -2471,6 +2528,15 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_smin_vec: case INDEX_op_umax_vec: case INDEX_op_umin_vec: + case INDEX_op_shls_vec: + case INDEX_op_shrs_vec: + case INDEX_op_sars_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + case INDEX_op_shri_vec: + case INDEX_op_shli_vec: + case INDEX_op_sari_vec: case INDEX_op_cmp_vec: case INDEX_op_cmpsel_vec: return 1; @@ -2626,6 +2692,9 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) return C_O1_I1(v, r); case INDEX_op_neg_vec: case INDEX_op_not_vec: + case INDEX_op_shli_vec: + case INDEX_op_shri_vec: + case INDEX_op_sari_vec: return C_O1_I1(v, v); case INDEX_op_add_vec: case INDEX_op_and_vec: @@ -2643,7 +2712,14 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_sub_vec: return C_O1_I2(v, vK, v); case INDEX_op_mul_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: return C_O1_I2(v, v, v); + case INDEX_op_shls_vec: + case INDEX_op_shrs_vec: + case INDEX_op_sars_vec: + return C_O1_I2(v, v, r); case INDEX_op_cmp_vec: return C_O1_I2(v, v, vL); case 
INDEX_op_cmpsel_vec: diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index 7005099810..76d30e789b 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -157,9 +157,9 @@ typedef enum { #define TCG_TARGET_HAS_roti_vec 0 #define TCG_TARGET_HAS_rots_vec 0 #define TCG_TARGET_HAS_rotv_vec 0 -#define TCG_TARGET_HAS_shi_vec 0 -#define TCG_TARGET_HAS_shs_vec 0 -#define TCG_TARGET_HAS_shv_vec 0 +#define TCG_TARGET_HAS_shi_vec 1 +#define TCG_TARGET_HAS_shs_vec 1 +#define TCG_TARGET_HAS_shv_vec 1 #define TCG_TARGET_HAS_mul_vec 1 #define TCG_TARGET_HAS_sat_vec 1 #define TCG_TARGET_HAS_minmax_vec 1 From d1843219a1365f92ac1652074ba8c352eeeff82f Mon Sep 17 00:00:00 2001 From: TANG Tiancheng Date: Mon, 7 Oct 2024 10:56:59 +0800 Subject: [PATCH 13/24] tcg/riscv: Implement vector roti/v/x ops Signed-off-by: TANG Tiancheng Reviewed-by: Liu Zhiwei Message-ID: <20241007025700.47259-12-zhiwei_liu@linux.alibaba.com> Signed-off-by: Richard Henderson --- tcg/riscv/tcg-target.c.inc | 36 ++++++++++++++++++++++++++++++++++++ tcg/riscv/tcg-target.h | 6 +++--- 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index 2c78ea6507..f8331e4688 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -2488,6 +2488,34 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, set_vtype_len_sew(s, type, vece); tcg_out_vshifti(s, OPC_VSRA_VI, OPC_VSRA_VX, a0, a1, a2); break; + case INDEX_op_rotli_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, TCG_REG_V0, a1, a2); + tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1, + -a2 & ((8 << vece) - 1)); + tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0); + break; + case INDEX_op_rotls_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vx(s, OPC_VSLL_VX, TCG_REG_V0, a1, a2); + tcg_out_opc_reg(s, OPC_SUBW, TCG_REG_TMP0, TCG_REG_ZERO, a2); + tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, TCG_REG_TMP0); + tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0); + break; + case INDEX_op_rotlv_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vi(s, OPC_VRSUB_VI, TCG_REG_V0, a2, 0); + tcg_out_opc_vv(s, OPC_VSRL_VV, TCG_REG_V0, a1, TCG_REG_V0); + tcg_out_opc_vv(s, OPC_VSLL_VV, a0, a1, a2); + tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0); + break; + case INDEX_op_rotrv_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vi(s, OPC_VRSUB_VI, TCG_REG_V0, a2, 0); + tcg_out_opc_vv(s, OPC_VSLL_VV, TCG_REG_V0, a1, TCG_REG_V0); + tcg_out_opc_vv(s, OPC_VSRL_VV, a0, a1, a2); + tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0); + break; case INDEX_op_cmp_vec: tcg_out_cmpsel(s, type, vece, args[3], a0, a1, a2, c2, -1, true, 0, true); @@ -2537,6 +2565,10 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_shri_vec: case INDEX_op_shli_vec: case INDEX_op_sari_vec: + case INDEX_op_rotls_vec: + case INDEX_op_rotlv_vec: + case INDEX_op_rotrv_vec: + case INDEX_op_rotli_vec: case INDEX_op_cmp_vec: case INDEX_op_cmpsel_vec: return 1; @@ -2695,6 +2727,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_shli_vec: case INDEX_op_shri_vec: case INDEX_op_sari_vec: + case INDEX_op_rotli_vec: return C_O1_I1(v, v); case INDEX_op_add_vec: case INDEX_op_and_vec: @@ -2715,10 +2748,13 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_shlv_vec: case INDEX_op_shrv_vec: case INDEX_op_sarv_vec: + case INDEX_op_rotlv_vec: + case INDEX_op_rotrv_vec: return C_O1_I2(v, v, v); case 
INDEX_op_shls_vec: case INDEX_op_shrs_vec: case INDEX_op_sars_vec: + case INDEX_op_rotls_vec: return C_O1_I2(v, v, r); case INDEX_op_cmp_vec: return C_O1_I2(v, v, vL); diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index 76d30e789b..e6d66cd1b9 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -154,9 +154,9 @@ typedef enum { #define TCG_TARGET_HAS_not_vec 1 #define TCG_TARGET_HAS_neg_vec 1 #define TCG_TARGET_HAS_abs_vec 0 -#define TCG_TARGET_HAS_roti_vec 0 -#define TCG_TARGET_HAS_rots_vec 0 -#define TCG_TARGET_HAS_rotv_vec 0 +#define TCG_TARGET_HAS_roti_vec 1 +#define TCG_TARGET_HAS_rots_vec 1 +#define TCG_TARGET_HAS_rotv_vec 1 #define TCG_TARGET_HAS_shi_vec 1 #define TCG_TARGET_HAS_shs_vec 1 #define TCG_TARGET_HAS_shv_vec 1 From 4b7868f8c21cebda86e81f3653e055aa2e87b591 Mon Sep 17 00:00:00 2001 From: TANG Tiancheng Date: Mon, 7 Oct 2024 10:57:00 +0800 Subject: [PATCH 14/24] tcg/riscv: Enable native vector support for TCG host Signed-off-by: TANG Tiancheng Reviewed-by: Liu Zhiwei Reviewed-by: Richard Henderson Message-ID: <20241007025700.47259-13-zhiwei_liu@linux.alibaba.com> Signed-off-by: Richard Henderson --- tcg/riscv/tcg-target.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index e6d66cd1b9..334c37cbe6 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -143,9 +143,9 @@ typedef enum { #define TCG_TARGET_HAS_tst 0 /* vector instructions */ -#define TCG_TARGET_HAS_v64 0 -#define TCG_TARGET_HAS_v128 0 -#define TCG_TARGET_HAS_v256 0 +#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_ZVE64X) +#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_ZVE64X) +#define TCG_TARGET_HAS_v256 (cpuinfo & CPUINFO_ZVE64X) #define TCG_TARGET_HAS_andc_vec 0 #define TCG_TARGET_HAS_orc_vec 0 #define TCG_TARGET_HAS_nand_vec 0 From 9a2a5f1b63b11d22a95d3ff800cf7eb5233254e2 Mon Sep 17 00:00:00 2001 From: Dani Szebenyi Date: Tue, 22 Oct 2024 15:34:39 +0200 Subject: [PATCH 15/24] tcg/ppc: Fix tcg_out_rlw_rc The TCG IR sequence: mov_i32 tmp97,$0xc4240000 dead: 1 pref=0xffffffff mov_i32 tmp98,$0x0 pref=0xffffffff rotr_i32 tmp97,tmp97,tmp98 dead: 1 2 pref=0xffffffff was translated to `slwi r15, r14, 0` instead of `slwi r14, r14, 0` due to SH field overflow. SH field is 5 bits, and tcg_out_rlw is called in some situations with `32-n`, when `n` is 0 it results in an overflow to RA field. 
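To make the failure mode concrete, here is a minimal standalone sketch of the field arithmetic. The RA and SH shift amounts are assumptions taken from the standard rlwinm bit layout (RA in bits 20..16, SH in bits 15..11), not quoted from the QEMU macros:

    /* Illustration only: not QEMU code. */
    #include <assert.h>
    #include <stdint.h>

    #define RA(r)  ((uint32_t)(r) << 16)   /* assumed register field position  */
    #define SH(s)  ((uint32_t)(s) << 11)   /* assumed 5-bit shift field position */

    int main(void)
    {
        /* The rotate-by-zero path passes sh = 32 - 0 = 32, which needs 6 bits. */
        uint32_t overflowed = RA(14) | SH(32);
        uint32_t masked     = RA(14) | SH(32 & 0x1f);

        /* The sixth bit of SH(32) spills into RA: r14 silently becomes r15. */
        assert(((overflowed >> 16) & 0x1f) == 15);
        assert(((masked     >> 16) & 0x1f) == 14);
        return 0;
    }

Masking sh to five bits encodes a shift of 32 as 0, which is the rotate amount actually intended here (rotate by 32 is the same as rotate by 0).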
This commit prevents overflow of that field and adds debug assertions for the other fields Acked-by: Ilya Leoshkevich Signed-off-by: Dani Szebenyi Message-ID: <20241022133535.69351-2-szedani@linux.ibm.com> Reviewed-by: Richard Henderson Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.c.inc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc index 223f079524..9a11c26fd3 100644 --- a/tcg/ppc/tcg-target.c.inc +++ b/tcg/ppc/tcg-target.c.inc @@ -911,7 +911,9 @@ static void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs, static void tcg_out_rlw_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs, int sh, int mb, int me, bool rc) { - tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me) | rc); + tcg_debug_assert((mb & 0x1f) == mb); + tcg_debug_assert((me & 0x1f) == me); + tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh & 0x1f) | MB(mb) | ME(me) | rc); } static void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs, From 4a75c8c7d6d1a965a1ef0a8067d0af9364edb800 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 13 Oct 2024 11:47:31 -0700 Subject: [PATCH 16/24] include/exec: Improve probe_access_full{, _mmu} documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Suggested-by: Alex Bennée Reviewed-by: Pierrick Bouvier Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson Message-ID: <20241013184733.1423747-2-richard.henderson@linaro.org> --- include/exec/exec-all.h | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 72240ef426..2e4c4cc4b4 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -368,6 +368,13 @@ int probe_access_flags(CPUArchState *env, vaddr addr, int size, * The CPUTLBEntryFull structure returned via @pfull is transient * and must be consumed or copied immediately, before any further * access or changes to TLB @mmu_idx. + * + * This function will not fault if @nonfault is set, but will + * return TLB_INVALID_MASK if the page is not mapped, or is not + * accessible with @access_type. + * + * This function will return TLB_MMIO in order to force the access + * to be handled out-of-line if plugins wish to instrument the access. */ int probe_access_full(CPUArchState *env, vaddr addr, int size, MMUAccessType access_type, int mmu_idx, @@ -375,22 +382,14 @@ int probe_access_full(CPUArchState *env, vaddr addr, int size, CPUTLBEntryFull **pfull, uintptr_t retaddr); /** - * probe_access_mmu() - Like probe_access_full except cannot fault and - * doesn't trigger instrumentation. + * probe_access_full_mmu: + * Like probe_access_full, except: * - * @env: CPUArchState - * @vaddr: virtual address to probe - * @size: size of the probe - * @access_type: read, write or execute permission - * @mmu_idx: softmmu index - * @phost: ptr to return value host address or NULL - * @pfull: ptr to return value CPUTLBEntryFull structure or NULL - * - * The CPUTLBEntryFull structure returned via @pfull is transient - * and must be consumed or copied immediately, before any further - * access or changes to TLB @mmu_idx. - * - * Returns: TLB flags as per probe_access_flags() + * This function is intended to be used for page table accesses by + * the target mmu itself. Since such page walking happens while + * handling another potential mmu fault, this function never raises + * exceptions (akin to @nonfault true for probe_access_full). 
+ * Likewise this function does not trigger plugin instrumentation. */ int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size, MMUAccessType access_type, int mmu_idx, From b56617bbcb473c25815d1bf475e326f84563b1de Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Sat, 21 Sep 2024 08:57:12 +0000 Subject: [PATCH 17/24] target/i386: Walk NPT in guest real mode When translating virtual to physical address with a guest CPU that supports nested paging (NPT), we need to perform every page table walk access indirectly through the NPT, which we correctly do. However, we treat real mode (no page table walk) special: In that case, we currently just skip any walks and translate VA -> PA. With NPT enabled, we also need to then perform NPT walk to do GVA -> GPA -> HPA which we fail to do so far. The net result of that is that TCG VMs with NPT enabled that execute real mode code (like SeaBIOS) end up with GPA==HPA mappings which means the guest accesses host code and data. This typically shows as failure to boot guests. This patch changes the page walk logic for NPT enabled guests so that we always perform a GVA -> GPA translation and then skip any logic that requires an actual PTE. That way, all remaining logic to walk the NPT stays and we successfully walk the NPT in real mode. Cc: qemu-stable@nongnu.org Fixes: fe441054bb3f0 ("target-i386: Add NPT support") Signed-off-by: Alexander Graf Reported-by: Eduard Vlad Reviewed-by: Richard Henderson Message-ID: <20240921085712.28902-1-graf@amazon.com> Signed-off-by: Richard Henderson --- target/i386/tcg/sysemu/excp_helper.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c index 8fb05b1f53..8cb0d80177 100644 --- a/target/i386/tcg/sysemu/excp_helper.c +++ b/target/i386/tcg/sysemu/excp_helper.c @@ -150,6 +150,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, uint32_t pkr; int page_size; int error_code; + int prot; restart_all: rsvd_mask = ~MAKE_64BIT_MASK(0, env_archcpu(env)->phys_bits); @@ -298,7 +299,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, /* combine pde and pte nx, user and rw protections */ ptep &= pte ^ PG_NX_MASK; page_size = 4096; - } else { + } else if (pg_mode) { /* * Page table level 2 */ @@ -343,6 +344,15 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, ptep &= pte | PG_NX_MASK; page_size = 4096; rsvd_mask = 0; + } else { + /* + * No paging (real mode), let's tentatively resolve the address as 1:1 + * here, but conditionally still perform an NPT walk on it later. + */ + page_size = 0x40000000; + paddr = in->addr; + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + goto stage2; } do_check_protect: @@ -358,7 +368,7 @@ do_check_protect_pse36: goto do_fault_protect; } - int prot = 0; + prot = 0; if (!is_mmu_index_smap(in->mmu_idx) || !(ptep & PG_USER_MASK)) { prot |= PAGE_READ; if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) { @@ -420,6 +430,7 @@ do_check_protect_pse36: /* merge offset within page */ paddr = (pte & PG_ADDRESS_MASK & ~(page_size - 1)) | (addr & (page_size - 1)); + stage2: /* * Note that NPT is walked (for both paging structures and final guest @@ -562,7 +573,7 @@ static bool get_physical_address(CPUX86State *env, vaddr addr, addr = (uint32_t)addr; } - if (likely(env->cr[0] & CR0_PG_MASK)) { + if (likely(env->cr[0] & CR0_PG_MASK || use_stage2)) { in.cr3 = env->cr[3]; in.mmu_idx = mmu_idx; in.ptw_idx = use_stage2 ? 
MMU_NESTED_IDX : MMU_PHYS_IDX; From 115ade42d50144c15b74368d32dc734ea277d853 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 13 Oct 2024 11:47:32 -0700 Subject: [PATCH 18/24] target/i386: Use probe_access_full_mmu in ptw_translate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The probe_access_full_mmu function was designed for this purpose, and does not report the memory operation event to plugins. Cc: qemu-stable@nongnu.org Fixes: 6d03226b422 ("plugins: force slow path when plugins instrument memory ops") Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Alex Bennée Signed-off-by: Richard Henderson Message-ID: <20241013184733.1423747-3-richard.henderson@linaro.org> --- target/i386/tcg/sysemu/excp_helper.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c index 8cb0d80177..8b046ee4be 100644 --- a/target/i386/tcg/sysemu/excp_helper.c +++ b/target/i386/tcg/sysemu/excp_helper.c @@ -62,12 +62,11 @@ typedef struct PTETranslate { static bool ptw_translate(PTETranslate *inout, hwaddr addr, uint64_t ra) { - CPUTLBEntryFull *full; int flags; inout->gaddr = addr; - flags = probe_access_full(inout->env, addr, 0, MMU_DATA_STORE, - inout->ptw_idx, true, &inout->haddr, &full, ra); + flags = probe_access_full_mmu(inout->env, addr, 0, MMU_DATA_STORE, + inout->ptw_idx, &inout->haddr, NULL); if (unlikely(flags & TLB_INVALID_MASK)) { TranslateFault *err = inout->err; @@ -440,9 +439,8 @@ do_check_protect_pse36: CPUTLBEntryFull *full; int flags, nested_page_size; - flags = probe_access_full(env, paddr, 0, access_type, - MMU_NESTED_IDX, true, - &pte_trans.haddr, &full, 0); + flags = probe_access_full_mmu(env, paddr, 0, access_type, + MMU_NESTED_IDX, &pte_trans.haddr, &full); if (unlikely(flags & TLB_INVALID_MASK)) { *err = (TranslateFault){ .error_code = env->error_code, From e46fbc7d50289a9316fb582f4f98248bc642309e Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 13 Oct 2024 11:47:33 -0700 Subject: [PATCH 19/24] target/i386: Remove ra parameter from ptw_translate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This argument is no longer used. 
Suggested-by: Philippe Mathieu-Daudé Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson Message-ID: <20241013184733.1423747-4-richard.henderson@linaro.org> --- target/i386/tcg/sysemu/excp_helper.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c index 8b046ee4be..da187c8792 100644 --- a/target/i386/tcg/sysemu/excp_helper.c +++ b/target/i386/tcg/sysemu/excp_helper.c @@ -60,7 +60,7 @@ typedef struct PTETranslate { hwaddr gaddr; } PTETranslate; -static bool ptw_translate(PTETranslate *inout, hwaddr addr, uint64_t ra) +static bool ptw_translate(PTETranslate *inout, hwaddr addr) { int flags; @@ -166,7 +166,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 5 */ pte_addr = (in->cr3 & ~0xfff) + (((addr >> 48) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } restart_5: @@ -190,7 +190,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 4 */ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 39) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } restart_4: @@ -210,7 +210,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 3 */ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } restart_3_lma: @@ -237,7 +237,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 3 */ pte_addr = (in->cr3 & 0xffffffe0ULL) + ((addr >> 27) & 0x18); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } rsvd_mask |= PG_HI_USER_MASK; @@ -259,7 +259,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 2 */ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } restart_2_pae: @@ -285,7 +285,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 1 */ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } pte = ptw_ldq(&pte_trans, ra); @@ -303,7 +303,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 2 */ pte_addr = (in->cr3 & 0xfffff000ULL) + ((addr >> 20) & 0xffc); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } restart_2_nopae: @@ -332,7 +332,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 1 */ pte_addr = (pte & ~0xfffu) + ((addr >> 10) & 0xffc); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } pte = ptw_ldl(&pte_trans, ra); From bbd5630a75e70a0f1bcf04de74c94aa94a145628 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 14 Oct 2024 22:34:21 +0200 Subject: [PATCH 20/24] linux-user: Emulate /proc/self/maps under mmap_lock If one thread modifies the mappings and another thread prints them, a situation may occur that the printer thread sees a guest 
mapping without a corresponding host mapping, leading to a crash in open_self_maps_2(). Cc: qemu-stable@nongnu.org Fixes: 7b7a3366e142 ("linux-user: Use walk_memory_regions for open_self_maps") Signed-off-by: Ilya Leoshkevich Reviewed-by: Laurent Vivier Reviewed-by: Richard Henderson Message-ID: <20241014203441.387560-1-iii@linux.ibm.com> Signed-off-by: Richard Henderson --- linux-user/syscall.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 1354e75694..dd2ec0712b 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -8151,17 +8151,19 @@ static int open_self_maps_1(CPUArchState *env, int fd, bool smaps) { struct open_self_maps_data d = { .ts = get_task_state(env_cpu(env)), - .host_maps = read_self_maps(), .fd = fd, .smaps = smaps }; + mmap_lock(); + d.host_maps = read_self_maps(); if (d.host_maps) { walk_memory_regions(&d, open_self_maps_2); free_self_maps(d.host_maps); } else { walk_memory_regions(&d, open_self_maps_3); } + mmap_unlock(); return 0; } From 8704132805cf7a3259d1c5a073b3c2b92afa2616 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Thu, 17 Oct 2024 14:54:43 +0200 Subject: [PATCH 21/24] linux-user/ppc: Fix sigmask endianness issue in sigreturn MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit do_setcontext() copies the target sigmask without endianness handling and then uses target_to_host_sigset_internal(), which expects a byte-swapped one. Use target_to_host_sigset() instead. Fixes: bcd4933a23f1 ("linux-user: ppc signal handling") Signed-off-by: Ilya Leoshkevich Reviewed-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-ID: <20241017125811.447961-2-iii@linux.ibm.com> Signed-off-by: Richard Henderson --- linux-user/ppc/signal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-user/ppc/signal.c b/linux-user/ppc/signal.c index a1d8c0bccc..24e5a02a78 100644 --- a/linux-user/ppc/signal.c +++ b/linux-user/ppc/signal.c @@ -628,7 +628,7 @@ static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig) if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) return 1; - target_to_host_sigset_internal(&blocked, &set); + target_to_host_sigset(&blocked, &set); set_sigmask(&blocked); restore_user_regs(env, mcp, sig); From f769eb00b5d7475eded925a6b8b93f45d6bc05ea Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 22 Oct 2024 12:26:16 +0200 Subject: [PATCH 22/24] linux-user: Trace rt_sigprocmask's sigsets Add a function for formatting target sigsets. It can be useful for other syscalls in the future, so put it into the beginning of strace.c. For simplicity, do not implement the strace's ~[] output syntax. Add a rt_sigprocmask return handler. 
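As a standalone illustration of the decoding the new helper performs, the following uses a plain uint64_t and a made-up name table in place of target_sigset_t and target_signal_name[]; it is not the QEMU code:

    #include <stdint.h>
    #include <stdio.h>

    static const char *const sig_name[] = { NULL, "SIGHUP", "SIGINT", "SIGQUIT" };

    static void print_sigset(uint64_t bits)
    {
        const char *sep = "";

        printf("[");
        for (int sig = 1; sig <= 64; sig++) {
            if (bits & (UINT64_C(1) << (sig - 1))) {
                if (sig < (int)(sizeof(sig_name) / sizeof(sig_name[0]))) {
                    printf("%s%s", sep, sig_name[sig]);
                } else {
                    printf("%s%d", sep, sig);   /* no name known: print the number */
                }
                sep = " ";
            }
        }
        printf("]\n");
    }

    int main(void)
    {
        /* bit (sig - 1) corresponds to signal sig, so this prints [SIGINT SIGQUIT] */
        print_sigset((UINT64_C(1) << 1) | (UINT64_C(1) << 2));
        return 0;
    }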
Example outputs: 753914 rt_sigprocmask(SIG_BLOCK,[SIGCHLD SIGTSTP SIGTTIN SIGTTOU],0x00007f80fddfe380,8) = 0 (oldset=[SIGTTOU]) 753914 rt_sigprocmask(SIG_SETMASK,[SIGCHLD],NULL,8) = 0 753914 rt_sigprocmask(SIG_BLOCK,NULL,0x00007f80fddff3c0,8) = 0 (oldset=[]) Signed-off-by: Ilya Leoshkevich Message-ID: <20241022102726.18520-1-iii@linux.ibm.com> Reviewed-by: Richard Henderson Signed-off-by: Richard Henderson --- linux-user/strace.c | 88 ++++++++++++++++++++++++++++++++++++------ linux-user/strace.list | 3 +- 2 files changed, 78 insertions(+), 13 deletions(-) diff --git a/linux-user/strace.c b/linux-user/strace.c index c3eb3a2706..b70eadc19e 100644 --- a/linux-user/strace.c +++ b/linux-user/strace.c @@ -160,20 +160,21 @@ static const char * const target_signal_name[] = { #undef MAKE_SIG_ENTRY }; +static void +print_signal_1(abi_ulong arg) +{ + if (arg < ARRAY_SIZE(target_signal_name)) { + qemu_log("%s", target_signal_name[arg]); + } else { + qemu_log(TARGET_ABI_FMT_lu, arg); + } +} + static void print_signal(abi_ulong arg, int last) { - const char *signal_name = NULL; - - if (arg < ARRAY_SIZE(target_signal_name)) { - signal_name = target_signal_name[arg]; - } - - if (signal_name == NULL) { - print_raw_param("%ld", arg, last); - return; - } - qemu_log("%s%s", signal_name, get_comma(last)); + print_signal_1(arg); + qemu_log("%s", get_comma(last)); } static void print_si_code(int arg) @@ -718,6 +719,51 @@ print_ipc(CPUArchState *cpu_env, const struct syscallname *name, } #endif +#ifdef TARGET_NR_rt_sigprocmask +static void print_target_sigset_t_1(target_sigset_t *set, int last) +{ + bool first = true; + int i, sig = 1; + + qemu_log("["); + for (i = 0; i < TARGET_NSIG_WORDS; i++) { + abi_ulong bits = 0; + int j; + + __get_user(bits, &set->sig[i]); + for (j = 0; j < sizeof(bits) * 8; j++) { + if (bits & ((abi_ulong)1 << j)) { + if (first) { + first = false; + } else { + qemu_log(" "); + } + print_signal_1(sig); + } + sig++; + } + } + qemu_log("]%s", get_comma(last)); +} + +static void print_target_sigset_t(abi_ulong addr, abi_ulong size, int last) +{ + if (addr && size == sizeof(target_sigset_t)) { + target_sigset_t *set; + + set = lock_user(VERIFY_READ, addr, sizeof(target_sigset_t), 1); + if (set) { + print_target_sigset_t_1(set, last); + unlock_user(set, addr, 0); + } else { + print_pointer(addr, last); + } + } else { + print_pointer(addr, last); + } +} +#endif + /* * Variants for the return value output function */ @@ -3312,11 +3358,29 @@ print_rt_sigprocmask(CPUArchState *cpu_env, const struct syscallname *name, case TARGET_SIG_SETMASK: how = "SIG_SETMASK"; break; } qemu_log("%s,", how); - print_pointer(arg1, 0); + print_target_sigset_t(arg1, arg3, 0); print_pointer(arg2, 0); print_raw_param("%u", arg3, 1); print_syscall_epilogue(name); } + +static void +print_rt_sigprocmask_ret(CPUArchState *cpu_env, const struct syscallname *name, + abi_long ret, abi_long arg0, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, + abi_long arg5) +{ + if (!print_syscall_err(ret)) { + qemu_log(TARGET_ABI_FMT_ld, ret); + if (arg2) { + qemu_log(" (oldset="); + print_target_sigset_t(arg2, arg3, 1); + qemu_log(")"); + } + } + + qemu_log("\n"); +} #endif #ifdef TARGET_NR_rt_sigqueueinfo diff --git a/linux-user/strace.list b/linux-user/strace.list index 0d69fb3150..fdf94ef32a 100644 --- a/linux-user/strace.list +++ b/linux-user/strace.list @@ -1189,7 +1189,8 @@ { TARGET_NR_rt_sigpending, "rt_sigpending" , NULL, NULL, NULL }, #endif #ifdef TARGET_NR_rt_sigprocmask -{ TARGET_NR_rt_sigprocmask, 
"rt_sigprocmask" , NULL, print_rt_sigprocmask, NULL }, +{ TARGET_NR_rt_sigprocmask, "rt_sigprocmask" , NULL, print_rt_sigprocmask, + print_rt_sigprocmask_ret }, #endif #ifdef TARGET_NR_rt_sigqueueinfo { TARGET_NR_rt_sigqueueinfo, "rt_sigqueueinfo" , NULL, print_rt_sigqueueinfo, NULL }, From c12df59de99c7e7e79586cfe1ca4a8e50ff04cbc Mon Sep 17 00:00:00 2001 From: Yao Zi Date: Tue, 22 Oct 2024 12:29:30 +0000 Subject: [PATCH 23/24] linux-user: Fix build failure caused by missing __u64 on musl Commit 9651cead2f ("linux-user: add openat2 support in linux-user") ships a definition of struct open_how_ver0 while assuming type __u64 is available in code, which is not the case when building QEMU on musl. Let's replaces __u64 with uint64_t. Fixes: 9651cead2f ("linux-user: add openat2 support in linux-user") Signed-off-by: Yao Zi Reviewed-by: Laurent Vivier Message-ID: <20241022122929.17465-2-ziyao@disroot.org> Reviewed-by: Richard Henderson Signed-off-by: Richard Henderson --- linux-user/syscall_defs.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h index 0ade83745e..0e08dfae3e 100644 --- a/linux-user/syscall_defs.h +++ b/linux-user/syscall_defs.h @@ -2750,9 +2750,9 @@ struct target_sched_param { /* from kernel's include/uapi/linux/openat2.h */ struct open_how_ver0 { - __u64 flags; - __u64 mode; - __u64 resolve; + uint64_t flags; + uint64_t mode; + uint64_t resolve; }; struct target_open_how_ver0 { abi_ullong flags; From 310df7a9fe400f32cde8a7edf80daad12cd9cf02 Mon Sep 17 00:00:00 2001 From: Yao Zi Date: Tue, 22 Oct 2024 16:01:37 +0000 Subject: [PATCH 24/24] linux-user/riscv: Fix definition of RISCV_HWPROBE_EXT_ZVFHMIN Current definition yields a negative 32bits value, messing up hwprobe result when Zvfhmin extension presents. Replace it by using a 1ULL bit shift value as done in kernel upstream. Link: https://github.com/torvalds/linux/commit/5ea6764d9095e234b024054f75ebbccc4f0eb146 Fixes: a3432cf227 ("linux-user/riscv: Sync hwprobe keys with Linux") Cc: qemu-stable@nongnu.org Signed-off-by: Yao Zi Message-ID: <20241022160136.21714-2-ziyao@disroot.org> Reviewed-by: Richard Henderson Signed-off-by: Richard Henderson --- linux-user/syscall.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/linux-user/syscall.c b/linux-user/syscall.c index dd2ec0712b..587954cf47 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -8945,7 +8945,7 @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count) #define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28) #define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29) #define RISCV_HWPROBE_EXT_ZVFH (1 << 30) -#define RISCV_HWPROBE_EXT_ZVFHMIN (1 << 31) +#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31) #define RISCV_HWPROBE_EXT_ZFA (1ULL << 32) #define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33) #define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)