diff --git a/docs/about/emulation.rst b/docs/about/emulation.rst
index 3bc3579434..a72591ee4d 100644
--- a/docs/about/emulation.rst
+++ b/docs/about/emulation.rst
@@ -171,7 +171,7 @@ for that architecture.
     - Unified Hosting Interface (MD01069)
   * - RISC-V
     - System and User-mode
-    - https://github.com/riscv/riscv-semihosting-spec/blob/main/riscv-semihosting-spec.adoc
+    - https://github.com/riscv-non-isa/riscv-semihosting/blob/main/riscv-semihosting.adoc
   * - Xtensa
     - System
     - Tensilica ISS SIMCALL
diff --git a/hw/riscv/riscv-iommu-bits.h b/hw/riscv/riscv-iommu-bits.h
index b7cb1bc736..1017d73fc6 100644
--- a/hw/riscv/riscv-iommu-bits.h
+++ b/hw/riscv/riscv-iommu-bits.h
@@ -415,12 +415,16 @@ enum riscv_iommu_fq_causes {
 #define RISCV_IOMMU_DC_MSIPTP_MODE_OFF 0
 #define RISCV_IOMMU_DC_MSIPTP_MODE_FLAT 1
 
+/* 2.2 Process Directory Table */
+#define RISCV_IOMMU_PDTE_VALID BIT_ULL(0)
+#define RISCV_IOMMU_PDTE_PPN RISCV_IOMMU_PPN_FIELD
+
 /* Translation attributes fields */
 #define RISCV_IOMMU_PC_TA_V BIT_ULL(0)
 #define RISCV_IOMMU_PC_TA_RESERVED GENMASK_ULL(63, 32)
 
 /* First stage context fields */
-#define RISCV_IOMMU_PC_FSC_PPN GENMASK_ULL(43, 0)
+#define RISCV_IOMMU_PC_FSC_PPN RISCV_IOMMU_ATP_PPN_FIELD
 #define RISCV_IOMMU_PC_FSC_RESERVED GENMASK_ULL(59, 44)
 
 enum riscv_iommu_fq_ttypes {
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
index d46beb2d64..76e0fcd873 100644
--- a/hw/riscv/riscv-iommu.c
+++ b/hw/riscv/riscv-iommu.c
@@ -1042,10 +1042,10 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
             return RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT;
         }
         le64_to_cpus(&de);
-        if (!(de & RISCV_IOMMU_PC_TA_V)) {
+        if (!(de & RISCV_IOMMU_PDTE_VALID)) {
             return RISCV_IOMMU_FQ_CAUSE_PDT_INVALID;
         }
-        addr = PPN_PHYS(get_field(de, RISCV_IOMMU_PC_FSC_PPN));
+        addr = PPN_PHYS(get_field(de, RISCV_IOMMU_PDTE_PPN));
     }
 
     riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index b32de763f7..8bfe4912e1 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -9119,35 +9119,38 @@ static void risc_hwprobe_fill_pairs(CPURISCVState *env,
     }
 }
 
-static int cpu_set_valid(abi_long arg3, abi_long arg4)
+/*
+ * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
+ * If the cpumask_t has no bits set: -EINVAL.
+ * Otherwise the cpumask_t contains some bit set: 0.
+ * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
+ * nor bound the search by cpumask_size().
+ */
+static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
 {
-    int ret, i, tmp;
-    size_t host_mask_size, target_mask_size;
-    unsigned long *host_mask;
+    unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
+    int ret = -TARGET_EFAULT;
 
-    /*
-     * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
-     * arg3 contains the cpu count.
-     */
-    tmp = (8 * sizeof(abi_ulong));
-    target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
-    host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
-                     ~(sizeof(*host_mask) - 1);
-
-    host_mask = alloca(host_mask_size);
-
-    ret = target_to_host_cpu_mask(host_mask, host_mask_size,
-                                  arg4, target_mask_size);
-    if (ret != 0) {
-        return ret;
-    }
-
-    for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
-        if (host_mask[i] != 0) {
-            return 0;
+    if (p) {
+        ret = -TARGET_EINVAL;
+        /*
+         * Since we only care about the empty/non-empty state of the cpumask_t
+         * not the individual bits, we do not need to repartition the bits
+         * from target abi_ulong to host unsigned long.
+         *
+         * Note that the kernel does not round up cpusetsize to a multiple of
+         * sizeof(abi_ulong). After bounding cpusetsize by cpumask_size(),
+         * it copies exactly cpusetsize bytes into a zeroed buffer.
+         */
+        for (abi_ulong i = 0; i < cpusetsize; ++i) {
+            if (p[i]) {
+                ret = 0;
+                break;
+            }
         }
+        unlock_user(p, target_cpus, 0);
     }
-    return -TARGET_EINVAL;
+    return ret;
 }
 
 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
@@ -9164,7 +9167,7 @@ static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
 
     /* check cpu_set */
     if (arg3 != 0) {
-        ret = cpu_set_valid(arg3, arg4);
+        ret = nonempty_cpu_set(arg3, arg4);
         if (ret != 0) {
             return ret;
         }
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 7de19b4183..51e49e03de 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -765,6 +765,18 @@ static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
 }
 #endif
 
+static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg,
+                                              target_long priv_ver,
+                                              uint32_t misa_ext)
+{
+    /* In priv spec version 1.12 or newer, C always implies Zca */
+    if (priv_ver >= PRIV_VERSION_1_12_0) {
+        return cfg->ext_zca;
+    } else {
+        return misa_ext & RVC;
+    }
+}
+
 /*
  * Encode LMUL to lmul as follows:
  *     LMUL    vlmul    lmul
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 49566d3c08..7948188356 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -192,6 +192,11 @@ static RISCVException cfi_ss(CPURISCVState *env, int csrno)
         return RISCV_EXCP_ILLEGAL_INST;
     }
 
+    /* If ext implemented, M-mode always has access to SSP CSR */
+    if (env->priv == PRV_M) {
+        return RISCV_EXCP_NONE;
+    }
+
     /* if bcfi not active for current env, access to csr is illegal */
     if (!cpu_get_bcfien(env)) {
 #if !defined(CONFIG_USER_ONLY)
@@ -4297,7 +4302,7 @@ static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno,
     }
 
     /* Update sctrstatus.WRPTR with a legal value */
-    depth = 16 << depth;
+    depth = 16ULL << depth;
     env->sctrstatus =
         env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
 }
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
index b55f56a5eb..b9c7160468 100644
--- a/target/riscv/insn_trans/trans_rvi.c.inc
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
@@ -151,7 +151,9 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
         tcg_gen_ext32s_tl(target_pc, target_pc);
     }
 
-    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
+    if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+                                    ctx->priv_ver,
+                                    ctx->misa_ext)) {
         TCGv t0 = tcg_temp_new();
 
         misaligned = gen_new_label();
@@ -300,7 +302,9 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
 
     gen_set_label(l); /* branch taken */
 
-    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
+    if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+                                    ctx->priv_ver,
+                                    ctx->misa_ext) &&
         (a->imm & 0x3)) {
         /* misaligned */
         TCGv target_pc = tcg_temp_new();
diff --git a/target/riscv/insn_trans/trans_rvzicfiss.c.inc b/target/riscv/insn_trans/trans_rvzicfiss.c.inc
index e3ebc4977c..b0096adcd0 100644
--- a/target/riscv/insn_trans/trans_rvzicfiss.c.inc
+++ b/target/riscv/insn_trans/trans_rvzicfiss.c.inc
@@ -15,6 +15,13 @@
  * You should have received a copy of the GNU General Public License along with
  * this program. If not, see <https://www.gnu.org/licenses/>.
  */
+
+#define REQUIRE_ZICFISS(ctx) do { \
+    if (!ctx->cfg_ptr->ext_zicfiss) { \
+        return false; \
+    } \
+} while (0)
+
 static bool trans_sspopchk(DisasContext *ctx, arg_sspopchk *a)
 {
     if (!ctx->bcfi_enabled) {
@@ -77,6 +84,11 @@ static bool trans_ssrdp(DisasContext *ctx, arg_ssrdp *a)
 static bool trans_ssamoswap_w(DisasContext *ctx, arg_amoswap_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
+    REQUIRE_ZICFISS(ctx);
+    if (ctx->priv == PRV_M) {
+        generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
+    }
+
     if (!ctx->bcfi_enabled) {
         return false;
     }
@@ -97,6 +109,11 @@ static bool trans_ssamoswap_d(DisasContext *ctx, arg_amoswap_w *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
+    REQUIRE_ZICFISS(ctx);
+    if (ctx->priv == PRV_M) {
+        generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
+    }
+
     if (!ctx->bcfi_enabled) {
         return false;
     }
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 0d4220ba93..72dc48e58d 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -279,7 +279,9 @@ target_ulong helper_sret(CPURISCVState *env)
     }
 
     target_ulong retpc = env->sepc;
-    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+    if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
+                                    env->priv_ver,
+                                    env->misa_ext) && (retpc & 0x3)) {
         riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
     }
 
@@ -357,7 +359,9 @@ static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc,
         riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
     }
 
-    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+    if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
+                                    env->priv_ver,
+                                    env->misa_ext) && (retpc & 0x3)) {
         riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
     }
 
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index eaa5d86eae..d6651f244f 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -606,7 +606,9 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
     TCGv succ_pc = dest_gpr(ctx, rd);
 
     /* check misaligned: */
-    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
+    if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+                                    ctx->priv_ver,
+                                    ctx->misa_ext)) {
         if ((imm & 0x3) != 0) {
             TCGv target_pc = tcg_temp_new();
             gen_pc_plus_diff(target_pc, ctx, imm);
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
index f7423df226..1526de96f5 100644
--- a/target/riscv/vcrypto_helper.c
+++ b/target/riscv/vcrypto_helper.c
@@ -222,7 +222,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
     uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
     uint32_t vta = vext_vta(desc); \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
         AESState round_key; \
@@ -248,7 +248,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
     uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
     uint32_t vta = vext_vta(desc); \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
         AESState round_key; \
@@ -309,7 +309,7 @@ void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
     uint32_t total_elems = vext_get_total_elems(env, desc, 4);
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, vl);
 
     uimm &= 0b1111;
     if (uimm > 10 || uimm == 0) {
@@ -357,7 +357,7 @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
     uint32_t total_elems = vext_get_total_elems(env, desc, 4);
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, vl);
 
     uimm &= 0b1111;
     if (uimm > 14 || uimm < 2) {
@@ -465,7 +465,7 @@ void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
     uint32_t total_elems;
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         if (sew == MO_32) {
@@ -582,7 +582,7 @@ void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
     uint32_t total_elems;
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
@@ -602,7 +602,7 @@ void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
     uint32_t total_elems;
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
@@ -622,7 +622,7 @@ void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
     uint32_t total_elems;
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
@@ -642,7 +642,7 @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
     uint32_t total_elems;
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
@@ -676,7 +676,7 @@ void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
     uint32_t *vs1 = vs1_vptr;
     uint32_t *vs2 = vs2_vptr;
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (int i = env->vstart / 8; i < env->vl / 8; i++) {
         uint32_t w[24];
@@ -777,7 +777,7 @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
     uint32_t *vs2 = vs2_vptr;
     uint32_t v1[8], v2[8], v3[8];
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (int i = env->vstart / 8; i < env->vl / 8; i++) {
         for (int k = 0; k < 8; k++) {
@@ -802,7 +802,7 @@ void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
     uint32_t vta = vext_vta(desc);
     uint32_t total_elems = vext_get_total_elems(env, desc, 4);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         uint64_t Y[2] = {vd[i * 2 + 0], vd[i * 2 + 1]};
@@ -841,7 +841,7 @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
     uint32_t vta = vext_vta(desc);
     uint32_t total_elems = vext_get_total_elems(env, desc, 4);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         uint64_t Y[2] = {brev8(vd[i * 2 + 0]), brev8(vd[i * 2 + 1])};
@@ -879,7 +879,7 @@ void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env,
     uint32_t esz = sizeof(uint32_t);
     uint32_t total_elems = vext_get_total_elems(env, desc, esz);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = group_start; i < group_end; ++i) {
         uint32_t vstart = i * egs;
@@ -937,7 +937,7 @@ void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
     uint32_t esz = sizeof(uint32_t);
     uint32_t total_elems = vext_get_total_elems(env, desc, esz);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = group_start; i < group_end; ++i) {
         uint32_t vstart = i * egs;
@@ -973,7 +973,7 @@ void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
     uint32_t esz = sizeof(uint32_t);
     uint32_t total_elems = vext_get_total_elems(env, desc, esz);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = group_start; i < group_end; ++i) {
         uint32_t vstart = i * egs;
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 7773df6a7c..67b3bafebb 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -260,7 +260,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base, target_ulong stride,
     uint32_t esz = 1 << log2_esz;
     uint32_t vma = vext_vma(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (i = env->vstart; i < env->vl; env->vstart = ++i) {
         k = 0;
@@ -383,10 +383,7 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
     uint32_t msize = nf * esz;
     int mmu_index = riscv_env_mmu_index(env, false);
 
-    if (env->vstart >= evl) {
-        env->vstart = 0;
-        return;
-    }
+    VSTART_CHECK_EARLY_EXIT(env, evl);
 
 #if defined(CONFIG_USER_ONLY)
     /*
@@ -544,7 +541,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
     uint32_t esz = 1 << log2_esz;
     uint32_t vma = vext_vma(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     /* load bytes from guest memory */
     for (i = env->vstart; i < env->vl; env->vstart = ++i) {
@@ -633,47 +630,69 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
     uint32_t esz = 1 << log2_esz;
     uint32_t msize = nf * esz;
     uint32_t vma = vext_vma(desc);
-    target_ulong addr, offset, remain, page_split, elems;
+    target_ulong addr, addr_probe, addr_i, offset, remain, page_split, elems;
     int mmu_index = riscv_env_mmu_index(env, false);
+    int flags;
+    void *host;
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
-    /* probe every access */
-    for (i = env->vstart; i < env->vl; i++) {
-        if (!vm && !vext_elem_mask(v0, i)) {
-            continue;
-        }
-        addr = adjust_addr(env, base + i * (nf << log2_esz));
-        if (i == 0) {
-            /* Allow fault on first element. */
-            probe_pages(env, addr, nf << log2_esz, ra, MMU_DATA_LOAD);
-        } else {
-            remain = nf << log2_esz;
-            while (remain > 0) {
-                void *host;
-                int flags;
+    addr = base + ((env->vstart * nf) << log2_esz);
+    page_split = -(addr | TARGET_PAGE_MASK);
+    /* Get number of elements */
+    elems = page_split / msize;
+    if (unlikely(env->vstart + elems >= env->vl)) {
+        elems = env->vl - env->vstart;
+    }
 
-                offset = -(addr | TARGET_PAGE_MASK);
+    /* Check page permission/pmp/watchpoint/etc. */
+    flags = probe_access_flags(env, adjust_addr(env, addr), elems * msize,
+                               MMU_DATA_LOAD, mmu_index, true, &host, ra);
 
-                /* Probe nonfault on subsequent elements. */
-                flags = probe_access_flags(env, addr, offset, MMU_DATA_LOAD,
-                                           mmu_index, true, &host, 0);
+    /* If we are crossing a page check also the second page. */
+    if (env->vl > elems) {
+        addr_probe = addr + (elems << log2_esz);
+        flags |= probe_access_flags(env, adjust_addr(env, addr_probe),
+                                    elems * msize, MMU_DATA_LOAD, mmu_index,
+                                    true, &host, ra);
+    }
 
-                /*
-                 * Stop if invalid (unmapped) or mmio (transaction may fail).
-                 * Do not stop if watchpoint, as the spec says that
-                 * first-fault should continue to access the same
-                 * elements regardless of any watchpoint.
-                 */
-                if (flags & ~TLB_WATCHPOINT) {
-                    vl = i;
-                    goto ProbeSuccess;
+    if (flags & ~TLB_WATCHPOINT) {
+        /* probe every access */
+        for (i = env->vstart; i < env->vl; i++) {
+            if (!vm && !vext_elem_mask(v0, i)) {
+                continue;
+            }
+            addr_i = adjust_addr(env, base + i * (nf << log2_esz));
+            if (i == 0) {
+                /* Allow fault on first element. */
+                probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD);
+            } else {
+                remain = nf << log2_esz;
+                while (remain > 0) {
+                    offset = -(addr_i | TARGET_PAGE_MASK);
+
+                    /* Probe nonfault on subsequent elements. */
+                    flags = probe_access_flags(env, addr_i, offset,
+                                               MMU_DATA_LOAD, mmu_index, true,
+                                               &host, 0);
+
+                    /*
+                     * Stop if invalid (unmapped) or mmio (transaction may
+                     * fail). Do not stop if watchpoint, as the spec says that
+                     * first-fault should continue to access the same
+                     * elements regardless of any watchpoint.
+                     */
+                    if (flags & ~TLB_WATCHPOINT) {
+                        vl = i;
+                        goto ProbeSuccess;
+                    }
+                    if (remain <= offset) {
+                        break;
+                    }
+                    remain -= offset;
+                    addr_i = adjust_addr(env, addr_i + offset);
                 }
-                if (remain <= offset) {
-                    break;
-                }
-                remain -= offset;
-                addr = adjust_addr(env, addr + offset);
             }
         }
     }
@@ -685,15 +704,6 @@ ProbeSuccess:
 
     if (env->vstart < env->vl) {
         if (vm) {
-            /* Calculate the page range of first page */
-            addr = base + ((env->vstart * nf) << log2_esz);
-            page_split = -(addr | TARGET_PAGE_MASK);
-            /* Get number of elements */
-            elems = page_split / msize;
-            if (unlikely(env->vstart + elems >= env->vl)) {
-                elems = env->vl - env->vstart;
-            }
-
             /* Load/store elements in the first page */
             if (likely(elems)) {
                 vext_page_ldst_us(env, vd, addr, elems, nf, max_elems,
@@ -1103,7 +1113,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1137,7 +1147,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1174,7 +1184,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vta_all_1s = vext_vta_all_1s(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1214,7 +1224,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
     uint32_t vta_all_1s = vext_vta_all_1s(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1312,7 +1322,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -1361,7 +1371,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -1425,7 +1435,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1492,7 +1502,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -2041,7 +2051,7 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -2067,7 +2077,7 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         *((ETYPE *)vd + H(i)) = (ETYPE)s1; \
@@ -2092,7 +2102,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \
@@ -2118,7 +2128,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -2165,8 +2175,6 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
              uint32_t vl, uint32_t vm, int vxrm,
              opivv2_rm_fn *fn, uint32_t vma, uint32_t esz)
 {
-    VSTART_CHECK_EARLY_EXIT(env);
-
     for (uint32_t i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             /* set masked-off elements to 1s */
@@ -2190,6 +2198,8 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
     uint32_t vta = vext_vta(desc);
     uint32_t vma = vext_vma(desc);
 
+    VSTART_CHECK_EARLY_EXIT(env, vl);
+
     switch (env->vxrm) {
     case 0: /* rnu */
         vext_vv_rm_1(vd, v0, vs1, vs2,
@@ -2292,8 +2302,6 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
              uint32_t vl, uint32_t vm, int vxrm,
              opivx2_rm_fn *fn, uint32_t vma, uint32_t esz)
 {
-    VSTART_CHECK_EARLY_EXIT(env);
-
     for (uint32_t i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             /* set masked-off elements to 1s */
@@ -2317,6 +2325,8 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
     uint32_t vta = vext_vta(desc);
     uint32_t vma = vext_vma(desc);
 
+    VSTART_CHECK_EARLY_EXIT(env, vl);
+
     switch (env->vxrm) {
     case 0: /* rnu */
         vext_vx_rm_1(vd, v0, s1, vs2,
@@ -3091,7 +3101,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -3136,7 +3146,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -3724,7 +3734,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     if (vl == 0) { \
         return; \
@@ -4247,7 +4257,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -4289,7 +4299,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -4484,7 +4494,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -4652,6 +4662,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t i; \
     TD s1 = *((TD *)vs1 + HD(0)); \
 \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
     for (i = env->vstart; i < vl; i++) { \
         TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4740,6 +4752,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t i; \
     TD s1 = *((TD *)vs1 + HD(0)); \
 \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
     for (i = env->vstart; i < vl; i++) { \
         TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4814,7 +4828,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t i; \
     int a, b; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         a = vext_elem_mask(vs1, i); \
@@ -4904,6 +4918,8 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
     int i;
     bool first_mask_bit = false;
 
+    VSTART_CHECK_EARLY_EXIT(env, vl);
+
     for (i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             /* set masked-off elements to 1s */
@@ -4976,6 +4992,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
     uint32_t sum = 0; \
     int i; \
 \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \
@@ -5009,7 +5027,7 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
     uint32_t vma = vext_vma(desc); \
     int i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5046,7 +5064,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     target_ulong offset = s1, i_min, i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     i_min = MAX(env->vstart, offset); \
     for (i = i_min; i < vl; i++) { \
@@ -5081,7 +5099,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     target_ulong i_max, i_min, i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     i_min = MIN(s1 < vlmax ? vlmax - s1 : 0, vl); \
     i_max = MAX(i_min, env->vstart); \
@@ -5125,7 +5143,7 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5176,7 +5194,7 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5253,7 +5271,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint64_t index; \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5298,7 +5316,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint64_t index = s1; \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5334,6 +5352,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t num = 0, i; \
 \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
     for (i = env->vstart; i < vl; i++) { \
         if (!vext_elem_mask(vs1, i)) { \
             continue; \
@@ -5394,7 +5414,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
index 05b2d01e58..b490b1d398 100644
--- a/target/riscv/vector_internals.c
+++ b/target/riscv/vector_internals.c
@@ -66,7 +66,7 @@ void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
     uint32_t vma = vext_vma(desc);
     uint32_t i;
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, vl);
 
     for (i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
@@ -92,7 +92,7 @@ void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
     uint32_t vma = vext_vma(desc);
     uint32_t i;
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, vl);
 
     for (i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
index a11cc8366d..8eee7e5c31 100644
--- a/target/riscv/vector_internals.h
+++ b/target/riscv/vector_internals.h
@@ -25,11 +25,11 @@
 #include "tcg/tcg-gvec-desc.h"
 #include "internals.h"
 
-#define VSTART_CHECK_EARLY_EXIT(env) do { \
-    if (env->vstart >= env->vl) { \
-        env->vstart = 0; \
-        return; \
-    } \
+#define VSTART_CHECK_EARLY_EXIT(env, vl) do { \
+    if (env->vstart >= vl) { \
+        env->vstart = 0; \
+        return; \
+    } \
 } while (0)
 
 static inline uint32_t vext_nf(uint32_t desc)
@@ -159,7 +159,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \