Fourth RISC-V PR for 10.0
 * Fix broken emulation link
 * Optimize the memory probing for vector fault-only-first loads
 * Fix access permission checks for CSR_SSP
 * Fix a bug in `ssamoswap` behavior in M-mode
 * Fix IOMMU process directory table walk
 * Fix OVERFLOW_BEFORE_WIDEN in rmw_sctrdepth()
 * Enhance VSTART and VL checks for vector instructions
 * Fix handling of cpu mask in riscv_hwprobe syscall
 * Add check for 16-bit aligned PC for different priv versions

Merge tag 'pull-riscv-to-apply-20250319' of https://github.com/alistair23/qemu into staging

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmfaehkACgkQr3yVEwxT
# gBOagRAAsSC/0Fof5EpXc14zmaw7CtoqSCTHVYXYxIEFjRu2Nj89z1HSlB00ptjZ
# g/x5gxJRW8pGarYL6EAKKhk7BpswZ87DbsM/3kQwGraWN/or4SLj12E1V6+UhDi6
# e8qV3oHT8/dMoi/cUc9sM2FNah6gWckxy4QwLzX41jI6wkHe72IC52u9OP6b2ny5
# iky1ThDeQiZmGfj13se9cK1XFNPZgSnJFYD6k9OQTmaMzSShcM64ewv95RfiJbjA
# s8kDmXYrrSQbjWyrjf2JIWhm6dFagFW4u/ho5481gZ1ntw1DnqlHXKCEWSPhIBOm
# WzvfK0dEkmgtOW0DJ7aBdbDJWNRcYCW3xiuUlHrQ7QDRmwreTrF1mo9sD9KifwIo
# NPzScf/O+GPuqDKcV6SfT6rV/Jpr8yaK9WaB/KeDsmhrmsDBn4GCrxu6Z/bLadCy
# AnLItH8BCssSIA989VzwN0V3AsJK8cDQiRzM3/Mq8zp2yNvaBbuGLFxvAzV4sFZY
# PIc7jhWek8Dw1SxIwuXvh/04iNkQNbnowzCQo7q7Cokf4vQtcTSuLblq3IgAJyDn
# eCNXY0SgHNvA6DCxF+ZYAjpgo6ZFusGq1Yq9KzbaH+a3vYOOHhFix4wrFyyApu7+
# 1nBgETtewKfHqo2+GtYr/g1O+WYruf1TC5bCdiWpvvPDR/a7zJM=
# =SqiB
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 19 Mar 2025 04:02:33 EDT
# gpg:                using RSA key 6AE902B6A7CA877D6D659296AF7C95130C538013
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6AE9 02B6 A7CA 877D 6D65 9296 AF7C 9513 0C53 8013

* tag 'pull-riscv-to-apply-20250319' of https://github.com/alistair23/qemu:
  target/riscv: Add check for 16-bit aligned PC for different priv versions.
  linux-user/riscv: Fix handling of cpu mask in riscv_hwprobe syscall
  target/riscv: fix handling of nop for vstart >= vl in some vector instruction
  target/riscv: refactor VSTART_CHECK_EARLY_EXIT() to accept vl as a parameter
  target/riscv/csr.c: fix OVERFLOW_BEFORE_WIDEN in rmw_sctrdepth()
  hw/riscv/riscv-iommu: Fix process directory table walk
  target/riscv: fixes a bug against `ssamoswap` behavior in M-mode
  target/riscv: fix access permission checks for CSR_SSP
  optimize the memory probing for vector fault-only-first loads.
  docs/about/emulation: Fix broken link

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 17e9c90944
@@ -171,7 +171,7 @@ for that architecture.
     - Unified Hosting Interface (MD01069)
   * - RISC-V
     - System and User-mode
-    - https://github.com/riscv/riscv-semihosting-spec/blob/main/riscv-semihosting-spec.adoc
+    - https://github.com/riscv-non-isa/riscv-semihosting/blob/main/riscv-semihosting.adoc
   * - Xtensa
     - System
     - Tensilica ISS SIMCALL

@@ -415,12 +415,16 @@ enum riscv_iommu_fq_causes {
 #define RISCV_IOMMU_DC_MSIPTP_MODE_OFF  0
 #define RISCV_IOMMU_DC_MSIPTP_MODE_FLAT 1
 
+/* 2.2 Process Directory Table */
+#define RISCV_IOMMU_PDTE_VALID      BIT_ULL(0)
+#define RISCV_IOMMU_PDTE_PPN        RISCV_IOMMU_PPN_FIELD
+
 /* Translation attributes fields */
 #define RISCV_IOMMU_PC_TA_V         BIT_ULL(0)
 #define RISCV_IOMMU_PC_TA_RESERVED  GENMASK_ULL(63, 32)
 
 /* First stage context fields */
-#define RISCV_IOMMU_PC_FSC_PPN      GENMASK_ULL(43, 0)
+#define RISCV_IOMMU_PC_FSC_PPN      RISCV_IOMMU_ATP_PPN_FIELD
 #define RISCV_IOMMU_PC_FSC_RESERVED GENMASK_ULL(59, 44)
 
 enum riscv_iommu_fq_ttypes {

@@ -1042,10 +1042,10 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
             return RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT;
         }
         le64_to_cpus(&de);
-        if (!(de & RISCV_IOMMU_PC_TA_V)) {
+        if (!(de & RISCV_IOMMU_PDTE_VALID)) {
             return RISCV_IOMMU_FQ_CAUSE_PDT_INVALID;
         }
-        addr = PPN_PHYS(get_field(de, RISCV_IOMMU_PC_FSC_PPN));
+        addr = PPN_PHYS(get_field(de, RISCV_IOMMU_PDTE_PPN));
     }
 
     riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);

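The fix matters because a non-leaf process-directory-table entry only defines a valid bit and a next-level PPN; the old code tested it with the leaf-entry PC_TA_V/PC_FSC_PPN masks. A standalone sketch of the corrected decode step, with assumed field positions (V at bit 0, PPN in bits 53:10, 4 KiB pages — the real widths come from RISCV_IOMMU_PPN_FIELD in the header):

#include <stdint.h>
#include <stdio.h>

/* Sketch only: assumed non-leaf PDT entry layout, mirroring the new
 * RISCV_IOMMU_PDTE_* masks that the walk now uses. */
#define PDTE_VALID      (1ULL << 0)
#define PDTE_PPN_SHIFT  10
#define PDTE_PPN_MASK   (((1ULL << 44) - 1) << PDTE_PPN_SHIFT)

int main(void)
{
    uint64_t de = (0x12345ULL << PDTE_PPN_SHIFT) | PDTE_VALID;

    if (!(de & PDTE_VALID)) {
        puts("PDT_INVALID");  /* would report RISCV_IOMMU_FQ_CAUSE_PDT_INVALID */
        return 1;
    }
    /* Next-level table address = PPN << 12 */
    uint64_t next = ((de & PDTE_PPN_MASK) >> PDTE_PPN_SHIFT) << 12;
    printf("next level at 0x%llx\n", (unsigned long long)next);
    return 0;
}
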
@@ -9119,37 +9119,40 @@ static void risc_hwprobe_fill_pairs(CPURISCVState *env,
     }
 }
 
-static int cpu_set_valid(abi_long arg3, abi_long arg4)
-{
-    int ret, i, tmp;
-    size_t host_mask_size, target_mask_size;
-    unsigned long *host_mask;
-
-    /*
-     * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
-     * arg3 contains the cpu count.
-     */
-    tmp = (8 * sizeof(abi_ulong));
-    target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
-    host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
-                     ~(sizeof(*host_mask) - 1);
-
-    host_mask = alloca(host_mask_size);
-
-    ret = target_to_host_cpu_mask(host_mask, host_mask_size,
-                                  arg4, target_mask_size);
-    if (ret != 0) {
-        return ret;
-    }
-
-    for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
-        if (host_mask[i] != 0) {
-            return 0;
-        }
-    }
-    return -TARGET_EINVAL;
-}
+/*
+ * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
+ * If the cpumask_t has no bits set: -EINVAL.
+ * Otherwise the cpumask_t contains some bit set: 0.
+ * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
+ * nor bound the search by cpumask_size().
+ */
+static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
+{
+    unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
+    int ret = -TARGET_EFAULT;
+
+    if (p) {
+        ret = -TARGET_EINVAL;
+        /*
+         * Since we only care about the empty/non-empty state of the cpumask_t
+         * not the individual bits, we do not need to repartition the bits
+         * from target abi_ulong to host unsigned long.
+         *
+         * Note that the kernel does not round up cpusetsize to a multiple of
+         * sizeof(abi_ulong). After bounding cpusetsize by cpumask_size(),
+         * it copies exactly cpusetsize bytes into a zeroed buffer.
+         */
+        for (abi_ulong i = 0; i < cpusetsize; ++i) {
+            if (p[i]) {
+                ret = 0;
+                break;
+            }
+        }
+        unlock_user(p, target_cpus, 0);
+    }
+    return ret;
+}
 
 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
                                  abi_long arg2, abi_long arg3,
                                  abi_long arg4, abi_long arg5)

@@ -9164,7 +9167,7 @@ static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
 
     /* check cpu_set */
     if (arg3 != 0) {
-        ret = cpu_set_valid(arg3, arg4);
+        ret = nonempty_cpu_set(arg3, arg4);
         if (ret != 0) {
             return ret;
         }

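The rewrite leans on the fact that emptiness of a bit mask is independent of word size and endianness: some bit is set iff some byte is nonzero, so the locked target buffer can be scanned bytewise with no repartitioning into host unsigned long words. A minimal standalone illustration (hypothetical helper name):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical helper: a mask has a bit set iff some byte is nonzero,
 * so no conversion between target and host word layout is needed. */
static bool mask_nonempty(const unsigned char *p, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        if (p[i]) {
            return true;
        }
    }
    return false;
}

int main(void)
{
    unsigned char cpus[16] = { 0 };
    cpus[9] = 0x20;                 /* e.g. cpu 77 set, in some word order */
    printf("%d\n", mask_nonempty(cpus, sizeof(cpus)));  /* prints 1 */
    return 0;
}
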
@@ -765,6 +765,18 @@ static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
 }
 #endif
 
+static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg,
+                                              target_long priv_ver,
+                                              uint32_t misa_ext)
+{
+    /* In priv spec version 1.12 or newer, C always implies Zca */
+    if (priv_ver >= PRIV_VERSION_1_12_0) {
+        return cfg->ext_zca;
+    } else {
+        return misa_ext & RVC;
+    }
+}
+
 /*
  * Encode LMUL to lmul as follows:
  *     LMUL vlmul lmul

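The helper exists because checking misa.C alone misclassifies Zca-only configurations: from priv spec 1.12 onward, C is defined in terms of Zca, so Zca is the authoritative bit for 16-bit (2-byte aligned) PCs. A standalone toy of the decision (simplified types, not QEMU's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { PRIV_1_11 = 0, PRIV_1_12 = 1 };
#define RVC (1u << 2)

/* Toy version of the new predicate: before 1.12 only misa.C decides,
 * from 1.12 on Zca alone is enough. */
static bool allow_16bit_insn(bool ext_zca, int priv_ver, uint32_t misa_ext)
{
    if (priv_ver >= PRIV_1_12) {
        return ext_zca;
    }
    return misa_ext & RVC;
}

int main(void)
{
    /* Zca-only core: the legal PC alignment differs by priv version. */
    printf("priv 1.11: %d\n", allow_16bit_insn(true, PRIV_1_11, 0)); /* 0 */
    printf("priv 1.12: %d\n", allow_16bit_insn(true, PRIV_1_12, 0)); /* 1 */
    return 0;
}
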
@@ -192,6 +192,11 @@ static RISCVException cfi_ss(CPURISCVState *env, int csrno)
         return RISCV_EXCP_ILLEGAL_INST;
     }
 
+    /* If ext implemented, M-mode always have access to SSP CSR */
+    if (env->priv == PRV_M) {
+        return RISCV_EXCP_NONE;
+    }
+
     /* if bcfi not active for current env, access to csr is illegal */
     if (!cpu_get_bcfien(env)) {
 #if !defined(CONFIG_USER_ONLY)

@@ -4297,7 +4302,7 @@ static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno,
     }
 
     /* Update sctrstatus.WRPTR with a legal value */
-    depth = 16 << depth;
+    depth = 16ULL << depth;
     env->sctrstatus =
         env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
 }

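The one-character fix addresses the classic OVERFLOW_BEFORE_WIDEN pattern: `16 << depth` is computed at 32-bit int width and only afterwards widened to the 64-bit destination, so high bits are lost before the widening. A standalone sketch with a deliberately large shift count (hypothetical value, chosen only to make the truncation visible):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t depth = 28;                      /* hypothetical large value    */
    uint64_t bad  = (uint32_t)(16u << depth); /* computed in 32 bits: 0      */
    uint64_t good = 16ULL << depth;           /* widened first: 0x100000000  */

    printf("bad=0x%llx good=0x%llx\n",
           (unsigned long long)bad, (unsigned long long)good);
    return 0;
}

(The real code shifts a signed int, which is undefined behavior on overflow; the sketch uses unsigned arithmetic so the truncated result is well defined and printable.)
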
@@ -151,7 +151,9 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
         tcg_gen_ext32s_tl(target_pc, target_pc);
     }
 
-    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
+    if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+                                    ctx->priv_ver,
+                                    ctx->misa_ext)) {
         TCGv t0 = tcg_temp_new();
 
         misaligned = gen_new_label();

@@ -300,7 +302,9 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
 
     gen_set_label(l); /* branch taken */
 
-    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
+    if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+                                    ctx->priv_ver,
+                                    ctx->misa_ext) &&
         (a->imm & 0x3)) {
         /* misaligned */
         TCGv target_pc = tcg_temp_new();

@@ -15,6 +15,13 @@
  * You should have received a copy of the GNU General Public License along with
  * this program. If not, see <http://www.gnu.org/licenses/>.
  */
+
+#define REQUIRE_ZICFISS(ctx) do {        \
+    if (!ctx->cfg_ptr->ext_zicfiss) {    \
+        return false;                    \
+    }                                    \
+} while (0)
 
 static bool trans_sspopchk(DisasContext *ctx, arg_sspopchk *a)
 {
     if (!ctx->bcfi_enabled) {

@@ -77,6 +84,11 @@ static bool trans_ssrdp(DisasContext *ctx, arg_ssrdp *a)
 static bool trans_ssamoswap_w(DisasContext *ctx, arg_amoswap_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
+    REQUIRE_ZICFISS(ctx);
+    if (ctx->priv == PRV_M) {
+        generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
+    }
+
     if (!ctx->bcfi_enabled) {
         return false;
     }

@@ -97,6 +109,11 @@ static bool trans_ssamoswap_d(DisasContext *ctx, arg_amoswap_w *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
+    REQUIRE_ZICFISS(ctx);
+    if (ctx->priv == PRV_M) {
+        generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
+    }
+
     if (!ctx->bcfi_enabled) {
         return false;
     }

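A condensed model of the decode order these hunks establish (invented enum and flag names, just to make the priority visible): without Zicfiss the instruction decodes as illegal; with Zicfiss, M-mode gets a store/AMO access fault rather than an illegal-instruction exception; otherwise backward-CFI gating applies.

#include <stdbool.h>
#include <stdio.h>

enum prv { PRV_U, PRV_S, PRV_M };
enum result { TRANS_OK, EXCP_ILLEGAL, EXCP_STORE_AMO_ACCESS };

/* Sketch of the intended checks, in the order the hunks apply them. */
static enum result ssamoswap_check(bool ext_zicfiss, bool bcfi_enabled,
                                   enum prv priv)
{
    if (!ext_zicfiss) {
        return EXCP_ILLEGAL;            /* REQUIRE_ZICFISS fails */
    }
    if (priv == PRV_M) {
        return EXCP_STORE_AMO_ACCESS;   /* the new M-mode behavior */
    }
    if (!bcfi_enabled) {
        return EXCP_ILLEGAL;            /* decodes as reserved */
    }
    return TRANS_OK;
}

int main(void)
{
    printf("%d\n", ssamoswap_check(true, true, PRV_M));  /* access fault */
    printf("%d\n", ssamoswap_check(true, true, PRV_S));  /* ok */
    return 0;
}
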
@@ -279,7 +279,9 @@ target_ulong helper_sret(CPURISCVState *env)
     }
 
     target_ulong retpc = env->sepc;
-    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+    if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
+                                    env->priv_ver,
+                                    env->misa_ext) && (retpc & 0x3)) {
         riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
     }
 
@@ -357,7 +359,9 @@ static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc,
         riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
     }
 
-    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+    if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
+                                    env->priv_ver,
+                                    env->misa_ext) && (retpc & 0x3)) {
         riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
     }
 

@@ -606,7 +606,9 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
     TCGv succ_pc = dest_gpr(ctx, rd);
 
     /* check misaligned: */
-    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
+    if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+                                    ctx->priv_ver,
+                                    ctx->misa_ext)) {
         if ((imm & 0x3) != 0) {
             TCGv target_pc = tcg_temp_new();
             gen_pc_plus_diff(target_pc, ctx, imm);

@@ -222,7 +222,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
     uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
     uint32_t vta = vext_vta(desc); \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
         AESState round_key; \

@@ -248,7 +248,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
     uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
     uint32_t vta = vext_vta(desc); \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
         AESState round_key; \

@@ -309,7 +309,7 @@ void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
     uint32_t total_elems = vext_get_total_elems(env, desc, 4);
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, vl);
 
     uimm &= 0b1111;
     if (uimm > 10 || uimm == 0) {

@@ -357,7 +357,7 @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
     uint32_t total_elems = vext_get_total_elems(env, desc, 4);
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, vl);
 
     uimm &= 0b1111;
     if (uimm > 14 || uimm < 2) {

@@ -465,7 +465,7 @@ void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
     uint32_t total_elems;
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         if (sew == MO_32) {

@@ -582,7 +582,7 @@ void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
     uint32_t total_elems;
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,

@@ -602,7 +602,7 @@ void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
     uint32_t total_elems;
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,

@@ -622,7 +622,7 @@ void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
     uint32_t total_elems;
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,

@@ -642,7 +642,7 @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
     uint32_t total_elems;
     uint32_t vta = vext_vta(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,

@@ -676,7 +676,7 @@ void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
     uint32_t *vs1 = vs1_vptr;
     uint32_t *vs2 = vs2_vptr;
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (int i = env->vstart / 8; i < env->vl / 8; i++) {
         uint32_t w[24];

@@ -777,7 +777,7 @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
     uint32_t *vs2 = vs2_vptr;
     uint32_t v1[8], v2[8], v3[8];
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (int i = env->vstart / 8; i < env->vl / 8; i++) {
         for (int k = 0; k < 8; k++) {

@@ -802,7 +802,7 @@ void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
     uint32_t vta = vext_vta(desc);
     uint32_t total_elems = vext_get_total_elems(env, desc, 4);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         uint64_t Y[2] = {vd[i * 2 + 0], vd[i * 2 + 1]};

@@ -841,7 +841,7 @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
     uint32_t vta = vext_vta(desc);
     uint32_t total_elems = vext_get_total_elems(env, desc, 4);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
         uint64_t Y[2] = {brev8(vd[i * 2 + 0]), brev8(vd[i * 2 + 1])};

@@ -879,7 +879,7 @@ void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env,
     uint32_t esz = sizeof(uint32_t);
     uint32_t total_elems = vext_get_total_elems(env, desc, esz);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = group_start; i < group_end; ++i) {
         uint32_t vstart = i * egs;

@@ -937,7 +937,7 @@ void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
     uint32_t esz = sizeof(uint32_t);
     uint32_t total_elems = vext_get_total_elems(env, desc, esz);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = group_start; i < group_end; ++i) {
         uint32_t vstart = i * egs;

@@ -973,7 +973,7 @@ void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
     uint32_t esz = sizeof(uint32_t);
     uint32_t total_elems = vext_get_total_elems(env, desc, esz);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (uint32_t i = group_start; i < group_end; ++i) {
         uint32_t vstart = i * egs;

@@ -260,7 +260,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base, target_ulong stride,
     uint32_t esz = 1 << log2_esz;
     uint32_t vma = vext_vma(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     for (i = env->vstart; i < env->vl; env->vstart = ++i) {
         k = 0;

@@ -383,10 +383,7 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
     uint32_t msize = nf * esz;
     int mmu_index = riscv_env_mmu_index(env, false);
 
-    if (env->vstart >= evl) {
-        env->vstart = 0;
-        return;
-    }
+    VSTART_CHECK_EARLY_EXIT(env, evl);
 
 #if defined(CONFIG_USER_ONLY)
     /*

@@ -544,7 +541,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
     uint32_t esz = 1 << log2_esz;
     uint32_t vma = vext_vma(desc);
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
 
     /* load bytes from guest memory */
     for (i = env->vstart; i < env->vl; env->vstart = ++i) {

@@ -633,35 +630,56 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
     uint32_t esz = 1 << log2_esz;
     uint32_t msize = nf * esz;
     uint32_t vma = vext_vma(desc);
-    target_ulong addr, offset, remain, page_split, elems;
+    target_ulong addr, addr_probe, addr_i, offset, remain, page_split, elems;
     int mmu_index = riscv_env_mmu_index(env, false);
+    int flags;
+    void *host;
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, env->vl);
+
+    addr = base + ((env->vstart * nf) << log2_esz);
+    page_split = -(addr | TARGET_PAGE_MASK);
+    /* Get number of elements */
+    elems = page_split / msize;
+    if (unlikely(env->vstart + elems >= env->vl)) {
+        elems = env->vl - env->vstart;
+    }
+
+    /* Check page permission/pmp/watchpoint/etc. */
+    flags = probe_access_flags(env, adjust_addr(env, addr), elems * msize,
+                               MMU_DATA_LOAD, mmu_index, true, &host, ra);
+
+    /* If we are crossing a page check also the second page. */
+    if (env->vl > elems) {
+        addr_probe = addr + (elems << log2_esz);
+        flags |= probe_access_flags(env, adjust_addr(env, addr_probe),
+                                    elems * msize, MMU_DATA_LOAD, mmu_index,
+                                    true, &host, ra);
+    }
 
+    if (flags & ~TLB_WATCHPOINT) {
     /* probe every access */
     for (i = env->vstart; i < env->vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             continue;
         }
-        addr = adjust_addr(env, base + i * (nf << log2_esz));
+        addr_i = adjust_addr(env, base + i * (nf << log2_esz));
         if (i == 0) {
-            probe_pages(env, addr, nf << log2_esz, ra, MMU_DATA_LOAD);
+            /* Allow fault on first element. */
+            probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD);
         } else {
             remain = nf << log2_esz;
             while (remain > 0) {
-                void *host;
-                int flags;
-
-                offset = -(addr | TARGET_PAGE_MASK);
+                offset = -(addr_i | TARGET_PAGE_MASK);
 
                 /* Probe nonfault on subsequent elements. */
-                flags = probe_access_flags(env, addr, offset, MMU_DATA_LOAD,
-                                           mmu_index, true, &host, 0);
+                flags = probe_access_flags(env, addr_i, offset,
+                                           MMU_DATA_LOAD, mmu_index, true,
+                                           &host, 0);
 
                 /*
-                 * Stop if invalid (unmapped) or mmio (transaction may fail).
-                 * Do not stop if watchpoint, as the spec says that
+                 * Stop if invalid (unmapped) or mmio (transaction may
+                 * fail). Do not stop if watchpoint, as the spec says that
                  * first-fault should continue to access the same
                  * elements regardless of any watchpoint.
                  */

@@ -673,7 +691,8 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
                     break;
                 }
                 remain -= offset;
-                addr = adjust_addr(env, addr + offset);
+                addr_i = adjust_addr(env, addr_i + offset);
             }
         }
     }
+    }

@@ -685,15 +704,6 @@ ProbeSuccess:
 
     if (env->vstart < env->vl) {
         if (vm) {
-            /* Calculate the page range of first page */
-            addr = base + ((env->vstart * nf) << log2_esz);
-            page_split = -(addr | TARGET_PAGE_MASK);
-            /* Get number of elements */
-            elems = page_split / msize;
-            if (unlikely(env->vstart + elems >= env->vl)) {
-                elems = env->vl - env->vstart;
-            }
-
             /* Load/store elements in the first page */
             if (likely(elems)) {
                 vext_page_ldst_us(env, vd, addr, elems, nf, max_elems,

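The restructuring replaces per-element probing on the fast path with at most two range probes: the elements that fit on the first page, plus the second page when the access crosses. Only if the combined probe flags report anything besides a watchpoint does the per-element walk run, preserving the fault-only-first rule that only element 0 may fault. A standalone model of the page-split arithmetic (toy constants, not QEMU types):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE ((uint64_t)4096)
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    uint64_t base = 0x1000f00;   /* toy unaligned start address */
    uint32_t vstart = 0, vl = 300, msize = 8;

    uint64_t addr = base + (uint64_t)vstart * msize;
    uint64_t page_split = -(addr | PAGE_MASK);  /* bytes left on this page */
    uint64_t elems = page_split / msize;        /* elements on first page  */

    if (vstart + elems >= vl) {
        elems = vl - vstart;
    }
    printf("probe %llu bytes on the first page\n",
           (unsigned long long)(elems * msize));
    if (vl > elems) {
        printf("crossing: probe second page at 0x%llx\n",
               (unsigned long long)(addr + elems * msize));
    }
    return 0;
}
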
@@ -1103,7 +1113,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \

@@ -1137,7 +1147,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \

@@ -1174,7 +1184,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vta_all_1s = vext_vta_all_1s(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \

@@ -1214,7 +1224,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
     uint32_t vta_all_1s = vext_vta_all_1s(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \

@@ -1312,7 +1322,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \

@@ -1361,7 +1371,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \

@@ -1425,7 +1435,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \

@@ -1492,7 +1502,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \

@@ -2041,7 +2051,7 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \

@@ -2067,7 +2077,7 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         *((ETYPE *)vd + H(i)) = (ETYPE)s1; \

@@ -2092,7 +2102,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \

@@ -2118,7 +2128,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \

@@ -2165,8 +2175,6 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
              uint32_t vl, uint32_t vm, int vxrm,
              opivv2_rm_fn *fn, uint32_t vma, uint32_t esz)
 {
-    VSTART_CHECK_EARLY_EXIT(env);
-
     for (uint32_t i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             /* set masked-off elements to 1s */

@@ -2190,6 +2198,8 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
     uint32_t vta = vext_vta(desc);
     uint32_t vma = vext_vma(desc);
 
+    VSTART_CHECK_EARLY_EXIT(env, vl);
+
     switch (env->vxrm) {
     case 0: /* rnu */
         vext_vv_rm_1(vd, v0, vs1, vs2,

@@ -2292,8 +2302,6 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
              uint32_t vl, uint32_t vm, int vxrm,
              opivx2_rm_fn *fn, uint32_t vma, uint32_t esz)
 {
-    VSTART_CHECK_EARLY_EXIT(env);
-
     for (uint32_t i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             /* set masked-off elements to 1s */

@@ -2317,6 +2325,8 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
     uint32_t vta = vext_vta(desc);
     uint32_t vma = vext_vma(desc);
 
+    VSTART_CHECK_EARLY_EXIT(env, vl);
+
     switch (env->vxrm) {
     case 0: /* rnu */
         vext_vx_rm_1(vd, v0, s1, vs2,

@@ -3091,7 +3101,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \

@@ -3136,7 +3146,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \

@@ -3724,7 +3734,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     if (vl == 0) { \
         return; \

@@ -4247,7 +4257,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \

@@ -4289,7 +4299,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \

@@ -4484,7 +4494,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \

@@ -4652,6 +4662,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t i; \
     TD s1 = *((TD *)vs1 + HD(0)); \
 \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
+\
     for (i = env->vstart; i < vl; i++) { \
         TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
         if (!vm && !vext_elem_mask(v0, i)) { \

@@ -4740,6 +4752,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t i; \
     TD s1 = *((TD *)vs1 + HD(0)); \
 \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
+\
     for (i = env->vstart; i < vl; i++) { \
         TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
         if (!vm && !vext_elem_mask(v0, i)) { \

@@ -4814,7 +4828,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t i; \
     int a, b; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         a = vext_elem_mask(vs1, i); \

@@ -4904,6 +4918,8 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
     int i;
     bool first_mask_bit = false;
 
+    VSTART_CHECK_EARLY_EXIT(env, vl);
+
     for (i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             /* set masked-off elements to 1s */

@@ -4976,6 +4992,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
     uint32_t sum = 0; \
     int i; \
 \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
+\
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \

@@ -5009,7 +5027,7 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
     uint32_t vma = vext_vma(desc); \
     int i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \

@@ -5046,7 +5064,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     target_ulong offset = s1, i_min, i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     i_min = MAX(env->vstart, offset); \
     for (i = i_min; i < vl; i++) { \

@@ -5081,7 +5099,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     target_ulong i_max, i_min, i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     i_min = MIN(s1 < vlmax ? vlmax - s1 : 0, vl); \
     i_max = MAX(i_min, env->vstart); \

@@ -5125,7 +5143,7 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \

@@ -5176,7 +5194,7 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \

@@ -5253,7 +5271,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint64_t index; \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \

@@ -5298,7 +5316,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint64_t index = s1; \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \

@@ -5334,6 +5352,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t num = 0, i; \
 \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
+\
     for (i = env->vstart; i < vl; i++) { \
         if (!vext_elem_mask(vs1, i)) { \
             continue; \

@@ -5394,7 +5414,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \

@@ -66,7 +66,7 @@ void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
     uint32_t vma = vext_vma(desc);
     uint32_t i;
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, vl);
 
     for (i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {

@@ -92,7 +92,7 @@ void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
     uint32_t vma = vext_vma(desc);
     uint32_t i;
 
-    VSTART_CHECK_EARLY_EXIT(env);
+    VSTART_CHECK_EARLY_EXIT(env, vl);
 
     for (i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {

@@ -25,8 +25,8 @@
 #include "tcg/tcg-gvec-desc.h"
 #include "internals.h"
 
-#define VSTART_CHECK_EARLY_EXIT(env) do { \
-    if (env->vstart >= env->vl) { \
+#define VSTART_CHECK_EARLY_EXIT(env, vl) do { \
+    if (env->vstart >= vl) { \
         env->vstart = 0; \
         return; \
     } \

@@ -159,7 +159,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
 \
-    VSTART_CHECK_EARLY_EXIT(env); \
+    VSTART_CHECK_EARLY_EXIT(env, vl); \
 \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \

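The parameterized macro lets each helper name the length that actually bounds its loop: unit-stride helpers pass their effective length (evl), most others pass env->vl or a cached vl. The vstart >= vl nop case is then handled uniformly, including the required reset of vstart. A minimal standalone model of the reworked macro:

#include <stdio.h>

struct env { unsigned vstart, vl; };

/* Mirrors the new macro: the caller supplies the bounding length. */
#define VSTART_CHECK_EARLY_EXIT(env, vl) do { \
    if ((env)->vstart >= (vl)) {              \
        (env)->vstart = 0;                    \
        return;                               \
    }                                         \
} while (0)

static void helper(struct env *env, unsigned evl)
{
    VSTART_CHECK_EARLY_EXIT(env, evl);  /* nop when vstart >= evl */
    printf("work from %u to %u\n", env->vstart, evl);
    env->vstart = 0;
}

int main(void)
{
    struct env e = { .vstart = 4, .vl = 8 };
    helper(&e, 2);   /* treated as a nop, vstart reset to 0 */
    helper(&e, 8);   /* runs: prints "work from 0 to 8" */
    return 0;
}
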