target-arm queue:

* target/arm: fix error in a code comment
  * virt: Suppress external aborts on virt-2.10 and earlier
  * target/arm: Correct condition for v8M callee stack push
  * target/arm: Don't read r4 from v8M exception stackframe twice
  * target/arm: Support SVE in system emulation mode
  * target/arm: Implement v8M hardware stack limit checking
  * hw/display/bcm2835_fb: Silence Coverity warning about multiply overflow
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABCAAGBQJbu2KdAAoJEDwlJe0UNgzePAwQAINYD6WRW9yC1fcU70zbyon9
 jAwZG54X1+ADIXhd02vzUeqkXjDj14eK2V3mz7w8bdJ8ykOxWImAQvcgJgT9050W
 zNj8nf917V6SjLNOrap1jtkkK+hmd89RwCHo5ofuQhfoiiW4PBVG+1/3RN637v4/
 EnASk97AIQBdckbHVUGyPTICJUUGiPbKgBytY5hPa9mWVf9IbSC86cgWcuG8Sxq3
 iUG4bmmc6CbiSCSsSWsSNUV6bDJgXxo/iz/LdI/e/gGchvY9SPcW+YpekIdyrbif
 ajPiKxvdzsO8tINauHEWiUaxmnZlpN6KpdJepb4/2TEW+AAtrosFFmWTSEf9f7Ee
 tr+rSoSWpW7zgHzZHWAmXPdJgYlu3rCp4JpW3Xd6jpLweJz/rUKQ9jRr+N2yBhuG
 97LEG+9a/6/iv/5nuFOdxeaF6l8QU+nyZ5+4RZBaSFiP4PYvNL1UWrSfqqwDyuvN
 liiEt1bj5MD8t/TozEpQ4qaRyAwAKU3kFf1bwTAzG/Vw+/IF3DNy6eollvGA8PLG
 A/bzMPnZKipWjq8E4qT5nc+zj2jAQDoKMqhGa/NUKWT/V9MG2cB87szNB5N+9Hd+
 9nl+W2plWJjT3PlCE5Y0CcGxjinx9TnQZ41LktTiu0LXUEHOp7avA3+Ai7g7q2ta
 kTVS47FNTQYQvdZonq99
 =VPJe
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20181008' into staging

target-arm queue:
 * target/arm: fix error in a code comment
 * virt: Suppress external aborts on virt-2.10 and earlier
 * target/arm: Correct condition for v8M callee stack push
 * target/arm: Don't read r4 from v8M exception stackframe twice
 * target/arm: Support SVE in system emulation mode
 * target/arm: Implement v8M hardware stack limit checking
 * hw/display/bcm2835_fb: Silence Coverity warning about multiply overflow

# gpg: Signature made Mon 08 Oct 2018 14:58:53 BST
# gpg:                using RSA key 3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20181008: (33 commits)
  hw/display/bcm2835_fb: Silence Coverity warning about multiply overflow
  target/arm: Add v8M stack checks for MSR to SP_NS
  target/arm: Add v8M stack checks for VLDM/VSTM
  target/arm: Add v8M stack checks for Thumb push/pop
  target/arm: Add v8M stack checks for T32 load/store single
  target/arm: Add v8M stack checks for Thumb2 LDM/STM
  target/arm: Add v8M stack checks for LDRD/STRD (imm)
  target/arm: Add v8M stack limit checks on NS function calls
  target/arm: Add v8M stack checks on exception entry
  target/arm: Add some comments in Thumb decode
  target/arm: Add v8M stack checks on ADD/SUB/MOV of SP
  target/arm: Move v7m_using_psp() to internals.h
  target/arm: Define new EXCP type for v8M stack overflows
  target/arm: Define new TBFLAG for v8M stack checking
  target/arm: Pass TCGMemOpIdx to sve memory helpers
  target/arm: Rewrite vector gather first-fault loads
  target/arm: Rewrite vector gather stores
  target/arm: Rewrite vector gather loads
  target/arm: Split contiguous stores for endianness
  target/arm: Split contiguous loads for endianness
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2018-10-08 15:01:30 +01:00
commit 7c69b7c849
16 changed files with 2611 additions and 1096 deletions

View File

@ -1926,6 +1926,8 @@ static void virt_machine_2_10_options(MachineClass *mc)
{ {
virt_machine_2_11_options(mc); virt_machine_2_11_options(mc);
SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_10); SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_10);
/* before 2.11 we never faulted accesses to bad addresses */
mc->ignore_memory_transaction_failures = true;
} }
DEFINE_VIRT_MACHINE(2, 10) DEFINE_VIRT_MACHINE(2, 10)

View File

@ -190,7 +190,7 @@ static void fb_update_display(void *opaque)
} }
if (s->invalidate) { if (s->invalidate) {
hwaddr base = s->config.base + xoff + yoff * src_width; hwaddr base = s->config.base + xoff + (hwaddr)yoff * src_width;
framebuffer_update_memory_section(&s->fbsection, s->dma_mr, framebuffer_update_memory_section(&s->fbsection, s->dma_mr,
base, base,
s->config.yres, src_width); s->config.yres, src_width);

View File

@ -56,6 +56,7 @@
#define EXCP_SEMIHOST 16 /* semihosting call */ #define EXCP_SEMIHOST 16 /* semihosting call */
#define EXCP_NOCP 17 /* v7M NOCP UsageFault */ #define EXCP_NOCP 17 /* v7M NOCP UsageFault */
#define EXCP_INVSTATE 18 /* v7M INVSTATE UsageFault */ #define EXCP_INVSTATE 18 /* v7M INVSTATE UsageFault */
#define EXCP_STKOF 19 /* v8M STKOF UsageFault */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */ /* NB: add new EXCP_ defines to the array in arm_log_exception() too */
#define ARMV7M_EXCP_RESET 1 #define ARMV7M_EXCP_RESET 1
@ -910,12 +911,20 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
void aarch64_sve_change_el(CPUARMState *env, int old_el, int new_el);
#else
static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
static inline void aarch64_sve_change_el(CPUARMState *env, int o, int n) { }
#endif #endif
target_ulong do_arm_semihosting(CPUARMState *env); target_ulong do_arm_semihosting(CPUARMState *env);
void aarch64_sync_32_to_64(CPUARMState *env); void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env); void aarch64_sync_64_to_32(CPUARMState *env);
int fp_exception_el(CPUARMState *env, int cur_el);
int sve_exception_el(CPUARMState *env, int cur_el);
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el);
static inline bool is_a64(CPUARMState *env) static inline bool is_a64(CPUARMState *env)
{ {
return env->aarch64; return env->aarch64;
@ -1336,8 +1345,10 @@ FIELD(V7M_CCR, UNALIGN_TRP, 3, 1)
FIELD(V7M_CCR, DIV_0_TRP, 4, 1) FIELD(V7M_CCR, DIV_0_TRP, 4, 1)
FIELD(V7M_CCR, BFHFNMIGN, 8, 1) FIELD(V7M_CCR, BFHFNMIGN, 8, 1)
FIELD(V7M_CCR, STKALIGN, 9, 1) FIELD(V7M_CCR, STKALIGN, 9, 1)
FIELD(V7M_CCR, STKOFHFNMIGN, 10, 1)
FIELD(V7M_CCR, DC, 16, 1) FIELD(V7M_CCR, DC, 16, 1)
FIELD(V7M_CCR, IC, 17, 1) FIELD(V7M_CCR, IC, 17, 1)
FIELD(V7M_CCR, BP, 18, 1)
/* V7M SCR bits */ /* V7M SCR bits */
FIELD(V7M_SCR, SLEEPONEXIT, 1, 1) FIELD(V7M_SCR, SLEEPONEXIT, 1, 1)
@ -1378,6 +1389,7 @@ FIELD(V7M_CFSR, UNDEFINSTR, 16 + 0, 1)
FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1) FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1)
FIELD(V7M_CFSR, INVPC, 16 + 2, 1) FIELD(V7M_CFSR, INVPC, 16 + 2, 1)
FIELD(V7M_CFSR, NOCP, 16 + 3, 1) FIELD(V7M_CFSR, NOCP, 16 + 3, 1)
FIELD(V7M_CFSR, STKOF, 16 + 4, 1)
FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1) FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1)
FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1) FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1)
@ -2842,6 +2854,9 @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
/* For M profile only, Handler (ie not Thread) mode */ /* For M profile only, Handler (ie not Thread) mode */
#define ARM_TBFLAG_HANDLER_SHIFT 21 #define ARM_TBFLAG_HANDLER_SHIFT 21
#define ARM_TBFLAG_HANDLER_MASK (1 << ARM_TBFLAG_HANDLER_SHIFT) #define ARM_TBFLAG_HANDLER_MASK (1 << ARM_TBFLAG_HANDLER_SHIFT)
/* For M profile only, whether we should generate stack-limit checks */
#define ARM_TBFLAG_STACKCHECK_SHIFT 22
#define ARM_TBFLAG_STACKCHECK_MASK (1 << ARM_TBFLAG_STACKCHECK_SHIFT)
/* Bit usage when in AArch64 state */ /* Bit usage when in AArch64 state */
#define ARM_TBFLAG_TBI0_SHIFT 0 /* TBI0 for EL0/1 or TBI for EL2/3 */ #define ARM_TBFLAG_TBI0_SHIFT 0 /* TBI0 for EL0/1 or TBI for EL2/3 */
@ -2884,6 +2899,8 @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
(((F) & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT) (((F) & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT)
#define ARM_TBFLAG_HANDLER(F) \ #define ARM_TBFLAG_HANDLER(F) \
(((F) & ARM_TBFLAG_HANDLER_MASK) >> ARM_TBFLAG_HANDLER_SHIFT) (((F) & ARM_TBFLAG_HANDLER_MASK) >> ARM_TBFLAG_HANDLER_SHIFT)
#define ARM_TBFLAG_STACKCHECK(F) \
(((F) & ARM_TBFLAG_STACKCHECK_MASK) >> ARM_TBFLAG_STACKCHECK_SHIFT)
#define ARM_TBFLAG_TBI0(F) \ #define ARM_TBFLAG_TBI0(F) \
(((F) & ARM_TBFLAG_TBI0_MASK) >> ARM_TBFLAG_TBI0_SHIFT) (((F) & ARM_TBFLAG_TBI0_MASK) >> ARM_TBFLAG_TBI0_SHIFT)
#define ARM_TBFLAG_TBI1(F) \ #define ARM_TBFLAG_TBI1(F) \

View File

@ -410,45 +410,3 @@ static void aarch64_cpu_register_types(void)
} }
type_init(aarch64_cpu_register_types) type_init(aarch64_cpu_register_types)
/* The manual says that when SVE is enabled and VQ is widened the
* implementation is allowed to zero the previously inaccessible
* portion of the registers. The corollary to that is that when
* SVE is enabled and VQ is narrowed we are also allowed to zero
* the now inaccessible portion of the registers.
*
* The intent of this is that no predicate bit beyond VQ is ever set.
* Which means that some operations on predicate registers themselves
* may operate on full uint64_t or even unrolled across the maximum
* uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
* may well be cheaper than conditionals to restrict the operation
* to the relevant portion of a uint16_t[16].
*
* TODO: Need to call this for changes to the real system registers
* and EL state changes.
*/
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
int i, j;
uint64_t pmask;
assert(vq >= 1 && vq <= ARM_MAX_VQ);
assert(vq <= arm_env_get_cpu(env)->sve_max_vq);
/* Zap the high bits of the zregs. */
for (i = 0; i < 32; i++) {
memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
}
/* Zap the high bits of the pregs and ffr. */
pmask = 0;
if (vq & 3) {
pmask = ~(-1ULL << (16 * (vq & 3)));
}
for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
for (i = 0; i < 17; ++i) {
env->vfp.pregs[i].p[j] &= pmask;
}
pmask = 0;
}
}

View File

@ -1128,20 +1128,35 @@ DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld2hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld3hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld4hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld2ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld3ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld4ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld2dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld3dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld4dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
@ -1150,13 +1165,21 @@ DEF_HELPER_FLAGS_4(sve_ld1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1hsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1hdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1hss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1hds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1sdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1sds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ld1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
@ -1166,17 +1189,28 @@ DEF_HELPER_FLAGS_4(sve_ldff1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1hsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1hdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1hss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1hds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1sdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1sds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldff1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
@ -1186,218 +1220,357 @@ DEF_HELPER_FLAGS_4(sve_ldnf1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1hsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1hdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1hss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1hds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1sdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1sds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_ldnf1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldnf1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st2hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st3hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st4hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st2ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st3ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st4ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st2dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st3dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st4dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1bh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1bh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1bs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1bs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1bd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1bd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1hs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1hs_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1hd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1hd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1hs_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1hd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1sd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) DEF_HELPER_FLAGS_4(sve_st1sd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1sd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldbsu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldbsu_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldhsu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhsu_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldssu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhsu_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldss_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldss_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldbss_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldbss_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldhss_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhss_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldhss_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldbsu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldbsu_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldhsu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhsu_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldssu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhsu_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldss_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldss_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldbss_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldbss_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldhss_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhss_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldhss_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldbdu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldbdu_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldhdu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhdu_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsdu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhdu_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldddu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldsdu_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsdu_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_lddd_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_lddd_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldbds_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldbds_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldhds_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhds_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsds_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhds_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsds_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsds_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldbdu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldbdu_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldhdu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhdu_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsdu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhdu_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldddu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldsdu_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsdu_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_lddd_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_lddd_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldbds_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldbds_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldhds_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhds_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsds_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhds_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsds_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsds_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldbdu_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldbdu_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldhdu_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhdu_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsdu_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhdu_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldddu_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldsdu_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsdu_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_lddd_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_lddd_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldbds_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldbds_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldhds_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhds_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsds_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldhds_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsds_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsds_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffbsu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffbsu_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffhsu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffssu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffss_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffss_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffbss_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffbss_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffhss_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhss_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffhss_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffbsu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffbsu_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffhsu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffssu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffss_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffss_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffbss_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffbss_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffhss_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhss_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffhss_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffbdu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffbdu_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffhdu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsdu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffddu_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffdd_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffdd_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffbds_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffbds_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffhds_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhds_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsds_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhds_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsds_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsds_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffbdu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffbdu_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffhdu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsdu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffddu_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffdd_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffdd_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffbds_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffbds_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffhds_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhds_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsds_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhds_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsds_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsds_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffbdu_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffbdu_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffhdu_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsdu_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffddu_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffdd_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffdd_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffbds_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffbds_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffhds_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhds_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsds_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_ldffhds_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsds_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffsds_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stbs_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_stbs_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_sths_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_sths_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stss_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_sths_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stss_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stss_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stbs_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_stbs_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_sths_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_sths_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stss_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_sths_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stss_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stss_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stbd_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_stbd_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_sthd_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_sthd_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stsd_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_sthd_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stdd_zsu, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_stsd_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stsd_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stdd_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stdd_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stbd_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_stbd_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_sthd_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_sthd_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stsd_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_sthd_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stdd_zss, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_stsd_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stsd_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stdd_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stdd_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stbd_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_stbd_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_sthd_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_sthd_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stsd_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_sthd_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stdd_zd, TCG_CALL_NO_WG, DEF_HELPER_FLAGS_6(sve_stsd_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stsd_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stdd_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stdd_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32) void, env, ptr, ptr, ptr, tl, i32)

View File

@ -4400,78 +4400,105 @@ static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
REGINFO_SENTINEL REGINFO_SENTINEL
}; };
/* Return the exception level to which SVE-disabled exceptions should /* Return the exception level to which exceptions should be taken
* be taken, or 0 if SVE is enabled. * via SVEAccessTrap. If an exception should be routed through
* AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
* take care of raising that exception.
* C.f. the ARM pseudocode function CheckSVEEnabled.
*/ */
static int sve_exception_el(CPUARMState *env) int sve_exception_el(CPUARMState *env, int el)
{ {
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
unsigned current_el = arm_current_el(env); if (el <= 1) {
bool disabled = false;
/* The CPACR.ZEN controls traps to EL1: /* The CPACR.ZEN controls traps to EL1:
* 0, 2 : trap EL0 and EL1 accesses * 0, 2 : trap EL0 and EL1 accesses
* 1 : trap only EL0 accesses * 1 : trap only EL0 accesses
* 3 : trap no accesses * 3 : trap no accesses
*/
if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
disabled = true;
} else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
disabled = el == 0;
}
if (disabled) {
/* route_to_el2 */
return (arm_feature(env, ARM_FEATURE_EL2)
&& !arm_is_secure(env)
&& (env->cp15.hcr_el2 & HCR_TGE) ? 2 : 1);
}
/* Check CPACR.FPEN. */
if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
disabled = true;
} else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
disabled = el == 0;
}
if (disabled) {
return 0;
}
}
/* CPTR_EL2. Since TZ and TFP are positive,
* they will be zero when EL2 is not present.
*/ */
switch (extract32(env->cp15.cpacr_el1, 16, 2)) { if (el <= 2 && !arm_is_secure_below_el3(env)) {
default: if (env->cp15.cptr_el[2] & CPTR_TZ) {
if (current_el <= 1) { return 2;
/* Trap to PL1, which might be EL1 or EL3 */
if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
return 3;
}
return 1;
} }
break; if (env->cp15.cptr_el[2] & CPTR_TFP) {
case 1: return 0;
if (current_el == 0) {
return 1;
} }
break;
case 3:
break;
} }
/* Similarly for CPACR.FPEN, after having checked ZEN. */ /* CPTR_EL3. Since EZ is negative we must check for EL3. */
switch (extract32(env->cp15.cpacr_el1, 20, 2)) { if (arm_feature(env, ARM_FEATURE_EL3)
default: && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
if (current_el <= 1) {
if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
return 3;
}
return 1;
}
break;
case 1:
if (current_el == 0) {
return 1;
}
break;
case 3:
break;
}
/* CPTR_EL2. Check both TZ and TFP. */
if (current_el <= 2
&& (env->cp15.cptr_el[2] & (CPTR_TFP | CPTR_TZ))
&& !arm_is_secure_below_el3(env)) {
return 2;
}
/* CPTR_EL3. Check both EZ and TFP. */
if (!(env->cp15.cptr_el[3] & CPTR_EZ)
|| (env->cp15.cptr_el[3] & CPTR_TFP)) {
return 3; return 3;
} }
#endif #endif
return 0; return 0;
} }
/*
* Given that SVE is enabled, return the vector length for EL.
*/
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
ARMCPU *cpu = arm_env_get_cpu(env);
uint32_t zcr_len = cpu->sve_max_vq - 1;
if (el <= 1) {
zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
}
if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
}
if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
}
return zcr_len;
}
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value) uint64_t value)
{ {
int cur_el = arm_current_el(env);
int old_len = sve_zcr_len_for_el(env, cur_el);
int new_len;
/* Bits other than [3:0] are RAZ/WI. */ /* Bits other than [3:0] are RAZ/WI. */
raw_write(env, ri, value & 0xf); raw_write(env, ri, value & 0xf);
/*
* Because we arrived here, we know both FP and SVE are enabled;
* otherwise we would have trapped access to the ZCR_ELn register.
*/
new_len = sve_zcr_len_for_el(env, cur_el);
if (new_len < old_len) {
aarch64_sve_narrow_vq(env, new_len + 1);
}
} }
static const ARMCPRegInfo zcr_el1_reginfo = { static const ARMCPRegInfo zcr_el1_reginfo = {
@ -5018,9 +5045,10 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST, .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = 0 }, .resetvalue = 0 },
{ .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST, .access = PL1_R, .type = ARM_CP_CONST,
/* At present, only SVEver == 0 is defined anyway. */
.resetvalue = 0 }, .resetvalue = 0 },
{ .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
@ -6554,18 +6582,6 @@ pend_fault:
return false; return false;
} }
/* Return true if we're using the process stack pointer (not the MSP) */
static bool v7m_using_psp(CPUARMState *env)
{
/* Handler mode always uses the main stack; for thread mode
* the CONTROL.SPSEL bit determines the answer.
* Note that in v7M it is not possible to be in Handler mode with
* CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
*/
return !arm_v7m_is_handler_mode(env) &&
env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
/* Write to v7M CONTROL.SPSEL bit for the specified security bank. /* Write to v7M CONTROL.SPSEL bit for the specified security bank.
* This may change the current stack pointer between Main and Process * This may change the current stack pointer between Main and Process
* stack pointers if it is done for the CONTROL register for the current * stack pointers if it is done for the CONTROL register for the current
@ -6722,6 +6738,10 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
"BLXNS with misaligned SP is UNPREDICTABLE\n"); "BLXNS with misaligned SP is UNPREDICTABLE\n");
} }
if (sp < v7m_sp_limit(env)) {
raise_exception(env, EXCP_STKOF, 0, 1);
}
saved_psr = env->v7m.exception; saved_psr = env->v7m.exception;
if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) { if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
saved_psr |= XPSR_SFPA; saved_psr |= XPSR_SFPA;
@ -6851,6 +6871,8 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
uint32_t frameptr; uint32_t frameptr;
ARMMMUIdx mmu_idx; ARMMMUIdx mmu_idx;
bool stacked_ok; bool stacked_ok;
uint32_t limit;
bool want_psp;
if (dotailchain) { if (dotailchain) {
bool mode = lr & R_V7M_EXCRET_MODE_MASK; bool mode = lr & R_V7M_EXCRET_MODE_MASK;
@ -6860,12 +6882,34 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv); mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode, frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
lr & R_V7M_EXCRET_SPSEL_MASK); lr & R_V7M_EXCRET_SPSEL_MASK);
want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
if (want_psp) {
limit = env->v7m.psplim[M_REG_S];
} else {
limit = env->v7m.msplim[M_REG_S];
}
} else { } else {
mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
frame_sp_p = &env->regs[13]; frame_sp_p = &env->regs[13];
limit = v7m_sp_limit(env);
} }
frameptr = *frame_sp_p - 0x28; frameptr = *frame_sp_p - 0x28;
if (frameptr < limit) {
/*
* Stack limit failure: set SP to the limit value, and generate
* STKOF UsageFault. Stack pushes below the limit must not be
* performed. It is IMPDEF whether pushes above the limit are
* performed; we choose not to.
*/
qemu_log_mask(CPU_LOG_INT,
"...STKOF during callee-saves register stacking\n");
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
env->v7m.secure);
*frame_sp_p = limit;
return true;
}
/* Write as much of the stack frame as we can. A write failure may /* Write as much of the stack frame as we can. A write failure may
* cause us to pend a derived exception. * cause us to pend a derived exception.
@ -6889,10 +6933,7 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
ignore_faults); ignore_faults);
/* Update SP regardless of whether any of the stack accesses failed. /* Update SP regardless of whether any of the stack accesses failed. */
* When we implement v8M stack limit checking then this attempt to
* update SP might also fail and result in a derived exception.
*/
*frame_sp_p = frameptr; *frame_sp_p = frameptr;
return !stacked_ok; return !stacked_ok;
@ -6938,7 +6979,7 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
* not already saved. * not already saved.
*/ */
if (lr & R_V7M_EXCRET_DCRS_MASK && if (lr & R_V7M_EXCRET_DCRS_MASK &&
!(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) { !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
push_failed = v7m_push_callee_stack(cpu, lr, dotailchain, push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
ignore_stackfaults); ignore_stackfaults);
} }
@ -7040,6 +7081,26 @@ static bool v7m_push_stack(ARMCPU *cpu)
frameptr -= 0x20; frameptr -= 0x20;
if (arm_feature(env, ARM_FEATURE_V8)) {
uint32_t limit = v7m_sp_limit(env);
if (frameptr < limit) {
/*
* Stack limit failure: set SP to the limit value, and generate
* STKOF UsageFault. Stack pushes below the limit must not be
* performed. It is IMPDEF whether pushes above the limit are
* performed; we choose not to.
*/
qemu_log_mask(CPU_LOG_INT,
"...STKOF during stacking\n");
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
env->v7m.secure);
env->regs[13] = limit;
return true;
}
}
/* Write as much of the stack frame as we can. If we fail a stack /* Write as much of the stack frame as we can. If we fail a stack
* write this will result in a derived exception being pended * write this will result in a derived exception being pended
* (which may be taken in preference to the one we started with * (which may be taken in preference to the one we started with
@ -7055,10 +7116,7 @@ static bool v7m_push_stack(ARMCPU *cpu)
v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) && v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false); v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);
/* Update SP regardless of whether any of the stack accesses failed. /* Update SP regardless of whether any of the stack accesses failed. */
* When we implement v8M stack limit checking then this attempt to
* update SP might also fail and result in a derived exception.
*/
env->regs[13] = frameptr; env->regs[13] = frameptr;
return !stacked_ok; return !stacked_ok;
@ -7303,7 +7361,6 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
} }
pop_ok = pop_ok && pop_ok = pop_ok &&
v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) && v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) && v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) && v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
@ -7512,6 +7569,7 @@ static void arm_log_exception(int idx)
[EXCP_SEMIHOST] = "Semihosting call", [EXCP_SEMIHOST] = "Semihosting call",
[EXCP_NOCP] = "v7M NOCP UsageFault", [EXCP_NOCP] = "v7M NOCP UsageFault",
[EXCP_INVSTATE] = "v7M INVSTATE UsageFault", [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
[EXCP_STKOF] = "v8M STKOF UsageFault",
}; };
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@ -7667,6 +7725,10 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK; env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
break; break;
case EXCP_STKOF:
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
break;
case EXCP_SWI: case EXCP_SWI:
/* The PC already points to the next instruction. */ /* The PC already points to the next instruction. */
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure); armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
@ -8310,8 +8372,11 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
unsigned int new_el = env->exception.target_el; unsigned int new_el = env->exception.target_el;
target_ulong addr = env->cp15.vbar_el[new_el]; target_ulong addr = env->cp15.vbar_el[new_el];
unsigned int new_mode = aarch64_pstate_mode(new_el, true); unsigned int new_mode = aarch64_pstate_mode(new_el, true);
unsigned int cur_el = arm_current_el(env);
if (arm_current_el(env) < new_el) { aarch64_sve_change_el(env, cur_el, new_el);
if (cur_el < new_el) {
/* Entry vector offset depends on whether the implemented EL /* Entry vector offset depends on whether the implemented EL
* immediately lower than the target level is using AArch32 or AArch64 * immediately lower than the target level is using AArch32 or AArch64
*/ */
@ -10929,11 +10994,23 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
* currently in handler mode or not, using the NS CONTROL.SPSEL. * currently in handler mode or not, using the NS CONTROL.SPSEL.
*/ */
bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
uint32_t limit;
if (!env->v7m.secure) { if (!env->v7m.secure) {
return; return;
} }
if (!arm_v7m_is_handler_mode(env) && spsel) {
limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
if (val < limit) {
CPUState *cs = CPU(arm_env_get_cpu(env));
cpu_restore_state(cs, GETPC(), true);
raise_exception(env, EXCP_STKOF, 0, 1);
}
if (is_psp) {
env->v7m.other_ss_psp = val; env->v7m.other_ss_psp = val;
} else { } else {
env->v7m.other_ss_msp = val; env->v7m.other_ss_msp = val;
@ -12516,11 +12593,10 @@ uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
/* Return the exception level to which FP-disabled exceptions should /* Return the exception level to which FP-disabled exceptions should
* be taken, or 0 if FP is enabled. * be taken, or 0 if FP is enabled.
*/ */
static inline int fp_exception_el(CPUARMState *env) int fp_exception_el(CPUARMState *env, int cur_el)
{ {
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
int fpen; int fpen;
int cur_el = arm_current_el(env);
/* CPACR and the CPTR registers don't exist before v6, so FP is /* CPACR and the CPTR registers don't exist before v6, so FP is
* always accessible * always accessible
@ -12583,7 +12659,8 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *pflags) target_ulong *cs_base, uint32_t *pflags)
{ {
ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
int fp_el = fp_exception_el(env); int current_el = arm_current_el(env);
int fp_el = fp_exception_el(env, current_el);
uint32_t flags; uint32_t flags;
if (is_a64(env)) { if (is_a64(env)) {
@ -12594,7 +12671,7 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT); flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
if (arm_feature(env, ARM_FEATURE_SVE)) { if (arm_feature(env, ARM_FEATURE_SVE)) {
int sve_el = sve_exception_el(env); int sve_el = sve_exception_el(env, current_el);
uint32_t zcr_len; uint32_t zcr_len;
/* If SVE is disabled, but FP is enabled, /* If SVE is disabled, but FP is enabled,
@ -12603,19 +12680,7 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
if (sve_el != 0 && fp_el == 0) { if (sve_el != 0 && fp_el == 0) {
zcr_len = 0; zcr_len = 0;
} else { } else {
int current_el = arm_current_el(env); zcr_len = sve_zcr_len_for_el(env, current_el);
ARMCPU *cpu = arm_env_get_cpu(env);
zcr_len = cpu->sve_max_vq - 1;
if (current_el <= 1) {
zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
}
if (current_el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
}
if (current_el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
}
} }
flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT; flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT;
flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT; flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT;
@ -12668,6 +12733,98 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
flags |= ARM_TBFLAG_HANDLER_MASK; flags |= ARM_TBFLAG_HANDLER_MASK;
} }
/* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is
* suppressing them because the requested execution priority is less than 0.
*/
if (arm_feature(env, ARM_FEATURE_V8) &&
arm_feature(env, ARM_FEATURE_M) &&
!((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
(env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
flags |= ARM_TBFLAG_STACKCHECK_MASK;
}
*pflags = flags; *pflags = flags;
*cs_base = 0; *cs_base = 0;
} }
#ifdef TARGET_AARCH64
/*
* The manual says that when SVE is enabled and VQ is widened the
* implementation is allowed to zero the previously inaccessible
* portion of the registers. The corollary to that is that when
* SVE is enabled and VQ is narrowed we are also allowed to zero
* the now inaccessible portion of the registers.
*
* The intent of this is that no predicate bit beyond VQ is ever set.
* Which means that some operations on predicate registers themselves
* may operate on full uint64_t or even unrolled across the maximum
* uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
* may well be cheaper than conditionals to restrict the operation
* to the relevant portion of a uint16_t[16].
*/
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
int i, j;
uint64_t pmask;
assert(vq >= 1 && vq <= ARM_MAX_VQ);
assert(vq <= arm_env_get_cpu(env)->sve_max_vq);
/* Zap the high bits of the zregs. */
for (i = 0; i < 32; i++) {
memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
}
/* Zap the high bits of the pregs and ffr. */
pmask = 0;
if (vq & 3) {
pmask = ~(-1ULL << (16 * (vq & 3)));
}
for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
for (i = 0; i < 17; ++i) {
env->vfp.pregs[i].p[j] &= pmask;
}
pmask = 0;
}
}
/*
* Notice a change in SVE vector size when changing EL.
*/
void aarch64_sve_change_el(CPUARMState *env, int old_el, int new_el)
{
int old_len, new_len;
/* Nothing to do if no SVE. */
if (!arm_feature(env, ARM_FEATURE_SVE)) {
return;
}
/* Nothing to do if FP is disabled in either EL. */
if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
return;
}
/*
* DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
* at ELx, or not available because the EL is in AArch32 state, then
* for all purposes other than a direct read, the ZCR_ELx.LEN field
* has an effective value of 0".
*
* Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
* If we ignore aa32 state, we would fail to see the vq4->vq0 transition
* from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
* we already have the correct register contents when encountering the
* vq0->vq0 transition between EL0->EL1.
*/
old_len = (arm_el_is_aa64(env, old_el) && !sve_exception_el(env, old_el)
? sve_zcr_len_for_el(env, old_el) : 0);
new_len = (arm_el_is_aa64(env, new_el) && !sve_exception_el(env, new_el)
? sve_zcr_len_for_el(env, new_el) : 0);
/* When changing vector length, clear inaccessible state. */
if (new_len < old_len) {
aarch64_sve_narrow_vq(env, new_len + 1);
}
}
#endif

View File

@ -69,6 +69,8 @@ DEF_HELPER_2(v7m_blxns, void, env, i32)
DEF_HELPER_3(v7m_tt, i32, env, i32, i32) DEF_HELPER_3(v7m_tt, i32, env, i32, i32)
DEF_HELPER_2(v8m_stackcheck, void, env, i32)
DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32) DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32)
DEF_HELPER_3(set_cp_reg, void, env, ptr, i32) DEF_HELPER_3(set_cp_reg, void, env, ptr, i32)
DEF_HELPER_2(get_cp_reg, i32, env, ptr) DEF_HELPER_2(get_cp_reg, i32, env, ptr)

View File

@ -94,6 +94,15 @@ FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */ #define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */ #define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
/**
* raise_exception: Raise the specified exception.
* Raise a guest exception with the specified value, syndrome register
* and target exception level. This should be called from helper functions,
* and never returns because we will longjump back up to the CPU main loop.
*/
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
uint32_t syndrome, uint32_t target_el);
/* /*
* For AArch64, map a given EL to an index in the banked_spsr array. * For AArch64, map a given EL to an index in the banked_spsr array.
* Note that this mapping and the AArch32 mapping defined in bank_number() * Note that this mapping and the AArch32 mapping defined in bank_number()
@ -796,4 +805,39 @@ static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
} }
} }
/* Note make_memop_idx reserves 4 bits for mmu_idx, and MO_BSWAP is bit 3.
* Thus a TCGMemOpIdx, without any MO_ALIGN bits, fits in 8 bits.
*/
#define MEMOPIDX_SHIFT 8
/**
* v7m_using_psp: Return true if using process stack pointer
* Return true if the CPU is currently using the process stack
* pointer, or false if it is using the main stack pointer.
*/
static inline bool v7m_using_psp(CPUARMState *env)
{
    /*
     * Handler mode always uses the main stack, so that case is decided
     * immediately.  In thread mode CONTROL.SPSEL selects the stack.
     * In v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    if (arm_v7m_is_handler_mode(env)) {
        return false;
    }
    return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) != 0;
}
/**
* v7m_sp_limit: Return SP limit for current CPU state
* Return the SP limit value for the current CPU security state
* and stack pointer.
*/
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    /*
     * Return the stack-pointer limit for the current security state,
     * choosing PSPLIM or MSPLIM according to which stack is in use.
     */
    return v7m_using_psp(env) ? env->v7m.psplim[env->v7m.secure]
                              : env->v7m.msplim[env->v7m.secure];
}
#endif #endif

View File

@ -310,7 +310,7 @@ static int compare_u64(const void *a, const void *b)
return 0; return 0;
} }
/* Initialize the CPUState's cpreg list according to the kernel's /* Initialize the ARMCPU cpreg list according to the kernel's
* definition of what CPU registers it knows about (and throw away * definition of what CPU registers it knows about (and throw away
* the previous TCG-created cpreg list). * the previous TCG-created cpreg list).
*/ */

View File

@ -50,9 +50,9 @@ void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
/** /**
* kvm_arm_init_cpreg_list: * kvm_arm_init_cpreg_list:
* @cs: CPUState * @cpu: ARMCPU
* *
* Initialize the CPUState's cpreg list according to the kernel's * Initialize the ARMCPU cpreg list according to the kernel's
* definition of what CPU registers it knows about (and throw away * definition of what CPU registers it knows about (and throw away
* the previous TCG-created cpreg list). * the previous TCG-created cpreg list).
* *

View File

@ -28,8 +28,8 @@
#define SIGNBIT (uint32_t)0x80000000 #define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63) #define SIGNBIT64 ((uint64_t)1 << 63)
static void raise_exception(CPUARMState *env, uint32_t excp, void raise_exception(CPUARMState *env, uint32_t excp,
uint32_t syndrome, uint32_t target_el) uint32_t syndrome, uint32_t target_el)
{ {
CPUState *cs = CPU(arm_env_get_cpu(env)); CPUState *cs = CPU(arm_env_get_cpu(env));
@ -238,6 +238,25 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
#endif /* !defined(CONFIG_USER_ONLY) */ #endif /* !defined(CONFIG_USER_ONLY) */
void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * v8M stack limit check for SP updates coming from translated code:
     * raise an exception if the proposed new SP is below the limit.
     */
    if (newvalue >= v7m_sp_limit(env)) {
        return;
    }

    /*
     * Stack limit exceptions are a rare case, so rather than syncing
     * PC/condbits before the call, we use cpu_restore_state() to
     * get them right before raising the exception.
     */
    cpu_restore_state(CPU(arm_env_get_cpu(env)), GETPC(), true);
    raise_exception(env, EXCP_STKOF, 0, 1);
}
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b) uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{ {
uint32_t res = a + b; uint32_t res = a + b;
@ -1082,6 +1101,7 @@ void HELPER(exception_return)(CPUARMState *env)
"AArch64 EL%d PC 0x%" PRIx64 "\n", "AArch64 EL%d PC 0x%" PRIx64 "\n",
cur_el, new_el, env->pc); cur_el, new_el, env->pc);
} }
aarch64_sve_change_el(env, cur_el, new_el);
qemu_mutex_lock_iothread(); qemu_mutex_lock_iothread();
arm_call_el_change_hook(arm_env_get_cpu(env)); arm_call_el_change_hook(arm_env_get_cpu(env));

File diff suppressed because it is too large Load Diff

View File

@ -166,11 +166,15 @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
cpu_fprintf(f, "\n"); cpu_fprintf(f, "\n");
return; return;
} }
if (fp_exception_el(env, el) != 0) {
cpu_fprintf(f, " FPU disabled\n");
return;
}
cpu_fprintf(f, " FPCR=%08x FPSR=%08x\n", cpu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
vfp_get_fpcr(env), vfp_get_fpsr(env)); vfp_get_fpcr(env), vfp_get_fpsr(env));
if (arm_feature(env, ARM_FEATURE_SVE)) { if (arm_feature(env, ARM_FEATURE_SVE) && sve_exception_el(env, el) == 0) {
int j, zcr_len = env->vfp.zcr_el[1] & 0xf; /* fix for system mode */ int j, zcr_len = sve_zcr_len_for_el(env, el);
for (i = 0; i <= FFR_PRED_NUM; i++) { for (i = 0; i <= FFR_PRED_NUM; i++) {
bool eol; bool eol;

View File

@ -4600,62 +4600,97 @@ static const uint8_t dtype_esz[16] = {
3, 2, 1, 3 3, 2, 1, 3
}; };
/*
 * Build the TCGMemOpIdx for an SVE memory access: combine the guest
 * data endianness with the per-dtype memop from dtype_mop[], paired
 * with the current mmu index.
 */
static TCGMemOpIdx sve_memopidx(DisasContext *s, int dtype)
{
    return make_memop_idx(s->be_data | dtype_mop[dtype], get_mem_index(s));
}
static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
gen_helper_gvec_mem *fn) int dtype, gen_helper_gvec_mem *fn)
{ {
unsigned vsz = vec_full_reg_size(s); unsigned vsz = vec_full_reg_size(s);
TCGv_ptr t_pg; TCGv_ptr t_pg;
TCGv_i32 desc; TCGv_i32 t_desc;
int desc;
/* For e.g. LD4, there are not enough arguments to pass all 4 /* For e.g. LD4, there are not enough arguments to pass all 4
* registers as pointers, so encode the regno into the data field. * registers as pointers, so encode the regno into the data field.
* For consistency, do this even for LD1. * For consistency, do this even for LD1.
*/ */
desc = tcg_const_i32(simd_desc(vsz, vsz, zt)); desc = sve_memopidx(s, dtype);
desc |= zt << MEMOPIDX_SHIFT;
desc = simd_desc(vsz, vsz, desc);
t_desc = tcg_const_i32(desc);
t_pg = tcg_temp_new_ptr(); t_pg = tcg_temp_new_ptr();
tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
fn(cpu_env, t_pg, addr, desc); fn(cpu_env, t_pg, addr, t_desc);
tcg_temp_free_ptr(t_pg); tcg_temp_free_ptr(t_pg);
tcg_temp_free_i32(desc); tcg_temp_free_i32(t_desc);
} }
static void do_ld_zpa(DisasContext *s, int zt, int pg, static void do_ld_zpa(DisasContext *s, int zt, int pg,
TCGv_i64 addr, int dtype, int nreg) TCGv_i64 addr, int dtype, int nreg)
{ {
static gen_helper_gvec_mem * const fns[16][4] = { static gen_helper_gvec_mem * const fns[2][16][4] = {
{ gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r, /* Little-endian */
gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r }, { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
{ gen_helper_sve_ld1bhu_r, NULL, NULL, NULL }, gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
{ gen_helper_sve_ld1bsu_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1bdu_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1sds_r, NULL, NULL, NULL }, { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1hh_r, gen_helper_sve_ld2hh_r, { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r,
gen_helper_sve_ld3hh_r, gen_helper_sve_ld4hh_r }, gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r },
{ gen_helper_sve_ld1hsu_r, NULL, NULL, NULL }, { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1hdu_r, NULL, NULL, NULL }, { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1hds_r, NULL, NULL, NULL }, { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1hss_r, NULL, NULL, NULL }, { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1ss_r, gen_helper_sve_ld2ss_r, { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r,
gen_helper_sve_ld3ss_r, gen_helper_sve_ld4ss_r }, gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r },
{ gen_helper_sve_ld1sdu_r, NULL, NULL, NULL }, { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1bds_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1bss_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1bhs_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1dd_r, gen_helper_sve_ld2dd_r, { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r,
gen_helper_sve_ld3dd_r, gen_helper_sve_ld4dd_r }, gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } },
/* Big-endian */
{ { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
{ gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r,
gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r },
{ gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r,
gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r },
{ gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r,
gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } }
}; };
gen_helper_gvec_mem *fn = fns[dtype][nreg]; gen_helper_gvec_mem *fn = fns[s->be_data == MO_BE][dtype][nreg];
/* While there are holes in the table, they are not /* While there are holes in the table, they are not
* accessible via the instruction encoding. * accessible via the instruction encoding.
*/ */
assert(fn != NULL); assert(fn != NULL);
do_mem_zpa(s, zt, pg, addr, fn); do_mem_zpa(s, zt, pg, addr, dtype, fn);
} }
static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a, uint32_t insn) static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a, uint32_t insn)
@ -4689,59 +4724,104 @@ static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a, uint32_t insn)
static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a, uint32_t insn) static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a, uint32_t insn)
{ {
static gen_helper_gvec_mem * const fns[16] = { static gen_helper_gvec_mem * const fns[2][16] = {
gen_helper_sve_ldff1bb_r, /* Little-endian */
gen_helper_sve_ldff1bhu_r, { gen_helper_sve_ldff1bb_r,
gen_helper_sve_ldff1bsu_r, gen_helper_sve_ldff1bhu_r,
gen_helper_sve_ldff1bdu_r, gen_helper_sve_ldff1bsu_r,
gen_helper_sve_ldff1bdu_r,
gen_helper_sve_ldff1sds_r, gen_helper_sve_ldff1sds_le_r,
gen_helper_sve_ldff1hh_r, gen_helper_sve_ldff1hh_le_r,
gen_helper_sve_ldff1hsu_r, gen_helper_sve_ldff1hsu_le_r,
gen_helper_sve_ldff1hdu_r, gen_helper_sve_ldff1hdu_le_r,
gen_helper_sve_ldff1hds_r, gen_helper_sve_ldff1hds_le_r,
gen_helper_sve_ldff1hss_r, gen_helper_sve_ldff1hss_le_r,
gen_helper_sve_ldff1ss_r, gen_helper_sve_ldff1ss_le_r,
gen_helper_sve_ldff1sdu_r, gen_helper_sve_ldff1sdu_le_r,
gen_helper_sve_ldff1bds_r, gen_helper_sve_ldff1bds_r,
gen_helper_sve_ldff1bss_r, gen_helper_sve_ldff1bss_r,
gen_helper_sve_ldff1bhs_r, gen_helper_sve_ldff1bhs_r,
gen_helper_sve_ldff1dd_r, gen_helper_sve_ldff1dd_le_r },
/* Big-endian */
{ gen_helper_sve_ldff1bb_r,
gen_helper_sve_ldff1bhu_r,
gen_helper_sve_ldff1bsu_r,
gen_helper_sve_ldff1bdu_r,
gen_helper_sve_ldff1sds_be_r,
gen_helper_sve_ldff1hh_be_r,
gen_helper_sve_ldff1hsu_be_r,
gen_helper_sve_ldff1hdu_be_r,
gen_helper_sve_ldff1hds_be_r,
gen_helper_sve_ldff1hss_be_r,
gen_helper_sve_ldff1ss_be_r,
gen_helper_sve_ldff1sdu_be_r,
gen_helper_sve_ldff1bds_r,
gen_helper_sve_ldff1bss_r,
gen_helper_sve_ldff1bhs_r,
gen_helper_sve_ldff1dd_be_r },
}; };
if (sve_access_check(s)) { if (sve_access_check(s)) {
TCGv_i64 addr = new_tmp_a64(s); TCGv_i64 addr = new_tmp_a64(s);
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
do_mem_zpa(s, a->rd, a->pg, addr, fns[a->dtype]); do_mem_zpa(s, a->rd, a->pg, addr, a->dtype,
fns[s->be_data == MO_BE][a->dtype]);
} }
return true; return true;
} }
static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a, uint32_t insn) static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a, uint32_t insn)
{ {
static gen_helper_gvec_mem * const fns[16] = { static gen_helper_gvec_mem * const fns[2][16] = {
gen_helper_sve_ldnf1bb_r, /* Little-endian */
gen_helper_sve_ldnf1bhu_r, { gen_helper_sve_ldnf1bb_r,
gen_helper_sve_ldnf1bsu_r, gen_helper_sve_ldnf1bhu_r,
gen_helper_sve_ldnf1bdu_r, gen_helper_sve_ldnf1bsu_r,
gen_helper_sve_ldnf1bdu_r,
gen_helper_sve_ldnf1sds_r, gen_helper_sve_ldnf1sds_le_r,
gen_helper_sve_ldnf1hh_r, gen_helper_sve_ldnf1hh_le_r,
gen_helper_sve_ldnf1hsu_r, gen_helper_sve_ldnf1hsu_le_r,
gen_helper_sve_ldnf1hdu_r, gen_helper_sve_ldnf1hdu_le_r,
gen_helper_sve_ldnf1hds_r, gen_helper_sve_ldnf1hds_le_r,
gen_helper_sve_ldnf1hss_r, gen_helper_sve_ldnf1hss_le_r,
gen_helper_sve_ldnf1ss_r, gen_helper_sve_ldnf1ss_le_r,
gen_helper_sve_ldnf1sdu_r, gen_helper_sve_ldnf1sdu_le_r,
gen_helper_sve_ldnf1bds_r, gen_helper_sve_ldnf1bds_r,
gen_helper_sve_ldnf1bss_r, gen_helper_sve_ldnf1bss_r,
gen_helper_sve_ldnf1bhs_r, gen_helper_sve_ldnf1bhs_r,
gen_helper_sve_ldnf1dd_r, gen_helper_sve_ldnf1dd_le_r },
/* Big-endian */
{ gen_helper_sve_ldnf1bb_r,
gen_helper_sve_ldnf1bhu_r,
gen_helper_sve_ldnf1bsu_r,
gen_helper_sve_ldnf1bdu_r,
gen_helper_sve_ldnf1sds_be_r,
gen_helper_sve_ldnf1hh_be_r,
gen_helper_sve_ldnf1hsu_be_r,
gen_helper_sve_ldnf1hdu_be_r,
gen_helper_sve_ldnf1hds_be_r,
gen_helper_sve_ldnf1hss_be_r,
gen_helper_sve_ldnf1ss_be_r,
gen_helper_sve_ldnf1sdu_be_r,
gen_helper_sve_ldnf1bds_r,
gen_helper_sve_ldnf1bss_r,
gen_helper_sve_ldnf1bhs_r,
gen_helper_sve_ldnf1dd_be_r },
}; };
if (sve_access_check(s)) { if (sve_access_check(s)) {
@ -4751,30 +4831,57 @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a, uint32_t insn)
TCGv_i64 addr = new_tmp_a64(s); TCGv_i64 addr = new_tmp_a64(s);
tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off);
do_mem_zpa(s, a->rd, a->pg, addr, fns[a->dtype]); do_mem_zpa(s, a->rd, a->pg, addr, a->dtype,
fns[s->be_data == MO_BE][a->dtype]);
} }
return true; return true;
} }
static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz) static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz)
{ {
static gen_helper_gvec_mem * const fns[4] = { static gen_helper_gvec_mem * const fns[2][4] = {
gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_r, { gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_le_r,
gen_helper_sve_ld1ss_r, gen_helper_sve_ld1dd_r, gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld1dd_le_r },
{ gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_be_r,
gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld1dd_be_r },
}; };
unsigned vsz = vec_full_reg_size(s); unsigned vsz = vec_full_reg_size(s);
TCGv_ptr t_pg; TCGv_ptr t_pg;
TCGv_i32 desc; TCGv_i32 t_desc;
int desc, poff;
/* Load the first quadword using the normal predicated load helpers. */ /* Load the first quadword using the normal predicated load helpers. */
desc = tcg_const_i32(simd_desc(16, 16, zt)); desc = sve_memopidx(s, msz_dtype(msz));
t_pg = tcg_temp_new_ptr(); desc |= zt << MEMOPIDX_SHIFT;
desc = simd_desc(16, 16, desc);
t_desc = tcg_const_i32(desc);
tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); poff = pred_full_reg_offset(s, pg);
fns[msz](cpu_env, t_pg, addr, desc); if (vsz > 16) {
/*
* Zero-extend the first 16 bits of the predicate into a temporary.
* This avoids triggering an assert making sure we don't have bits
* set within a predicate beyond VQ, but we have lowered VQ to 1
* for this load operation.
*/
TCGv_i64 tmp = tcg_temp_new_i64();
#ifdef HOST_WORDS_BIGENDIAN
poff += 6;
#endif
tcg_gen_ld16u_i64(tmp, cpu_env, poff);
poff = offsetof(CPUARMState, vfp.preg_tmp);
tcg_gen_st_i64(tmp, cpu_env, poff);
tcg_temp_free_i64(tmp);
}
t_pg = tcg_temp_new_ptr();
tcg_gen_addi_ptr(t_pg, cpu_env, poff);
fns[s->be_data == MO_BE][msz](cpu_env, t_pg, addr, t_desc);
tcg_temp_free_ptr(t_pg); tcg_temp_free_ptr(t_pg);
tcg_temp_free_i32(desc); tcg_temp_free_i32(t_desc);
/* Replicate that first quadword. */ /* Replicate that first quadword. */
if (vsz > 16) { if (vsz > 16) {
@ -4860,35 +4967,73 @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a, uint32_t insn)
static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
int msz, int esz, int nreg) int msz, int esz, int nreg)
{ {
static gen_helper_gvec_mem * const fn_single[4][4] = { static gen_helper_gvec_mem * const fn_single[2][4][4] = {
{ gen_helper_sve_st1bb_r, gen_helper_sve_st1bh_r, { { gen_helper_sve_st1bb_r,
gen_helper_sve_st1bs_r, gen_helper_sve_st1bd_r }, gen_helper_sve_st1bh_r,
{ NULL, gen_helper_sve_st1hh_r, gen_helper_sve_st1bs_r,
gen_helper_sve_st1hs_r, gen_helper_sve_st1hd_r }, gen_helper_sve_st1bd_r },
{ NULL, NULL, { NULL,
gen_helper_sve_st1ss_r, gen_helper_sve_st1sd_r }, gen_helper_sve_st1hh_le_r,
{ NULL, NULL, NULL, gen_helper_sve_st1dd_r }, gen_helper_sve_st1hs_le_r,
gen_helper_sve_st1hd_le_r },
{ NULL, NULL,
gen_helper_sve_st1ss_le_r,
gen_helper_sve_st1sd_le_r },
{ NULL, NULL, NULL,
gen_helper_sve_st1dd_le_r } },
{ { gen_helper_sve_st1bb_r,
gen_helper_sve_st1bh_r,
gen_helper_sve_st1bs_r,
gen_helper_sve_st1bd_r },
{ NULL,
gen_helper_sve_st1hh_be_r,
gen_helper_sve_st1hs_be_r,
gen_helper_sve_st1hd_be_r },
{ NULL, NULL,
gen_helper_sve_st1ss_be_r,
gen_helper_sve_st1sd_be_r },
{ NULL, NULL, NULL,
gen_helper_sve_st1dd_be_r } },
}; };
static gen_helper_gvec_mem * const fn_multiple[3][4] = { static gen_helper_gvec_mem * const fn_multiple[2][3][4] = {
{ gen_helper_sve_st2bb_r, gen_helper_sve_st2hh_r, { { gen_helper_sve_st2bb_r,
gen_helper_sve_st2ss_r, gen_helper_sve_st2dd_r }, gen_helper_sve_st2hh_le_r,
{ gen_helper_sve_st3bb_r, gen_helper_sve_st3hh_r, gen_helper_sve_st2ss_le_r,
gen_helper_sve_st3ss_r, gen_helper_sve_st3dd_r }, gen_helper_sve_st2dd_le_r },
{ gen_helper_sve_st4bb_r, gen_helper_sve_st4hh_r, { gen_helper_sve_st3bb_r,
gen_helper_sve_st4ss_r, gen_helper_sve_st4dd_r }, gen_helper_sve_st3hh_le_r,
gen_helper_sve_st3ss_le_r,
gen_helper_sve_st3dd_le_r },
{ gen_helper_sve_st4bb_r,
gen_helper_sve_st4hh_le_r,
gen_helper_sve_st4ss_le_r,
gen_helper_sve_st4dd_le_r } },
{ { gen_helper_sve_st2bb_r,
gen_helper_sve_st2hh_be_r,
gen_helper_sve_st2ss_be_r,
gen_helper_sve_st2dd_be_r },
{ gen_helper_sve_st3bb_r,
gen_helper_sve_st3hh_be_r,
gen_helper_sve_st3ss_be_r,
gen_helper_sve_st3dd_be_r },
{ gen_helper_sve_st4bb_r,
gen_helper_sve_st4hh_be_r,
gen_helper_sve_st4ss_be_r,
gen_helper_sve_st4dd_be_r } },
}; };
gen_helper_gvec_mem *fn; gen_helper_gvec_mem *fn;
int be = s->be_data == MO_BE;
if (nreg == 0) { if (nreg == 0) {
/* ST1 */ /* ST1 */
fn = fn_single[msz][esz]; fn = fn_single[be][msz][esz];
} else { } else {
/* ST2, ST3, ST4 -- msz == esz, enforced by encoding */ /* ST2, ST3, ST4 -- msz == esz, enforced by encoding */
assert(msz == esz); assert(msz == esz);
fn = fn_multiple[nreg - 1][msz]; fn = fn_multiple[be][nreg - 1][msz];
} }
assert(fn != NULL); assert(fn != NULL);
do_mem_zpa(s, zt, pg, addr, fn); do_mem_zpa(s, zt, pg, addr, msz_dtype(msz), fn);
} }
static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a, uint32_t insn) static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a, uint32_t insn)
@ -4926,111 +5071,203 @@ static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a, uint32_t insn)
*** SVE gather loads / scatter stores *** SVE gather loads / scatter stores
*/ */
static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, int scale, static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
TCGv_i64 scalar, gen_helper_gvec_mem_scatter *fn) int scale, TCGv_i64 scalar, int msz,
gen_helper_gvec_mem_scatter *fn)
{ {
unsigned vsz = vec_full_reg_size(s); unsigned vsz = vec_full_reg_size(s);
TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, scale));
TCGv_ptr t_zm = tcg_temp_new_ptr(); TCGv_ptr t_zm = tcg_temp_new_ptr();
TCGv_ptr t_pg = tcg_temp_new_ptr(); TCGv_ptr t_pg = tcg_temp_new_ptr();
TCGv_ptr t_zt = tcg_temp_new_ptr(); TCGv_ptr t_zt = tcg_temp_new_ptr();
TCGv_i32 t_desc;
int desc;
desc = sve_memopidx(s, msz_dtype(msz));
desc |= scale << MEMOPIDX_SHIFT;
desc = simd_desc(vsz, vsz, desc);
t_desc = tcg_const_i32(desc);
tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm)); tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm));
tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt)); tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt));
fn(cpu_env, t_zt, t_pg, t_zm, scalar, desc); fn(cpu_env, t_zt, t_pg, t_zm, scalar, t_desc);
tcg_temp_free_ptr(t_zt); tcg_temp_free_ptr(t_zt);
tcg_temp_free_ptr(t_zm); tcg_temp_free_ptr(t_zm);
tcg_temp_free_ptr(t_pg); tcg_temp_free_ptr(t_pg);
tcg_temp_free_i32(desc); tcg_temp_free_i32(t_desc);
} }
/* Indexed by [ff][xs][u][msz]. */ /* Indexed by [be][ff][xs][u][msz]. */
static gen_helper_gvec_mem_scatter * const gather_load_fn32[2][2][2][3] = { static gen_helper_gvec_mem_scatter * const gather_load_fn32[2][2][2][2][3] = {
{ { { gen_helper_sve_ldbss_zsu, /* Little-endian */
gen_helper_sve_ldhss_zsu, { { { { gen_helper_sve_ldbss_zsu,
NULL, }, gen_helper_sve_ldhss_le_zsu,
{ gen_helper_sve_ldbsu_zsu, NULL, },
gen_helper_sve_ldhsu_zsu, { gen_helper_sve_ldbsu_zsu,
gen_helper_sve_ldssu_zsu, } }, gen_helper_sve_ldhsu_le_zsu,
{ { gen_helper_sve_ldbss_zss, gen_helper_sve_ldss_le_zsu, } },
gen_helper_sve_ldhss_zss, { { gen_helper_sve_ldbss_zss,
NULL, }, gen_helper_sve_ldhss_le_zss,
{ gen_helper_sve_ldbsu_zss, NULL, },
gen_helper_sve_ldhsu_zss, { gen_helper_sve_ldbsu_zss,
gen_helper_sve_ldssu_zss, } } }, gen_helper_sve_ldhsu_le_zss,
gen_helper_sve_ldss_le_zss, } } },
{ { { gen_helper_sve_ldffbss_zsu, /* First-fault */
gen_helper_sve_ldffhss_zsu, { { { gen_helper_sve_ldffbss_zsu,
NULL, }, gen_helper_sve_ldffhss_le_zsu,
{ gen_helper_sve_ldffbsu_zsu, NULL, },
gen_helper_sve_ldffhsu_zsu, { gen_helper_sve_ldffbsu_zsu,
gen_helper_sve_ldffssu_zsu, } }, gen_helper_sve_ldffhsu_le_zsu,
{ { gen_helper_sve_ldffbss_zss, gen_helper_sve_ldffss_le_zsu, } },
gen_helper_sve_ldffhss_zss, { { gen_helper_sve_ldffbss_zss,
NULL, }, gen_helper_sve_ldffhss_le_zss,
{ gen_helper_sve_ldffbsu_zss, NULL, },
gen_helper_sve_ldffhsu_zss, { gen_helper_sve_ldffbsu_zss,
gen_helper_sve_ldffssu_zss, } } } gen_helper_sve_ldffhsu_le_zss,
gen_helper_sve_ldffss_le_zss, } } } },
/* Big-endian */
{ { { { gen_helper_sve_ldbss_zsu,
gen_helper_sve_ldhss_be_zsu,
NULL, },
{ gen_helper_sve_ldbsu_zsu,
gen_helper_sve_ldhsu_be_zsu,
gen_helper_sve_ldss_be_zsu, } },
{ { gen_helper_sve_ldbss_zss,
gen_helper_sve_ldhss_be_zss,
NULL, },
{ gen_helper_sve_ldbsu_zss,
gen_helper_sve_ldhsu_be_zss,
gen_helper_sve_ldss_be_zss, } } },
/* First-fault */
{ { { gen_helper_sve_ldffbss_zsu,
gen_helper_sve_ldffhss_be_zsu,
NULL, },
{ gen_helper_sve_ldffbsu_zsu,
gen_helper_sve_ldffhsu_be_zsu,
gen_helper_sve_ldffss_be_zsu, } },
{ { gen_helper_sve_ldffbss_zss,
gen_helper_sve_ldffhss_be_zss,
NULL, },
{ gen_helper_sve_ldffbsu_zss,
gen_helper_sve_ldffhsu_be_zss,
gen_helper_sve_ldffss_be_zss, } } } },
}; };
/* Note that we overload xs=2 to indicate 64-bit offset. */ /* Note that we overload xs=2 to indicate 64-bit offset. */
static gen_helper_gvec_mem_scatter * const gather_load_fn64[2][3][2][4] = { static gen_helper_gvec_mem_scatter * const gather_load_fn64[2][2][3][2][4] = {
{ { { gen_helper_sve_ldbds_zsu, /* Little-endian */
gen_helper_sve_ldhds_zsu, { { { { gen_helper_sve_ldbds_zsu,
gen_helper_sve_ldsds_zsu, gen_helper_sve_ldhds_le_zsu,
NULL, }, gen_helper_sve_ldsds_le_zsu,
{ gen_helper_sve_ldbdu_zsu, NULL, },
gen_helper_sve_ldhdu_zsu, { gen_helper_sve_ldbdu_zsu,
gen_helper_sve_ldsdu_zsu, gen_helper_sve_ldhdu_le_zsu,
gen_helper_sve_ldddu_zsu, } }, gen_helper_sve_ldsdu_le_zsu,
{ { gen_helper_sve_ldbds_zss, gen_helper_sve_lddd_le_zsu, } },
gen_helper_sve_ldhds_zss, { { gen_helper_sve_ldbds_zss,
gen_helper_sve_ldsds_zss, gen_helper_sve_ldhds_le_zss,
NULL, }, gen_helper_sve_ldsds_le_zss,
{ gen_helper_sve_ldbdu_zss, NULL, },
gen_helper_sve_ldhdu_zss, { gen_helper_sve_ldbdu_zss,
gen_helper_sve_ldsdu_zss, gen_helper_sve_ldhdu_le_zss,
gen_helper_sve_ldddu_zss, } }, gen_helper_sve_ldsdu_le_zss,
{ { gen_helper_sve_ldbds_zd, gen_helper_sve_lddd_le_zss, } },
gen_helper_sve_ldhds_zd, { { gen_helper_sve_ldbds_zd,
gen_helper_sve_ldsds_zd, gen_helper_sve_ldhds_le_zd,
NULL, }, gen_helper_sve_ldsds_le_zd,
{ gen_helper_sve_ldbdu_zd, NULL, },
gen_helper_sve_ldhdu_zd, { gen_helper_sve_ldbdu_zd,
gen_helper_sve_ldsdu_zd, gen_helper_sve_ldhdu_le_zd,
gen_helper_sve_ldddu_zd, } } }, gen_helper_sve_ldsdu_le_zd,
gen_helper_sve_lddd_le_zd, } } },
{ { { gen_helper_sve_ldffbds_zsu, /* First-fault */
gen_helper_sve_ldffhds_zsu, { { { gen_helper_sve_ldffbds_zsu,
gen_helper_sve_ldffsds_zsu, gen_helper_sve_ldffhds_le_zsu,
NULL, }, gen_helper_sve_ldffsds_le_zsu,
{ gen_helper_sve_ldffbdu_zsu, NULL, },
gen_helper_sve_ldffhdu_zsu, { gen_helper_sve_ldffbdu_zsu,
gen_helper_sve_ldffsdu_zsu, gen_helper_sve_ldffhdu_le_zsu,
gen_helper_sve_ldffddu_zsu, } }, gen_helper_sve_ldffsdu_le_zsu,
{ { gen_helper_sve_ldffbds_zss, gen_helper_sve_ldffdd_le_zsu, } },
gen_helper_sve_ldffhds_zss, { { gen_helper_sve_ldffbds_zss,
gen_helper_sve_ldffsds_zss, gen_helper_sve_ldffhds_le_zss,
NULL, }, gen_helper_sve_ldffsds_le_zss,
{ gen_helper_sve_ldffbdu_zss, NULL, },
gen_helper_sve_ldffhdu_zss, { gen_helper_sve_ldffbdu_zss,
gen_helper_sve_ldffsdu_zss, gen_helper_sve_ldffhdu_le_zss,
gen_helper_sve_ldffddu_zss, } }, gen_helper_sve_ldffsdu_le_zss,
{ { gen_helper_sve_ldffbds_zd, gen_helper_sve_ldffdd_le_zss, } },
gen_helper_sve_ldffhds_zd, { { gen_helper_sve_ldffbds_zd,
gen_helper_sve_ldffsds_zd, gen_helper_sve_ldffhds_le_zd,
NULL, }, gen_helper_sve_ldffsds_le_zd,
{ gen_helper_sve_ldffbdu_zd, NULL, },
gen_helper_sve_ldffhdu_zd, { gen_helper_sve_ldffbdu_zd,
gen_helper_sve_ldffsdu_zd, gen_helper_sve_ldffhdu_le_zd,
gen_helper_sve_ldffddu_zd, } } } gen_helper_sve_ldffsdu_le_zd,
gen_helper_sve_ldffdd_le_zd, } } } },
/* Big-endian */
{ { { { gen_helper_sve_ldbds_zsu,
gen_helper_sve_ldhds_be_zsu,
gen_helper_sve_ldsds_be_zsu,
NULL, },
{ gen_helper_sve_ldbdu_zsu,
gen_helper_sve_ldhdu_be_zsu,
gen_helper_sve_ldsdu_be_zsu,
gen_helper_sve_lddd_be_zsu, } },
{ { gen_helper_sve_ldbds_zss,
gen_helper_sve_ldhds_be_zss,
gen_helper_sve_ldsds_be_zss,
NULL, },
{ gen_helper_sve_ldbdu_zss,
gen_helper_sve_ldhdu_be_zss,
gen_helper_sve_ldsdu_be_zss,
gen_helper_sve_lddd_be_zss, } },
{ { gen_helper_sve_ldbds_zd,
gen_helper_sve_ldhds_be_zd,
gen_helper_sve_ldsds_be_zd,
NULL, },
{ gen_helper_sve_ldbdu_zd,
gen_helper_sve_ldhdu_be_zd,
gen_helper_sve_ldsdu_be_zd,
gen_helper_sve_lddd_be_zd, } } },
/* First-fault */
{ { { gen_helper_sve_ldffbds_zsu,
gen_helper_sve_ldffhds_be_zsu,
gen_helper_sve_ldffsds_be_zsu,
NULL, },
{ gen_helper_sve_ldffbdu_zsu,
gen_helper_sve_ldffhdu_be_zsu,
gen_helper_sve_ldffsdu_be_zsu,
gen_helper_sve_ldffdd_be_zsu, } },
{ { gen_helper_sve_ldffbds_zss,
gen_helper_sve_ldffhds_be_zss,
gen_helper_sve_ldffsds_be_zss,
NULL, },
{ gen_helper_sve_ldffbdu_zss,
gen_helper_sve_ldffhdu_be_zss,
gen_helper_sve_ldffsdu_be_zss,
gen_helper_sve_ldffdd_be_zss, } },
{ { gen_helper_sve_ldffbds_zd,
gen_helper_sve_ldffhds_be_zd,
gen_helper_sve_ldffsds_be_zd,
NULL, },
{ gen_helper_sve_ldffbdu_zd,
gen_helper_sve_ldffhdu_be_zd,
gen_helper_sve_ldffsdu_be_zd,
gen_helper_sve_ldffdd_be_zd, } } } },
}; };
static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a, uint32_t insn) static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a, uint32_t insn)
{ {
gen_helper_gvec_mem_scatter *fn = NULL; gen_helper_gvec_mem_scatter *fn = NULL;
int be = s->be_data == MO_BE;
if (!sve_access_check(s)) { if (!sve_access_check(s)) {
return true; return true;
@ -5038,22 +5275,23 @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a, uint32_t insn)
switch (a->esz) { switch (a->esz) {
case MO_32: case MO_32:
fn = gather_load_fn32[a->ff][a->xs][a->u][a->msz]; fn = gather_load_fn32[be][a->ff][a->xs][a->u][a->msz];
break; break;
case MO_64: case MO_64:
fn = gather_load_fn64[a->ff][a->xs][a->u][a->msz]; fn = gather_load_fn64[be][a->ff][a->xs][a->u][a->msz];
break; break;
} }
assert(fn != NULL); assert(fn != NULL);
do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz, do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
cpu_reg_sp(s, a->rn), fn); cpu_reg_sp(s, a->rn), a->msz, fn);
return true; return true;
} }
static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a, uint32_t insn) static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a, uint32_t insn)
{ {
gen_helper_gvec_mem_scatter *fn = NULL; gen_helper_gvec_mem_scatter *fn = NULL;
int be = s->be_data == MO_BE;
TCGv_i64 imm; TCGv_i64 imm;
if (a->esz < a->msz || (a->esz == a->msz && !a->u)) { if (a->esz < a->msz || (a->esz == a->msz && !a->u)) {
@ -5065,10 +5303,10 @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a, uint32_t insn)
switch (a->esz) { switch (a->esz) {
case MO_32: case MO_32:
fn = gather_load_fn32[a->ff][0][a->u][a->msz]; fn = gather_load_fn32[be][a->ff][0][a->u][a->msz];
break; break;
case MO_64: case MO_64:
fn = gather_load_fn64[a->ff][2][a->u][a->msz]; fn = gather_load_fn64[be][a->ff][2][a->u][a->msz];
break; break;
} }
assert(fn != NULL); assert(fn != NULL);
@ -5077,40 +5315,63 @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a, uint32_t insn)
* by loading the immediate into the scalar parameter. * by loading the immediate into the scalar parameter.
*/ */
imm = tcg_const_i64(a->imm << a->msz); imm = tcg_const_i64(a->imm << a->msz);
do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, fn); do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, fn);
tcg_temp_free_i64(imm); tcg_temp_free_i64(imm);
return true; return true;
} }
/* Indexed by [xs][msz]. */ /* Indexed by [be][xs][msz]. */
static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][3] = { static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][3] = {
{ gen_helper_sve_stbs_zsu, /* Little-endian */
gen_helper_sve_sths_zsu, { { gen_helper_sve_stbs_zsu,
gen_helper_sve_stss_zsu, }, gen_helper_sve_sths_le_zsu,
{ gen_helper_sve_stbs_zss, gen_helper_sve_stss_le_zsu, },
gen_helper_sve_sths_zss, { gen_helper_sve_stbs_zss,
gen_helper_sve_stss_zss, }, gen_helper_sve_sths_le_zss,
gen_helper_sve_stss_le_zss, } },
/* Big-endian */
{ { gen_helper_sve_stbs_zsu,
gen_helper_sve_sths_be_zsu,
gen_helper_sve_stss_be_zsu, },
{ gen_helper_sve_stbs_zss,
gen_helper_sve_sths_be_zss,
gen_helper_sve_stss_be_zss, } },
}; };
/* Note that we overload xs=2 to indicate 64-bit offset. */ /* Note that we overload xs=2 to indicate 64-bit offset. */
static gen_helper_gvec_mem_scatter * const scatter_store_fn64[3][4] = { static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][3][4] = {
{ gen_helper_sve_stbd_zsu, /* Little-endian */
gen_helper_sve_sthd_zsu, { { gen_helper_sve_stbd_zsu,
gen_helper_sve_stsd_zsu, gen_helper_sve_sthd_le_zsu,
gen_helper_sve_stdd_zsu, }, gen_helper_sve_stsd_le_zsu,
{ gen_helper_sve_stbd_zss, gen_helper_sve_stdd_le_zsu, },
gen_helper_sve_sthd_zss, { gen_helper_sve_stbd_zss,
gen_helper_sve_stsd_zss, gen_helper_sve_sthd_le_zss,
gen_helper_sve_stdd_zss, }, gen_helper_sve_stsd_le_zss,
{ gen_helper_sve_stbd_zd, gen_helper_sve_stdd_le_zss, },
gen_helper_sve_sthd_zd, { gen_helper_sve_stbd_zd,
gen_helper_sve_stsd_zd, gen_helper_sve_sthd_le_zd,
gen_helper_sve_stdd_zd, }, gen_helper_sve_stsd_le_zd,
gen_helper_sve_stdd_le_zd, } },
/* Big-endian */
{ { gen_helper_sve_stbd_zsu,
gen_helper_sve_sthd_be_zsu,
gen_helper_sve_stsd_be_zsu,
gen_helper_sve_stdd_be_zsu, },
{ gen_helper_sve_stbd_zss,
gen_helper_sve_sthd_be_zss,
gen_helper_sve_stsd_be_zss,
gen_helper_sve_stdd_be_zss, },
{ gen_helper_sve_stbd_zd,
gen_helper_sve_sthd_be_zd,
gen_helper_sve_stsd_be_zd,
gen_helper_sve_stdd_be_zd, } },
}; };
static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a, uint32_t insn) static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a, uint32_t insn)
{ {
gen_helper_gvec_mem_scatter *fn; gen_helper_gvec_mem_scatter *fn;
int be = s->be_data == MO_BE;
if (a->esz < a->msz || (a->msz == 0 && a->scale)) { if (a->esz < a->msz || (a->msz == 0 && a->scale)) {
return false; return false;
@ -5120,22 +5381,23 @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a, uint32_t insn)
} }
switch (a->esz) { switch (a->esz) {
case MO_32: case MO_32:
fn = scatter_store_fn32[a->xs][a->msz]; fn = scatter_store_fn32[be][a->xs][a->msz];
break; break;
case MO_64: case MO_64:
fn = scatter_store_fn64[a->xs][a->msz]; fn = scatter_store_fn64[be][a->xs][a->msz];
break; break;
default: default:
g_assert_not_reached(); g_assert_not_reached();
} }
do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz, do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
cpu_reg_sp(s, a->rn), fn); cpu_reg_sp(s, a->rn), a->msz, fn);
return true; return true;
} }
static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a, uint32_t insn) static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a, uint32_t insn)
{ {
gen_helper_gvec_mem_scatter *fn = NULL; gen_helper_gvec_mem_scatter *fn = NULL;
int be = s->be_data == MO_BE;
TCGv_i64 imm; TCGv_i64 imm;
if (a->esz < a->msz) { if (a->esz < a->msz) {
@ -5147,10 +5409,10 @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a, uint32_t insn)
switch (a->esz) { switch (a->esz) {
case MO_32: case MO_32:
fn = scatter_store_fn32[0][a->msz]; fn = scatter_store_fn32[be][0][a->msz];
break; break;
case MO_64: case MO_64:
fn = scatter_store_fn64[2][a->msz]; fn = scatter_store_fn64[be][2][a->msz];
break; break;
} }
assert(fn != NULL); assert(fn != NULL);
@ -5159,7 +5421,7 @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a, uint32_t insn)
* by loading the immediate into the scalar parameter. * by loading the immediate into the scalar parameter.
*/ */
imm = tcg_const_i64(a->imm << a->msz); imm = tcg_const_i64(a->imm << a->msz);
do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, fn); do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, fn);
tcg_temp_free_i64(imm); tcg_temp_free_i64(imm);
return true; return true;
} }

View File

@ -239,6 +239,23 @@ static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
tcg_temp_free_i32(var); tcg_temp_free_i32(var);
} }
/*
* Variant of store_reg which applies v8M stack-limit checks before updating
* SP. If the check fails this will result in an exception being taken.
* We disable the stack checks for CONFIG_USER_ONLY because we have
* no idea what the stack limits should be in that case.
* If stack checking is not being done this just acts like store_reg().
*/
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
if (s->v8m_stackcheck) {
gen_helper_v8m_stackcheck(cpu_env, var);
}
#endif
store_reg(s, 13, var);
}
/* Value extensions. */ /* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var) #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var) #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
@ -4212,6 +4229,18 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
if (insn & (1 << 24)) /* pre-decrement */ if (insn & (1 << 24)) /* pre-decrement */
tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2)); tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
if (s->v8m_stackcheck && rn == 13 && w) {
/*
* Here 'addr' is the lowest address we will store to,
* and is either the old SP (if post-increment) or
* the new SP (if pre-decrement). For post-increment
* where the old value is below the limit and the new
* value is above, it is UNKNOWN whether the limit check
* triggers; we choose to trigger.
*/
gen_helper_v8m_stackcheck(cpu_env, addr);
}
if (dp) if (dp)
offset = 8; offset = 8;
else else
@ -10261,6 +10290,8 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
* 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
* - load/store dual (pre-indexed) * - load/store dual (pre-indexed)
*/ */
bool wback = extract32(insn, 21, 1);
if (rn == 15) { if (rn == 15) {
if (insn & (1 << 21)) { if (insn & (1 << 21)) {
/* UNPREDICTABLE */ /* UNPREDICTABLE */
@ -10272,8 +10303,29 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
addr = load_reg(s, rn); addr = load_reg(s, rn);
} }
offset = (insn & 0xff) * 4; offset = (insn & 0xff) * 4;
if ((insn & (1 << 23)) == 0) if ((insn & (1 << 23)) == 0) {
offset = -offset; offset = -offset;
}
if (s->v8m_stackcheck && rn == 13 && wback) {
/*
* Here 'addr' is the current SP; if offset is +ve we're
* moving SP up, else down. It is UNKNOWN whether the limit
* check triggers when SP starts below the limit and ends
* up above it; check whichever of the current and final
* SP is lower, so QEMU will trigger in that situation.
*/
if ((int32_t)offset < 0) {
TCGv_i32 newsp = tcg_temp_new_i32();
tcg_gen_addi_i32(newsp, addr, offset);
gen_helper_v8m_stackcheck(cpu_env, newsp);
tcg_temp_free_i32(newsp);
} else {
gen_helper_v8m_stackcheck(cpu_env, addr);
}
}
if (insn & (1 << 24)) { if (insn & (1 << 24)) {
tcg_gen_addi_i32(addr, addr, offset); tcg_gen_addi_i32(addr, addr, offset);
offset = 0; offset = 0;
@ -10297,7 +10349,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
gen_aa32_st32(s, tmp, addr, get_mem_index(s)); gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp);
} }
if (insn & (1 << 21)) { if (wback) {
/* Base writeback. */ /* Base writeback. */
tcg_gen_addi_i32(addr, addr, offset - 4); tcg_gen_addi_i32(addr, addr, offset - 4);
store_reg(s, rn, addr); store_reg(s, rn, addr);
@ -10484,6 +10536,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
} else { } else {
int i, loaded_base = 0; int i, loaded_base = 0;
TCGv_i32 loaded_var; TCGv_i32 loaded_var;
bool wback = extract32(insn, 21, 1);
/* Load/store multiple. */ /* Load/store multiple. */
addr = load_reg(s, rn); addr = load_reg(s, rn);
offset = 0; offset = 0;
@ -10491,10 +10544,26 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
if (insn & (1 << i)) if (insn & (1 << i))
offset += 4; offset += 4;
} }
if (insn & (1 << 24)) { if (insn & (1 << 24)) {
tcg_gen_addi_i32(addr, addr, -offset); tcg_gen_addi_i32(addr, addr, -offset);
} }
if (s->v8m_stackcheck && rn == 13 && wback) {
/*
* If the writeback is incrementing SP rather than
* decrementing it, and the initial SP is below the
* stack limit but the final written-back SP would
* be above, then then we must not perform any memory
* accesses, but it is IMPDEF whether we generate
* an exception. We choose to do so in this case.
* At this point 'addr' is the lowest address, so
* either the original SP (if incrementing) or our
* final SP (if decrementing), so that's what we check.
*/
gen_helper_v8m_stackcheck(cpu_env, addr);
}
loaded_var = NULL; loaded_var = NULL;
for (i = 0; i < 16; i++) { for (i = 0; i < 16; i++) {
if ((insn & (1 << i)) == 0) if ((insn & (1 << i)) == 0)
@ -10522,7 +10591,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
if (loaded_base) { if (loaded_base) {
store_reg(s, rn, loaded_var); store_reg(s, rn, loaded_var);
} }
if (insn & (1 << 21)) { if (wback) {
/* Base register writeback. */ /* Base register writeback. */
if (insn & (1 << 24)) { if (insn & (1 << 24)) {
tcg_gen_addi_i32(addr, addr, -offset); tcg_gen_addi_i32(addr, addr, -offset);
@ -10583,7 +10652,13 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2)) if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
goto illegal_op; goto illegal_op;
tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp2);
if (rd != 15) { if (rd == 13 &&
((op == 2 && rn == 15) ||
(op == 8 && rn == 13) ||
(op == 13 && rn == 13))) {
/* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
store_sp_checked(s, tmp);
} else if (rd != 15) {
store_reg(s, rd, tmp); store_reg(s, rd, tmp);
} else { } else {
tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp);
@ -10600,6 +10675,10 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
tmp2 = load_reg(s, rm); tmp2 = load_reg(s, rm);
if ((insn & 0x70) != 0) if ((insn & 0x70) != 0)
goto illegal_op; goto illegal_op;
/*
* 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
* - MOV, MOVS (register-shifted register), flagsetting
*/
op = (insn >> 21) & 3; op = (insn >> 21) & 3;
logic_cc = (insn & (1 << 20)) != 0; logic_cc = (insn & (1 << 20)) != 0;
gen_arm_shift_reg(tmp, op, tmp2, logic_cc); gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
@ -11267,8 +11346,15 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
gen_jmp(s, s->pc + offset); gen_jmp(s, s->pc + offset);
} }
} else { } else {
/* Data processing immediate. */ /*
* 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
* - Data-processing (modified immediate, plain binary immediate)
*/
if (insn & (1 << 25)) { if (insn & (1 << 25)) {
/*
* 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
* - Data-processing (plain binary immediate)
*/
if (insn & (1 << 24)) { if (insn & (1 << 24)) {
if (insn & (1 << 20)) if (insn & (1 << 20))
goto illegal_op; goto illegal_op;
@ -11364,6 +11450,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
tmp = tcg_temp_new_i32(); tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, imm); tcg_gen_movi_i32(tmp, imm);
} }
store_reg(s, rd, tmp);
} else { } else {
/* Add/sub 12-bit immediate. */ /* Add/sub 12-bit immediate. */
if (rn == 15) { if (rn == 15) {
@ -11374,17 +11461,27 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
offset += imm; offset += imm;
tmp = tcg_temp_new_i32(); tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, offset); tcg_gen_movi_i32(tmp, offset);
store_reg(s, rd, tmp);
} else { } else {
tmp = load_reg(s, rn); tmp = load_reg(s, rn);
if (insn & (1 << 23)) if (insn & (1 << 23))
tcg_gen_subi_i32(tmp, tmp, imm); tcg_gen_subi_i32(tmp, tmp, imm);
else else
tcg_gen_addi_i32(tmp, tmp, imm); tcg_gen_addi_i32(tmp, tmp, imm);
if (rn == 13 && rd == 13) {
/* ADD SP, SP, imm or SUB SP, SP, imm */
store_sp_checked(s, tmp);
} else {
store_reg(s, rd, tmp);
}
} }
} }
store_reg(s, rd, tmp);
} }
} else { } else {
/*
* 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
* - Data-processing (modified immediate)
*/
int shifter_out = 0; int shifter_out = 0;
/* modified 12-bit immediate. */ /* modified 12-bit immediate. */
shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12); shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
@ -11426,7 +11523,11 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
goto illegal_op; goto illegal_op;
tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp2);
rd = (insn >> 8) & 0xf; rd = (insn >> 8) & 0xf;
if (rd != 15) { if (rd == 13 && rn == 13
&& (op == 8 || op == 13)) {
/* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
store_sp_checked(s, tmp);
} else if (rd != 15) {
store_reg(s, rd, tmp); store_reg(s, rd, tmp);
} else { } else {
tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp);
@ -11535,7 +11636,6 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
imm = -imm; imm = -imm;
/* Fall through. */ /* Fall through. */
case 0xf: /* Pre-increment. */ case 0xf: /* Pre-increment. */
tcg_gen_addi_i32(addr, addr, imm);
writeback = 1; writeback = 1;
break; break;
default: default:
@ -11547,6 +11647,28 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
issinfo = writeback ? ISSInvalid : rs; issinfo = writeback ? ISSInvalid : rs;
if (s->v8m_stackcheck && rn == 13 && writeback) {
/*
* Stackcheck. Here we know 'addr' is the current SP;
* if imm is +ve we're moving SP up, else down. It is
* UNKNOWN whether the limit check triggers when SP starts
* below the limit and ends up above it; we chose to do so.
*/
if ((int32_t)imm < 0) {
TCGv_i32 newsp = tcg_temp_new_i32();
tcg_gen_addi_i32(newsp, addr, imm);
gen_helper_v8m_stackcheck(cpu_env, newsp);
tcg_temp_free_i32(newsp);
} else {
gen_helper_v8m_stackcheck(cpu_env, addr);
}
}
if (writeback && !postinc) {
tcg_gen_addi_i32(addr, addr, imm);
}
if (insn & (1 << 20)) { if (insn & (1 << 20)) {
/* Load. */ /* Load. */
tmp = tcg_temp_new_i32(); tmp = tcg_temp_new_i32();
@ -11629,7 +11751,11 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
rd = insn & 7; rd = insn & 7;
op = (insn >> 11) & 3; op = (insn >> 11) & 3;
if (op == 3) { if (op == 3) {
/* add/subtract */ /*
* 0b0001_1xxx_xxxx_xxxx
* - Add, subtract (three low registers)
* - Add, subtract (two low registers and immediate)
*/
rn = (insn >> 3) & 7; rn = (insn >> 3) & 7;
tmp = load_reg(s, rn); tmp = load_reg(s, rn);
if (insn & (1 << 10)) { if (insn & (1 << 10)) {
@ -11666,7 +11792,10 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
} }
break; break;
case 2: case 3: case 2: case 3:
/* arithmetic large immediate */ /*
* 0b001x_xxxx_xxxx_xxxx
* - Add, subtract, compare, move (one low register and immediate)
*/
op = (insn >> 11) & 3; op = (insn >> 11) & 3;
rd = (insn >> 8) & 0x7; rd = (insn >> 8) & 0x7;
if (op == 0) { /* mov */ if (op == 0) { /* mov */
@ -11732,7 +11861,12 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
tmp2 = load_reg(s, rm); tmp2 = load_reg(s, rm);
tcg_gen_add_i32(tmp, tmp, tmp2); tcg_gen_add_i32(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp); if (rd == 13) {
/* ADD SP, SP, reg */
store_sp_checked(s, tmp);
} else {
store_reg(s, rd, tmp);
}
break; break;
case 1: /* cmp */ case 1: /* cmp */
tmp = load_reg(s, rd); tmp = load_reg(s, rd);
@ -11743,7 +11877,12 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
break; break;
case 2: /* mov/cpy */ case 2: /* mov/cpy */
tmp = load_reg(s, rm); tmp = load_reg(s, rm);
store_reg(s, rd, tmp); if (rd == 13) {
/* MOV SP, reg */
store_sp_checked(s, tmp);
} else {
store_reg(s, rd, tmp);
}
break; break;
case 3: case 3:
{ {
@ -11793,7 +11932,10 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
break; break;
} }
/* data processing register */ /*
* 0b0100_00xx_xxxx_xxxx
* - Data-processing (two low registers)
*/
rd = insn & 7; rd = insn & 7;
rm = (insn >> 3) & 7; rm = (insn >> 3) & 7;
op = (insn >> 6) & 0xf; op = (insn >> 6) & 0xf;
@ -12071,7 +12213,10 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
break; break;
case 10: case 10:
/* add to high reg */ /*
* 0b1010_xxxx_xxxx_xxxx
* - Add PC/SP (immediate)
*/
rd = (insn >> 8) & 7; rd = (insn >> 8) & 7;
if (insn & (1 << 11)) { if (insn & (1 << 11)) {
/* SP */ /* SP */
@ -12091,13 +12236,17 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
op = (insn >> 8) & 0xf; op = (insn >> 8) & 0xf;
switch (op) { switch (op) {
case 0: case 0:
/* adjust stack pointer */ /*
* 0b1011_0000_xxxx_xxxx
* - ADD (SP plus immediate)
* - SUB (SP minus immediate)
*/
tmp = load_reg(s, 13); tmp = load_reg(s, 13);
val = (insn & 0x7f) * 4; val = (insn & 0x7f) * 4;
if (insn & (1 << 7)) if (insn & (1 << 7))
val = -(int32_t)val; val = -(int32_t)val;
tcg_gen_addi_i32(tmp, tmp, val); tcg_gen_addi_i32(tmp, tmp, val);
store_reg(s, 13, tmp); store_sp_checked(s, tmp);
break; break;
case 2: /* sign/zero extend. */ case 2: /* sign/zero extend. */
@ -12114,7 +12263,10 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
store_reg(s, rd, tmp); store_reg(s, rd, tmp);
break; break;
case 4: case 5: case 0xc: case 0xd: case 4: case 5: case 0xc: case 0xd:
/* push/pop */ /*
* 0b1011_x10x_xxxx_xxxx
* - push/pop
*/
addr = load_reg(s, 13); addr = load_reg(s, 13);
if (insn & (1 << 8)) if (insn & (1 << 8))
offset = 4; offset = 4;
@ -12127,6 +12279,17 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
if ((insn & (1 << 11)) == 0) { if ((insn & (1 << 11)) == 0) {
tcg_gen_addi_i32(addr, addr, -offset); tcg_gen_addi_i32(addr, addr, -offset);
} }
if (s->v8m_stackcheck) {
/*
* Here 'addr' is the lower of "old SP" and "new SP";
* if this is a pop that starts below the limit and ends
* above it, it is UNKNOWN whether the limit check triggers;
* we choose to trigger.
*/
gen_helper_v8m_stackcheck(cpu_env, addr);
}
for (i = 0; i < 8; i++) { for (i = 0; i < 8; i++) {
if (insn & (1 << i)) { if (insn & (1 << i)) {
if (insn & (1 << 11)) { if (insn & (1 << 11)) {
@ -12451,6 +12614,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags); dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
regime_is_secure(env, dc->mmu_idx); regime_is_secure(env, dc->mmu_idx);
dc->v8m_stackcheck = ARM_TBFLAG_STACKCHECK(dc->base.tb->flags);
dc->cp_regs = cpu->cp_regs; dc->cp_regs = cpu->cp_regs;
dc->features = env->features; dc->features = env->features;

View File

@ -38,6 +38,7 @@ typedef struct DisasContext {
int vec_stride; int vec_stride;
bool v7m_handler_mode; bool v7m_handler_mode;
bool v8m_secure; /* true if v8M and we're in Secure mode */ bool v8m_secure; /* true if v8M and we're in Secure mode */
bool v8m_stackcheck; /* true if we need to perform v8M stack limit checks */
/* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
* so that top level loop can generate correct syndrome information. * so that top level loop can generate correct syndrome information.
*/ */