From eae0c3b659fbad5168c9bb9784b49d255185e35c Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Wed, 12 Mar 2025 13:25:05 +0000 Subject: [PATCH 01/17] target/arm: Move A32_BANKED_REG_{GET,SET} macros to cpregs.h The A32_BANKED_REG_{GET,SET} macros are only used inside target/arm; move their definitions to cpregs.h. There's no need to have them defined in all the code that includes cpu.h. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/cpregs.h | 28 ++++++++++++++++++++++++++++ target/arm/cpu.h | 27 --------------------------- 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h index 52377c6eb5..2183de8eda 100644 --- a/target/arm/cpregs.h +++ b/target/arm/cpregs.h @@ -1157,4 +1157,32 @@ static inline bool arm_cpreg_traps_in_nv(const ARMCPRegInfo *ri) return ri->opc1 == 4 || ri->opc1 == 5; } +/* Macros for accessing a specified CP register bank */ +#define A32_BANKED_REG_GET(_env, _regname, _secure) \ + ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns) + +#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \ + do { \ + if (_secure) { \ + (_env)->cp15._regname##_s = (_val); \ + } else { \ + (_env)->cp15._regname##_ns = (_val); \ + } \ + } while (0) + +/* + * Macros for automatically accessing a specific CP register bank depending on + * the current secure state of the system. These macros are not intended for + * supporting instruction translation reads/writes as these are dependent + * solely on the SCR.NS bit and not the mode. + */ +#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \ + A32_BANKED_REG_GET((_env), _regname, \ + (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3))) + +#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \ + A32_BANKED_REG_SET((_env), _regname, \ + (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \ + (_val)) + #endif /* TARGET_ARM_CPREGS_H */ diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 8f52380c88..15d3a79b0a 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -2684,33 +2684,6 @@ static inline bool access_secure_reg(CPUARMState *env) return ret; } -/* Macros for accessing a specified CP register bank */ -#define A32_BANKED_REG_GET(_env, _regname, _secure) \ - ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns) - -#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \ - do { \ - if (_secure) { \ - (_env)->cp15._regname##_s = (_val); \ - } else { \ - (_env)->cp15._regname##_ns = (_val); \ - } \ - } while (0) - -/* Macros for automatically accessing a specific CP register bank depending on - * the current secure state of the system. These macros are not intended for - * supporting instruction translation reads/writes as these are dependent - * solely on the SCR.NS bit and not the mode. - */ -#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \ - A32_BANKED_REG_GET((_env), _regname, \ - (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3))) - -#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \ - A32_BANKED_REG_SET((_env), _regname, \ - (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \ - (_val)) - uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, uint32_t cur_el, bool secure); From 23560ada94bd22cb9e8d27b7e9389f6369f6d74d Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Wed, 12 Mar 2025 13:25:05 +0000 Subject: [PATCH 02/17] target/arm: Un-inline access_secure_reg() We would like to move arm_el_is_aa64() to internals.h; however, it is used by access_secure_reg(). 
Make that function not be inline, so that it can stay in cpu.h. access_secure_reg() is used only in two places: * in hflags.c * in the user-mode arm emulators, to decide whether to store the TLS value in the secure or non-secure banked field The second of these is not on a super-hot path that would care about the inlining (and incidentally will always use the NS banked field because our user-mode CPUs never set ARM_FEATURE_EL3); put the definition of access_secure_reg() in hflags.c, near its only use inside target/arm. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/cpu.h | 12 +++--------- target/arm/tcg/hflags.c | 9 +++++++++ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 15d3a79b0a..12d2706f2b 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -2668,21 +2668,15 @@ static inline bool arm_el_is_aa64(CPUARMState *env, int el) return aa64; } -/* Function for determining whether guest cp register reads and writes should +/* + * Function for determining whether guest cp register reads and writes should * access the secure or non-secure bank of a cp register. When EL3 is * operating in AArch32 state, the NS-bit determines whether the secure * instance of a cp register should be used. When EL3 is AArch64 (or if * it doesn't exist at all) then there is no register banking, and all * accesses are to the non-secure version. */ -static inline bool access_secure_reg(CPUARMState *env) -{ - bool ret = (arm_feature(env, ARM_FEATURE_EL3) && - !arm_el_is_aa64(env, 3) && - !(env->cp15.scr_el3 & SCR_NS)); - - return ret; -} +bool access_secure_reg(CPUARMState *env); uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, uint32_t cur_el, bool secure); diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c index 9e6a1869f9..8d79b8b7ae 100644 --- a/target/arm/tcg/hflags.c +++ b/target/arm/tcg/hflags.c @@ -63,6 +63,15 @@ static bool aprofile_require_alignment(CPUARMState *env, int el, uint64_t sctlr) #endif } +bool access_secure_reg(CPUARMState *env) +{ + bool ret = (arm_feature(env, ARM_FEATURE_EL3) && + !arm_el_is_aa64(env, 3) && + !(env->cp15.scr_el3 & SCR_NS)); + + return ret; +} + static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el, ARMMMUIdx mmu_idx, CPUARMTBFlags flags) From fe0f88ab87632075ae9404685672a8f172a3ae6f Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Wed, 12 Mar 2025 13:25:05 +0000 Subject: [PATCH 03/17] linux-user/aarch64: Remove unused get/put_user macros At the top of linux-user/aarch64/cpu_loop.c we define a set of macros for reading and writing data and code words, but we never use these macros. Delete them. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- linux-user/aarch64/cpu_loop.c | 48 ----------------------------------- 1 file changed, 48 deletions(-) diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c index c5d8a483a3..fea43cefa6 100644 --- a/linux-user/aarch64/cpu_loop.c +++ b/linux-user/aarch64/cpu_loop.c @@ -27,54 +27,6 @@ #include "target/arm/syndrome.h" #include "target/arm/cpu-features.h" -#define get_user_code_u32(x, gaddr, env) \ - ({ abi_long __r = get_user_u32((x), (gaddr)); \ - if (!__r && bswap_code(arm_sctlr_b(env))) { \ - (x) = bswap32(x); \ - } \ - __r; \ - }) - -#define get_user_code_u16(x, gaddr, env) \ - ({ abi_long __r = get_user_u16((x), (gaddr)); \ - if (!__r && bswap_code(arm_sctlr_b(env))) { \ - (x) = bswap16(x); \ - } \ - __r; \ - }) - -#define get_user_data_u32(x, gaddr, env) \ - ({ abi_long __r = get_user_u32((x), (gaddr)); \ - if (!__r && arm_cpu_bswap_data(env)) { \ - (x) = bswap32(x); \ - } \ - __r; \ - }) - -#define get_user_data_u16(x, gaddr, env) \ - ({ abi_long __r = get_user_u16((x), (gaddr)); \ - if (!__r && arm_cpu_bswap_data(env)) { \ - (x) = bswap16(x); \ - } \ - __r; \ - }) - -#define put_user_data_u32(x, gaddr, env) \ - ({ typeof(x) __x = (x); \ - if (arm_cpu_bswap_data(env)) { \ - __x = bswap32(__x); \ - } \ - put_user_u32(__x, (gaddr)); \ - }) - -#define put_user_data_u16(x, gaddr, env) \ - ({ typeof(x) __x = (x); \ - if (arm_cpu_bswap_data(env)) { \ - __x = bswap16(__x); \ - } \ - put_user_u16(__x, (gaddr)); \ - }) - /* AArch64 main loop */ void cpu_loop(CPUARMState *env) { From 63d8b11d0aeb84ba53510cdf66612940a372451f Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Wed, 12 Mar 2025 13:25:06 +0000 Subject: [PATCH 04/17] linux-user/arm: Remove unused get_put_user macros In linux-user/arm/cpu_loop.c we define a full set of get/put macros for both code and data (since the endianness handling is different between the two). However the only one we actually use is get_user_code_u32(). Remove the rest. We leave a comment noting how data-side accesses should be handled for big-endian, because that's a subtle point and we just removed the macros that were effectively documenting it. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- linux-user/arm/cpu_loop.c | 43 ++++----------------------------------- 1 file changed, 4 insertions(+), 39 deletions(-) diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c index 10d8561f9b..7416e3216e 100644 --- a/linux-user/arm/cpu_loop.c +++ b/linux-user/arm/cpu_loop.c @@ -36,45 +36,10 @@ __r; \ }) -#define get_user_code_u16(x, gaddr, env) \ - ({ abi_long __r = get_user_u16((x), (gaddr)); \ - if (!__r && bswap_code(arm_sctlr_b(env))) { \ - (x) = bswap16(x); \ - } \ - __r; \ - }) - -#define get_user_data_u32(x, gaddr, env) \ - ({ abi_long __r = get_user_u32((x), (gaddr)); \ - if (!__r && arm_cpu_bswap_data(env)) { \ - (x) = bswap32(x); \ - } \ - __r; \ - }) - -#define get_user_data_u16(x, gaddr, env) \ - ({ abi_long __r = get_user_u16((x), (gaddr)); \ - if (!__r && arm_cpu_bswap_data(env)) { \ - (x) = bswap16(x); \ - } \ - __r; \ - }) - -#define put_user_data_u32(x, gaddr, env) \ - ({ typeof(x) __x = (x); \ - if (arm_cpu_bswap_data(env)) { \ - __x = bswap32(__x); \ - } \ - put_user_u32(__x, (gaddr)); \ - }) - -#define put_user_data_u16(x, gaddr, env) \ - ({ typeof(x) __x = (x); \ - if (arm_cpu_bswap_data(env)) { \ - __x = bswap16(__x); \ - } \ - put_user_u16(__x, (gaddr)); \ - }) +/* + * Note that if we need to do data accesses here, they should do a + * bswap if arm_cpu_bswap_data() returns true. + */ /* * Similar to code in accel/tcg/user-exec.c, but outside the execution loop. From fefc1220ad68d816627aebc393cbf2cb34ff6924 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Wed, 12 Mar 2025 13:25:06 +0000 Subject: [PATCH 05/17] target/arm: Move arm_cpu_data_is_big_endian() etc to internals.h The arm_cpu_data_is_big_endian() and related functions are now used only in target/arm; they can be moved to internals.h. The motivation here is that we would like to move arm_current_el() to internals.h. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/cpu.h | 48 ------------------------------------------ target/arm/internals.h | 48 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 12d2706f2b..8a59f70516 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -3032,47 +3032,6 @@ static inline bool arm_sctlr_b(CPUARMState *env) uint64_t arm_sctlr(CPUARMState *env, int el); -static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env, - bool sctlr_b) -{ -#ifdef CONFIG_USER_ONLY - /* - * In system mode, BE32 is modelled in line with the - * architecture (as word-invariant big-endianness), where loads - * and stores are done little endian but from addresses which - * are adjusted by XORing with the appropriate constant. So the - * endianness to use for the raw data access is not affected by - * SCTLR.B. - * In user mode, however, we model BE32 as byte-invariant - * big-endianness (because user-only code cannot tell the - * difference), and so we need to use a data access endianness - * that depends on SCTLR.B. - */ - if (sctlr_b) { - return true; - } -#endif - /* In 32bit endianness is determined by looking at CPSR's E bit */ - return env->uncached_cpsr & CPSR_E; -} - -static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr) -{ - return sctlr & (el ? SCTLR_EE : SCTLR_E0E); -} - -/* Return true if the processor is in big-endian mode. 
*/ -static inline bool arm_cpu_data_is_big_endian(CPUARMState *env) -{ - if (!is_a64(env)) { - return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env)); - } else { - int cur_el = arm_current_el(env); - uint64_t sctlr = arm_sctlr(env, cur_el); - return arm_cpu_data_is_big_endian_a64(cur_el, sctlr); - } -} - #include "exec/cpu-all.h" /* @@ -3258,13 +3217,6 @@ static inline bool bswap_code(bool sctlr_b) #endif } -#ifdef CONFIG_USER_ONLY -static inline bool arm_cpu_bswap_data(CPUARMState *env) -{ - return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env); -} -#endif - void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc, uint64_t *cs_base, uint32_t *flags); diff --git a/target/arm/internals.h b/target/arm/internals.h index bb96238919..c2c59e6030 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -392,6 +392,54 @@ static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode) return arm_rmode_to_sf_map[rmode]; } +static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env, + bool sctlr_b) +{ +#ifdef CONFIG_USER_ONLY + /* + * In system mode, BE32 is modelled in line with the + * architecture (as word-invariant big-endianness), where loads + * and stores are done little endian but from addresses which + * are adjusted by XORing with the appropriate constant. So the + * endianness to use for the raw data access is not affected by + * SCTLR.B. + * In user mode, however, we model BE32 as byte-invariant + * big-endianness (because user-only code cannot tell the + * difference), and so we need to use a data access endianness + * that depends on SCTLR.B. + */ + if (sctlr_b) { + return true; + } +#endif + /* In 32bit endianness is determined by looking at CPSR's E bit */ + return env->uncached_cpsr & CPSR_E; +} + +static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr) +{ + return sctlr & (el ? SCTLR_EE : SCTLR_E0E); +} + +/* Return true if the processor is in big-endian mode. */ +static inline bool arm_cpu_data_is_big_endian(CPUARMState *env) +{ + if (!is_a64(env)) { + return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env)); + } else { + int cur_el = arm_current_el(env); + uint64_t sctlr = arm_sctlr(env, cur_el); + return arm_cpu_data_is_big_endian_a64(cur_el, sctlr); + } +} + +#ifdef CONFIG_USER_ONLY +static inline bool arm_cpu_bswap_data(CPUARMState *env) +{ + return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env); +} +#endif + static inline void aarch64_save_sp(CPUARMState *env, int el) { if (env->pstate & PSTATE_SP) { From 2beb051191b526608e0f269559962f4d2f618850 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Wed, 12 Mar 2025 13:25:07 +0000 Subject: [PATCH 06/17] target/arm: Move arm_current_el() and arm_el_is_aa64() to internals.h The functions arm_current_el() and arm_el_is_aa64() are used only in target/arm and in hw/intc/arm_gicv3_cpuif.c. They're functions that query internal state of the CPU. Move them out of cpu.h and into internals.h. This means we need to include internals.h in arm_gicv3_cpuif.c, but this is justifiable because that file is implementing the GICv3 CPU interface, which really is part of the CPU proper; we just ended up implementing it in code in hw/intc/ for historical reasons. The motivation for this move is that we'd like to change arm_el_is_aa64() to add a condition that uses cpu_isar_feature(); but we don't want to include cpu-features.h in cpu.h. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- hw/intc/arm_gicv3_cpuif.c | 1 + target/arm/arch_dump.c | 1 + target/arm/cpu.h | 66 -------------------------------------- target/arm/internals.h | 67 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 69 insertions(+), 66 deletions(-) diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c index 7f1d071c19..de37465bc8 100644 --- a/hw/intc/arm_gicv3_cpuif.c +++ b/hw/intc/arm_gicv3_cpuif.c @@ -22,6 +22,7 @@ #include "cpu.h" #include "target/arm/cpregs.h" #include "target/arm/cpu-features.h" +#include "target/arm/internals.h" #include "system/tcg.h" #include "system/qtest.h" diff --git a/target/arm/arch_dump.c b/target/arm/arch_dump.c index 5c943dc27b..c40df4e7fd 100644 --- a/target/arm/arch_dump.c +++ b/target/arm/arch_dump.c @@ -23,6 +23,7 @@ #include "elf.h" #include "system/dump.h" #include "cpu-features.h" +#include "internals.h" /* struct user_pt_regs from arch/arm64/include/uapi/asm/ptrace.h */ struct aarch64_user_regs { diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 8a59f70516..a8177c6c2e 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -2635,39 +2635,6 @@ uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space); uint64_t arm_hcr_el2_eff(CPUARMState *env); uint64_t arm_hcrx_el2_eff(CPUARMState *env); -/* Return true if the specified exception level is running in AArch64 state. */ -static inline bool arm_el_is_aa64(CPUARMState *env, int el) -{ - /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want, - * and if we're not in EL0 then the state of EL0 isn't well defined.) - */ - assert(el >= 1 && el <= 3); - bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64); - - /* The highest exception level is always at the maximum supported - * register width, and then lower levels have a register width controlled - * by bits in the SCR or HCR registers. - */ - if (el == 3) { - return aa64; - } - - if (arm_feature(env, ARM_FEATURE_EL3) && - ((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) { - aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW); - } - - if (el == 2) { - return aa64; - } - - if (arm_is_el2_enabled(env)) { - aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW); - } - - return aa64; -} - /* * Function for determining whether guest cp register reads and writes should * access the secure or non-secure bank of a cp register. When EL3 is @@ -2699,39 +2666,6 @@ static inline bool arm_v7m_is_handler_mode(CPUARMState *env) return env->v7m.exception != 0; } -/* Return the current Exception Level (as per ARMv8; note that this differs - * from the ARMv7 Privilege Level). 
- */ -static inline int arm_current_el(CPUARMState *env) -{ - if (arm_feature(env, ARM_FEATURE_M)) { - return arm_v7m_is_handler_mode(env) || - !(env->v7m.control[env->v7m.secure] & 1); - } - - if (is_a64(env)) { - return extract32(env->pstate, 2, 2); - } - - switch (env->uncached_cpsr & 0x1f) { - case ARM_CPU_MODE_USR: - return 0; - case ARM_CPU_MODE_HYP: - return 2; - case ARM_CPU_MODE_MON: - return 3; - default: - if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { - /* If EL3 is 32-bit then all secure privileged modes run in - * EL3 - */ - return 3; - } - - return 1; - } -} - /** * write_list_to_cpustate * @cpu: ARMCPU diff --git a/target/arm/internals.h b/target/arm/internals.h index c2c59e6030..d161a3e396 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -392,6 +392,73 @@ static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode) return arm_rmode_to_sf_map[rmode]; } +/* Return true if the specified exception level is running in AArch64 state. */ +static inline bool arm_el_is_aa64(CPUARMState *env, int el) +{ + /* + * This isn't valid for EL0 (if we're in EL0, is_a64() is what you want, + * and if we're not in EL0 then the state of EL0 isn't well defined.) + */ + assert(el >= 1 && el <= 3); + bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64); + + /* + * The highest exception level is always at the maximum supported + * register width, and then lower levels have a register width controlled + * by bits in the SCR or HCR registers. + */ + if (el == 3) { + return aa64; + } + + if (arm_feature(env, ARM_FEATURE_EL3) && + ((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) { + aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW); + } + + if (el == 2) { + return aa64; + } + + if (arm_is_el2_enabled(env)) { + aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW); + } + + return aa64; +} + +/* + * Return the current Exception Level (as per ARMv8; note that this differs + * from the ARMv7 Privilege Level). + */ +static inline int arm_current_el(CPUARMState *env) +{ + if (arm_feature(env, ARM_FEATURE_M)) { + return arm_v7m_is_handler_mode(env) || + !(env->v7m.control[env->v7m.secure] & 1); + } + + if (is_a64(env)) { + return extract32(env->pstate, 2, 2); + } + + switch (env->uncached_cpsr & 0x1f) { + case ARM_CPU_MODE_USR: + return 0; + case ARM_CPU_MODE_HYP: + return 2; + case ARM_CPU_MODE_MON: + return 3; + default: + if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { + /* If EL3 is 32-bit then all secure privileged modes run in EL3 */ + return 3; + } + + return 1; + } +} + static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env, bool sctlr_b) { From 5d71c6820f3b91763b5807311969cc0362d457d9 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Wed, 12 Mar 2025 13:25:07 +0000 Subject: [PATCH 07/17] target/arm: SCR_EL3.RW should be treated as 1 if EL2 doesn't support AArch32 The definition of SCR_EL3.RW says that its effective value is 1 if: - EL2 is implemented and does not support AArch32, and SCR_EL3.NS is 1 - the effective value of SCR_EL3.{EEL2,NS} is {1,0} (i.e. we are Secure and Secure EL2 is disabled) We implement the second of these in arm_el_is_aa64(), but forgot the first. Provide a new function arm_scr_rw_eff() to return the effective value of SCR_EL3.RW, and use it in arm_el_is_aa64() and the other places that currently look directly at the bit value. (scr_write() enforces that the RW bit is RAO/WI if neither EL1 nor EL2 have AArch32 support, but if EL1 does but EL2 does not then the bit must still be writeable.) 
This will mean that if code at EL3 attempts to perform an exception return to AArch32 EL2 when EL2 is AArch64-only we will correctly handle this as an illegal exception return: it will be caught by the "return to an EL which is configured for a different register width" check in HELPER(exception_return). We do already have some CPU types which don't implement AArch32 above EL0, so this is technically a bug; it doesn't seem worth backporting to stable because no sensible guest code will be deliberately attempting to set the RW bit to a value corresponding to an unimplemented execution state and then checking that we did the right thing. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/helper.c | 4 ++-- target/arm/internals.h | 26 +++++++++++++++++++++++--- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/target/arm/helper.c b/target/arm/helper.c index f0ead22937..3df7d5347c 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -9818,7 +9818,7 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, uint64_t hcr_el2; if (arm_feature(env, ARM_FEATURE_EL3)) { - rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); + rw = arm_scr_rw_eff(env); } else { /* * Either EL2 is the highest EL (and so the EL2 register width @@ -10627,7 +10627,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) switch (new_el) { case 3: - is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; + is_aa64 = arm_scr_rw_eff(env); break; case 2: hcr = arm_hcr_el2_eff(env); diff --git a/target/arm/internals.h b/target/arm/internals.h index d161a3e396..28585c0755 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -392,6 +392,27 @@ static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode) return arm_rmode_to_sf_map[rmode]; } +/* Return the effective value of SCR_EL3.RW */ +static inline bool arm_scr_rw_eff(CPUARMState *env) +{ + /* + * SCR_EL3.RW has an effective value of 1 if: + * - we are NS and EL2 is implemented but doesn't support AArch32 + * - we are S and EL2 is enabled (in which case it must be AArch64) + */ + ARMCPU *cpu = env_archcpu(env); + + if (env->cp15.scr_el3 & SCR_RW) { + return true; + } + if (env->cp15.scr_el3 & SCR_NS) { + return arm_feature(env, ARM_FEATURE_EL2) && + !cpu_isar_feature(aa64_aa32_el2, cpu); + } else { + return env->cp15.scr_el3 & SCR_EEL2; + } +} + /* Return true if the specified exception level is running in AArch64 state. */ static inline bool arm_el_is_aa64(CPUARMState *env, int el) { @@ -411,9 +432,8 @@ static inline bool arm_el_is_aa64(CPUARMState *env, int el) return aa64; } - if (arm_feature(env, ARM_FEATURE_EL3) && - ((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) { - aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW); + if (arm_feature(env, ARM_FEATURE_EL3)) { + aa64 = aa64 && arm_scr_rw_eff(env); } if (el == 2) { From 39ec3fc030166c594a64d1d197e29fa9d100d4c5 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Wed, 12 Mar 2025 13:25:07 +0000 Subject: [PATCH 08/17] target/arm: HCR_EL2.RW should be RAO/WI if EL1 doesn't support AArch32 When EL1 doesn't support AArch32, the HCR_EL2.RW bit is supposed to be RAO/WI. Enforce the RAO/WI behaviour. Note that we handle "reset value should honour RES1 bits" in the same way that SCR_EL3 does, via a reset function. 
We do already have some CPU types which don't implement AArch32 above EL0, so this is technically a bug; it doesn't seem worth backporting to stable because no sensible guest code will be deliberately attempting to set the RW bit to a value corresponding to an unimplemented execution state and then checking that we did the right thing. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/helper.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/target/arm/helper.c b/target/arm/helper.c index 3df7d5347c..bb445e30cd 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -5326,6 +5326,11 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) /* Clear RES0 bits. */ value &= valid_mask; + /* RW is RAO/WI if EL1 is AArch64 only */ + if (!cpu_isar_feature(aa64_aa32_el1, cpu)) { + value |= HCR_RW; + } + /* * These bits change the MMU setup: * HCR_VM enables stage 2 translation @@ -5383,6 +5388,12 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32)); } +static void hcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* hcr_write will set the RES1 bits on an AArch64-only CPU */ + hcr_write(env, ri, 0); +} + /* * Return the effective value of HCR_EL2, at the given security state. * Bits that are not included here: @@ -5618,6 +5629,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), .nv2_redirect_offset = 0x78, + .resetfn = hcr_reset, .writefn = hcr_write, .raw_writefn = raw_write }, { .name = "HCR", .state = ARM_CP_STATE_AA32, .type = ARM_CP_ALIAS | ARM_CP_IO, From 44ab8c248dee2d899dfe858ce1962fedcd3398a1 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Wed, 12 Mar 2025 13:25:08 +0000 Subject: [PATCH 09/17] target/arm: Add cpu local variable to exception_return helper We already call env_archcpu() multiple times within the exception_return helper function, and we're about to want to add another use of the ARMCPU pointer. Add a local variable cpu so we can call env_archcpu() just once. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-a64.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c index 32f0647ca4..e2bdf07833 100644 --- a/target/arm/tcg/helper-a64.c +++ b/target/arm/tcg/helper-a64.c @@ -631,6 +631,7 @@ static void cpsr_write_from_spsr_elx(CPUARMState *env, void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) { + ARMCPU *cpu = env_archcpu(env); int cur_el = arm_current_el(env); unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el); uint32_t spsr = env->banked_spsr[spsr_idx]; @@ -682,7 +683,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) } bql_lock(); - arm_call_pre_el_change_hook(env_archcpu(env)); + arm_call_pre_el_change_hook(cpu); bql_unlock(); if (!return_to_aa64) { @@ -710,7 +711,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) int tbii; env->aarch64 = true; - spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar); + spsr &= aarch64_pstate_valid_mask(&cpu->isar); pstate_write(env, spsr); if (!arm_singlestep_active(env)) { env->pstate &= ~PSTATE_SS; @@ -749,7 +750,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64); bql_lock(); - arm_call_el_change_hook(env_archcpu(env)); + arm_call_el_change_hook(cpu); bql_unlock(); return; From 097d68ac2fd9bd40d0b6a3b3992c86a1f79d7187 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Wed, 12 Mar 2025 13:25:08 +0000 Subject: [PATCH 10/17] target/arm: Forbid return to AArch32 when CPU is AArch64-only In the Arm ARM, rule R_TYTWB states that returning to AArch32 is an illegal exception return if: * AArch32 is not supported at any exception level * the target EL is configured for AArch64 via SCR_EL3.RW or HCR_EL2.RW or via CPU state at reset We check the second of these, but not the first (which can only be relevant for the case of a return to EL0, because if AArch32 is not supported at one of the higher ELs then the RW bits will have an effective value of 1 and the the "configured for AArch64" condition will hold also). Add the missing condition. Although this is technically a bug (because we have one AArch64-only CPU: a64fx) it isn't worth backporting to stable because no sensible guest code will deliberately try to return to a nonexistent execution state to check that it gets an illegal exception return. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson --- target/arm/tcg/helper-a64.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c index e2bdf07833..9244848efe 100644 --- a/target/arm/tcg/helper-a64.c +++ b/target/arm/tcg/helper-a64.c @@ -678,6 +678,11 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) goto illegal_return; } + if (!return_to_aa64 && !cpu_isar_feature(aa64_aa32, cpu)) { + /* Return to AArch32 when CPU is AArch64-only */ + goto illegal_return; + } + if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) { goto illegal_return; } From adb478a584dcf9c112fe8a6f9a7369162d3239fb Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Fri, 7 Mar 2025 15:28:38 +0000 Subject: [PATCH 11/17] MAINTAINERS: Fix status for Arm boards I "maintain" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I'm down as the only listed maintainer for quite a lot of Arm SoC and board types. 
In some cases this is only as the "maintainer of last resort" and I'm not in practice doing anything beyond patch review and the odd bit of tidyup. Move these entries in MAINTAINERS from "Maintained" to "Odd Fixes", to better represent reality. Entries for other boards and SoCs where I do more actively care (or where there is a listed co-maintainer) remain as they are. Signed-off-by: Peter Maydell Reviewed-by: Philippe Mathieu-Daudé Message-id: 20250307152838.3226398-1-peter.maydell@linaro.org --- MAINTAINERS | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 31b395fdfa..8f470a1c9b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -786,7 +786,7 @@ F: docs/system/arm/kzm.rst Integrator CP M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/arm/integratorcp.c F: hw/misc/arm_integrator_debug.c F: include/hw/misc/arm_integrator_debug.h @@ -867,7 +867,7 @@ F: docs/system/arm/mps2.rst Musca M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/arm/musca.c F: docs/system/arm/musca.rst @@ -915,7 +915,7 @@ F: tests/functional/test_aarch64_raspi4.py Real View M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/arm/realview* F: hw/cpu/realview_mpcore.c F: hw/intc/realview_gic.c @@ -965,7 +965,7 @@ F: tests/functional/test_arm_collie.py Stellaris M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/*/stellaris* F: hw/display/ssd03* F: include/hw/input/gamepad.h @@ -995,7 +995,7 @@ F: docs/system/arm/stm32.rst Versatile Express M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/arm/vexpress.c F: hw/display/sii9022.c F: docs/system/arm/vexpress.rst @@ -1004,7 +1004,7 @@ F: tests/functional/test_arm_vexpress.py Versatile PB M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/*/versatile* F: hw/i2c/arm_sbcon_i2c.c F: include/hw/i2c/arm_sbcon_i2c.h @@ -2003,7 +2003,7 @@ F: include/hw/hyperv/vmbus*.h OMAP M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/*/omap* F: include/hw/arm/omap.h F: docs/system/arm/sx1.rst From 5b14454d37854f5c4227d642133a477a07e49759 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 11 Mar 2025 16:37:17 +0100 Subject: [PATCH 13/17] Revert "hw/char/pl011: Warn when using disabled receiver" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The guest does not control whether characters are sent on the UART. Sending them before the guest happens to boot will now result in a "guest error" log entry that is only because of timing, even if the guest _would_ later setup the receiver correctly. This reverts the bulk of commit abf2b6a028670bd2890bb3aee7e103fe53e4b0df, and instead adds a comment about why we don't check the enable bits. 
Cc: Philippe Mathieu-Daudé Cc: Peter Maydell Signed-off-by: Paolo Bonzini Message-id: 20250311153717.206129-1-pbonzini@redhat.com [PMM: expanded comment] Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- hw/char/pl011.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/hw/char/pl011.c b/hw/char/pl011.c index 23a9db8c57..0e9ec1301d 100644 --- a/hw/char/pl011.c +++ b/hw/char/pl011.c @@ -490,16 +490,17 @@ static int pl011_can_receive(void *opaque) unsigned fifo_depth = pl011_get_fifo_depth(s); unsigned fifo_available = fifo_depth - s->read_count; - if (!(s->cr & CR_UARTEN)) { - qemu_log_mask(LOG_GUEST_ERROR, - "PL011 receiving data on disabled UART\n"); - } - if (!(s->cr & CR_RXE)) { - qemu_log_mask(LOG_GUEST_ERROR, - "PL011 receiving data on disabled RX UART\n"); - } - trace_pl011_can_receive(s->lcr, s->read_count, fifo_depth, fifo_available); + /* + * In theory we should check the UART and RX enable bits here and + * return 0 if they are not set (so the guest can't receive data + * until you have enabled the UART). In practice we suspect there + * is at least some guest code out there which has been tested only + * on QEMU and which never bothers to enable the UART because we + * historically never enforced that. So we effectively keep the + * UART continuously enabled regardless of the enable bits. + */ + trace_pl011_can_receive(s->lcr, s->read_count, fifo_depth, fifo_available); return fifo_available; } From e6c38d2ab55d66c74ceade5699e22cabe9058d22 Mon Sep 17 00:00:00 2001 From: Joe Komlodi Date: Mon, 10 Mar 2025 20:36:22 +0000 Subject: [PATCH 14/17] util/cacheflush: Make first DSB unconditional on aarch64 On ARM hosts with CTR_EL0.DIC and CTR_EL0.IDC set, this would only cause an ISB to be executed during cache maintenance, which could lead to QEMU executing TBs containing garbage instructions. This seems to be because the ISB finishes executing instructions and flushes the pipeline, but the ISB doesn't guarantee that writes from the executed instructions are committed. If a small enough TB is created, it's possible that the writes setting up the TB aren't committed by the time the TB is executed. This function is intended to be a port of the gcc implementation (https://github.com/gcc-mirror/gcc/blob/85b46d0795ac76bc192cb8f88b646a647acf98c1/libgcc/config/aarch64/sync-cache.c#L67) which makes the first DSB unconditional, so we can fix the synchronization issue by doing that as well. Cc: qemu-stable@nongnu.org Fixes: 664a79735e4deb1 ("util: Specialize flush_idcache_range for aarch64") Signed-off-by: Joe Komlodi Message-id: 20250310203622.1827940-2-komlodi@google.com Reviewed-by: Peter Maydell Reviewed-by: Richard Henderson Signed-off-by: Peter Maydell --- util/cacheflush.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/util/cacheflush.c b/util/cacheflush.c index a08906155a..1d12899a39 100644 --- a/util/cacheflush.c +++ b/util/cacheflush.c @@ -279,9 +279,11 @@ void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len) for (p = rw & -dcache_lsize; p < rw + len; p += dcache_lsize) { asm volatile("dc\tcvau, %0" : : "r" (p) : "memory"); } - asm volatile("dsb\tish" : : : "memory"); } + /* DSB unconditionally to ensure any outstanding writes are committed. */ + asm volatile("dsb\tish" : : : "memory"); + /* * If CTR_EL0.DIC is enabled, Instruction cache cleaning to the Point * of Unification is not required for instruction to data coherence. 
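
For context, the ordering the patch above restores is the standard AArch64
recipe for making freshly written code executable; in sketch form only
(illustrative, not the exact QEMU code):

    dc  cvau, x0    // clean each affected D-cache line to the Point of Unification
                    // (skippable when CTR_EL0.IDC says it is not needed)
    dsb ish         // always: complete the writes before continuing
    ic  ivau, x0    // invalidate each affected I-cache line
                    // (skippable when CTR_EL0.DIC says it is not needed)
    dsb ish
    isb             // resynchronize the instruction stream

The point of the fix is that even when both cache-maintenance loops are
skipped, the first DSB is still needed so that the stores which built the
translation block have completed before the ISB refetches instructions.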
From 298a04998fa4a6dc977abe9234d98dfcdab98423 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Fri, 7 Mar 2025 11:04:14 -0800 Subject: [PATCH 15/17] target/arm: Make DisasContext.{fp, sve}_access_checked tristate The check for fp_excp_el in assert_fp_access_checked is incorrect. For SME, with StreamingMode enabled, the access is really against the streaming mode vectors, and access to the normal fp registers is allowed to be disabled. C.f. sme_enabled_check. Convert sve_access_checked to match, even though we don't currently check the exception state. Cc: qemu-stable@nongnu.org Fixes: 3d74825f4d6 ("target/arm: Add SME enablement checks") Signed-off-by: Richard Henderson Message-id: 20250307190415.982049-2-richard.henderson@linaro.org Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/tcg/translate-a64.c | 17 +++++++++-------- target/arm/tcg/translate-a64.h | 2 +- target/arm/tcg/translate.h | 10 +++++++--- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 8bef391bb0..48e0ac75b1 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -1381,14 +1381,14 @@ static bool fp_access_check_only(DisasContext *s) { if (s->fp_excp_el) { assert(!s->fp_access_checked); - s->fp_access_checked = true; + s->fp_access_checked = -1; gen_exception_insn_el(s, 0, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false, 0), s->fp_excp_el); return false; } - s->fp_access_checked = true; + s->fp_access_checked = 1; return true; } @@ -1465,13 +1465,13 @@ bool sve_access_check(DisasContext *s) syn_sve_access_trap(), s->sve_excp_el); goto fail_exit; } - s->sve_access_checked = true; + s->sve_access_checked = 1; return fp_access_check(s); fail_exit: /* Assert that we only raise one exception per instruction. */ assert(!s->sve_access_checked); - s->sve_access_checked = true; + s->sve_access_checked = -1; return false; } @@ -1500,8 +1500,9 @@ bool sme_enabled_check(DisasContext *s) * sme_excp_el by itself for cpregs access checks. */ if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) { - s->fp_access_checked = true; - return sme_access_check(s); + bool ret = sme_access_check(s); + s->fp_access_checked = (ret ? 
1 : -1); + return ret; } return fp_access_check_only(s); } @@ -10257,8 +10258,8 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) s->insn = insn; s->base.pc_next = pc + 4; - s->fp_access_checked = false; - s->sve_access_checked = false; + s->fp_access_checked = 0; + s->sve_access_checked = 0; if (s->pstate_il) { /* diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h index 7d3b59ccd9..b2420f59eb 100644 --- a/target/arm/tcg/translate-a64.h +++ b/target/arm/tcg/translate-a64.h @@ -65,7 +65,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, static inline void assert_fp_access_checked(DisasContext *s) { #ifdef CONFIG_DEBUG_TCG - if (unlikely(!s->fp_access_checked || s->fp_excp_el)) { + if (unlikely(s->fp_access_checked <= 0)) { fprintf(stderr, "target-arm: FP access check missing for " "instruction 0x%08x\n", s->insn); abort(); diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index f8dc2f0d4b..53e485d28a 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -92,15 +92,19 @@ typedef struct DisasContext { bool aarch64; bool thumb; bool lse2; - /* Because unallocated encodings generate different exception syndrome + /* + * Because unallocated encodings generate different exception syndrome * information from traps due to FP being disabled, we can't do a single * "is fp access disabled" check at a high level in the decode tree. * To help in catching bugs where the access check was forgotten in some * code path, we set this flag when the access check is done, and assert * that it is set at the point where we actually touch the FP regs. + * 0: not checked, + * 1: checked, access ok + * -1: checked, access denied */ - bool fp_access_checked; - bool sve_access_checked; + int8_t fp_access_checked; + int8_t sve_access_checked; /* ARMv8 single-step state (this is distinct from the QEMU gdbstub * single-step support). */ From cc7abc35dfa790ba6c20473c03745428c1c626b6 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Fri, 7 Mar 2025 11:04:15 -0800 Subject: [PATCH 16/17] target/arm: Simplify pstate_sm check in sve_access_check In StreamingMode, fp_access_checked is handled already. We cannot fall through to fp_access_check lest we fall foul of the double-check assertion. Cc: qemu-stable@nongnu.org Fixes: 285b1d5fcef ("target/arm: Handle SME in sve_access_check") Signed-off-by: Richard Henderson Message-id: 20250307190415.982049-3-richard.henderson@linaro.org Reviewed-by: Peter Maydell [PMM: move declaration of 'ret' to top of block] Signed-off-by: Peter Maydell --- target/arm/tcg/translate-a64.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 48e0ac75b1..39014325df 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -1456,23 +1456,23 @@ static int fp_access_check_vector_hsd(DisasContext *s, bool is_q, MemOp esz) bool sve_access_check(DisasContext *s) { if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) { + bool ret; + assert(dc_isar_feature(aa64_sme, s)); - if (!sme_sm_enabled_check(s)) { - goto fail_exit; - } - } else if (s->sve_excp_el) { + ret = sme_sm_enabled_check(s); + s->sve_access_checked = (ret ? 1 : -1); + return ret; + } + if (s->sve_excp_el) { + /* Assert that we only raise one exception per instruction. 
*/ + assert(!s->sve_access_checked); gen_exception_insn_el(s, 0, EXCP_UDEF, syn_sve_access_trap(), s->sve_excp_el); - goto fail_exit; + s->sve_access_checked = -1; + return false; } s->sve_access_checked = 1; return fp_access_check(s); - - fail_exit: - /* Assert that we only raise one exception per instruction. */ - assert(!s->sve_access_checked); - s->sve_access_checked = -1; - return false; } /* From a019e15edfd62beae1e2f6adc0fa7415ba20b14c Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 10 Mar 2025 10:29:50 +0000 Subject: [PATCH 17/17] meson.build: Set RUST_BACKTRACE for all tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want to capture potential Rust backtraces on panics in our test logs, which isn't Rust's default behaviour. Set RUST_BACKTRACE=1 in the add_test_setup environments, so that all our tests get run with this environment variable set. This makes the setting of that variable in the gitlab CI template redundant, so we can remove it. Signed-off-by: Peter Maydell Reviewed-by: Daniel P. Berrangé Reviewed-by: Philippe Mathieu-Daudé Message-id: 20250310102950.3752908-1-peter.maydell@linaro.org --- .gitlab-ci.d/buildtest-template.yml | 1 - meson.build | 9 ++++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.d/buildtest-template.yml b/.gitlab-ci.d/buildtest-template.yml index 4cc1923931..39da7698b0 100644 --- a/.gitlab-ci.d/buildtest-template.yml +++ b/.gitlab-ci.d/buildtest-template.yml @@ -63,7 +63,6 @@ stage: test image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG script: - - export RUST_BACKTRACE=1 - source scripts/ci/gitlab-ci-section - section_start buildenv "Setting up to run tests" - scripts/git-submodule.sh update roms/SLOF diff --git a/meson.build b/meson.build index 2f43fd81bf..7f75256acf 100644 --- a/meson.build +++ b/meson.build @@ -5,9 +5,12 @@ project('qemu', ['c'], meson_version: '>=1.5.0', meson.add_devenv({ 'MESON_BUILD_ROOT' : meson.project_build_root() }) -add_test_setup('quick', exclude_suites: ['slow', 'thorough'], is_default: true) -add_test_setup('slow', exclude_suites: ['thorough'], env: ['G_TEST_SLOW=1', 'SPEED=slow']) -add_test_setup('thorough', env: ['G_TEST_SLOW=1', 'SPEED=thorough']) +add_test_setup('quick', exclude_suites: ['slow', 'thorough'], is_default: true, + env: ['RUST_BACKTRACE=1']) +add_test_setup('slow', exclude_suites: ['thorough'], + env: ['G_TEST_SLOW=1', 'SPEED=slow', 'RUST_BACKTRACE=1']) +add_test_setup('thorough', + env: ['G_TEST_SLOW=1', 'SPEED=thorough', 'RUST_BACKTRACE=1']) meson.add_postconf_script(find_program('scripts/symlink-install-tree.py'))
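
A minimal usage sketch for the change above (assuming an already configured
build directory named build/): the named setups are selected per test run,
and every suite run under a setup inherits its environment, including the
new RUST_BACKTRACE=1 entry:

    meson test -C build                    # default 'quick' setup
    meson test -C build --setup slow       # G_TEST_SLOW=1, SPEED=slow, RUST_BACKTRACE=1
    meson test -C build --setup thorough   # G_TEST_SLOW=1, SPEED=thorough, RUST_BACKTRACE=1

RUST_BACKTRACE only changes what a Rust component prints when it actually
panics, so the output of passing tests is unaffected.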