From e65ecb665c88a7d61e5d21253c9672dedd72f84b Mon Sep 17 00:00:00 2001
From: Yuquan Wang
Date: Mon, 17 Jul 2023 11:05:07 +0100
Subject: [PATCH 1/7] hw/arm/sbsa-ref: set 'slots' property of xhci

This extends the number of xhci slots to 64, since the default
xhci_sysbus supports only one slot.

Signed-off-by: Wang Yuquan
Signed-off-by: Chen Baozi
Reviewed-by: Richard Henderson
Reviewed-by: Marcin Juszkiewicz
Tested-by: Marcin Juszkiewicz
Message-id: 20230710063750.473510-2-wangyuquan1236@phytium.com.cn
Signed-off-by: Peter Maydell
---
 hw/arm/sbsa-ref.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index 64e1cbce17..bc89eb4806 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -611,6 +611,7 @@ static void create_xhci(const SBSAMachineState *sms)
     hwaddr base = sbsa_ref_memmap[SBSA_XHCI].base;
     int irq = sbsa_ref_irqmap[SBSA_XHCI];
     DeviceState *dev = qdev_new(TYPE_XHCI_SYSBUS);
+    qdev_prop_set_uint32(dev, "slots", XHCI_MAXSLOTS);
 
     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);

From aab746106c5afcf940bd7a146a099cd485dfe6e5 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 17 Jul 2023 11:05:07 +0100
Subject: [PATCH 2/7] linux-user: Remove pointless NULL check in clock_adjtime handling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In the code for TARGET_NR_clock_adjtime, we set the pointer phtx to
the address of the local variable htx. This means it can never be
NULL, but later in the code we check it for NULL anyway. Coverity
complains about this (CID 1507683) because the NULL check comes after
a call to clock_adjtime() that assumes it is non-NULL.

Since phtx is always &htx, and is used only in three places, it's not
really necessary. Remove it, bringing the code structure into line
with that for TARGET_NR_clock_adjtime64, which already uses a simple
'&htx' when it wants a pointer to 'htx'.

Signed-off-by: Peter Maydell
Reviewed-by: Philippe Mathieu-Daudé
Reviewed-by: Richard Henderson
Message-id: 20230623144410.1837261-1-peter.maydell@linaro.org
---
 linux-user/syscall.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 1464151826..c99ef9c01e 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -11190,16 +11190,14 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
     case TARGET_NR_clock_adjtime:
         {
-            struct timex htx, *phtx = &htx;
+            struct timex htx;
 
-            if (target_to_host_timex(phtx, arg2) != 0) {
+            if (target_to_host_timex(&htx, arg2) != 0) {
                 return -TARGET_EFAULT;
             }
-            ret = get_errno(clock_adjtime(arg1, phtx));
-            if (!is_error(ret) && phtx) {
-                if (host_to_target_timex(arg2, phtx) != 0) {
-                    return -TARGET_EFAULT;
-                }
+            ret = get_errno(clock_adjtime(arg1, &htx));
+            if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
+                return -TARGET_EFAULT;
             }
         }
         return ret;
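
The fixed code follows a simple shape: convert the guest struct, call the
host syscall with &htx, convert the result back. The standalone host-side
snippet below (an editorial illustration, not part of the patch) shows the
same call with a harmless read-only query; since &htx is the address of a
local variable it can never be NULL, which is why the removed check was
dead code:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <time.h>
    #include <sys/timex.h>

    int main(void)
    {
        struct timex htx = { .modes = 0 };  /* modes == 0: read-only query */

        /* &htx points at a local, so a NULL check on it can never fire */
        int state = clock_adjtime(CLOCK_REALTIME, &htx);
        if (state < 0) {
            perror("clock_adjtime");
            return 1;
        }
        printf("clock state %d, frequency offset %ld\n", state, htx.freq);
        return 0;
    }

On success clock_adjtime() returns the clock state (TIME_OK and friends),
which is why the QEMU code wraps it in get_errno() rather than testing
for a zero return.
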
From 34eed551273cb42dbbff4d8fceb7b3576436aff3 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 17 Jul 2023 11:05:07 +0100
Subject: [PATCH 3/7] target/arm/ptw.c: Add comments to S1Translate struct fields

Add comments to the in_* fields in the S1Translate struct that
explain what they're doing.

Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
Message-id: 20230710152130.3928330-2-peter.maydell@linaro.org
---
 target/arm/ptw.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 9aaff1546a..21749375f9 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -19,10 +19,50 @@
 #endif
 
 typedef struct S1Translate {
+    /*
+     * in_mmu_idx: specifies which TTBR, TCR, etc to use for the walk.
+     * Together with in_space, specifies the architectural translation regime.
+     */
     ARMMMUIdx in_mmu_idx;
+    /*
+     * in_ptw_idx: specifies which mmuidx to use for the actual
+     * page table descriptor load operations. This will be one of the
+     * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     */
     ARMMMUIdx in_ptw_idx;
+    /*
+     * in_space: the security space for this walk. This plus
+     * the in_mmu_idx specify the architectural translation regime.
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     *
+     * Note that the security space for the in_ptw_idx may be different
+     * from that for the in_mmu_idx. We do not need to explicitly track
+     * the in_ptw_idx security space because:
+     *  - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
+     *    itself specifies the security space
+     *  - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
+     *    space used for ptw reads is the same as that of the security
+     *    space of the stage 1 translation for all cases except where
+     *    stage 1 is Secure; in that case the only possibilities for
+     *    the ptw read are Secure and NonSecure, and the in_ptw_idx
+     *    value being Stage2 vs Stage2_S distinguishes those.
+     */
     ARMSecuritySpace in_space;
+    /*
+     * in_secure: whether the translation regime is a Secure one.
+     * This is always equal to arm_space_is_secure(in_space).
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     */
     bool in_secure;
+    /*
+     * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
+     * accesses will not update the guest page table access flags
+     * and will not change the state of the softmmu TLBs.
+     */
     bool in_debug;
     /*
      * If this is stage 2 of a stage 1+2 page table walk, then this must
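
The invariant documented for in_secure suggests an obvious runtime check.
A hypothetical debug helper (an editorial sketch, not part of the patch;
S1Translate and arm_space_is_secure() are the real QEMU names, but the
helper itself is invented) might look like:

    /* Hypothetical helper: assert the documented in_secure invariant. */
    static void s1_translate_check(const S1Translate *ptw)
    {
        assert(ptw->in_secure == arm_space_is_secure(ptw->in_space));
    }
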
From 3f74da440ddad65dee5a22d63b2048a5ee16a5e2 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 17 Jul 2023 11:05:08 +0100
Subject: [PATCH 4/7] target/arm: Fix S1_ptw_translate() debug path

In commit fe4a5472ccd6 we rearranged the logic in S1_ptw_translate()
so that the debug-access "call get_phys_addr_*" codepath is used both
when S1 is doing ptw reads from stage 2 and when it is doing ptw
reads from physical memory. However, we didn't update the calculation
of s2ptw->in_space and s2ptw->in_secure to account for the "ptw reads
from physical memory" case. This meant that debug accesses made while
in Secure state were broken.

Create a new function S2_security_space() which returns the correct
security space to use for the ptw load, and use it to determine the
correct .in_secure and .in_space fields for the stage 2 lookup for
the ptw load.

Reported-by: Jean-Philippe Brucker
Signed-off-by: Peter Maydell
Tested-by: Jean-Philippe Brucker
Reviewed-by: Richard Henderson
Message-id: 20230710152130.3928330-3-peter.maydell@linaro.org
Fixes: fe4a5472ccd6 ("target/arm: Use get_phys_addr_with_struct in S1_ptw_translate")
Signed-off-by: Peter Maydell
---
 target/arm/ptw.c | 37 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 32 insertions(+), 5 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 21749375f9..c0b9cee584 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -485,11 +485,39 @@ static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
     }
 }
 
+static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
+                                          ARMMMUIdx s2_mmu_idx)
+{
+    /*
+     * Return the security space to use for stage 2 when doing
+     * the S1 page table descriptor load.
+     */
+    if (regime_is_stage2(s2_mmu_idx)) {
+        /*
+         * The security space for ptw reads is almost always the same
+         * as that of the security space of the stage 1 translation.
+         * The only exception is when stage 1 is Secure; in that case
+         * the ptw read might be to the Secure or the NonSecure space
+         * (but never Realm or Root), and the s2_mmu_idx tells us which.
+         * Root translations are always single-stage.
+         */
+        if (s1_space == ARMSS_Secure) {
+            return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
+        } else {
+            assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
+            assert(s1_space != ARMSS_Root);
+            return s1_space;
+        }
+    } else {
+        /* ptw loads are from phys: the mmu idx itself says which space */
+        return arm_phys_to_space(s2_mmu_idx);
+    }
+}
+
 /* Translate a S1 pagetable walk through S2 if needed. */
 static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                              hwaddr addr, ARMMMUFaultInfo *fi)
 {
-    ARMSecuritySpace space = ptw->in_space;
     bool is_secure = ptw->in_secure;
     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
@@ -502,13 +530,12 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
          * From gdbstub, do not use softmmu so that we don't modify the
          * state of the cpu at all, including softmmu tlb contents.
          */
+        ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
         S1Translate s2ptw = {
             .in_mmu_idx = s2_mmu_idx,
             .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
-            .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
-            .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
-                         : space == ARMSS_Realm ? ARMSS_Realm
-                         : ARMSS_NonSecure),
+            .in_secure = arm_space_is_secure(s2_space),
+            .in_space = s2_space,
             .in_debug = true,
         };
         GetPhysAddrResult s2 = { };
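
The case analysis in S2_security_space() is compact enough to check in
isolation. Below is a small standalone model (an editorial sketch, not
QEMU code; the SecSpace enum and function names are invented) that mirrors
the logic of the stage-2 branch and exercises the interesting cases:

    #include <assert.h>
    #include <stdbool.h>

    typedef enum { SS_SECURE, SS_NONSECURE, SS_ROOT, SS_REALM } SecSpace;

    /*
     * Mirrors the stage-2 branch: a Secure stage 1 may load its
     * descriptors from Secure or NonSecure space depending on which
     * stage 2 mmu index is in use; every other space loads from its
     * own space, and Root never reaches here at all.
     */
    static SecSpace s2_space_for_load(SecSpace s1_space, bool stage2_is_secure)
    {
        if (s1_space == SS_SECURE) {
            return stage2_is_secure ? SS_SECURE : SS_NONSECURE;
        }
        assert(!stage2_is_secure);   /* Stage2_S pairs only with Secure S1 */
        assert(s1_space != SS_ROOT); /* Root translations are single-stage */
        return s1_space;
    }

    int main(void)
    {
        assert(s2_space_for_load(SS_SECURE, true) == SS_SECURE);
        assert(s2_space_for_load(SS_SECURE, false) == SS_NONSECURE);
        assert(s2_space_for_load(SS_NONSECURE, false) == SS_NONSECURE);
        assert(s2_space_for_load(SS_REALM, false) == SS_REALM);
        return 0;
    }
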
From eeb9578c36e78af6ee661f491e3f8744b1e0b6fb Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 17 Jul 2023 11:05:08 +0100
Subject: [PATCH 5/7] target/arm/ptw.c: Account for FEAT_RME when applying {N}SW, SA bits

In get_phys_addr_twostage() the code that applies the effects of
VSTCR.{SA,SW} and VTCR.{NSA,NSW} only updates result->f.attrs.secure.
Now that we also have f.attrs.space for FEAT_RME, we need to keep the
two in sync.

These bits only have an effect for Secure space translations, not
for Root, so use the input in_space field to determine whether to
apply them rather than the input is_secure. This doesn't actually
make a difference because Root translations are never two-stage, but
it's a little clearer.

Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
Message-id: 20230710152130.3928330-4-peter.maydell@linaro.org
---
 target/arm/ptw.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index c0b9cee584..8f94100c61 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -3118,6 +3118,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     hwaddr ipa;
     int s1_prot, s1_lgpgsz;
     bool is_secure = ptw->in_secure;
+    ARMSecuritySpace in_space = ptw->in_space;
     bool ret, ipa_secure;
     ARMCacheAttrs cacheattrs1;
     ARMSecuritySpace ipa_space;
@@ -3200,11 +3201,13 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
      * Check if IPA translates to secure or non-secure PA space.
      * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
      */
-    result->f.attrs.secure =
-        (is_secure
-         && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
-         && (ipa_secure
-             || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
+    if (in_space == ARMSS_Secure) {
+        result->f.attrs.secure =
+            !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
+            && (ipa_secure
+                || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
+        result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
+    }
 
     return false;
 }
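
The condition this patch restructures can be modelled directly. The
standalone sketch below (editorial, not QEMU code; the function and
parameter names are invented) captures the documented precedence, with
VSTCR overriding VTCR and {N}SW overriding {N}SA, for a Secure stage 1
translation:

    #include <assert.h>
    #include <stdbool.h>

    /*
     * Model of the restructured condition for a Secure stage 1: the
     * output PA space is Secure only if VSTCR.{SA,SW} are clear and
     * either the IPA was Secure or VTCR.{NSA,NSW} are also clear.
     */
    static bool pa_is_secure(bool ipa_secure, bool vstcr_sa_sw,
                             bool vtcr_nsa_nsw)
    {
        return !vstcr_sa_sw && (ipa_secure || !vtcr_nsa_nsw);
    }

    int main(void)
    {
        assert(pa_is_secure(true, false, true));   /* Secure IPA stays Secure */
        assert(!pa_is_secure(true, true, false));  /* VSTCR.{SA,SW} force NS */
        assert(!pa_is_secure(false, false, true)); /* NS IPA, NSA/NSW set: NS */
        assert(pa_is_secure(false, false, false)); /* NS IPA, NSA/NSW clear */
        return 0;
    }

The patch then derives f.attrs.space from the boolean via
arm_secure_to_space(), which is what keeps the two fields in sync.
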
From e60a7d0d4d89c8bca8a74a877e31abce50e848e3 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 17 Jul 2023 11:05:08 +0100
Subject: [PATCH 6/7] accel/tcg: Zero-pad PC in TCG CPU exec trace lines
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In commit f0a08b0913befbd we changed the type of the PC from
target_ulong to vaddr. In doing so we inadvertently dropped the
zero-padding on the PC in trace lines (the second item inside the []
in these lines). They used to look like this on AArch64, for
instance:

Trace 0: 0x7f2260000100 [00000000/0000000040000000/00000061/ff200000]

and now they look like this:

Trace 0: 0x7f4f50000100 [00000000/40000000/00000061/ff200000]

and if the PC happens to be somewhere low like 0x5000 then the field
is shown as /5000/.

This is because TARGET_FMT_lx is a "%08x" or "%016x" specifier,
depending on TARGET_LONG_SIZE, whereas VADDR_PRIx is just PRIx64
with no width specifier. Restore the zero-padding by adding an 016
width specifier to this tracing and a couple of others that were
similarly recently changed to use VADDR_PRIx without a width
specifier.

Unfortunately we can't so easily restore the "32-bit guests are
padded to 8 hex digits and 64-bit guests to 16 hex digits" behaviour.

Fixes: f0a08b0913befbd ("accel/tcg/cpu-exec.c: Widen pc to vaddr")
Signed-off-by: Peter Maydell
Reviewed-by: Philippe Mathieu-Daudé
Reviewed-by: Anton Johansson
Message-id: 20230711165434.4123674-1-peter.maydell@linaro.org
---
 accel/tcg/cpu-exec.c      | 4 ++--
 accel/tcg/translate-all.c | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index fdd6d3e0e4..e2c494e75e 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -298,7 +298,7 @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
     if (qemu_log_in_addr_range(pc)) {
         qemu_log_mask(CPU_LOG_EXEC,
                       "Trace %d: %p [%08" PRIx64
-                      "/%" VADDR_PRIx "/%08x/%08x] %s\n",
+                      "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
                       cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                       tb->flags, tb->cflags, lookup_symbol(pc));
@@ -487,7 +487,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
         vaddr pc = log_pc(cpu, last_tb);
         if (qemu_log_in_addr_range(pc)) {
-            qemu_log("Stopped execution of TB chain before %p [%"
+            qemu_log("Stopped execution of TB chain before %p [%016"
                      VADDR_PRIx "] %s\n",
                      last_tb->tc.ptr, pc, lookup_symbol(pc));
         }

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 4c17474fa2..a1782db5dd 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -637,7 +637,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
         vaddr pc = log_pc(cpu, tb);
         if (qemu_log_in_addr_range(pc)) {
-            qemu_log("cpu_io_recompile: rewound execution of TB to %"
+            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                     VADDR_PRIx "\n", pc);
        }
    }

From c2c1c4a35c7c2b1a4140b0942b9797c857e476a4 Mon Sep 17 00:00:00 2001
From: Tong Ho
Date: Wed, 26 Apr 2023 14:16:07 -0700
Subject: [PATCH 7/7] hw/nvram: Avoid unnecessary Xilinx eFuse backstore write
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add a check in the bit-set operation to write the backstore only if
the affected bit was 0 beforehand.

With this in place, callers no longer need to do the checking
themselves in order to avoid unnecessary writes.

Signed-off-by: Tong Ho
Reviewed-by: Alistair Francis
Reviewed-by: Francisco Iglesias
Reviewed-by: Philippe Mathieu-Daudé
Signed-off-by: Peter Maydell
---
 hw/nvram/xlnx-efuse.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c
index fdfffaab99..655c40b8d1 100644
--- a/hw/nvram/xlnx-efuse.c
+++ b/hw/nvram/xlnx-efuse.c
@@ -143,6 +143,8 @@ static bool efuse_ro_bits_find(XlnxEFuse *s, uint32_t k)
 
 bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
 {
+    uint32_t set, *row;
+
     if (efuse_ro_bits_find(s, bit)) {
         g_autofree char *path = object_get_canonical_path(OBJECT(s));
 
@@ -152,8 +154,13 @@ bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
         return false;
     }
 
-    s->fuse32[bit / 32] |= 1 << (bit % 32);
-    efuse_bdrv_sync(s, bit);
+    /* Avoid back-end write unless there is a real update */
+    row = &s->fuse32[bit / 32];
+    set = 1 << (bit % 32);
+    if (!(set & *row)) {
+        *row |= set;
+        efuse_bdrv_sync(s, bit);
+    }
     return true;
 }
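
The write-avoidance pattern in this last patch is general enough to show
standalone. The sketch below (editorial, not QEMU code; the names are
invented, with backstore_sync() standing in for efuse_bdrv_sync()) shows
that setting the same bit twice touches the backing store only once:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fuse32[2];   /* stand-in for the eFuse word array */
    static unsigned sync_count;  /* counts simulated backstore writes */

    static void backstore_sync(void)
    {
        sync_count++;
    }

    static bool efuse_set_bit(unsigned int bit)
    {
        uint32_t *row = &fuse32[bit / 32];
        uint32_t set = 1u << (bit % 32);

        if (!(set & *row)) {     /* write only on a real 0 -> 1 change */
            *row |= set;
            backstore_sync();
        }
        return true;
    }

    int main(void)
    {
        efuse_set_bit(5);
        efuse_set_bit(5);        /* no-op: the bit is already set */
        printf("backstore writes: %u (expected 1)\n", sync_count);
        return 0;
    }

Because the guard lives in the set-bit operation itself, every caller of
xlnx_efuse_set_bit() gets the cheap idempotent behaviour for free.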