target-arm queue:
 * hw/arm/smmu-common: Remove the repeated ttb field
 * hw/gpio: npcm7xx: fixup out-of-bounds access
 * tests/functional/test_arm_sx1: Check whether the serial console is working
 * target/arm: Fix minor bugs in generic timer register handling
 * target/arm: Implement SEL2 physical and virtual timers
 * target/arm: Correct STRD, LDRD atomicity and fault behaviour
 * target/arm: Make dummy debug registers RAZ, not NOP
 * util/qemu-timer.c: Don't warp timer from timerlist_rearm()
 * include/exec/memop.h: Expand comment for MO_ATOM_SUBALIGN
 * hw/arm/smmu: Introduce smmu_configs_inv_sid_range() helper
 * target/rx: Set exception vector base to 0xffffff80
 * target/rx: Remove TCG_CALL_NO_WG from helpers which write env

-----BEGIN PGP SIGNATURE-----

iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmfLCzgZHHBldGVyLm1h
eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3pwVEACgJJm1zdtRd87AnA0eY29a
uG8M35+VS/bNbA6IXzz1hFHUFh1smrda0C7VOefRqThEhkDObh1gfKWK3YeBenDn
FQsI6Hwu23ozTCgOniheU8SGbTtIvVxRRX4S91xNZgJ15riEATDnTisZv8iUChdr
DcZopuH0uRiOq7TWuRjxqvhaqH6WusvHzK0mizTqr9UhbqPHVl7CZfr1/AtJLpZF
32ix0JMofFWS52LFI19KWPlQG5Z3+lOw2ASyTf4cCaoCG6FTMv22E1x8mbMc2i96
WrsB+NdhlBVRu7mskOP2Br09AbQZ/Fy7AGlDhgZebipOUVMlpDj1RXj/BDH3H/px
qsjOk3V3gzM2bD+KvJuO4FlGXgEbOzGsGBwwY152C/6DYW5uTha/H1Pp+/iR8kcS
HvAsqNLh/uF7O1Kn8qzCNvglKDC3z0C4X15Sj8SjGz8Xtn1Ign/GVkDv8ZCoR39K
ltnXwvhzlDMkcGFFfEn33MYZZYqB15nX5a78/cStB/aOGPtZwUJ+2udLDzmug5ve
9oY9WMqqBDVxo4+qcAeZ+aem2VD6w79mhJyy1xmqOkifhFWqQ2VbDrKtqnrqhPK/
neyWrd2zCF6fY1wvb7vVKMy7aC5jI2K6qVU7ueZGCGKU2MtvbVaFJFByOjnVjv6o
c65VNXkbaCIedrSlalMO4w==
=8typ
-----END PGP SIGNATURE-----

Merge tag 'pull-target-arm-20250307' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

# gpg: Signature made Fri 07 Mar 2025 23:05:28 HKT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [unknown]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20250307' of https://git.linaro.org/people/pmaydell/qemu-arm: (21 commits)
  target/rx: Remove TCG_CALL_NO_WG from helpers which write env
  target/rx: Set exception vector base to 0xffffff80
  hw/arm/smmu: Introduce smmu_configs_inv_sid_range() helper
  include/exec/memop.h: Expand comment for MO_ATOM_SUBALIGN
  util/qemu-timer.c: Don't warp timer from timerlist_rearm()
  target/arm: Make dummy debug registers RAZ, not NOP
  target/arm: Drop unused address_offset from op_addr_{rr, ri}_post()
  target/arm: Correct STRD atomicity
  target/arm: Correct LDRD atomicity and fault behaviour
  hw/arm: enable secure EL2 timers for sbsa machine
  hw/arm: enable secure EL2 timers for virt machine
  target/arm: Document the architectural names of our GTIMERs
  target/arm: Implement SEL2 physical and virtual timers
  target/arm: Refactor handling of timer offset for direct register accesses
  target/arm: Always apply CNTVOFF_EL2 for CNTV_TVAL_EL02 accesses
  target/arm: Make CNTPS_* UNDEF from Secure EL1 when Secure EL2 is enabled
  target/arm: Don't apply CNTVOFF_EL2 for EL2_VIRT timer
  target/arm: Apply correct timer offset when calculating deadlines
  tests/functional/test_arm_sx1: Check whether the serial console is working
  hw/gpio: npcm7xx: fixup out-of-bounds access
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit ffbc5e661f
@@ -2010,6 +2010,7 @@ S: Maintained
F: hw/*/omap*
F: include/hw/arm/omap.h
F: docs/system/arm/sx1.rst
F: tests/functional/test_arm_sx1.py

IPack
M: Alberto Garcia <berto@igalia.com>

@@ -484,6 +484,8 @@ static void create_gic(SBSAMachineState *sms, MemoryRegion *mem)
        [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ,
        [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ,
        [GTIMER_HYPVIRT] = ARCH_TIMER_NS_EL2_VIRT_IRQ,
        [GTIMER_S_EL2_PHYS] = ARCH_TIMER_S_EL2_IRQ,
        [GTIMER_S_EL2_VIRT] = ARCH_TIMER_S_EL2_VIRT_IRQ,
    };

    for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {

@@ -225,6 +225,27 @@ static gboolean smmu_hash_remove_by_vmid_ipa(gpointer key, gpointer value,
           ((entry->iova & ~info->mask) == info->iova);
}

static gboolean
smmu_hash_remove_by_sid_range(gpointer key, gpointer value, gpointer user_data)
{
    SMMUDevice *sdev = (SMMUDevice *)key;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;

    if (sid < sid_range->start || sid > sid_range->end) {
        return false;
    }
    trace_smmu_config_cache_inv(sid);
    return true;
}

void smmu_configs_inv_sid_range(SMMUState *s, SMMUSIDRange sid_range)
{
    trace_smmu_configs_inv_sid_range(sid_range.start, sid_range.end);
    g_hash_table_foreach_remove(s->configs, smmu_hash_remove_by_sid_range,
                                &sid_range);
}

void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
                         uint8_t tg, uint64_t num_pages, uint8_t ttl)
{
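A usage sketch for the new helper, grounded in the smmuv3.c hunk further down in this diff (the caller's locals sid and mask come from the decoded command there; nothing else is assumed): callers build an SMMUSIDRange by value and pass it in.

    /* Sketch only: mirrors the SMMU_CMD_CFGI_STE_RANGE handling shown later in this diff. */
    SMMUSIDRange sid_range = {
        .start = sid,
        .end = sid + mask,
    };
    smmu_configs_inv_sid_range(bs, sid_range);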
@@ -141,9 +141,4 @@ typedef struct SMMUIOTLBPageInvInfo {
    uint64_t mask;
} SMMUIOTLBPageInvInfo;

typedef struct SMMUSIDRange {
    uint32_t start;
    uint32_t end;
} SMMUSIDRange;

#endif

@@ -903,7 +903,7 @@ static void smmuv3_flush_config(SMMUDevice *sdev)
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    trace_smmu_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}

@@ -1277,20 +1277,6 @@ static void smmuv3_range_inval(SMMUState *s, Cmd *cmd, SMMUStage stage)
    }
}

static gboolean
smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
{
    SMMUDevice *sdev = (SMMUDevice *)key;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;

    if (sid < sid_range->start || sid > sid_range->end) {
        return false;
    }
    trace_smmuv3_config_cache_inv(sid);
    return true;
}

static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);

@@ -1373,8 +1359,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
            sid_range.end = sid_range.start + mask;

            trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
            g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
                                        &sid_range);
            smmu_configs_inv_sid_range(bs, sid_range);
            break;
        }
        case SMMU_CMD_CFGI_CD:

@@ -22,6 +22,8 @@ smmu_iotlb_inv_asid_vmid(int asid, int vmid) "IOTLB invalidate asid=%d vmid=%d"
smmu_iotlb_inv_vmid(int vmid) "IOTLB invalidate vmid=%d"
smmu_iotlb_inv_vmid_s1(int vmid) "IOTLB invalidate vmid=%d"
smmu_iotlb_inv_iova(int asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64
smmu_configs_inv_sid_range(uint32_t start, uint32_t end) "Config cache INV SID range from 0x%x to 0x%x"
smmu_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x"
smmu_inv_notifiers_mr(const char *name) "iommu mr=%s"
smmu_iotlb_lookup_hit(int asid, int vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
smmu_iotlb_lookup_miss(int asid, int vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"

@@ -59,7 +61,6 @@ smmuv3_cmdq_tlbi_nh(int vmid) "vmid=%d"
smmuv3_cmdq_tlbi_nsnh(void) ""
smmuv3_cmdq_tlbi_nh_asid(int asid) "asid=%d"
smmuv3_cmdq_tlbi_s12_vmid(int vmid) "vmid=%d"
smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x"
smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s"
smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s"
smmuv3_inv_notifiers_iova(const char *name, int asid, int vmid, uint64_t iova, uint8_t tg, uint64_t num_pages, int stage) "iommu mr=%s asid=%d vmid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" stage=%d"

@@ -882,6 +882,8 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
        [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ,
        [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ,
        [GTIMER_HYPVIRT] = ARCH_TIMER_NS_EL2_VIRT_IRQ,
        [GTIMER_S_EL2_PHYS] = ARCH_TIMER_S_EL2_IRQ,
        [GTIMER_S_EL2_VIRT] = ARCH_TIMER_S_EL2_VIRT_IRQ,
    };

    for (unsigned irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
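The INTID values wired up in these two machine hunks are architectural PPIs (the bsa.h hunk below defines 19 and 20 for the Secure EL2 timers). As a hedged aside, not code from this patch: a PPI INTID n in the range 16..31 corresponds to the zero-based per-CPU PPI index n - 16 when it is connected to a GIC model, which is the arithmetic these tables ultimately feed.

    /* Illustrative helper, not part of the patch. */
    static inline int ppi_index(int intid)
    {
        /* PPIs are INTIDs 16..31; e.g. ARCH_TIMER_S_EL2_IRQ (20) -> PPI 4 */
        return intid - 16;
    }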
@@ -220,8 +220,6 @@ static void npcm7xx_gpio_regs_write(void *opaque, hwaddr addr, uint64_t v,
        return;
    }

    diff = s->regs[reg] ^ value;

    switch (reg) {
    case NPCM7XX_GPIO_TLOCK1:
    case NPCM7XX_GPIO_TLOCK2:

@@ -242,6 +240,7 @@ static void npcm7xx_gpio_regs_write(void *opaque, hwaddr addr, uint64_t v,
    case NPCM7XX_GPIO_PU:
    case NPCM7XX_GPIO_PD:
    case NPCM7XX_GPIO_IEM:
        diff = s->regs[reg] ^ value;
        s->regs[reg] = value;
        npcm7xx_gpio_update_pins(s, diff);
        break;

@@ -91,8 +91,12 @@ typedef enum MemOp {
     * Depending on alignment, one or both will be single-copy atomic.
     * This is the atomicity e.g. of Arm FEAT_LSE2 LDP.
     * MO_ATOM_SUBALIGN: the operation is single-copy atomic by parts
     *   by the alignment. E.g. if the address is 0 mod 4, then each
     *   4-byte subobject is single-copy atomic.
     *   by the alignment. E.g. if an 8-byte value is accessed at an
     *   address which is 0 mod 8, then the whole 8-byte access is
     *   single-copy atomic; otherwise, if it is accessed at 0 mod 4
     *   then each 4-byte subobject is single-copy atomic; otherwise
     *   if it is accessed at 0 mod 2 then the four 2-byte subobjects
     *   are single-copy atomic.
     *   This is the atomicity e.g. of IBM Power.
     * MO_ATOM_NONE: the operation has no atomicity requirements.
     *
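To make the expanded MO_ATOM_SUBALIGN wording concrete, here is a small standalone sketch (my own illustration, not QEMU code; __builtin_ctzll is the GCC/Clang count-trailing-zeros builtin) of the size of the single-copy-atomic pieces the rule implies:

    #include <stdint.h>

    /* Size in bytes of each single-copy-atomic piece of a 'size'-byte access at
     * 'addr' under MO_ATOM_SUBALIGN-style rules: an 8-byte access at 0 mod 8 is
     * one 8-byte piece, at 0 mod 4 it is 4-byte pieces, at 0 mod 2 it is 2-byte
     * pieces. */
    static unsigned subalign_chunk(uint64_t addr, unsigned size)
    {
        unsigned align = addr ? 1u << __builtin_ctzll(addr) : size;
        return align < size ? align : size;
    }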
@@ -22,6 +22,8 @@
#define QEMU_ARM_BSA_H

/* These are architectural INTID values */
#define ARCH_TIMER_S_EL2_VIRT_IRQ 19
#define ARCH_TIMER_S_EL2_IRQ 20
#define VIRTUAL_PMU_IRQ 23
#define ARCH_GIC_MAINT_IRQ 25
#define ARCH_TIMER_NS_EL2_IRQ 26

@@ -110,7 +110,6 @@ typedef struct SMMUTransCfg {
    /* Used by stage-1 only. */
    bool aa64;          /* arch64 or aarch32 translation table */
    bool record_faults; /* record fault events */
    uint64_t ttb;       /* TT base address */
    uint8_t oas;        /* output address width */
    uint8_t tbi;        /* Top Byte Ignore */
    int asid;

@@ -143,6 +142,11 @@ typedef struct SMMUIOTLBKey {
    uint8_t level;
} SMMUIOTLBKey;

typedef struct SMMUSIDRange {
    uint32_t start;
    uint32_t end;
} SMMUSIDRange;

struct SMMUState {
    /* <private> */
    SysBusDevice dev;

@@ -220,6 +224,7 @@ void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
                         uint8_t tg, uint64_t num_pages, uint8_t ttl);
void smmu_iotlb_inv_ipa(SMMUState *s, int vmid, dma_addr_t ipa, uint8_t tg,
                        uint64_t num_pages, uint8_t ttl);
void smmu_configs_inv_sid_range(SMMUState *s, SMMUSIDRange sid_range);
/* Unmap the range of all the notifiers registered to any IOMMU mr */
void smmu_inv_notifiers_all(SMMUState *s);

@@ -2069,6 +2069,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
                                          arm_gt_stimer_cb, cpu);
        cpu->gt_timer[GTIMER_HYPVIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                                  arm_gt_hvtimer_cb, cpu);
        cpu->gt_timer[GTIMER_S_EL2_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                                     arm_gt_sel2timer_cb, cpu);
        cpu->gt_timer[GTIMER_S_EL2_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                                     arm_gt_sel2vtimer_cb, cpu);
    }
#endif

@@ -1171,6 +1171,8 @@ void arm_gt_vtimer_cb(void *opaque);
void arm_gt_htimer_cb(void *opaque);
void arm_gt_stimer_cb(void *opaque);
void arm_gt_hvtimer_cb(void *opaque);
void arm_gt_sel2timer_cb(void *opaque);
void arm_gt_sel2vtimer_cb(void *opaque);

unsigned int gt_cntfrq_period_ns(ARMCPU *cpu);
void gt_rme_post_el_change(ARMCPU *cpu, void *opaque);

@@ -1037,7 +1037,7 @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit

@@ -1046,7 +1046,7 @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tdcc,
      .type = ARM_CP_NOP },
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * Dummy DBGCLAIM registers.
     * "The architecture does not define any functionality for the CLAIM tag bits.",

@@ -1075,7 +1075,8 @@ static const ARMCPRegInfo debug_aa32_el1_reginfo[] = {
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_dbgvcr32,
      .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP },
      .type = ARM_CP_CONST | ARM_CP_EL3_NO_EL2_KEEP,
      .resetvalue = 0 },
};

static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
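The replacement pattern in the three register entries above is how QEMU expresses a RAZ/WI dummy register: ARM_CP_CONST makes reads return .resetvalue (0 here) and writes be ignored, whereas ARM_CP_NOP simply skipped the access, leaving the destination register unchanged on a read. A minimal illustrative entry of the same shape (the name and encoding fields below are placeholders, not a real register):

    { .name = "DUMMY_RAZ_WI", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },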
@@ -10,12 +10,14 @@
#define TARGET_ARM_GTIMER_H

enum {
    GTIMER_PHYS = 0,
    GTIMER_VIRT = 1,
    GTIMER_HYP = 2,
    GTIMER_SEC = 3,
    GTIMER_HYPVIRT = 4,
#define NUM_GTIMERS 5
    GTIMER_PHYS = 0, /* CNTP_* ; EL1 physical timer */
    GTIMER_VIRT = 1, /* CNTV_* ; EL1 virtual timer */
    GTIMER_HYP = 2, /* CNTHP_* ; EL2 physical timer */
    GTIMER_SEC = 3, /* CNTPS_* ; EL3 physical timer */
    GTIMER_HYPVIRT = 4, /* CNTHV_* ; EL2 virtual timer ; only if FEAT_VHE */
    GTIMER_S_EL2_PHYS = 5, /* CNTHPS_* ; only if FEAT_SEL2 */
    GTIMER_S_EL2_VIRT = 6, /* CNTHVS_* ; only if FEAT_SEL2 */
#define NUM_GTIMERS 7
};

#endif
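NUM_GTIMERS sizes the per-timer state touched elsewhere in this series (cpu->gt_timer[] in the cpu.c hunk above and cp15.c14_timer[] in the register definitions below are both indexed by these values), so code that must visit every timer loops against it. A hedged sketch of that idiom, reusing gt_recalc_timer() from later in this diff:

    /* Sketch only: recompute the state of every generic timer of a CPU. */
    for (int timeridx = 0; timeridx < NUM_GTIMERS; timeridx++) {
        gt_recalc_timer(cpu, timeridx);
    }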
@@ -2387,6 +2387,9 @@ static CPAccessResult gt_stimer_access(CPUARMState *env,
        if (!arm_is_secure(env)) {
            return CP_ACCESS_UNDEFINED;
        }
        if (arm_is_el2_enabled(env)) {
            return CP_ACCESS_UNDEFINED;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }

@@ -2401,6 +2404,45 @@
    }
}

static CPAccessResult gt_sel2timer_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /*
     * The AArch64 register view of the secure EL2 timers are mostly
     * accessible from EL3 and EL2 although can also be trapped to EL2
     * from EL1 depending on nested virt config.
     */
    switch (arm_current_el(env)) {
    case 0: /* UNDEFINED */
        return CP_ACCESS_UNDEFINED;
    case 1:
        if (!arm_is_secure(env)) {
            /* UNDEFINED */
            return CP_ACCESS_UNDEFINED;
        } else if (arm_hcr_el2_eff(env) & HCR_NV) {
            /* Aarch64.SystemAccessTrap(EL2, 0x18) */
            return CP_ACCESS_TRAP_EL2;
        }
        /* UNDEFINED */
        return CP_ACCESS_UNDEFINED;
    case 2:
        if (!arm_is_secure(env)) {
            /* UNDEFINED */
            return CP_ACCESS_UNDEFINED;
        }
        return CP_ACCESS_OK;
    case 3:
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_OK;
        } else {
            return CP_ACCESS_UNDEFINED;
        }
    default:
        g_assert_not_reached();
    }
}

uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

@@ -2452,12 +2494,80 @@ static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env)
    return 0;
}

static uint64_t gt_phys_cnt_offset(CPUARMState *env)
static uint64_t gt_indirect_access_timer_offset(CPUARMState *env, int timeridx)
{
    if (arm_current_el(env) >= 2) {
    /*
     * Return the timer offset to use for indirect accesses to the timer.
     * This is the Offset value as defined in D12.2.4.1 "Operation of the
     * CompareValue views of the timers".
     *
     * The condition here is not always the same as the condition for
     * whether to apply an offset register when doing a direct read of
     * the counter sysreg; those conditions are described in the
     * access pseudocode for each counter register.
     */
    switch (timeridx) {
    case GTIMER_PHYS:
        return gt_phys_raw_cnt_offset(env);
    case GTIMER_VIRT:
        return env->cp15.cntvoff_el2;
    case GTIMER_HYP:
    case GTIMER_SEC:
    case GTIMER_HYPVIRT:
    case GTIMER_S_EL2_PHYS:
    case GTIMER_S_EL2_VIRT:
        return 0;
    default:
        g_assert_not_reached();
    }
}

uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx)
{
    /*
     * Return the timer offset to use for direct accesses to the
     * counter registers CNTPCT and CNTVCT, and for direct accesses
     * to the CNT*_TVAL registers.
     *
     * This isn't exactly the same as the indirect-access offset,
     * because here we also care about what EL the register access
     * is being made from.
     *
     * This corresponds to the access pseudocode for the registers.
     */
    uint64_t hcr;

    switch (timeridx) {
    case GTIMER_PHYS:
        if (arm_current_el(env) >= 2) {
            return 0;
        }
        return gt_phys_raw_cnt_offset(env);
    case GTIMER_VIRT:
        switch (arm_current_el(env)) {
        case 2:
            hcr = arm_hcr_el2_eff(env);
            if (hcr & HCR_E2H) {
                return 0;
            }
            break;
        case 0:
            hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                return 0;
            }
            break;
        }
        return env->cp15.cntvoff_el2;
    case GTIMER_HYP:
    case GTIMER_SEC:
    case GTIMER_HYPVIRT:
    case GTIMER_S_EL2_PHYS:
    case GTIMER_S_EL2_VIRT:
        return 0;
    default:
        g_assert_not_reached();
    }
    return gt_phys_raw_cnt_offset(env);
}

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
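A worked example of the distinction the two new helpers draw (numbers invented for illustration): with CNTVOFF_EL2 = 0x1000 and a physical count of 0x5000, a direct CNTVCT_EL0 read from EL1 returns 0x4000, while the same read from EL2 with HCR_EL2.E2H set returns 0x5000 because the direct-access offset is 0 there; the indirect (CompareValue) offset for GTIMER_VIRT stays CNTVOFF_EL2 in both cases.

    /* Sketch only, invented values: how a CNTVCT_EL0 read decomposes. */
    uint64_t cnt = gt_get_countervalue(env);                        /* e.g. 0x5000 */
    uint64_t off = gt_direct_access_timer_offset(env, GTIMER_VIRT); /* 0x1000 at EL1; 0 at EL2 with E2H */
    uint64_t cntvct = cnt - off;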
@@ -2469,8 +2579,7 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
         * Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
            cpu->env.cp15.cntvoff_el2 : gt_phys_raw_cnt_offset(&cpu->env);
        uint64_t offset = gt_indirect_access_timer_offset(&cpu->env, timeridx);
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;

@@ -2533,34 +2642,14 @@ static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_phys_cnt_offset(env);
}

uint64_t gt_virt_cnt_offset(CPUARMState *env)
{
    uint64_t hcr;

    switch (arm_current_el(env)) {
    case 2:
        hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_E2H) {
            return 0;
        }
        break;
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return 0;
        }
        break;
    }

    return env->cp15.cntvoff_el2;
    uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_PHYS);
    return gt_get_countervalue(env) - offset;
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
    uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_VIRT);
    return gt_get_countervalue(env) - offset;
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,

@@ -2572,45 +2661,36 @@ static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static uint64_t do_tval_read(CPUARMState *env, int timeridx, uint64_t offset)
{
    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = 0;
    uint64_t offset = gt_direct_access_timer_offset(env, timeridx);

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    case GTIMER_PHYS:
        offset = gt_phys_cnt_offset(env);
        break;
    }
    return do_tval_read(env, timeridx, offset);
}

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
static void do_tval_write(CPUARMState *env, int timeridx, uint64_t value,
                          uint64_t offset)
{
    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = 0;
    uint64_t offset = gt_direct_access_timer_offset(env, timeridx);

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    case GTIMER_PHYS:
        offset = gt_phys_cnt_offset(env);
        break;
    }

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
    do_tval_write(env, timeridx, value, offset);
}

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
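The refactor above keeps the TVAL arithmetic in one place. Stated as a worked relation (my summary of do_tval_read()/do_tval_write(), not new behaviour): reading TVAL returns the 32-bit distance from the offset-adjusted counter to the compare value, and writing TVAL places the compare value that far ahead.

    /* TVAL semantics as implemented by the helpers above (illustrative):
     *   read:  TVAL = (uint32_t)(CVAL - (count - offset))
     *   write: CVAL = (count - offset) + sextract64(value, 0, 32)
     */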
@@ -2742,13 +2822,21 @@ static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
    /*
     * This is CNTV_TVAL_EL02; unlike the underlying CNTV_TVAL_EL0
     * we always apply CNTVOFF_EL2. Special case that here rather
     * than going into the generic gt_tval_read() and then having
     * to re-detect that it's this register.
     * Note that the accessfn/perms mean we know we're at EL2 or EL3 here.
     */
    return do_tval_read(env, GTIMER_VIRT, env->cp15.cntvoff_el2);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
    /* Similarly for writes to CNTV_TVAL_EL02 */
    do_tval_write(env, GTIMER_VIRT, value, env->cp15.cntvoff_el2);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,

@@ -2908,6 +2996,62 @@ static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_pel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_S_EL2_PHYS);
}

static void gt_sec_pel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_S_EL2_PHYS, value);
}

static uint64_t gt_sec_pel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_S_EL2_PHYS);
}

static void gt_sec_pel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_S_EL2_PHYS, value);
}

static void gt_sec_pel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_S_EL2_PHYS, value);
}

static void gt_sec_vel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_S_EL2_VIRT);
}

static void gt_sec_vel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_S_EL2_VIRT, value);
}

static uint64_t gt_sec_vel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_S_EL2_VIRT);
}

static void gt_sec_vel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_S_EL2_VIRT, value);
}

static void gt_sec_vel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_S_EL2_VIRT, value);
}

static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYPVIRT);

@@ -2964,6 +3108,20 @@ void arm_gt_stimer_cb(void *opaque)
    gt_recalc_timer(cpu, GTIMER_SEC);
}

void arm_gt_sel2timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_S_EL2_PHYS);
}

void arm_gt_sel2vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_S_EL2_VIRT);
}

void arm_gt_hvtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

@@ -5688,6 +5846,56 @@ static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
      .access = PL2_RW, .accessfn = sel2_access,
      .nv2_redirect_offset = 0x48,
      .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
#ifndef CONFIG_USER_ONLY
    /* Secure EL2 Physical Timer */
    { .name = "CNTHPS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .readfn = gt_sec_pel2_tval_read,
      .writefn = gt_sec_pel2_tval_write,
      .resetfn = gt_sec_pel2_timer_reset,
    },
    { .name = "CNTHPS_CTL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_pel2_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTHPS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].cval),
      .writefn = gt_sec_pel2_cval_write, .raw_writefn = raw_write,
    },
    /* Secure EL2 Virtual Timer */
    { .name = "CNTHVS_TVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .readfn = gt_sec_vel2_tval_read,
      .writefn = gt_sec_vel2_tval_write,
      .resetfn = gt_sec_vel2_timer_reset,
    },
    { .name = "CNTHVS_CTL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_vel2_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTHVS_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL2_RW,
      .accessfn = gt_sel2timer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].cval),
      .writefn = gt_sec_vel2_cval_write, .raw_writefn = raw_write,
    },
#endif
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,

@@ -1819,9 +1819,10 @@ int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and CNTVCT_EL0 (this will be either 0 or the value of CNTVOFF_EL2).
 * and the counter for the specified timer, as used for direct register
 * accesses.
 */
uint64_t gt_virt_cnt_offset(CPUARMState *env);
uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx);

/*
 * Return mask of ARMMMUIdxBit values corresponding to an "invalidate
@@ -427,7 +427,13 @@ void HELPER(wfit)(CPUARMState *env, uint64_t timeout)
    int target_el = check_wfx_trap(env, false, &excp);
    /* The WFIT should time out when CNTVCT_EL0 >= the specified value. */
    uint64_t cntval = gt_get_countervalue(env);
    uint64_t offset = gt_virt_cnt_offset(env);
    /*
     * We want the value that we would get if we read CNTVCT_EL0 from
     * the current exception level, so the direct_access offset, not
     * the indirect_access one. Compare the pseudocode LocalTimeoutEvent(),
     * which calls VirtualCounterTimer().
     */
    uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_VIRT);
    uint64_t cntvct = cntval - offset;
    uint64_t nexttick;

@@ -4941,7 +4941,7 @@ static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
}

static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
                            TCGv_i32 addr, int address_offset)
                            TCGv_i32 addr)
{
    if (!a->p) {
        TCGv_i32 ofs = load_reg(s, a->rm);

@@ -4954,7 +4954,6 @@ static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
    } else if (!a->w) {
        return;
    }
    tcg_gen_addi_i32(addr, addr, address_offset);
    store_reg(s, a->rn, addr);
}

@@ -4974,7 +4973,7 @@ static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
     * Perform base writeback before the loaded value to
     * ensure correct behavior with overlapping index registers.
     */
    op_addr_rr_post(s, a, addr, 0);
    op_addr_rr_post(s, a, addr);
    store_reg_from_load(s, a->rt, tmp);
    return true;
}

@@ -4999,14 +4998,53 @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
    disas_set_da_iss(s, mop, issinfo);

    op_addr_rr_post(s, a, addr, 0);
    op_addr_rr_post(s, a, addr);
    return true;
}

static void do_ldrd_load(DisasContext *s, TCGv_i32 addr, int rt, int rt2)
{
    /*
     * LDRD is required to be an atomic 64-bit access if the
     * address is 8-aligned, two atomic 32-bit accesses if
     * it's only 4-aligned, and to give an alignment fault
     * if it's not 4-aligned. This is MO_ALIGN_4 | MO_ATOM_SUBALIGN.
     * Rt is always the word from the lower address, and Rt2 the
     * data from the higher address, regardless of endianness.
     * So (like gen_load_exclusive) we avoid gen_aa32_ld_i64()
     * so we don't get its SCTLR_B check, and instead do a 64-bit access
     * using MO_BE if appropriate and then split the two halves.
     *
     * For M-profile, and for A-profile before LPAE, the 64-bit
     * atomicity is not required. We could model that using
     * the looser MO_ATOM_IFALIGN_PAIR, but providing a higher
     * level of atomicity than required is harmless (we would not
     * currently generate better code for IFALIGN_PAIR here).
     *
     * This also gives us the correct behaviour of not updating
     * rt if the load of rt2 faults; this is required for cases
     * like "ldrd r2, r3, [r2]" where rt is also the base register.
     */
    int mem_idx = get_mem_index(s);
    MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data;
    TCGv taddr = gen_aa32_addr(s, addr, opc);
    TCGv_i64 t64 = tcg_temp_new_i64();
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();

    tcg_gen_qemu_ld_i64(t64, taddr, mem_idx, opc);
    if (s->be_data == MO_BE) {
        tcg_gen_extr_i64_i32(tmp2, tmp, t64);
    } else {
        tcg_gen_extr_i64_i32(tmp, tmp2, t64);
    }
    store_reg(s, rt, tmp);
    store_reg(s, rt2, tmp2);
}

static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
{
    int mem_idx = get_mem_index(s);
    TCGv_i32 addr, tmp;
    TCGv_i32 addr;

    if (!ENABLE_ARCH_5TE) {
        return false;
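A compact restatement of the alignment/atomicity cases that MO_ALIGN_4 | MO_ATOM_SUBALIGN gives do_ldrd_load() and do_strd_store() (my own sketch, not QEMU API):

    /* Illustrative only: what one LDRD/STRD access decomposes into. */
    static int ldrd_atomic_chunk_bytes(uint32_t addr)
    {
        if (addr % 4) {
            return -1;              /* not 4-aligned: alignment fault */
        }
        return (addr % 8) ? 4 : 8;  /* 4-aligned: two 32-bit pieces; 8-aligned: one 64-bit access */
    }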
@@ -5017,25 +5055,49 @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
    }
    addr = op_addr_rr_pre(s, a);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
    store_reg(s, a->rt, tmp);

    tcg_gen_addi_i32(addr, addr, 4);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
    store_reg(s, a->rt + 1, tmp);
    do_ldrd_load(s, addr, a->rt, a->rt + 1);

    /* LDRD w/ base writeback is undefined if the registers overlap. */
    op_addr_rr_post(s, a, addr, -4);
    op_addr_rr_post(s, a, addr);
    return true;
}

static void do_strd_store(DisasContext *s, TCGv_i32 addr, int rt, int rt2)
{
    /*
     * STRD is required to be an atomic 64-bit access if the
     * address is 8-aligned, two atomic 32-bit accesses if
     * it's only 4-aligned, and to give an alignment fault
     * if it's not 4-aligned.
     * Rt is always the word from the lower address, and Rt2 the
     * data from the higher address, regardless of endianness.
     * So (like gen_store_exclusive) we avoid gen_aa32_ld_i64()
     * so we don't get its SCTLR_B check, and instead do a 64-bit access
     * using MO_BE if appropriate, using a value constructed
     * by putting the two halves together in the right order.
     *
     * As with LDRD, the 64-bit atomicity is not required for
     * M-profile, or for A-profile before LPAE, and we provide
     * the higher guarantee always for simplicity.
     */
    int mem_idx = get_mem_index(s);
    MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data;
    TCGv taddr = gen_aa32_addr(s, addr, opc);
    TCGv_i32 t1 = load_reg(s, rt);
    TCGv_i32 t2 = load_reg(s, rt2);
    TCGv_i64 t64 = tcg_temp_new_i64();

    if (s->be_data == MO_BE) {
        tcg_gen_concat_i32_i64(t64, t2, t1);
    } else {
        tcg_gen_concat_i32_i64(t64, t1, t2);
    }
    tcg_gen_qemu_st_i64(t64, taddr, mem_idx, opc);
}

static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
{
    int mem_idx = get_mem_index(s);
    TCGv_i32 addr, tmp;
    TCGv_i32 addr;

    if (!ENABLE_ARCH_5TE) {
        return false;

@@ -5046,15 +5108,9 @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
    }
    addr = op_addr_rr_pre(s, a);

    tmp = load_reg(s, a->rt);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
    do_strd_store(s, addr, a->rt, a->rt + 1);

    tcg_gen_addi_i32(addr, addr, 4);

    tmp = load_reg(s, a->rt + 1);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);

    op_addr_rr_post(s, a, addr, -4);
    op_addr_rr_post(s, a, addr);
    return true;
}

@@ -5090,13 +5146,14 @@ static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
}

static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
                            TCGv_i32 addr, int address_offset)
                            TCGv_i32 addr)
{
    int address_offset = 0;
    if (!a->p) {
        if (a->u) {
            address_offset += a->imm;
            address_offset = a->imm;
        } else {
            address_offset -= a->imm;
            address_offset = -a->imm;
        }
    } else if (!a->w) {
        return;

@@ -5121,7 +5178,7 @@ static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
     * Perform base writeback before the loaded value to
     * ensure correct behavior with overlapping index registers.
     */
    op_addr_ri_post(s, a, addr, 0);
    op_addr_ri_post(s, a, addr);
    store_reg_from_load(s, a->rt, tmp);
    return true;
}

@@ -5146,29 +5203,20 @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
    disas_set_da_iss(s, mop, issinfo);

    op_addr_ri_post(s, a, addr, 0);
    op_addr_ri_post(s, a, addr);
    return true;
}

static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
{
    int mem_idx = get_mem_index(s);
    TCGv_i32 addr, tmp;
    TCGv_i32 addr;

    addr = op_addr_ri_pre(s, a);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
    store_reg(s, a->rt, tmp);

    tcg_gen_addi_i32(addr, addr, 4);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
    store_reg(s, rt2, tmp);
    do_ldrd_load(s, addr, a->rt, rt2);

    /* LDRD w/ base writeback is undefined if the registers overlap. */
    op_addr_ri_post(s, a, addr, -4);
    op_addr_ri_post(s, a, addr);
    return true;
}

@@ -5191,20 +5239,13 @@ static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)

static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
{
    int mem_idx = get_mem_index(s);
    TCGv_i32 addr, tmp;
    TCGv_i32 addr;

    addr = op_addr_ri_pre(s, a);

    tmp = load_reg(s, a->rt);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
    do_strd_store(s, addr, a->rt, rt2);

    tcg_gen_addi_i32(addr, addr, 4);

    tmp = load_reg(s, rt2);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);

    op_addr_ri_post(s, a, addr, -4);
    op_addr_ri_post(s, a, addr);
    return true;
}
@@ -88,7 +88,7 @@ void rx_cpu_do_interrupt(CPUState *cs)
        cpu_stl_data(env, env->isp, env->pc);

        if (vec < 0x100) {
            env->pc = cpu_ldl_data(env, 0xffffffc0 + vec * 4);
            env->pc = cpu_ldl_data(env, 0xffffff80 + vec * 4);
        } else {
            env->pc = cpu_ldl_data(env, env->intb + (vec & 0xff) * 4);
        }
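A quick arithmetic check of the corrected base (illustrative; nothing about RX vector numbering beyond what the code shows is assumed): the fixed vector table starts at 0xffffff80, so 32 four-byte entries run exactly up to the top of the 32-bit address space.

    /* Sketch only: address of fixed vector 'vec' with the corrected base. */
    static uint32_t rx_fixed_vector_addr(unsigned vec)
    {
        return 0xffffff80u + vec * 4;   /* vec 0 -> 0xffffff80, vec 31 -> 0xfffffffc */
    }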
@@ -4,27 +4,27 @@ DEF_HELPER_1(raise_privilege_violation, noreturn, env)
DEF_HELPER_1(wait, noreturn, env)
DEF_HELPER_2(rxint, noreturn, env, i32)
DEF_HELPER_1(rxbrk, noreturn, env)
DEF_HELPER_FLAGS_3(fadd, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fsub, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fmul, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fdiv, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fcmp, TCG_CALL_NO_WG, void, env, f32, f32)
DEF_HELPER_FLAGS_2(ftoi, TCG_CALL_NO_WG, i32, env, f32)
DEF_HELPER_FLAGS_2(round, TCG_CALL_NO_WG, i32, env, f32)
DEF_HELPER_FLAGS_2(itof, TCG_CALL_NO_WG, f32, env, i32)
DEF_HELPER_3(fadd, f32, env, f32, f32)
DEF_HELPER_3(fsub, f32, env, f32, f32)
DEF_HELPER_3(fmul, f32, env, f32, f32)
DEF_HELPER_3(fdiv, f32, env, f32, f32)
DEF_HELPER_3(fcmp, void, env, f32, f32)
DEF_HELPER_2(ftoi, i32, env, f32)
DEF_HELPER_2(round, i32, env, f32)
DEF_HELPER_2(itof, f32, env, i32)
DEF_HELPER_2(set_fpsw, void, env, i32)
DEF_HELPER_FLAGS_2(racw, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_FLAGS_2(set_psw_rte, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_FLAGS_2(set_psw, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_2(racw, void, env, i32)
DEF_HELPER_2(set_psw_rte, void, env, i32)
DEF_HELPER_2(set_psw, void, env, i32)
DEF_HELPER_1(pack_psw, i32, env)
DEF_HELPER_FLAGS_3(div, TCG_CALL_NO_WG, i32, env, i32, i32)
DEF_HELPER_FLAGS_3(divu, TCG_CALL_NO_WG, i32, env, i32, i32)
DEF_HELPER_FLAGS_1(scmpu, TCG_CALL_NO_WG, void, env)
DEF_HELPER_3(div, i32, env, i32, i32)
DEF_HELPER_3(divu, i32, env, i32, i32)
DEF_HELPER_1(scmpu, void, env)
DEF_HELPER_1(smovu, void, env)
DEF_HELPER_1(smovf, void, env)
DEF_HELPER_1(smovb, void, env)
DEF_HELPER_2(sstr, void, env, i32)
DEF_HELPER_FLAGS_2(swhile, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_FLAGS_2(suntil, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_FLAGS_2(rmpa, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_2(swhile, void, env, i32)
DEF_HELPER_2(suntil, void, env, i32)
DEF_HELPER_2(rmpa, void, env, i32)
DEF_HELPER_1(satr, void, env)
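For context (my summary, not text from the patch): TCG_CALL_NO_WG is a promise to the TCG optimizer that a helper never writes guest globals such as fields of env, so keeping the flag on helpers that do update env state (FPSW, PSW, the string/loop registers) can let stale cached values survive the call. One before/after pair from the hunk above shows the shape of the fix:

    DEF_HELPER_FLAGS_3(fadd, TCG_CALL_NO_WG, f32, env, f32, f32)   /* removed: over-promises, fadd updates FPSW in env */
    DEF_HELPER_3(fadd, f32, env, f32, f32)                         /* kept: no no-write-globals guarantee */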
@@ -43,7 +43,8 @@ class SX1Test(LinuxKernelTest):
        self.vm.add_args('-append', f'kunit.enable=0 rdinit=/sbin/init {self.CONSOLE_ARGS}')
        self.vm.add_args('-no-reboot')
        self.launch_kernel(zimage_path,
                           initrd=initrd_path)
                           initrd=initrd_path,
                           wait_for='Boot successful')
        self.vm.wait(timeout=120)

    def test_arm_sx1_sd(self):

@@ -54,7 +55,7 @@ class SX1Test(LinuxKernelTest):
        self.vm.add_args('-no-reboot')
        self.vm.add_args('-snapshot')
        self.vm.add_args('-drive', f'format=raw,if=sd,file={sd_fs_path}')
        self.launch_kernel(zimage_path)
        self.launch_kernel(zimage_path, wait_for='Boot successful')
        self.vm.wait(timeout=120)

    def test_arm_sx1_flash(self):

@@ -65,7 +66,7 @@ class SX1Test(LinuxKernelTest):
        self.vm.add_args('-no-reboot')
        self.vm.add_args('-snapshot')
        self.vm.add_args('-drive', f'format=raw,if=pflash,file={flash_path}')
        self.launch_kernel(zimage_path)
        self.launch_kernel(zimage_path, wait_for='Boot successful')
        self.vm.wait(timeout=120)

if __name__ == '__main__':
@@ -409,10 +409,6 @@ static bool timer_mod_ns_locked(QEMUTimerList *timer_list,

static void timerlist_rearm(QEMUTimerList *timer_list)
{
    /* Interrupt execution to force deadline recalculation. */
    if (icount_enabled() && timer_list->clock->type == QEMU_CLOCK_VIRTUAL) {
        icount_start_warp_timer();
    }
    timerlist_notify(timer_list);
}