accel/tcg: Simplify tlb_plugin_lookup
Now that we defer address space update and tlb_flush until
the next async_run_on_cpu, the plugin run at the end of the
instruction no longer has to contend with a flushed tlb.
Therefore, delete SavedIOTLB entirely.

Properly return false from tlb_plugin_lookup when we do not
have a tlb match.

Fixes a bug in which SavedIOTLB had stale data, because
there were multiple i/o accesses within a single insn.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit da6aef48d9
parent e8967b6152
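For context, the deferral the message refers to comes from the companion change to tcg_commit, which stops swapping the address-space dispatch table in place and instead schedules the update on the vCPU thread. The sketch below illustrates that pattern; it is written from memory of softmmu/physmem.c at this point in the series and is illustrative, not part of this commit.

/* Illustrative sketch of the deferral pattern, assuming the shape of
 * softmmu/physmem.c after the companion change; not this commit's diff. */
static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data)
{
    CPUAddressSpace *cpuas = data.host_ptr;

    /* Runs on the vCPU thread between TBs, so it is safe to swap the
     * dispatch table and flush the TLB here. */
    cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as);
    tlb_flush(cpu);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas =
        container_of(listener, CPUAddressSpace, tcg_as_listener);

    /* Defer the update until the cpu is quiescent: the in-flight insn,
     * including its plugin mem callbacks, still sees a consistent TLB. */
    async_run_on_cpu(cpuas->cpu, tcg_commit_cpu,
                     RUN_ON_CPU_HOST_PTR(cpuas));
}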
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1367,21 +1367,6 @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
     }
 }
 
-/*
- * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
- * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
- * because of the side effect of io_writex changing memory layout.
- */
-static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
-                            hwaddr mr_offset)
-{
-#ifdef CONFIG_PLUGIN
-    SavedIOTLB *saved = &cs->saved_iotlb;
-    saved->section = section;
-    saved->mr_offset = mr_offset;
-#endif
-}
-
 static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
                          int mmu_idx, vaddr addr, uintptr_t retaddr,
                          MMUAccessType access_type, MemOp op)
@@ -1401,12 +1386,6 @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
         cpu_io_recompile(cpu, retaddr);
     }
 
-    /*
-     * The memory_region_dispatch may trigger a flush/resize
-     * so for plugins we save the iotlb_data just in case.
-     */
-    save_iotlb_data(cpu, section, mr_offset);
-
     {
         QEMU_IOTHREAD_LOCK_GUARD();
         r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
@@ -1441,12 +1420,6 @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
     }
     cpu->mem_io_pc = retaddr;
 
-    /*
-     * The memory_region_dispatch may trigger a flush/resize
-     * so for plugins we save the iotlb_data just in case.
-     */
-    save_iotlb_data(cpu, section, mr_offset);
-
     {
         QEMU_IOTHREAD_LOCK_GUARD();
         r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
@@ -1729,45 +1702,39 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
  * in the softmmu lookup code (or helper). We don't handle re-fills or
  * checking the victim table. This is purely informational.
  *
- * This almost never fails as the memory access being instrumented
- * should have just filled the TLB. The one corner case is io_writex
- * which can cause TLB flushes and potential resizing of the TLBs
- * losing the information we need. In those cases we need to recover
- * data from a copy of the CPUTLBEntryFull. As long as this always occurs
- * from the same thread (which a mem callback will be) this is safe.
+ * The one corner case is i/o write, which can cause changes to the
+ * address space. Those changes, and the corresponding tlb flush,
+ * should be delayed until the next TB, so even then this ought not fail.
+ * But check, Just in Case.
  */
 bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
                        bool is_store, struct qemu_plugin_hwaddr *data)
 {
     CPUArchState *env = cpu->env_ptr;
     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
     uintptr_t index = tlb_index(env, mmu_idx, addr);
-    uint64_t tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
+    MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
+    uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
 
-    if (likely(tlb_hit(tlb_addr, addr))) {
-        /* We must have an iotlb entry for MMIO */
-        if (tlb_addr & TLB_MMIO) {
-            CPUTLBEntryFull *full;
-            full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-            data->is_io = true;
-            data->v.io.section =
-                iotlb_to_section(cpu, full->xlat_section, full->attrs);
-            data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
-        } else {
-            data->is_io = false;
-            data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
-        }
-        return true;
-    } else {
-        SavedIOTLB *saved = &cpu->saved_iotlb;
-        data->is_io = true;
-        data->v.io.section = saved->section;
-        data->v.io.offset = saved->mr_offset;
-        return true;
+    if (unlikely(!tlb_hit(tlb_addr, addr))) {
+        return false;
     }
-}
+
+    /* We must have an iotlb entry for MMIO */
+    if (tlb_addr & TLB_MMIO) {
+        CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+        hwaddr xlat = full->xlat_section;
+
+        data->is_io = true;
+        data->v.io.offset = (xlat & TARGET_PAGE_MASK) + addr;
+        data->v.io.section =
+            iotlb_to_section(cpu, xlat & ~TARGET_PAGE_MASK, full->attrs);
+    } else {
+        data->is_io = false;
+        data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
+    }
+    return true;
+}
 #endif
 
 /*
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -227,17 +227,6 @@ struct CPUWatchpoint {
     QTAILQ_ENTRY(CPUWatchpoint) entry;
 };
 
-#ifdef CONFIG_PLUGIN
-/*
- * For plugins we sometime need to save the resolved iotlb data before
- * the memory regions get moved around by io_writex.
- */
-typedef struct SavedIOTLB {
-    MemoryRegionSection *section;
-    hwaddr mr_offset;
-} SavedIOTLB;
-#endif
-
 struct KVMState;
 struct kvm_run;
 
@@ -409,8 +398,6 @@ struct CPUState {
 
 #ifdef CONFIG_PLUGIN
     GArray *plugin_mem_cbs;
-    /* saved iotlb data from io_writex */
-    SavedIOTLB saved_iotlb;
 #endif
 
     /* TODO Move common fields from CPUArchState here. */
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -129,7 +129,6 @@ typedef struct QString QString;
 typedef struct RAMBlock RAMBlock;
 typedef struct Range Range;
 typedef struct ReservedRegion ReservedRegion;
-typedef struct SavedIOTLB SavedIOTLB;
 typedef struct SHPCDevice SHPCDevice;
 typedef struct SSIBus SSIBus;
 typedef struct TCGHelperInfo TCGHelperInfo;
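From the plugin side, tlb_plugin_lookup backs qemu_plugin_get_hwaddr(), which after this commit returns NULL on a TLB miss instead of handing back stale SavedIOTLB contents. A minimal sketch of a TCG plugin that exercises this path follows; the qemu_plugin_* calls are the public plugin API, while the callback logic itself is only an example.

#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

/* Memory callback: resolve the physical side of each access.
 * qemu_plugin_get_hwaddr() returns NULL when tlb_plugin_lookup()
 * reports no TLB match, so check before dereferencing. */
static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info,
                     uint64_t vaddr, void *udata)
{
    struct qemu_plugin_hwaddr *hwaddr = qemu_plugin_get_hwaddr(info, vaddr);

    if (hwaddr && qemu_plugin_hwaddr_is_io(hwaddr)) {
        /* MMIO access: section/offset were filled from the fulltlb entry. */
        const char *name = qemu_plugin_hwaddr_device_name(hwaddr);
        qemu_plugin_outs(name);
    }
}

/* Instrument every instruction's memory accesses at translation time. */
static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t n = qemu_plugin_tb_n_insns(tb);
    for (size_t i = 0; i < n; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
        qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
                                         QEMU_PLUGIN_CB_NO_REGS,
                                         QEMU_PLUGIN_MEM_RW, NULL);
    }
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    return 0;
}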