accel/tcg: Rename tb_invalidate_phys_page_range and drop end parameter
This function is never called with a real range, only for a single page. Drop the second parameter and rename to tb_invalidate_phys_page. Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent
67aabbb312
commit
d6d1fd2973
@@ -565,25 +565,26 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Invalidate all TBs which intersect with the target physical address range
|
* Invalidate all TBs which intersect with the target physical
|
||||||
* [start;end[. NOTE: start and end must refer to the *same* physical page.
|
* address page @addr.
|
||||||
* 'is_cpu_write_access' should be true if called from a real cpu write
|
|
||||||
* access: the virtual CPU will exit the current TB if code is modified inside
|
|
||||||
* this TB.
|
|
||||||
*
|
*
|
||||||
* Called with mmap_lock held for user-mode emulation
|
* Called with mmap_lock held for user-mode emulation
|
||||||
*/
|
*/
|
||||||
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
|
void tb_invalidate_phys_page(tb_page_addr_t addr)
|
||||||
{
|
{
|
||||||
struct page_collection *pages;
|
struct page_collection *pages;
|
||||||
|
tb_page_addr_t start, end;
|
||||||
PageDesc *p;
|
PageDesc *p;
|
||||||
|
|
||||||
assert_memory_lock();
|
assert_memory_lock();
|
||||||
|
|
||||||
p = page_find(start >> TARGET_PAGE_BITS);
|
p = page_find(addr >> TARGET_PAGE_BITS);
|
||||||
if (p == NULL) {
|
if (p == NULL) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
start = addr & TARGET_PAGE_MASK;
|
||||||
|
end = start + TARGET_PAGE_SIZE;
|
||||||
pages = page_collection_lock(start, end);
|
pages = page_collection_lock(start, end);
|
||||||
tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
|
tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
|
||||||
page_collection_unlock(pages);
|
page_collection_unlock(pages);
|
||||||
|
4
cpu.c
4
cpu.c
@@ -277,7 +277,7 @@ void list_cpus(const char *optarg)
|
|||||||
void tb_invalidate_phys_addr(target_ulong addr)
|
void tb_invalidate_phys_addr(target_ulong addr)
|
||||||
{
|
{
|
||||||
mmap_lock();
|
mmap_lock();
|
||||||
tb_invalidate_phys_page_range(addr, addr + 1);
|
tb_invalidate_phys_page(addr);
|
||||||
mmap_unlock();
|
mmap_unlock();
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
@@ -298,7 +298,7 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
ram_addr = memory_region_get_ram_addr(mr) + addr;
|
ram_addr = memory_region_get_ram_addr(mr) + addr;
|
||||||
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
|
tb_invalidate_phys_page(ram_addr);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@@ -29,7 +29,7 @@ void page_collection_unlock(struct page_collection *set);
|
|||||||
void tb_invalidate_phys_page_fast(struct page_collection *pages,
|
void tb_invalidate_phys_page_fast(struct page_collection *pages,
|
||||||
tb_page_addr_t start, int len,
|
tb_page_addr_t start, int len,
|
||||||
uintptr_t retaddr);
|
uintptr_t retaddr);
|
||||||
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
|
void tb_invalidate_phys_page(tb_page_addr_t addr);
|
||||||
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
|
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
|
||||||
|
|
||||||
#ifdef CONFIG_USER_ONLY
|
#ifdef CONFIG_USER_ONLY
|
||||||
|
Loading…
x
Reference in New Issue
Block a user