Merge tag 'pull-tcg-20230626' of https://gitlab.com/rth7680/qemu into staging

accel/tcg: Replace target_ulong in some APIs
accel/tcg: Remove CONFIG_PROFILER
accel/tcg: Store some tlb flags in CPUTLBEntryFull
tcg: Issue memory barriers as required for the guest memory model
tcg: Fix temporary variable in tcg_gen_gvec_andcs

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmSZsPgdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV+kWAf+ODI9qRvVbb4/uYv8
# k7wMhCxX9kk5bRVr+QcqDn9RekAdsyOKSdkAAv4NeRFqHs3ukxhMxu0N2aiVXGDw
# WtpsV73FrivAXaCxRj0aaYCsX8qFUQM4eWORZX2+V4AO0BtMHx1loK3bUQwdBTqN
# jgkpn8BYeFdfUJjvvEj9XeSJ7s0n/p7esaf6VKajef/PbrcgYAeHg72tb5Vv5LTI
# oxhU4icpaq/FT+SolnGzh4nRV7yqji9qFJ2INb0Uanx/WxCMD6CQJ0rDw55UouvH
# t7zGDn8FKDZJGQGxAbUav3evqWcBlkG5VzuhQli3P1+WbGF9jV0KI1nelOuafCKI
# 0enECg==
# =XvZb
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 26 Jun 2023 05:38:32 PM CEST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* tag 'pull-tcg-20230626' of https://gitlab.com/rth7680/qemu: (22 commits)
  accel/tcg: Renumber TLB_DISCARD_WRITE
  accel/tcg: Move TLB_WATCHPOINT to TLB_SLOW_FLAGS_MASK
  accel/tcg: Store some tlb flags in CPUTLBEntryFull
  accel/tcg: Remove check_tcg_memory_orders_compatible
  tcg: Add host memory barriers to cpu_ldst.h interfaces
  tcg: Do not elide memory barriers for !CF_PARALLEL in system mode
  target/microblaze: Define TCG_GUEST_DEFAULT_MO
  tcg: Fix temporary variable in tcg_gen_gvec_andcs
  accel/tcg: remove CONFIG_PROFILER
  tests/plugin: Remove duplicate insn log from libinsn.so
  softfloat: use QEMU_FLATTEN to avoid mistaken isra inlining
  cpu: Replace target_ulong with hwaddr in tb_invalidate_phys_addr()
  accel/tcg: Replace target_ulong with vaddr in translator_*()
  accel/tcg: Replace target_ulong with vaddr in *_mmu_lookup()
  accel: Replace target_ulong with vaddr in probe_*()
  accel/tcg: Widen pc to vaddr in CPUJumpCache
  accel/tcg/cpu-exec.c: Widen pc to vaddr
  accel/tcg/cputlb.c: Widen addr in MMULookupPageData
  accel/tcg/cputlb.c: Widen CPUTLBEntry access functions
  target: Widen pc/cs_base in cpu_get_tb_cpu_state
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2023-06-26 17:40:37 +02:00
commit 4329d049d5
63 changed files with 469 additions and 781 deletions
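Background for the conversions in the diff below, not part of the pull itself: vaddr is QEMU's target-independent virtual-address type, wide enough for any guest regardless of TARGET_LONG_BITS, which is what lets these prototypes and log formats drop target_ulong and TARGET_FMT_lx. A minimal sketch of the definitions assumed here (they live in QEMU's common exec headers, shown only for orientation):

    #include <inttypes.h>
    #include <stdint.h>

    /* Sketch of the type the patches below convert to; not new code in this pull. */
    typedef uint64_t vaddr;        /* holds any target's virtual address */
    #define VADDR_PRIx PRIx64      /* printf format replacing TARGET_FMT_lx */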

View File

@ -18,7 +18,7 @@ void tb_flush(CPUState *cpu)
{ {
} }
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) void tlb_set_dirty(CPUState *cpu, vaddr vaddr)
{ {
} }
@ -26,14 +26,14 @@ void tcg_flush_jmp_cache(CPUState *cpu)
{ {
} }
int probe_access_flags(CPUArchState *env, target_ulong addr, int size, int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr) bool nonfault, void **phost, uintptr_t retaddr)
{ {
g_assert_not_reached(); g_assert_not_reached();
} }
void *probe_access(CPUArchState *env, target_ulong addr, int size, void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{ {
/* Handled by hardware accelerator. */ /* Handled by hardware accelerator. */

View File

@ -169,8 +169,8 @@ uint32_t curr_cflags(CPUState *cpu)
} }
struct tb_desc { struct tb_desc {
target_ulong pc; vaddr pc;
target_ulong cs_base; uint64_t cs_base;
CPUArchState *env; CPUArchState *env;
tb_page_addr_t page_addr0; tb_page_addr_t page_addr0;
uint32_t flags; uint32_t flags;
@ -193,7 +193,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
return true; return true;
} else { } else {
tb_page_addr_t phys_page1; tb_page_addr_t phys_page1;
target_ulong virt_page1; vaddr virt_page1;
/* /*
* We know that the first page matched, and an otherwise valid TB * We know that the first page matched, and an otherwise valid TB
@ -214,8 +214,8 @@ static bool tb_lookup_cmp(const void *p, const void *d)
return false; return false;
} }
static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
target_ulong cs_base, uint32_t flags, uint64_t cs_base, uint32_t flags,
uint32_t cflags) uint32_t cflags)
{ {
tb_page_addr_t phys_pc; tb_page_addr_t phys_pc;
@ -238,9 +238,9 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
} }
/* Might cause an exception, so have a longjmp destination ready */ /* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
target_ulong cs_base, uint64_t cs_base, uint32_t flags,
uint32_t flags, uint32_t cflags) uint32_t cflags)
{ {
TranslationBlock *tb; TranslationBlock *tb;
CPUJumpCache *jc; CPUJumpCache *jc;
@ -292,13 +292,13 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
return tb; return tb;
} }
static void log_cpu_exec(target_ulong pc, CPUState *cpu, static void log_cpu_exec(vaddr pc, CPUState *cpu,
const TranslationBlock *tb) const TranslationBlock *tb)
{ {
if (qemu_log_in_addr_range(pc)) { if (qemu_log_in_addr_range(pc)) {
qemu_log_mask(CPU_LOG_EXEC, qemu_log_mask(CPU_LOG_EXEC,
"Trace %d: %p [%08" PRIx64 "Trace %d: %p [%08" PRIx64
"/" TARGET_FMT_lx "/%08x/%08x] %s\n", "/%" VADDR_PRIx "/%08x/%08x] %s\n",
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc, cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
tb->flags, tb->cflags, lookup_symbol(pc)); tb->flags, tb->cflags, lookup_symbol(pc));
@ -323,7 +323,7 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
} }
} }
static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc, static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
uint32_t *cflags) uint32_t *cflags)
{ {
CPUBreakpoint *bp; CPUBreakpoint *bp;
@ -389,7 +389,7 @@ static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
return false; return false;
} }
static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc, static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
uint32_t *cflags) uint32_t *cflags)
{ {
return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) && return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
@ -408,7 +408,8 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{ {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
TranslationBlock *tb; TranslationBlock *tb;
target_ulong cs_base, pc; vaddr pc;
uint64_t cs_base;
uint32_t flags, cflags; uint32_t flags, cflags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
@ -484,10 +485,10 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
cc->set_pc(cpu, last_tb->pc); cc->set_pc(cpu, last_tb->pc);
} }
if (qemu_loglevel_mask(CPU_LOG_EXEC)) { if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
target_ulong pc = log_pc(cpu, last_tb); vaddr pc = log_pc(cpu, last_tb);
if (qemu_log_in_addr_range(pc)) { if (qemu_log_in_addr_range(pc)) {
qemu_log("Stopped execution of TB chain before %p [" qemu_log("Stopped execution of TB chain before %p [%"
TARGET_FMT_lx "] %s\n", VADDR_PRIx "] %s\n",
last_tb->tc.ptr, pc, lookup_symbol(pc)); last_tb->tc.ptr, pc, lookup_symbol(pc));
} }
} }
@ -529,7 +530,8 @@ void cpu_exec_step_atomic(CPUState *cpu)
{ {
CPUArchState *env = cpu->env_ptr; CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb; TranslationBlock *tb;
target_ulong cs_base, pc; vaddr pc;
uint64_t cs_base;
uint32_t flags, cflags; uint32_t flags, cflags;
int tb_exit; int tb_exit;
@ -880,8 +882,8 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
} }
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
target_ulong pc, vaddr pc, TranslationBlock **last_tb,
TranslationBlock **last_tb, int *tb_exit) int *tb_exit)
{ {
int32_t insns_left; int32_t insns_left;
@ -942,7 +944,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
while (!cpu_handle_interrupt(cpu, &last_tb)) { while (!cpu_handle_interrupt(cpu, &last_tb)) {
TranslationBlock *tb; TranslationBlock *tb;
target_ulong cs_base, pc; vaddr pc;
uint64_t cs_base;
uint32_t flags, cflags; uint32_t flags, cflags;
cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags); cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);

View File

@ -99,7 +99,7 @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
desc->window_max_entries = max_entries; desc->window_max_entries = max_entries;
} }
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr) static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
{ {
CPUJumpCache *jc = cpu->tb_jmp_cache; CPUJumpCache *jc = cpu->tb_jmp_cache;
int i, i0; int i, i0;
@ -427,7 +427,7 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu)
} }
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry, static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
target_ulong page, target_ulong mask) vaddr page, vaddr mask)
{ {
page &= mask; page &= mask;
mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK; mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
@ -437,8 +437,7 @@ static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
page == (tlb_entry->addr_code & mask)); page == (tlb_entry->addr_code & mask));
} }
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
target_ulong page)
{ {
return tlb_hit_page_mask_anyprot(tlb_entry, page, -1); return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
} }
@ -454,8 +453,8 @@ static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
/* Called with tlb_c.lock held */ /* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry, static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
target_ulong page, vaddr page,
target_ulong mask) vaddr mask)
{ {
if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) { if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
memset(tlb_entry, -1, sizeof(*tlb_entry)); memset(tlb_entry, -1, sizeof(*tlb_entry));
@ -464,16 +463,15 @@ static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
return false; return false;
} }
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
target_ulong page)
{ {
return tlb_flush_entry_mask_locked(tlb_entry, page, -1); return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
} }
/* Called with tlb_c.lock held */ /* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx, static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
target_ulong page, vaddr page,
target_ulong mask) vaddr mask)
{ {
CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx]; CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
int k; int k;
@ -487,21 +485,20 @@ static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
} }
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx, static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
target_ulong page) vaddr page)
{ {
tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1); tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
} }
static void tlb_flush_page_locked(CPUArchState *env, int midx, static void tlb_flush_page_locked(CPUArchState *env, int midx, vaddr page)
target_ulong page)
{ {
target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr; vaddr lp_addr = env_tlb(env)->d[midx].large_page_addr;
target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask; vaddr lp_mask = env_tlb(env)->d[midx].large_page_mask;
/* Check if we need to flush due to large pages. */ /* Check if we need to flush due to large pages. */
if ((page & lp_mask) == lp_addr) { if ((page & lp_mask) == lp_addr) {
tlb_debug("forcing full flush midx %d (" tlb_debug("forcing full flush midx %d (%"
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", VADDR_PRIx "/%" VADDR_PRIx ")\n",
midx, lp_addr, lp_mask); midx, lp_addr, lp_mask);
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
} else { } else {
@ -522,7 +519,7 @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
* at @addr from the tlbs indicated by @idxmap from @cpu. * at @addr from the tlbs indicated by @idxmap from @cpu.
*/ */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu, static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
target_ulong addr, vaddr addr,
uint16_t idxmap) uint16_t idxmap)
{ {
CPUArchState *env = cpu->env_ptr; CPUArchState *env = cpu->env_ptr;
@ -530,7 +527,7 @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
assert_cpu_is_self(cpu); assert_cpu_is_self(cpu);
tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap); tlb_debug("page addr: %" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
qemu_spin_lock(&env_tlb(env)->c.lock); qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
@ -561,15 +558,15 @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu, static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
run_on_cpu_data data) run_on_cpu_data data)
{ {
target_ulong addr_and_idxmap = (target_ulong) data.target_ptr; vaddr addr_and_idxmap = data.target_ptr;
target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK; vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK; uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
} }
typedef struct { typedef struct {
target_ulong addr; vaddr addr;
uint16_t idxmap; uint16_t idxmap;
} TLBFlushPageByMMUIdxData; } TLBFlushPageByMMUIdxData;
@ -592,9 +589,9 @@ static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
g_free(d); g_free(d);
} }
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap) void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
{ {
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap); tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
/* This should already be page aligned */ /* This should already be page aligned */
addr &= TARGET_PAGE_MASK; addr &= TARGET_PAGE_MASK;
@ -620,15 +617,15 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
} }
} }
void tlb_flush_page(CPUState *cpu, target_ulong addr) void tlb_flush_page(CPUState *cpu, vaddr addr)
{ {
tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS); tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
} }
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr, void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
uint16_t idxmap) uint16_t idxmap)
{ {
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
/* This should already be page aligned */ /* This should already be page aligned */
addr &= TARGET_PAGE_MASK; addr &= TARGET_PAGE_MASK;
@ -660,16 +657,16 @@ void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap); tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
} }
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr) void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{ {
tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS); tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
} }
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
target_ulong addr, vaddr addr,
uint16_t idxmap) uint16_t idxmap)
{ {
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
/* This should already be page aligned */ /* This should already be page aligned */
addr &= TARGET_PAGE_MASK; addr &= TARGET_PAGE_MASK;
@ -706,18 +703,18 @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
} }
} }
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr) void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{ {
tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS); tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
} }
static void tlb_flush_range_locked(CPUArchState *env, int midx, static void tlb_flush_range_locked(CPUArchState *env, int midx,
target_ulong addr, target_ulong len, vaddr addr, vaddr len,
unsigned bits) unsigned bits)
{ {
CPUTLBDesc *d = &env_tlb(env)->d[midx]; CPUTLBDesc *d = &env_tlb(env)->d[midx];
CPUTLBDescFast *f = &env_tlb(env)->f[midx]; CPUTLBDescFast *f = &env_tlb(env)->f[midx];
target_ulong mask = MAKE_64BIT_MASK(0, bits); vaddr mask = MAKE_64BIT_MASK(0, bits);
/* /*
* If @bits is smaller than the tlb size, there may be multiple entries * If @bits is smaller than the tlb size, there may be multiple entries
@ -731,7 +728,7 @@ static void tlb_flush_range_locked(CPUArchState *env, int midx,
*/ */
if (mask < f->mask || len > f->mask) { if (mask < f->mask || len > f->mask) {
tlb_debug("forcing full flush midx %d (" tlb_debug("forcing full flush midx %d ("
TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n", "%" VADDR_PRIx "/%" VADDR_PRIx "+%" VADDR_PRIx ")\n",
midx, addr, mask, len); midx, addr, mask, len);
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
return; return;
@ -744,14 +741,14 @@ static void tlb_flush_range_locked(CPUArchState *env, int midx,
*/ */
if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) { if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
tlb_debug("forcing full flush midx %d (" tlb_debug("forcing full flush midx %d ("
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", "%" VADDR_PRIx "/%" VADDR_PRIx ")\n",
midx, d->large_page_addr, d->large_page_mask); midx, d->large_page_addr, d->large_page_mask);
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
return; return;
} }
for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) { for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
target_ulong page = addr + i; vaddr page = addr + i;
CPUTLBEntry *entry = tlb_entry(env, midx, page); CPUTLBEntry *entry = tlb_entry(env, midx, page);
if (tlb_flush_entry_mask_locked(entry, page, mask)) { if (tlb_flush_entry_mask_locked(entry, page, mask)) {
@ -762,8 +759,8 @@ static void tlb_flush_range_locked(CPUArchState *env, int midx,
} }
typedef struct { typedef struct {
target_ulong addr; vaddr addr;
target_ulong len; vaddr len;
uint16_t idxmap; uint16_t idxmap;
uint16_t bits; uint16_t bits;
} TLBFlushRangeData; } TLBFlushRangeData;
@ -776,7 +773,7 @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
assert_cpu_is_self(cpu); assert_cpu_is_self(cpu);
tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n", tlb_debug("range: %" VADDR_PRIx "/%u+%" VADDR_PRIx " mmu_map:0x%x\n",
d.addr, d.bits, d.len, d.idxmap); d.addr, d.bits, d.len, d.idxmap);
qemu_spin_lock(&env_tlb(env)->c.lock); qemu_spin_lock(&env_tlb(env)->c.lock);
@ -801,7 +798,7 @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
* overlap the flushed pages, which includes the previous. * overlap the flushed pages, which includes the previous.
*/ */
d.addr -= TARGET_PAGE_SIZE; d.addr -= TARGET_PAGE_SIZE;
for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) { for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
tb_jmp_cache_clear_page(cpu, d.addr); tb_jmp_cache_clear_page(cpu, d.addr);
d.addr += TARGET_PAGE_SIZE; d.addr += TARGET_PAGE_SIZE;
} }
@ -815,8 +812,8 @@ static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
g_free(d); g_free(d);
} }
void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
target_ulong len, uint16_t idxmap, vaddr len, uint16_t idxmap,
unsigned bits) unsigned bits)
{ {
TLBFlushRangeData d; TLBFlushRangeData d;
@ -851,14 +848,14 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
} }
} }
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits) uint16_t idxmap, unsigned bits)
{ {
tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits); tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
} }
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu, void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
target_ulong addr, target_ulong len, vaddr addr, vaddr len,
uint16_t idxmap, unsigned bits) uint16_t idxmap, unsigned bits)
{ {
TLBFlushRangeData d; TLBFlushRangeData d;
@ -898,16 +895,16 @@ void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
} }
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
target_ulong addr, vaddr addr, uint16_t idxmap,
uint16_t idxmap, unsigned bits) unsigned bits)
{ {
tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE, tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
idxmap, bits); idxmap, bits);
} }
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
target_ulong addr, vaddr addr,
target_ulong len, vaddr len,
uint16_t idxmap, uint16_t idxmap,
unsigned bits) unsigned bits)
{ {
@ -949,7 +946,7 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
} }
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
target_ulong addr, vaddr addr,
uint16_t idxmap, uint16_t idxmap,
unsigned bits) unsigned bits)
{ {
@ -1055,32 +1052,32 @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
/* Called with tlb_c.lock held */ /* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry, static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
target_ulong vaddr) vaddr addr)
{ {
if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
tlb_entry->addr_write = vaddr; tlb_entry->addr_write = addr;
} }
} }
/* update the TLB corresponding to virtual page vaddr /* update the TLB corresponding to virtual page vaddr
so that it is no longer dirty */ so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) void tlb_set_dirty(CPUState *cpu, vaddr addr)
{ {
CPUArchState *env = cpu->env_ptr; CPUArchState *env = cpu->env_ptr;
int mmu_idx; int mmu_idx;
assert_cpu_is_self(cpu); assert_cpu_is_self(cpu);
vaddr &= TARGET_PAGE_MASK; addr &= TARGET_PAGE_MASK;
qemu_spin_lock(&env_tlb(env)->c.lock); qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, addr), addr);
} }
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
int k; int k;
for (k = 0; k < CPU_VTLB_SIZE; k++) { for (k = 0; k < CPU_VTLB_SIZE; k++) {
tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], addr);
} }
} }
qemu_spin_unlock(&env_tlb(env)->c.lock); qemu_spin_unlock(&env_tlb(env)->c.lock);
@ -1089,20 +1086,20 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
/* Our TLB does not support large pages, so remember the area covered by /* Our TLB does not support large pages, so remember the area covered by
large pages and trigger a full TLB flush if these are invalidated. */ large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx, static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
target_ulong vaddr, target_ulong size) vaddr addr, uint64_t size)
{ {
target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; vaddr lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
target_ulong lp_mask = ~(size - 1); vaddr lp_mask = ~(size - 1);
if (lp_addr == (target_ulong)-1) { if (lp_addr == (vaddr)-1) {
/* No previous large page. */ /* No previous large page. */
lp_addr = vaddr; lp_addr = addr;
} else { } else {
/* Extend the existing region to include the new page. /* Extend the existing region to include the new page.
This is a compromise between unnecessary flushes and This is a compromise between unnecessary flushes and
the cost of maintaining a full variable size TLB. */ the cost of maintaining a full variable size TLB. */
lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
while (((lp_addr ^ vaddr) & lp_mask) != 0) { while (((lp_addr ^ addr) & lp_mask) != 0) {
lp_mask <<= 1; lp_mask <<= 1;
} }
} }
@ -1110,6 +1107,24 @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
} }
static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
target_ulong address, int flags,
MMUAccessType access_type, bool enable)
{
if (enable) {
address |= flags & TLB_FLAGS_MASK;
flags &= TLB_SLOW_FLAGS_MASK;
if (flags) {
address |= TLB_FORCE_SLOW;
}
} else {
address = -1;
flags = 0;
}
ent->addr_idx[access_type] = address;
full->slow_flags[access_type] = flags;
}
/* /*
* Add a new TLB entry. At most one entry for a given virtual address * Add a new TLB entry. At most one entry for a given virtual address
* is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
@ -1119,19 +1134,17 @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
* critical section. * critical section.
*/ */
void tlb_set_page_full(CPUState *cpu, int mmu_idx, void tlb_set_page_full(CPUState *cpu, int mmu_idx,
target_ulong vaddr, CPUTLBEntryFull *full) vaddr addr, CPUTLBEntryFull *full)
{ {
CPUArchState *env = cpu->env_ptr; CPUArchState *env = cpu->env_ptr;
CPUTLB *tlb = env_tlb(env); CPUTLB *tlb = env_tlb(env);
CPUTLBDesc *desc = &tlb->d[mmu_idx]; CPUTLBDesc *desc = &tlb->d[mmu_idx];
MemoryRegionSection *section; MemoryRegionSection *section;
unsigned int index; unsigned int index, read_flags, write_flags;
target_ulong address;
target_ulong write_address;
uintptr_t addend; uintptr_t addend;
CPUTLBEntry *te, tn; CPUTLBEntry *te, tn;
hwaddr iotlb, xlat, sz, paddr_page; hwaddr iotlb, xlat, sz, paddr_page;
target_ulong vaddr_page; vaddr addr_page;
int asidx, wp_flags, prot; int asidx, wp_flags, prot;
bool is_ram, is_romd; bool is_ram, is_romd;
@ -1141,9 +1154,9 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
sz = TARGET_PAGE_SIZE; sz = TARGET_PAGE_SIZE;
} else { } else {
sz = (hwaddr)1 << full->lg_page_size; sz = (hwaddr)1 << full->lg_page_size;
tlb_add_large_page(env, mmu_idx, vaddr, sz); tlb_add_large_page(env, mmu_idx, addr, sz);
} }
vaddr_page = vaddr & TARGET_PAGE_MASK; addr_page = addr & TARGET_PAGE_MASK;
paddr_page = full->phys_addr & TARGET_PAGE_MASK; paddr_page = full->phys_addr & TARGET_PAGE_MASK;
prot = full->prot; prot = full->prot;
@ -1152,17 +1165,17 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
&xlat, &sz, full->attrs, &prot); &xlat, &sz, full->attrs, &prot);
assert(sz >= TARGET_PAGE_SIZE); assert(sz >= TARGET_PAGE_SIZE);
tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" HWADDR_FMT_plx tlb_debug("vaddr=%" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
" prot=%x idx=%d\n", " prot=%x idx=%d\n",
vaddr, full->phys_addr, prot, mmu_idx); addr, full->phys_addr, prot, mmu_idx);
address = vaddr_page; read_flags = 0;
if (full->lg_page_size < TARGET_PAGE_BITS) { if (full->lg_page_size < TARGET_PAGE_BITS) {
/* Repeat the MMU check and TLB fill on every access. */ /* Repeat the MMU check and TLB fill on every access. */
address |= TLB_INVALID_MASK; read_flags |= TLB_INVALID_MASK;
} }
if (full->attrs.byte_swap) { if (full->attrs.byte_swap) {
address |= TLB_BSWAP; read_flags |= TLB_BSWAP;
} }
is_ram = memory_region_is_ram(section->mr); is_ram = memory_region_is_ram(section->mr);
@ -1176,7 +1189,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
addend = 0; addend = 0;
} }
write_address = address; write_flags = read_flags;
if (is_ram) { if (is_ram) {
iotlb = memory_region_get_ram_addr(section->mr) + xlat; iotlb = memory_region_get_ram_addr(section->mr) + xlat;
/* /*
@ -1185,9 +1198,9 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
*/ */
if (prot & PAGE_WRITE) { if (prot & PAGE_WRITE) {
if (section->readonly) { if (section->readonly) {
write_address |= TLB_DISCARD_WRITE; write_flags |= TLB_DISCARD_WRITE;
} else if (cpu_physical_memory_is_clean(iotlb)) { } else if (cpu_physical_memory_is_clean(iotlb)) {
write_address |= TLB_NOTDIRTY; write_flags |= TLB_NOTDIRTY;
} }
} }
} else { } else {
@ -1198,17 +1211,17 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
* Reads to romd devices go through the ram_ptr found above, * Reads to romd devices go through the ram_ptr found above,
* but of course reads to I/O must go through MMIO. * but of course reads to I/O must go through MMIO.
*/ */
write_address |= TLB_MMIO; write_flags |= TLB_MMIO;
if (!is_romd) { if (!is_romd) {
address = write_address; read_flags = write_flags;
} }
} }
wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
TARGET_PAGE_SIZE); TARGET_PAGE_SIZE);
index = tlb_index(env, mmu_idx, vaddr_page); index = tlb_index(env, mmu_idx, addr_page);
te = tlb_entry(env, mmu_idx, vaddr_page); te = tlb_entry(env, mmu_idx, addr_page);
/* /*
* Hold the TLB lock for the rest of the function. We could acquire/release * Hold the TLB lock for the rest of the function. We could acquire/release
@ -1223,13 +1236,13 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
tlb->c.dirty |= 1 << mmu_idx; tlb->c.dirty |= 1 << mmu_idx;
/* Make sure there's no cached translation for the new page. */ /* Make sure there's no cached translation for the new page. */
tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); tlb_flush_vtlb_page_locked(env, mmu_idx, addr_page);
/* /*
* Only evict the old entry to the victim tlb if it's for a * Only evict the old entry to the victim tlb if it's for a
* different page; otherwise just overwrite the stale data. * different page; otherwise just overwrite the stale data.
*/ */
if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
CPUTLBEntry *tv = &desc->vtable[vidx]; CPUTLBEntry *tv = &desc->vtable[vidx];
@ -1245,7 +1258,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
* TARGET_PAGE_BITS, and either * TARGET_PAGE_BITS, and either
* + the ram_addr_t of the page base of the target RAM (RAM) * + the ram_addr_t of the page base of the target RAM (RAM)
* + the offset within section->mr of the page base (I/O, ROMD) * + the offset within section->mr of the page base (I/O, ROMD)
* We subtract the vaddr_page (which is page aligned and thus won't * We subtract addr_page (which is page aligned and thus won't
* disturb the low bits) to give an offset which can be added to the * disturb the low bits) to give an offset which can be added to the
* (non-page-aligned) vaddr of the eventual memory access to get * (non-page-aligned) vaddr of the eventual memory access to get
* the MemoryRegion offset for the access. Note that the vaddr we * the MemoryRegion offset for the access. Note that the vaddr we
@ -1253,45 +1266,39 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
* vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
*/ */
desc->fulltlb[index] = *full; desc->fulltlb[index] = *full;
desc->fulltlb[index].xlat_section = iotlb - vaddr_page; full = &desc->fulltlb[index];
desc->fulltlb[index].phys_addr = paddr_page; full->xlat_section = iotlb - addr_page;
full->phys_addr = paddr_page;
/* Now calculate the new entry */ /* Now calculate the new entry */
tn.addend = addend - vaddr_page; tn.addend = addend - addr_page;
if (prot & PAGE_READ) {
tn.addr_read = address; tlb_set_compare(full, &tn, addr_page, read_flags,
MMU_INST_FETCH, prot & PAGE_EXEC);
if (wp_flags & BP_MEM_READ) { if (wp_flags & BP_MEM_READ) {
tn.addr_read |= TLB_WATCHPOINT; read_flags |= TLB_WATCHPOINT;
}
} else {
tn.addr_read = -1;
} }
tlb_set_compare(full, &tn, addr_page, read_flags,
MMU_DATA_LOAD, prot & PAGE_READ);
if (prot & PAGE_EXEC) {
tn.addr_code = address;
} else {
tn.addr_code = -1;
}
tn.addr_write = -1;
if (prot & PAGE_WRITE) {
tn.addr_write = write_address;
if (prot & PAGE_WRITE_INV) { if (prot & PAGE_WRITE_INV) {
tn.addr_write |= TLB_INVALID_MASK; write_flags |= TLB_INVALID_MASK;
} }
if (wp_flags & BP_MEM_WRITE) { if (wp_flags & BP_MEM_WRITE) {
tn.addr_write |= TLB_WATCHPOINT; write_flags |= TLB_WATCHPOINT;
}
} }
tlb_set_compare(full, &tn, addr_page, write_flags,
MMU_DATA_STORE, prot & PAGE_WRITE);
copy_tlb_helper_locked(te, &tn); copy_tlb_helper_locked(te, &tn);
tlb_n_used_entries_inc(env, mmu_idx); tlb_n_used_entries_inc(env, mmu_idx);
qemu_spin_unlock(&tlb->c.lock); qemu_spin_unlock(&tlb->c.lock);
} }
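An aside, not part of the diff: the xlat_section comment inside tlb_set_page_full() above is easiest to see with toy numbers. Because addr_page is page aligned, subtracting it leaves the low bits untouched, so adding the later, possibly unaligned access address reproduces the offset within the MemoryRegion (or the ram_addr_t for RAM). The values below are invented purely for illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy numbers only; shows the offset arithmetic cached in xlat_section. */
    int main(void)
    {
        uint64_t addr_page    = 0x12345000;        /* page-aligned guest address     */
        uint64_t iotlb        = 0x00068000;        /* page base offset within the MR */
        uint64_t xlat_section = iotlb - addr_page; /* value stored per TLB entry     */
        uint64_t access_addr  = 0x12345abc;        /* later, non-page-aligned access */

        /* Prints 68abc: the page-base offset plus the low bits of the access. */
        printf("%" PRIx64 "\n", access_addr + xlat_section);
        return 0;
    }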
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
hwaddr paddr, MemTxAttrs attrs, int prot, hwaddr paddr, MemTxAttrs attrs, int prot,
int mmu_idx, target_ulong size) int mmu_idx, uint64_t size)
{ {
CPUTLBEntryFull full = { CPUTLBEntryFull full = {
.phys_addr = paddr, .phys_addr = paddr,
@ -1301,14 +1308,14 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
}; };
assert(is_power_of_2(size)); assert(is_power_of_2(size));
tlb_set_page_full(cpu, mmu_idx, vaddr, &full); tlb_set_page_full(cpu, mmu_idx, addr, &full);
} }
void tlb_set_page(CPUState *cpu, target_ulong vaddr, void tlb_set_page(CPUState *cpu, vaddr addr,
hwaddr paddr, int prot, hwaddr paddr, int prot,
int mmu_idx, target_ulong size) int mmu_idx, uint64_t size)
{ {
tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
prot, mmu_idx, size); prot, mmu_idx, size);
} }
@ -1317,7 +1324,7 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
* caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
* be discarded and looked up again (e.g. via tlb_entry()). * be discarded and looked up again (e.g. via tlb_entry()).
*/ */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size, static void tlb_fill(CPUState *cpu, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{ {
bool ok; bool ok;
@ -1357,7 +1364,7 @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
} }
static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full, static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
int mmu_idx, target_ulong addr, uintptr_t retaddr, int mmu_idx, vaddr addr, uintptr_t retaddr,
MMUAccessType access_type, MemOp op) MMUAccessType access_type, MemOp op)
{ {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
@ -1407,7 +1414,7 @@ static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
} }
static void io_writex(CPUArchState *env, CPUTLBEntryFull *full, static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
int mmu_idx, uint64_t val, target_ulong addr, int mmu_idx, uint64_t val, vaddr addr,
uintptr_t retaddr, MemOp op) uintptr_t retaddr, MemOp op)
{ {
CPUState *cpu = env_cpu(env); CPUState *cpu = env_cpu(env);
@ -1449,14 +1456,14 @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
/* Return true if ADDR is present in the victim tlb, and has been copied /* Return true if ADDR is present in the victim tlb, and has been copied
back to the main tlb. */ back to the main tlb. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
MMUAccessType access_type, target_ulong page) MMUAccessType access_type, vaddr page)
{ {
size_t vidx; size_t vidx;
assert_cpu_is_self(env_cpu(env)); assert_cpu_is_self(env_cpu(env));
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
target_ulong cmp = tlb_read_idx(vtlb, access_type); uint64_t cmp = tlb_read_idx(vtlb, access_type);
if (cmp == page) { if (cmp == page) {
/* Found entry in victim tlb, swap tlb and iotlb. */ /* Found entry in victim tlb, swap tlb and iotlb. */
@ -1502,7 +1509,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
} }
} }
static int probe_access_internal(CPUArchState *env, target_ulong addr, static int probe_access_internal(CPUArchState *env, vaddr addr,
int fault_size, MMUAccessType access_type, int fault_size, MMUAccessType access_type,
int mmu_idx, bool nonfault, int mmu_idx, bool nonfault,
void **phost, CPUTLBEntryFull **pfull, void **phost, CPUTLBEntryFull **pfull,
@ -1510,9 +1517,10 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
{ {
uintptr_t index = tlb_index(env, mmu_idx, addr); uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
target_ulong tlb_addr = tlb_read_idx(entry, access_type); uint64_t tlb_addr = tlb_read_idx(entry, access_type);
target_ulong page_addr = addr & TARGET_PAGE_MASK; vaddr page_addr = addr & TARGET_PAGE_MASK;
int flags = TLB_FLAGS_MASK; int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
CPUTLBEntryFull *full;
if (!tlb_hit_page(tlb_addr, page_addr)) { if (!tlb_hit_page(tlb_addr, page_addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) { if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
@ -1541,7 +1549,8 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
} }
flags &= tlb_addr; flags &= tlb_addr;
*pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index]; *pfull = full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
flags |= full->slow_flags[access_type];
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@ -1554,7 +1563,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
return flags; return flags;
} }
int probe_access_full(CPUArchState *env, target_ulong addr, int size, int probe_access_full(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, CPUTLBEntryFull **pfull, bool nonfault, void **phost, CPUTLBEntryFull **pfull,
uintptr_t retaddr) uintptr_t retaddr)
@ -1571,7 +1580,7 @@ int probe_access_full(CPUArchState *env, target_ulong addr, int size,
return flags; return flags;
} }
int probe_access_flags(CPUArchState *env, target_ulong addr, int size, int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr) bool nonfault, void **phost, uintptr_t retaddr)
{ {
@ -1592,7 +1601,7 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
return flags; return flags;
} }
void *probe_access(CPUArchState *env, target_ulong addr, int size, void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{ {
CPUTLBEntryFull *full; CPUTLBEntryFull *full;
@ -1651,7 +1660,7 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
* NOTE: This function will trigger an exception if the page is * NOTE: This function will trigger an exception if the page is
* not executable. * not executable.
*/ */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp) void **hostp)
{ {
CPUTLBEntryFull *full; CPUTLBEntryFull *full;
@ -1691,13 +1700,13 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
* from the same thread (which a mem callback will be) this is safe. * from the same thread (which a mem callback will be) this is safe.
*/ */
bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
bool is_store, struct qemu_plugin_hwaddr *data) bool is_store, struct qemu_plugin_hwaddr *data)
{ {
CPUArchState *env = cpu->env_ptr; CPUArchState *env = cpu->env_ptr;
CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
uintptr_t index = tlb_index(env, mmu_idx, addr); uintptr_t index = tlb_index(env, mmu_idx, addr);
target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read; uint64_t tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
if (likely(tlb_hit(tlb_addr, addr))) { if (likely(tlb_hit(tlb_addr, addr))) {
/* We must have an iotlb entry for MMIO */ /* We must have an iotlb entry for MMIO */
@ -1732,7 +1741,7 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
typedef struct MMULookupPageData { typedef struct MMULookupPageData {
CPUTLBEntryFull *full; CPUTLBEntryFull *full;
void *haddr; void *haddr;
target_ulong addr; vaddr addr;
int flags; int flags;
int size; int size;
} MMULookupPageData; } MMULookupPageData;
@ -1759,11 +1768,13 @@ typedef struct MMULookupLocals {
static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data, static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
int mmu_idx, MMUAccessType access_type, uintptr_t ra) int mmu_idx, MMUAccessType access_type, uintptr_t ra)
{ {
target_ulong addr = data->addr; vaddr addr = data->addr;
uintptr_t index = tlb_index(env, mmu_idx, addr); uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
target_ulong tlb_addr = tlb_read_idx(entry, access_type); uint64_t tlb_addr = tlb_read_idx(entry, access_type);
bool maybe_resized = false; bool maybe_resized = false;
CPUTLBEntryFull *full;
int flags;
/* If the TLB entry is for a different page, reload and try again. */ /* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) { if (!tlb_hit(tlb_addr, addr)) {
@ -1777,8 +1788,12 @@ static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK; tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
} }
data->flags = tlb_addr & TLB_FLAGS_MASK; full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
data->full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
flags |= full->slow_flags[access_type];
data->full = full;
data->flags = flags;
/* Compute haddr speculatively; depending on flags it might be invalid. */ /* Compute haddr speculatively; depending on flags it might be invalid. */
data->haddr = (void *)((uintptr_t)addr + entry->addend); data->haddr = (void *)((uintptr_t)addr + entry->addend);
@ -1799,7 +1814,7 @@ static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data,
MMUAccessType access_type, uintptr_t ra) MMUAccessType access_type, uintptr_t ra)
{ {
CPUTLBEntryFull *full = data->full; CPUTLBEntryFull *full = data->full;
target_ulong addr = data->addr; vaddr addr = data->addr;
int flags = data->flags; int flags = data->flags;
int size = data->size; int size = data->size;
@ -1830,7 +1845,7 @@ static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data,
* Resolve the translation for the page(s) beginning at @addr, for MemOp.size * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
* bytes. Return true if the lookup crosses a page boundary. * bytes. Return true if the lookup crosses a page boundary.
*/ */
static bool mmu_lookup(CPUArchState *env, target_ulong addr, MemOpIdx oi, static bool mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType type, MMULookupLocals *l) uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
{ {
unsigned a_bits; unsigned a_bits;
@ -1901,15 +1916,15 @@ static bool mmu_lookup(CPUArchState *env, target_ulong addr, MemOpIdx oi,
* Probe for an atomic operation. Do not allow unaligned operations, * Probe for an atomic operation. Do not allow unaligned operations,
* or io operations to proceed. Return the host address. * or io operations to proceed. Return the host address.
*/ */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
MemOpIdx oi, int size, uintptr_t retaddr) int size, uintptr_t retaddr)
{ {
uintptr_t mmu_idx = get_mmuidx(oi); uintptr_t mmu_idx = get_mmuidx(oi);
MemOp mop = get_memop(oi); MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop); int a_bits = get_alignment_bits(mop);
uintptr_t index; uintptr_t index;
CPUTLBEntry *tlbe; CPUTLBEntry *tlbe;
target_ulong tlb_addr; vaddr tlb_addr;
void *hostaddr; void *hostaddr;
CPUTLBEntryFull *full; CPUTLBEntryFull *full;
@ -1966,7 +1981,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
*/ */
goto stop_the_world; goto stop_the_world;
} }
/* Collect TLB_WATCHPOINT for read. */ /* Collect tlb flags for read. */
tlb_addr |= tlbe->addr_read; tlb_addr |= tlbe->addr_read;
/* Notice an IO access or a needs-MMU-lookup access */ /* Notice an IO access or a needs-MMU-lookup access */
@ -1983,9 +1998,19 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
notdirty_write(env_cpu(env), addr, size, full, retaddr); notdirty_write(env_cpu(env), addr, size, full, retaddr);
} }
if (unlikely(tlb_addr & TLB_WATCHPOINT)) { if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, int wp_flags = 0;
BP_MEM_READ | BP_MEM_WRITE, retaddr);
if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
wp_flags |= BP_MEM_WRITE;
}
if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
wp_flags |= BP_MEM_READ;
}
if (wp_flags) {
cpu_check_watchpoint(env_cpu(env), addr, size,
full->attrs, wp_flags, retaddr);
}
} }
return hostaddr; return hostaddr;
@ -2027,7 +2052,7 @@ static uint64_t do_ld_mmio_beN(CPUArchState *env, MMULookupPageData *p,
MMUAccessType type, uintptr_t ra) MMUAccessType type, uintptr_t ra)
{ {
CPUTLBEntryFull *full = p->full; CPUTLBEntryFull *full = p->full;
target_ulong addr = p->addr; vaddr addr = p->addr;
int i, size = p->size; int i, size = p->size;
QEMU_IOTHREAD_LOCK_GUARD(); QEMU_IOTHREAD_LOCK_GUARD();
@ -2336,12 +2361,13 @@ static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
return ret; return ret;
} }
static uint8_t do_ld1_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, static uint8_t do_ld1_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type) uintptr_t ra, MMUAccessType access_type)
{ {
MMULookupLocals l; MMULookupLocals l;
bool crosspage; bool crosspage;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
tcg_debug_assert(!crosspage); tcg_debug_assert(!crosspage);
@ -2355,7 +2381,7 @@ tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
} }
static uint16_t do_ld2_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, static uint16_t do_ld2_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type) uintptr_t ra, MMUAccessType access_type)
{ {
MMULookupLocals l; MMULookupLocals l;
@ -2363,6 +2389,7 @@ static uint16_t do_ld2_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
uint16_t ret; uint16_t ret;
uint8_t a, b; uint8_t a, b;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) { if (likely(!crosspage)) {
return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@ -2386,13 +2413,14 @@ tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
} }
static uint32_t do_ld4_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, static uint32_t do_ld4_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type) uintptr_t ra, MMUAccessType access_type)
{ {
MMULookupLocals l; MMULookupLocals l;
bool crosspage; bool crosspage;
uint32_t ret; uint32_t ret;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) { if (likely(!crosspage)) {
return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@ -2413,13 +2441,14 @@ tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
} }
static uint64_t do_ld8_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, static uint64_t do_ld8_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type) uintptr_t ra, MMUAccessType access_type)
{ {
MMULookupLocals l; MMULookupLocals l;
bool crosspage; bool crosspage;
uint64_t ret; uint64_t ret;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) { if (likely(!crosspage)) {
return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@ -2463,7 +2492,7 @@ tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr); return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
} }
static Int128 do_ld16_mmu(CPUArchState *env, target_ulong addr, static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra) MemOpIdx oi, uintptr_t ra)
{ {
MMULookupLocals l; MMULookupLocals l;
@ -2472,6 +2501,7 @@ static Int128 do_ld16_mmu(CPUArchState *env, target_ulong addr,
Int128 ret; Int128 ret;
int first; int first;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l); crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
if (likely(!crosspage)) { if (likely(!crosspage)) {
/* Perform the load host endian. */ /* Perform the load host endian. */
@ -2620,7 +2650,7 @@ static uint64_t do_st_mmio_leN(CPUArchState *env, MMULookupPageData *p,
uint64_t val_le, int mmu_idx, uintptr_t ra) uint64_t val_le, int mmu_idx, uintptr_t ra)
{ {
CPUTLBEntryFull *full = p->full; CPUTLBEntryFull *full = p->full;
target_ulong addr = p->addr; vaddr addr = p->addr;
int i, size = p->size; int i, size = p->size;
QEMU_IOTHREAD_LOCK_GUARD(); QEMU_IOTHREAD_LOCK_GUARD();
@ -2805,19 +2835,21 @@ void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
bool crosspage; bool crosspage;
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
tcg_debug_assert(!crosspage); tcg_debug_assert(!crosspage);
do_st_1(env, &l.page[0], val, l.mmu_idx, ra); do_st_1(env, &l.page[0], val, l.mmu_idx, ra);
} }
static void do_st2_mmu(CPUArchState *env, target_ulong addr, uint16_t val, static void do_st2_mmu(CPUArchState *env, vaddr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra) MemOpIdx oi, uintptr_t ra)
{ {
MMULookupLocals l; MMULookupLocals l;
bool crosspage; bool crosspage;
uint8_t a, b; uint8_t a, b;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) { if (likely(!crosspage)) {
do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra); do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
@ -2840,12 +2872,13 @@ void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
do_st2_mmu(env, addr, val, oi, retaddr); do_st2_mmu(env, addr, val, oi, retaddr);
} }
static void do_st4_mmu(CPUArchState *env, target_ulong addr, uint32_t val, static void do_st4_mmu(CPUArchState *env, vaddr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra) MemOpIdx oi, uintptr_t ra)
{ {
MMULookupLocals l; MMULookupLocals l;
bool crosspage; bool crosspage;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) { if (likely(!crosspage)) {
do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra); do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
@ -2867,12 +2900,13 @@ void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
do_st4_mmu(env, addr, val, oi, retaddr); do_st4_mmu(env, addr, val, oi, retaddr);
} }
static void do_st8_mmu(CPUArchState *env, target_ulong addr, uint64_t val, static void do_st8_mmu(CPUArchState *env, vaddr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra) MemOpIdx oi, uintptr_t ra)
{ {
MMULookupLocals l; MMULookupLocals l;
bool crosspage; bool crosspage;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) { if (likely(!crosspage)) {
do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra); do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
@ -2894,7 +2928,7 @@ void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
do_st8_mmu(env, addr, val, oi, retaddr); do_st8_mmu(env, addr, val, oi, retaddr);
} }
static void do_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
MemOpIdx oi, uintptr_t ra) MemOpIdx oi, uintptr_t ra)
{ {
MMULookupLocals l; MMULookupLocals l;
@ -2902,6 +2936,7 @@ static void do_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val,
uint64_t a, b; uint64_t a, b;
int first; int first;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) { if (likely(!crosspage)) {
/* Swap to host endian if necessary, then store. */ /* Swap to host endian if necessary, then store. */


@ -42,8 +42,8 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */ #endif /* CONFIG_SOFTMMU */
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc, TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
target_ulong cs_base, uint32_t flags, uint64_t cs_base, uint32_t flags,
int cflags); int cflags);
void page_init(void); void page_init(void);
void tb_htable_init(void); void tb_htable_init(void);
@ -55,7 +55,7 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc); uintptr_t host_pc);
/* Return the current PC from CPU, which may be cached in TB. */ /* Return the current PC from CPU, which may be cached in TB. */
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb) static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
{ {
if (tb_cflags(tb) & CF_PCREL) { if (tb_cflags(tb) & CF_PCREL) {
return cpu->cc->get_pc(cpu); return cpu->cc->get_pc(cpu);
@ -78,4 +78,38 @@ extern int64_t max_advance;
extern bool one_insn_per_tb; extern bool one_insn_per_tb;
/**
* tcg_req_mo:
* @type: TCGBar
*
* Filter @type to the barrier that is required for the guest
* memory ordering vs the host memory ordering. A non-zero
* result indicates that some barrier is required.
*
* If TCG_GUEST_DEFAULT_MO is not defined, assume that the
* guest requires strict ordering.
*
* This is a macro so that it's constant even without optimization.
*/
#ifdef TCG_GUEST_DEFAULT_MO
# define tcg_req_mo(type) \
((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
#else
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
#endif
/**
* cpu_req_mo:
* @type: TCGBar
*
* If tcg_req_mo indicates a barrier for @type is required
* for the guest memory model, issue a host memory barrier.
*/
#define cpu_req_mo(type) \
do { \
if (tcg_req_mo(type)) { \
smp_mb(); \
} \
} while (0)
#endif /* ACCEL_TCG_INTERNAL_H */ #endif /* ACCEL_TCG_INTERNAL_H */
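The cpu_req_mo() calls added to the load/store helpers in this pull all funnel through these two macros. Below is a minimal, self-contained sketch of the filtering they perform; the TCG_MO_* bit values and the smp_mb() stand-in are illustrative placeholders rather than the real QEMU definitions, while the two macros themselves mirror the hunk above.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative bit values; QEMU's real ones live in tcg/tcg.h. */
#define TCG_MO_LD_LD  0x01
#define TCG_MO_ST_LD  0x02
#define TCG_MO_LD_ST  0x04
#define TCG_MO_ST_ST  0x08

/* Example configuration: strongly ordered guest on a weakly ordered host. */
#define TCG_GUEST_DEFAULT_MO \
    (TCG_MO_LD_LD | TCG_MO_ST_LD | TCG_MO_LD_ST | TCG_MO_ST_ST)
#define TCG_TARGET_DEFAULT_MO 0

/* Stand-in for QEMU's smp_mb(). */
#define smp_mb() atomic_thread_fence(memory_order_seq_cst)

#define tcg_req_mo(type) \
    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)

#define cpu_req_mo(type)          \
    do {                          \
        if (tcg_req_mo(type)) {   \
            smp_mb();             \
        }                         \
    } while (0)

int main(void)
{
    /* What a store helper now does before its TLB lookup. */
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    printf("store barrier %s\n",
           tcg_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST) ? "issued" : "elided");
    return 0;
}

With a host backend whose TCG_TARGET_DEFAULT_MO already covers all four orderings (an x86-like host), tcg_req_mo() folds to zero at compile time and the barrier disappears entirely.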


@ -81,37 +81,6 @@ HumanReadableText *qmp_x_query_opcount(Error **errp)
return human_readable_text_from_str(buf); return human_readable_text_from_str(buf);
} }
#ifdef CONFIG_PROFILER
int64_t dev_time;
HumanReadableText *qmp_x_query_profile(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
static int64_t last_cpu_exec_time;
int64_t cpu_exec_time;
int64_t delta;
cpu_exec_time = tcg_cpu_exec_time();
delta = cpu_exec_time - last_cpu_exec_time;
g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n",
dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n",
delta, delta / (double)NANOSECONDS_PER_SECOND);
last_cpu_exec_time = cpu_exec_time;
dev_time = 0;
return human_readable_text_from_str(buf);
}
#else
HumanReadableText *qmp_x_query_profile(Error **errp)
{
error_setg(errp, "Internal profiler not compiled");
return NULL;
}
#endif
static void hmp_tcg_register(void) static void hmp_tcg_register(void)
{ {
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit); monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);


@ -35,16 +35,16 @@
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1) #define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE) #define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc) static inline unsigned int tb_jmp_cache_hash_page(vaddr pc)
{ {
target_ulong tmp; vaddr tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK; return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
} }
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
{ {
target_ulong tmp; vaddr tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK) return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
| (tmp & TB_JMP_ADDR_MASK)); | (tmp & TB_JMP_ADDR_MASK));
@ -53,7 +53,7 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
#else #else
/* In user-mode we can get better hashing because we do not have a TLB */ /* In user-mode we can get better hashing because we do not have a TLB */
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
{ {
return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1); return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
} }
@ -61,7 +61,7 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
#endif /* CONFIG_SOFTMMU */ #endif /* CONFIG_SOFTMMU */
static inline static inline
uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t tb_hash_func(tb_page_addr_t phys_pc, vaddr pc,
uint32_t flags, uint64_t flags2, uint32_t cf_mask) uint32_t flags, uint64_t flags2, uint32_t cf_mask)
{ {
return qemu_xxhash8(phys_pc, pc, flags2, flags, cf_mask); return qemu_xxhash8(phys_pc, pc, flags2, flags, cf_mask);


@ -21,7 +21,7 @@ struct CPUJumpCache {
struct rcu_head rcu; struct rcu_head rcu;
struct { struct {
TranslationBlock *tb; TranslationBlock *tb;
target_ulong pc; vaddr pc;
} array[TB_JMP_CACHE_SIZE]; } array[TB_JMP_CACHE_SIZE];
}; };


@ -98,7 +98,7 @@ static void tb_remove_all(void)
/* Call with mmap_lock held. */ /* Call with mmap_lock held. */
static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2) static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
{ {
target_ulong addr; vaddr addr;
int flags; int flags;
assert_memory_lock(); assert_memory_lock();


@ -70,20 +70,10 @@ void tcg_cpus_destroy(CPUState *cpu)
int tcg_cpus_exec(CPUState *cpu) int tcg_cpus_exec(CPUState *cpu)
{ {
int ret; int ret;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
assert(tcg_enabled()); assert(tcg_enabled());
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
cpu_exec_start(cpu); cpu_exec_start(cpu);
ret = cpu_exec(cpu); ret = cpu_exec(cpu);
cpu_exec_end(cpu); cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
qatomic_set(&tcg_ctx->prof.cpu_exec_time,
tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
#endif
return ret; return ret;
} }


@ -64,37 +64,23 @@ DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
* they can set the appropriate CONFIG flags in ${target}-softmmu.mak * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
* *
* Once a guest architecture has been converted to the new primitives * Once a guest architecture has been converted to the new primitives
* there are two remaining limitations to check. * there is one remaining limitation to check:
*
* - The guest can't be oversized (e.g. 64 bit guest on 32 bit host) * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
* - The host must have a stronger memory order than the guest
*
* It may be possible in future to support strong guests on weak hosts
* but that will require tagging all load/stores in a guest with their
* implicit memory order requirements which would likely slow things
* down a lot.
*/ */
static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
return false;
#endif
}
static bool default_mttcg_enabled(void) static bool default_mttcg_enabled(void)
{ {
if (icount_enabled() || TCG_OVERSIZED_GUEST) { if (icount_enabled() || TCG_OVERSIZED_GUEST) {
return false; return false;
} else { }
#ifdef TARGET_SUPPORTS_MTTCG #ifdef TARGET_SUPPORTS_MTTCG
return check_tcg_memory_orders_compatible(); # ifndef TCG_GUEST_DEFAULT_MO
# error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO"
# endif
return true;
#else #else
return false; return false;
#endif #endif
}
} }
static void tcg_accel_instance_init(Object *obj) static void tcg_accel_instance_init(Object *obj)
@ -162,11 +148,6 @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp)
warn_report("Guest not yet converted to MTTCG - " warn_report("Guest not yet converted to MTTCG - "
"you may get unexpected results"); "you may get unexpected results");
#endif #endif
if (!check_tcg_memory_orders_compatible()) {
warn_report("Guest expects a stronger memory ordering "
"than the host provides");
error_printf("This may cause strange/hard to debug errors\n");
}
s->mttcg_enabled = true; s->mttcg_enabled = true;
} }
} else if (strcmp(value, "single") == 0) { } else if (strcmp(value, "single") == 0) {
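With the runtime memory-ordering compatibility check gone, declaring MTTCG support now implies stating the guest memory model at build time, which the #error above enforces. A hypothetical target header would look roughly like this (the target name and the chosen ordering are made up for illustration, not taken from any real target):

/* target/foo/cpu.h: hypothetical target, illustration only. */
#define TARGET_SUPPORTS_MTTCG
/* This imaginary guest only orders stores against earlier stores. */
#define TCG_GUEST_DEFAULT_MO  TCG_MO_ST_ST

Any ordering the guest needs beyond what the host backend provides is then supplied at run time by cpu_req_mo()/tcg_req_mo() instead of by refusing to enable MTTCG.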


@ -202,10 +202,6 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc) uintptr_t host_pc)
{ {
uint64_t data[TARGET_INSN_START_WORDS]; uint64_t data[TARGET_INSN_START_WORDS];
#ifdef CONFIG_PROFILER
TCGProfile *prof = &tcg_ctx->prof;
int64_t ti = profile_getclock();
#endif
int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data); int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
if (insns_left < 0) { if (insns_left < 0) {
@ -222,12 +218,6 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
} }
cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data); cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
#ifdef CONFIG_PROFILER
qatomic_set(&prof->restore_time,
prof->restore_time + profile_getclock() - ti);
qatomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
} }
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc) bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
@ -274,7 +264,7 @@ void page_init(void)
* Return the size of the generated code, or negative on error. * Return the size of the generated code, or negative on error.
*/ */
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb, static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
target_ulong pc, void *host_pc, vaddr pc, void *host_pc,
int *max_insns, int64_t *ti) int *max_insns, int64_t *ti)
{ {
int ret = sigsetjmp(tcg_ctx->jmp_trans, 0); int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
@ -290,19 +280,12 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
tcg_ctx->cpu = NULL; tcg_ctx->cpu = NULL;
*max_insns = tb->icount; *max_insns = tb->icount;
#ifdef CONFIG_PROFILER
qatomic_set(&tcg_ctx->prof.tb_count, tcg_ctx->prof.tb_count + 1);
qatomic_set(&tcg_ctx->prof.interm_time,
tcg_ctx->prof.interm_time + profile_getclock() - *ti);
*ti = profile_getclock();
#endif
return tcg_gen_code(tcg_ctx, tb, pc); return tcg_gen_code(tcg_ctx, tb, pc);
} }
/* Called with mmap_lock held for user mode emulation. */ /* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu, TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base, vaddr pc, uint64_t cs_base,
uint32_t flags, int cflags) uint32_t flags, int cflags)
{ {
CPUArchState *env = cpu->env_ptr; CPUArchState *env = cpu->env_ptr;
@ -310,9 +293,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb_page_addr_t phys_pc; tb_page_addr_t phys_pc;
tcg_insn_unit *gen_code_buf; tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size, max_insns; int gen_code_size, search_size, max_insns;
#ifdef CONFIG_PROFILER
TCGProfile *prof = &tcg_ctx->prof;
#endif
int64_t ti; int64_t ti;
void *host_pc; void *host_pc;
@ -371,12 +351,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb_overflow: tb_overflow:
#ifdef CONFIG_PROFILER
/* includes aborted translations because of exceptions */
qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
ti = profile_getclock();
#endif
trace_translate_block(tb, pc, tb->tc.ptr); trace_translate_block(tb, pc, tb->tc.ptr);
gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti); gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
@ -431,13 +405,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
*/ */
perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf)); perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
#ifdef CONFIG_PROFILER
qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
qemu_log_in_addr_range(pc)) { qemu_log_in_addr_range(pc)) {
FILE *logfile = qemu_log_trylock(); FILE *logfile = qemu_log_trylock();
@ -580,7 +547,8 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
/* The exception probably happened in a helper. The CPU state should /* The exception probably happened in a helper. The CPU state should
have been saved before calling it. Fetch the PC from there. */ have been saved before calling it. Fetch the PC from there. */
CPUArchState *env = cpu->env_ptr; CPUArchState *env = cpu->env_ptr;
target_ulong pc, cs_base; vaddr pc;
uint64_t cs_base;
tb_page_addr_t addr; tb_page_addr_t addr;
uint32_t flags; uint32_t flags;
@ -634,10 +602,10 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n; cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
if (qemu_loglevel_mask(CPU_LOG_EXEC)) { if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
target_ulong pc = log_pc(cpu, tb); vaddr pc = log_pc(cpu, tb);
if (qemu_log_in_addr_range(pc)) { if (qemu_log_in_addr_range(pc)) {
qemu_log("cpu_io_recompile: rewound execution of TB to " qemu_log("cpu_io_recompile: rewound execution of TB to %"
TARGET_FMT_lx "\n", pc); VADDR_PRIx "\n", pc);
} }
} }


@ -117,7 +117,7 @@ static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags,
} }
} }
bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest) bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
{ {
/* Suppress goto_tb if requested. */ /* Suppress goto_tb if requested. */
if (tb_cflags(db->tb) & CF_NO_GOTO_TB) { if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
@ -129,8 +129,8 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
} }
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc, vaddr pc, void *host_pc, const TranslatorOps *ops,
const TranslatorOps *ops, DisasContextBase *db) DisasContextBase *db)
{ {
uint32_t cflags = tb_cflags(tb); uint32_t cflags = tb_cflags(tb);
TCGOp *icount_start_insn; TCGOp *icount_start_insn;
@ -235,10 +235,10 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
} }
static void *translator_access(CPUArchState *env, DisasContextBase *db, static void *translator_access(CPUArchState *env, DisasContextBase *db,
target_ulong pc, size_t len) vaddr pc, size_t len)
{ {
void *host; void *host;
target_ulong base, end; vaddr base, end;
TranslationBlock *tb; TranslationBlock *tb;
tb = db->tb; tb = db->tb;


@ -721,7 +721,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
return current_tb_invalidated ? 2 : 1; return current_tb_invalidated ? 2 : 1;
} }
static int probe_access_internal(CPUArchState *env, target_ulong addr, static int probe_access_internal(CPUArchState *env, vaddr addr,
int fault_size, MMUAccessType access_type, int fault_size, MMUAccessType access_type,
bool nonfault, uintptr_t ra) bool nonfault, uintptr_t ra)
{ {
@ -759,7 +759,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra); cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
} }
int probe_access_flags(CPUArchState *env, target_ulong addr, int size, int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t ra) bool nonfault, void **phost, uintptr_t ra)
{ {
@ -771,7 +771,7 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
return flags; return flags;
} }
void *probe_access(CPUArchState *env, target_ulong addr, int size, void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t ra) MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{ {
int flags; int flags;
@ -783,7 +783,7 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
return size ? g2h(env_cpu(env), addr) : NULL; return size ? g2h(env_cpu(env), addr) : NULL;
} }
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp) void **hostp)
{ {
int flags; int flags;
@ -889,7 +889,7 @@ void page_reset_target_data(target_ulong start, target_ulong last) { }
/* The softmmu versions of these helpers are in cputlb.c. */ /* The softmmu versions of these helpers are in cputlb.c. */
static void *cpu_mmu_lookup(CPUArchState *env, abi_ptr addr, static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
MemOp mop, uintptr_t ra, MMUAccessType type) MemOp mop, uintptr_t ra, MMUAccessType type)
{ {
int a_bits = get_alignment_bits(mop); int a_bits = get_alignment_bits(mop);
@ -914,6 +914,7 @@ static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
uint8_t ret; uint8_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_8); tcg_debug_assert((mop & MO_SIZE) == MO_8);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = ldub_p(haddr); ret = ldub_p(haddr);
clear_helper_retaddr(); clear_helper_retaddr();
@ -947,6 +948,7 @@ static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
uint16_t ret; uint16_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_16); tcg_debug_assert((mop & MO_SIZE) == MO_16);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_2(env, ra, haddr, mop); ret = load_atom_2(env, ra, haddr, mop);
clear_helper_retaddr(); clear_helper_retaddr();
@ -984,6 +986,7 @@ static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
uint32_t ret; uint32_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_32); tcg_debug_assert((mop & MO_SIZE) == MO_32);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_4(env, ra, haddr, mop); ret = load_atom_4(env, ra, haddr, mop);
clear_helper_retaddr(); clear_helper_retaddr();
@ -1021,6 +1024,7 @@ static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
uint64_t ret; uint64_t ret;
tcg_debug_assert((mop & MO_SIZE) == MO_64); tcg_debug_assert((mop & MO_SIZE) == MO_64);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_8(env, ra, haddr, mop); ret = load_atom_8(env, ra, haddr, mop);
clear_helper_retaddr(); clear_helper_retaddr();
@ -1052,6 +1056,7 @@ static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
Int128 ret; Int128 ret;
tcg_debug_assert((mop & MO_SIZE) == MO_128); tcg_debug_assert((mop & MO_SIZE) == MO_128);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD); haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_16(env, ra, haddr, mop); ret = load_atom_16(env, ra, haddr, mop);
clear_helper_retaddr(); clear_helper_retaddr();
@ -1087,6 +1092,7 @@ static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
void *haddr; void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_8); tcg_debug_assert((mop & MO_SIZE) == MO_8);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
stb_p(haddr, val); stb_p(haddr, val);
clear_helper_retaddr(); clear_helper_retaddr();
@ -1111,6 +1117,7 @@ static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
void *haddr; void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_16); tcg_debug_assert((mop & MO_SIZE) == MO_16);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) { if (mop & MO_BSWAP) {
@ -1139,6 +1146,7 @@ static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
void *haddr; void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_32); tcg_debug_assert((mop & MO_SIZE) == MO_32);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) { if (mop & MO_BSWAP) {
@ -1167,6 +1175,7 @@ static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
void *haddr; void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_64); tcg_debug_assert((mop & MO_SIZE) == MO_64);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) { if (mop & MO_BSWAP) {
@ -1195,6 +1204,7 @@ static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
void *haddr; void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_128); tcg_debug_assert((mop & MO_SIZE) == MO_128);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE); haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) { if (mop & MO_BSWAP) {
@ -1324,8 +1334,8 @@ uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
/* /*
* Do not allow unaligned operations to proceed. Return the host address. * Do not allow unaligned operations to proceed. Return the host address.
*/ */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
MemOpIdx oi, int size, uintptr_t retaddr) int size, uintptr_t retaddr)
{ {
MemOp mop = get_memop(oi); MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop); int a_bits = get_alignment_bits(mop);

cpu.c

@ -293,7 +293,7 @@ void list_cpus(void)
} }
#if defined(CONFIG_USER_ONLY) #if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr) void tb_invalidate_phys_addr(hwaddr addr)
{ {
mmap_lock(); mmap_lock();
tb_invalidate_phys_page(addr); tb_invalidate_phys_page(addr);


@ -593,27 +593,27 @@ static void unpack_raw64(FloatParts64 *r, const FloatFmt *fmt, uint64_t raw)
}; };
} }
static inline void float16_unpack_raw(FloatParts64 *p, float16 f) static void QEMU_FLATTEN float16_unpack_raw(FloatParts64 *p, float16 f)
{ {
unpack_raw64(p, &float16_params, f); unpack_raw64(p, &float16_params, f);
} }
static inline void bfloat16_unpack_raw(FloatParts64 *p, bfloat16 f) static void QEMU_FLATTEN bfloat16_unpack_raw(FloatParts64 *p, bfloat16 f)
{ {
unpack_raw64(p, &bfloat16_params, f); unpack_raw64(p, &bfloat16_params, f);
} }
static inline void float32_unpack_raw(FloatParts64 *p, float32 f) static void QEMU_FLATTEN float32_unpack_raw(FloatParts64 *p, float32 f)
{ {
unpack_raw64(p, &float32_params, f); unpack_raw64(p, &float32_params, f);
} }
static inline void float64_unpack_raw(FloatParts64 *p, float64 f) static void QEMU_FLATTEN float64_unpack_raw(FloatParts64 *p, float64 f)
{ {
unpack_raw64(p, &float64_params, f); unpack_raw64(p, &float64_params, f);
} }
static void floatx80_unpack_raw(FloatParts128 *p, floatx80 f) static void QEMU_FLATTEN floatx80_unpack_raw(FloatParts128 *p, floatx80 f)
{ {
*p = (FloatParts128) { *p = (FloatParts128) {
.cls = float_class_unclassified, .cls = float_class_unclassified,
@ -623,7 +623,7 @@ static void floatx80_unpack_raw(FloatParts128 *p, floatx80 f)
}; };
} }
static void float128_unpack_raw(FloatParts128 *p, float128 f) static void QEMU_FLATTEN float128_unpack_raw(FloatParts128 *p, float128 f)
{ {
const int f_size = float128_params.frac_size - 64; const int f_size = float128_params.frac_size - 64;
const int e_size = float128_params.exp_size; const int e_size = float128_params.exp_size;
@ -650,27 +650,27 @@ static uint64_t pack_raw64(const FloatParts64 *p, const FloatFmt *fmt)
return ret; return ret;
} }
static inline float16 float16_pack_raw(const FloatParts64 *p) static float16 QEMU_FLATTEN float16_pack_raw(const FloatParts64 *p)
{ {
return make_float16(pack_raw64(p, &float16_params)); return make_float16(pack_raw64(p, &float16_params));
} }
static inline bfloat16 bfloat16_pack_raw(const FloatParts64 *p) static bfloat16 QEMU_FLATTEN bfloat16_pack_raw(const FloatParts64 *p)
{ {
return pack_raw64(p, &bfloat16_params); return pack_raw64(p, &bfloat16_params);
} }
static inline float32 float32_pack_raw(const FloatParts64 *p) static float32 QEMU_FLATTEN float32_pack_raw(const FloatParts64 *p)
{ {
return make_float32(pack_raw64(p, &float32_params)); return make_float32(pack_raw64(p, &float32_params));
} }
static inline float64 float64_pack_raw(const FloatParts64 *p) static float64 QEMU_FLATTEN float64_pack_raw(const FloatParts64 *p)
{ {
return make_float64(pack_raw64(p, &float64_params)); return make_float64(pack_raw64(p, &float64_params));
} }
static float128 float128_pack_raw(const FloatParts128 *p) static float128 QEMU_FLATTEN float128_pack_raw(const FloatParts128 *p)
{ {
const int f_size = float128_params.frac_size - 64; const int f_size = float128_params.frac_size - 64;
const int e_size = float128_params.exp_size; const int e_size = float128_params.exp_size;
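For context, QEMU_FLATTEN wraps the compiler's flatten attribute, which inlines every call made inside the annotated function into its body; that is the effect the pack/unpack helpers above now rely on instead of the interprocedural (ISRA) heuristics the commit title refers to. A stand-alone sketch of the attribute's behaviour, with toy functions in place of the softfloat helpers (the macro below matches the shape of the one in include/qemu/compiler.h, but check the tree for the authoritative definition):

#include <stdio.h>

#define QEMU_FLATTEN __attribute__((flatten))

static int add(int a, int b) { return a + b; }

/* Both calls to add() are inlined into sum3() itself. */
static int QEMU_FLATTEN sum3(int a, int b, int c)
{
    return add(add(a, b), c);
}

int main(void)
{
    printf("%d\n", sum3(1, 2, 3));
    return 0;
}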


@ -360,21 +360,6 @@ SRST
Show host USB devices. Show host USB devices.
ERST ERST
#if defined(CONFIG_TCG)
{
.name = "profile",
.args_type = "",
.params = "",
.help = "show profiling information",
.cmd_info_hrt = qmp_x_query_profile,
},
#endif
SRST
``info profile``
Show profiling information.
ERST
{ {
.name = "capture", .name = "capture",
.args_type = "", .args_type = "",


@ -325,19 +325,32 @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2)) #define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback. */ /* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3)) #define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
/* Set if TLB entry contains a watchpoint. */
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if TLB entry requires byte swap. */
#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
/* Set if TLB entry writes ignored. */ /* Set if TLB entry writes ignored. */
#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6)) #define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 5))
/* Use this mask to check interception with an alignment mask /*
* Use this mask to check interception with an alignment mask
* in a TCG backend. * in a TCG backend.
*/ */
#define TLB_FLAGS_MASK \ #define TLB_FLAGS_MASK \
(TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \ (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
| TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE) | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
/*
* Flags stored in CPUTLBEntryFull.slow_flags[x].
* TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
*/
/* Set if TLB entry requires byte swap. */
#define TLB_BSWAP (1 << 0)
/* Set if TLB entry contains a watchpoint. */
#define TLB_WATCHPOINT (1 << 1)
#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT)
/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
/** /**
* tlb_hit_page: return true if page aligned @addr is a hit against the * tlb_hit_page: return true if page aligned @addr is a hit against the


@ -124,6 +124,12 @@ typedef struct CPUTLBEntryFull {
/* @lg_page_size contains the log2 of the page size. */ /* @lg_page_size contains the log2 of the page size. */
uint8_t lg_page_size; uint8_t lg_page_size;
/*
* Additional tlb flags for use by the slow path. If non-zero,
* the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
*/
uint8_t slow_flags[MMU_ACCESS_COUNT];
/* /*
* Allow target-specific additions to this structure. * Allow target-specific additions to this structure.
* This may be used to cache items from the guest cpu * This may be used to cache items from the guest cpu
@ -147,8 +153,8 @@ typedef struct CPUTLBDesc {
* we must flush the entire tlb. The region is matched if * we must flush the entire tlb. The region is matched if
* (addr & large_page_mask) == large_page_addr. * (addr & large_page_mask) == large_page_addr.
*/ */
target_ulong large_page_addr; vaddr large_page_addr;
target_ulong large_page_mask; vaddr large_page_mask;
/* host time (in ns) at the beginning of the time window */ /* host time (in ns) at the beginning of the time window */
int64_t window_begin_ns; int64_t window_begin_ns;
/* maximum number of entries observed in the window */ /* maximum number of entries observed in the window */
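Taken together, the hunks above split the TLB flags in two: cheap bits stay in the CPUTLBEntry comparator word, while the rare ones (TLB_BSWAP, TLB_WATCHPOINT) move into CPUTLBEntryFull.slow_flags[], with TLB_FORCE_SLOW in the comparator signalling that the latter must be consulted. A self-contained toy model of that lookup follows; the structure layout and constant values are illustrative, not QEMU's real ones.

#include <stdint.h>
#include <stdio.h>

#define TLB_FORCE_SLOW      (1 << 7)   /* kept in the comparator word */
#define TLB_BSWAP           (1 << 0)   /* kept in slow_flags[] */
#define TLB_WATCHPOINT      (1 << 1)
#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT)

enum { DATA_LOAD, DATA_STORE, INST_FETCH, ACCESS_COUNT };

struct ToyEntry     { uint64_t addr[ACCESS_COUNT]; };      /* comparators */
struct ToyEntryFull { uint8_t slow_flags[ACCESS_COUNT]; }; /* slow-path bits */

static int effective_flags(const struct ToyEntry *e,
                           const struct ToyEntryFull *f, int type)
{
    int flags = e->addr[type] & 0xff;   /* low bits of the comparator */

    if (flags & TLB_FORCE_SLOW) {
        flags |= f->slow_flags[type] & TLB_SLOW_FLAGS_MASK;
    }
    return flags;
}

int main(void)
{
    struct ToyEntry e = { .addr = { [DATA_STORE] = TLB_FORCE_SLOW } };
    struct ToyEntryFull f = { .slow_flags = { [DATA_STORE] = TLB_WATCHPOINT } };

    printf("store hits a watchpoint: %s\n",
           effective_flags(&e, &f, DATA_STORE) & TLB_WATCHPOINT ? "yes" : "no");
    return 0;
}

The payoff is that the common fast path only ever tests the cheap mask; the extra load from the full entry happens only when TLB_FORCE_SLOW is set.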


@ -328,7 +328,7 @@ static inline void clear_helper_retaddr(void)
#include "tcg/oversized-guest.h" #include "tcg/oversized-guest.h"
static inline target_ulong tlb_read_idx(const CPUTLBEntry *entry, static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
MMUAccessType access_type) MMUAccessType access_type)
{ {
/* Do not rearrange the CPUTLBEntry structure members. */ /* Do not rearrange the CPUTLBEntry structure members. */
@ -355,14 +355,14 @@ static inline target_ulong tlb_read_idx(const CPUTLBEntry *entry,
#endif #endif
} }
static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry) static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
{ {
return tlb_read_idx(entry, MMU_DATA_STORE); return tlb_read_idx(entry, MMU_DATA_STORE);
} }
/* Find the TLB index corresponding to the mmu_idx + address pair. */ /* Find the TLB index corresponding to the mmu_idx + address pair. */
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx, static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
target_ulong addr) vaddr addr)
{ {
uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS; uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
@ -371,7 +371,7 @@ static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
/* Find the TLB entry corresponding to the mmu_idx + address pair. */ /* Find the TLB entry corresponding to the mmu_idx + address pair. */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx, static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
target_ulong addr) vaddr addr)
{ {
return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)]; return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
} }


@ -94,7 +94,7 @@ void tlb_destroy(CPUState *cpu);
* Flush one page from the TLB of the specified CPU, for all * Flush one page from the TLB of the specified CPU, for all
* MMU indexes. * MMU indexes.
*/ */
void tlb_flush_page(CPUState *cpu, target_ulong addr); void tlb_flush_page(CPUState *cpu, vaddr addr);
/** /**
* tlb_flush_page_all_cpus: * tlb_flush_page_all_cpus:
* @cpu: src CPU of the flush * @cpu: src CPU of the flush
@ -103,7 +103,7 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr);
* Flush one page from the TLB of the specified CPU, for all * Flush one page from the TLB of the specified CPU, for all
* MMU indexes. * MMU indexes.
*/ */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr); void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
/** /**
* tlb_flush_page_all_cpus_synced: * tlb_flush_page_all_cpus_synced:
* @cpu: src CPU of the flush * @cpu: src CPU of the flush
@ -115,7 +115,7 @@ void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
* the source vCPUs safe work is complete. This will depend on when * the source vCPUs safe work is complete. This will depend on when
* the guests translation ends the TB. * the guests translation ends the TB.
*/ */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr); void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/** /**
* tlb_flush: * tlb_flush:
* @cpu: CPU whose TLB should be flushed * @cpu: CPU whose TLB should be flushed
@ -150,7 +150,7 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu);
* Flush one page from the TLB of the specified CPU, for the specified * Flush one page from the TLB of the specified CPU, for the specified
* MMU indexes. * MMU indexes.
*/ */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap); uint16_t idxmap);
/** /**
* tlb_flush_page_by_mmuidx_all_cpus: * tlb_flush_page_by_mmuidx_all_cpus:
@ -161,7 +161,7 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
* Flush one page from the TLB of all CPUs, for the specified * Flush one page from the TLB of all CPUs, for the specified
* MMU indexes. * MMU indexes.
*/ */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr, void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
uint16_t idxmap); uint16_t idxmap);
/** /**
* tlb_flush_page_by_mmuidx_all_cpus_synced: * tlb_flush_page_by_mmuidx_all_cpus_synced:
@ -175,7 +175,7 @@ void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
* complete once the source vCPUs safe work is complete. This will * complete once the source vCPUs safe work is complete. This will
* depend on when the guests translation ends the TB. * depend on when the guests translation ends the TB.
*/ */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr, void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
uint16_t idxmap); uint16_t idxmap);
/** /**
* tlb_flush_by_mmuidx: * tlb_flush_by_mmuidx:
@ -218,14 +218,14 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
* *
* Similar to tlb_flush_page_mask, but with a bitmap of indexes. * Similar to tlb_flush_page_mask, but with a bitmap of indexes.
*/ */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits); uint16_t idxmap, unsigned bits);
/* Similarly, with broadcast and syncing. */ /* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr, void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits); uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
(CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits); (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);
/** /**
* tlb_flush_range_by_mmuidx * tlb_flush_range_by_mmuidx
@ -238,17 +238,17 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
* For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len), * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
* comparing only the low @bits worth of each virtual page. * comparing only the low @bits worth of each virtual page.
*/ */
void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
target_ulong len, uint16_t idxmap, vaddr len, uint16_t idxmap,
unsigned bits); unsigned bits);
/* Similarly, with broadcast and syncing. */ /* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr, void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
target_ulong len, uint16_t idxmap, vaddr len, uint16_t idxmap,
unsigned bits); unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
target_ulong addr, vaddr addr,
target_ulong len, vaddr len,
uint16_t idxmap, uint16_t idxmap,
unsigned bits); unsigned bits);
@ -256,7 +256,7 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
* tlb_set_page_full: * tlb_set_page_full:
* @cpu: CPU context * @cpu: CPU context
* @mmu_idx: mmu index of the tlb to modify * @mmu_idx: mmu index of the tlb to modify
* @vaddr: virtual address of the entry to add * @addr: virtual address of the entry to add
* @full: the details of the tlb entry * @full: the details of the tlb entry
* *
* Add an entry to @cpu tlb index @mmu_idx. All of the fields of * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
@ -271,13 +271,13 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
* single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
* used by tlb_flush_page. * used by tlb_flush_page.
*/ */
void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr, void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
CPUTLBEntryFull *full); CPUTLBEntryFull *full);
/** /**
* tlb_set_page_with_attrs: * tlb_set_page_with_attrs:
* @cpu: CPU to add this TLB entry for * @cpu: CPU to add this TLB entry for
* @vaddr: virtual address of page to add entry for * @addr: virtual address of page to add entry for
* @paddr: physical address of the page * @paddr: physical address of the page
* @attrs: memory transaction attributes * @attrs: memory transaction attributes
* @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits) * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
@ -285,7 +285,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
* @size: size of the page in bytes * @size: size of the page in bytes
* *
* Add an entry to this CPU's TLB (a mapping from virtual address * Add an entry to this CPU's TLB (a mapping from virtual address
* @vaddr to physical address @paddr) with the specified memory * @addr to physical address @paddr) with the specified memory
* transaction attributes. This is generally called by the target CPU * transaction attributes. This is generally called by the target CPU
* specific code after it has been called through the tlb_fill() * specific code after it has been called through the tlb_fill()
* entry point and performed a successful page table walk to find * entry point and performed a successful page table walk to find
@ -296,18 +296,18 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
* single TARGET_PAGE_SIZE region is mapped; the supplied @size is only * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
* used by tlb_flush_page. * used by tlb_flush_page.
*/ */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
hwaddr paddr, MemTxAttrs attrs, hwaddr paddr, MemTxAttrs attrs,
int prot, int mmu_idx, target_ulong size); int prot, int mmu_idx, vaddr size);
/* tlb_set_page: /* tlb_set_page:
* *
* This function is equivalent to calling tlb_set_page_with_attrs() * This function is equivalent to calling tlb_set_page_with_attrs()
* with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
* as a convenience for CPUs which don't use memory transaction attributes. * as a convenience for CPUs which don't use memory transaction attributes.
*/ */
void tlb_set_page(CPUState *cpu, target_ulong vaddr, void tlb_set_page(CPUState *cpu, vaddr addr,
hwaddr paddr, int prot, hwaddr paddr, int prot,
int mmu_idx, target_ulong size); int mmu_idx, vaddr size);
#else #else
static inline void tlb_init(CPUState *cpu) static inline void tlb_init(CPUState *cpu)
{ {
@ -315,14 +315,13 @@ static inline void tlb_init(CPUState *cpu)
static inline void tlb_destroy(CPUState *cpu) static inline void tlb_destroy(CPUState *cpu)
{ {
} }
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr) static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{ {
} }
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr) static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{ {
} }
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
target_ulong addr)
{ {
} }
static inline void tlb_flush(CPUState *cpu) static inline void tlb_flush(CPUState *cpu)
@ -335,7 +334,7 @@ static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{ {
} }
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu, static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
target_ulong addr, uint16_t idxmap) vaddr addr, uint16_t idxmap)
{ {
} }
@ -343,12 +342,12 @@ static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{ {
} }
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
target_ulong addr, vaddr addr,
uint16_t idxmap) uint16_t idxmap)
{ {
} }
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
target_ulong addr, vaddr addr,
uint16_t idxmap) uint16_t idxmap)
{ {
} }
@ -361,37 +360,37 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
{ {
} }
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
target_ulong addr, vaddr addr,
uint16_t idxmap, uint16_t idxmap,
unsigned bits) unsigned bits)
{ {
} }
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
target_ulong addr, vaddr addr,
uint16_t idxmap, uint16_t idxmap,
unsigned bits) unsigned bits)
{ {
} }
static inline void static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr, tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits) uint16_t idxmap, unsigned bits)
{ {
} }
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
target_ulong len, uint16_t idxmap, vaddr len, uint16_t idxmap,
unsigned bits) unsigned bits)
{ {
} }
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
target_ulong addr, vaddr addr,
target_ulong len, vaddr len,
uint16_t idxmap, uint16_t idxmap,
unsigned bits) unsigned bits)
{ {
} }
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
target_ulong addr, vaddr addr,
target_long len, vaddr len,
uint16_t idxmap, uint16_t idxmap,
unsigned bits) unsigned bits)
{ {
@ -414,16 +413,16 @@ static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
* Finally, return the host address for a page that is backed by RAM, * Finally, return the host address for a page that is backed by RAM,
* or NULL if the page requires I/O. * or NULL if the page requires I/O.
*/ */
void *probe_access(CPUArchState *env, target_ulong addr, int size, void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
static inline void *probe_write(CPUArchState *env, target_ulong addr, int size, static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
int mmu_idx, uintptr_t retaddr) int mmu_idx, uintptr_t retaddr)
{ {
return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
} }
static inline void *probe_read(CPUArchState *env, target_ulong addr, int size, static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
int mmu_idx, uintptr_t retaddr) int mmu_idx, uintptr_t retaddr)
{ {
return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
@ -448,7 +447,7 @@ static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
* Do handle clean pages, so exclude TLB_NOTDIRY from the returned flags. * Do handle clean pages, so exclude TLB_NOTDIRY from the returned flags.
* For simplicity, all "mmio-like" flags are folded to TLB_MMIO. * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
*/ */
int probe_access_flags(CPUArchState *env, target_ulong addr, int size, int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr); bool nonfault, void **phost, uintptr_t retaddr);
@ -461,7 +460,7 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
* and must be consumed or copied immediately, before any further * and must be consumed or copied immediately, before any further
* access or changes to TLB @mmu_idx. * access or changes to TLB @mmu_idx.
*/ */
int probe_access_full(CPUArchState *env, target_ulong addr, int size, int probe_access_full(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, bool nonfault, void **phost,
CPUTLBEntryFull **pfull, uintptr_t retaddr); CPUTLBEntryFull **pfull, uintptr_t retaddr);
@ -527,7 +526,7 @@ uint32_t curr_cflags(CPUState *cpu);
/* TranslationBlock invalidate API */ /* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY) #if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr); void tb_invalidate_phys_addr(hwaddr addr);
#else #else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs); void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif #endif
@ -582,7 +581,7 @@ struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
* *
* Note: this function can trigger an exception. * Note: this function can trigger an exception.
*/ */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp); void **hostp);
/** /**
@ -597,7 +596,7 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
* Note: this function can trigger an exception. * Note: this function can trigger an exception.
*/ */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env, static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
target_ulong addr) vaddr addr)
{ {
return get_page_addr_code_hostp(env, addr, NULL); return get_page_addr_code_hostp(env, addr, NULL);
} }
@ -663,7 +662,7 @@ static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {} static inline void mmap_unlock(void) {}
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length); void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr); void tlb_set_dirty(CPUState *cpu, vaddr addr);
MemoryRegionSection * MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,


@ -142,8 +142,8 @@ typedef struct TranslatorOps {
* - When too many instructions have been translated. * - When too many instructions have been translated.
*/ */
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
target_ulong pc, void *host_pc, vaddr pc, void *host_pc, const TranslatorOps *ops,
const TranslatorOps *ops, DisasContextBase *db); DisasContextBase *db);
/** /**
* translator_use_goto_tb * translator_use_goto_tb
@ -153,7 +153,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
* Return true if goto_tb is allowed between the current TB * Return true if goto_tb is allowed between the current TB
* and the destination PC. * and the destination PC.
*/ */
bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest); bool translator_use_goto_tb(DisasContextBase *db, vaddr dest);
/** /**
* translator_io_start * translator_io_start


@ -84,6 +84,7 @@ typedef enum MMUAccessType {
MMU_DATA_LOAD = 0, MMU_DATA_LOAD = 0,
MMU_DATA_STORE = 1, MMU_DATA_STORE = 1,
MMU_INST_FETCH = 2 MMU_INST_FETCH = 2
#define MMU_ACCESS_COUNT 3
} MMUAccessType; } MMUAccessType;
typedef struct CPUWatchpoint CPUWatchpoint; typedef struct CPUWatchpoint CPUWatchpoint;
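MMU_ACCESS_COUNT exists so the new CPUTLBEntryFull.slow_flags[] array can be sized and iterated by access type. A hypothetical helper (illustration only, not code from the tree) might reset those flags like this:

/* Hypothetical helper: clear the per-access-type slow flags before an
 * entry is refilled. */
static void toy_clear_slow_flags(CPUTLBEntryFull *full)
{
    for (int at = 0; at < MMU_ACCESS_COUNT; at++) {
        full->slow_flags[at] = 0;
    }
}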


@ -37,7 +37,7 @@ struct qemu_plugin_hwaddr {
* It would only fail if not called from an instrumented memory access * It would only fail if not called from an instrumented memory access
* which would be an abuse of the API. * which would be an abuse of the API.
*/ */
bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
bool is_store, struct qemu_plugin_hwaddr *data); bool is_store, struct qemu_plugin_hwaddr *data);
#endif /* PLUGIN_MEMORY_H */ #endif /* PLUGIN_MEMORY_H */


@ -989,13 +989,4 @@ static inline int64_t cpu_get_host_ticks(void)
} }
#endif #endif
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
return get_clock();
}
extern int64_t dev_time;
#endif
#endif #endif


@ -478,27 +478,6 @@ static inline TCGRegSet output_pref(const TCGOp *op, unsigned i)
return i < ARRAY_SIZE(op->output_pref) ? op->output_pref[i] : 0; return i < ARRAY_SIZE(op->output_pref) ? op->output_pref[i] : 0;
} }
typedef struct TCGProfile {
int64_t cpu_exec_time;
int64_t tb_count1;
int64_t tb_count;
int64_t op_count; /* total insn count */
int op_count_max; /* max insn per TB */
int temp_count_max;
int64_t temp_count;
int64_t del_op_count;
int64_t code_in_len;
int64_t code_out_len;
int64_t search_out_len;
int64_t interm_time;
int64_t code_time;
int64_t la_time;
int64_t opt_time;
int64_t restore_count;
int64_t restore_time;
int64_t table_op_count[NB_OPS];
} TCGProfile;
struct TCGContext { struct TCGContext {
uint8_t *pool_cur, *pool_end; uint8_t *pool_cur, *pool_end;
TCGPool *pool_first, *pool_current, *pool_first_large; TCGPool *pool_first, *pool_current, *pool_first_large;
@ -528,10 +507,6 @@ struct TCGContext {
tcg_insn_unit *code_buf; /* pointer for start of tb */ tcg_insn_unit *code_buf; /* pointer for start of tb */
tcg_insn_unit *code_ptr; /* pointer for running end of tb */ tcg_insn_unit *code_ptr; /* pointer for running end of tb */
#ifdef CONFIG_PROFILER
TCGProfile prof;
#endif
#ifdef CONFIG_DEBUG_TCG #ifdef CONFIG_DEBUG_TCG
int goto_tb_issue_mask; int goto_tb_issue_mask;
const TCGOpcode *vecop_list; const TCGOpcode *vecop_list;
@ -871,7 +846,6 @@ static inline TCGv_ptr tcg_temp_new_ptr(void)
return temp_tcgv_ptr(t); return temp_tcgv_ptr(t);
} }
int64_t tcg_cpu_exec_time(void);
void tcg_dump_info(GString *buf); void tcg_dump_info(GString *buf);
void tcg_dump_op_count(GString *buf); void tcg_dump_op_count(GString *buf);


@ -2121,7 +2121,6 @@ if numa.found()
dependencies: numa)) dependencies: numa))
endif endif
config_host_data.set('CONFIG_OPENGL', opengl.found()) config_host_data.set('CONFIG_OPENGL', opengl.found())
config_host_data.set('CONFIG_PROFILER', get_option('profiler'))
config_host_data.set('CONFIG_RBD', rbd.found()) config_host_data.set('CONFIG_RBD', rbd.found())
config_host_data.set('CONFIG_RDMA', rdma.found()) config_host_data.set('CONFIG_RDMA', rdma.found())
config_host_data.set('CONFIG_SAFESTACK', get_option('safe_stack')) config_host_data.set('CONFIG_SAFESTACK', get_option('safe_stack'))
@ -4087,7 +4086,6 @@ if 'objc' in all_languages
summary_info += {'QEMU_OBJCFLAGS': ' '.join(qemu_common_flags)} summary_info += {'QEMU_OBJCFLAGS': ' '.join(qemu_common_flags)}
endif endif
summary_info += {'QEMU_LDFLAGS': ' '.join(qemu_ldflags)} summary_info += {'QEMU_LDFLAGS': ' '.join(qemu_ldflags)}
summary_info += {'profiler': get_option('profiler')}
summary_info += {'link-time optimization (LTO)': get_option('b_lto')} summary_info += {'link-time optimization (LTO)': get_option('b_lto')}
summary_info += {'PIE': get_option('b_pie')} summary_info += {'PIE': get_option('b_pie')}
summary_info += {'static build': get_option('prefer_static')} summary_info += {'static build': get_option('prefer_static')}


@ -345,8 +345,6 @@ option('qom_cast_debug', type: 'boolean', value: true,
option('gprof', type: 'boolean', value: false, option('gprof', type: 'boolean', value: false,
description: 'QEMU profiling with gprof', description: 'QEMU profiling with gprof',
deprecated: true) deprecated: true)
option('profiler', type: 'boolean', value: false,
description: 'profiler support')
option('slirp_smbd', type : 'feature', value : 'auto', option('slirp_smbd', type : 'feature', value : 'auto',
description: 'use smbd (at path --smbd=*) in slirp networking') description: 'use smbd (at path --smbd=*) in slirp networking')


@ -1575,24 +1575,6 @@
'if': 'CONFIG_TCG', 'if': 'CONFIG_TCG',
'features': [ 'unstable' ] } 'features': [ 'unstable' ] }
##
# @x-query-profile:
#
# Query TCG profiling information
#
# Features:
#
# @unstable: This command is meant for debugging.
#
# Returns: profile information
#
# Since: 6.2
##
{ 'command': 'x-query-profile',
'returns': 'HumanReadableText',
'if': 'CONFIG_TCG',
'features': [ 'unstable' ] }
## ##
# @x-query-ramblock: # @x-query-ramblock:
# #


@ -39,7 +39,6 @@ meson_options_help() {
printf "%s\n" ' jemalloc/system/tcmalloc)' printf "%s\n" ' jemalloc/system/tcmalloc)'
printf "%s\n" ' --enable-module-upgrades try to load modules from alternate paths for' printf "%s\n" ' --enable-module-upgrades try to load modules from alternate paths for'
printf "%s\n" ' upgrades' printf "%s\n" ' upgrades'
printf "%s\n" ' --enable-profiler profiler support'
printf "%s\n" ' --enable-rng-none dummy RNG, avoid using /dev/(u)random and' printf "%s\n" ' --enable-rng-none dummy RNG, avoid using /dev/(u)random and'
printf "%s\n" ' getrandom()' printf "%s\n" ' getrandom()'
printf "%s\n" ' --enable-safe-stack SafeStack Stack Smash Protection (requires' printf "%s\n" ' --enable-safe-stack SafeStack Stack Smash Protection (requires'
@ -401,8 +400,6 @@ _meson_option_parse() {
--with-pkgversion=*) quote_sh "-Dpkgversion=$2" ;; --with-pkgversion=*) quote_sh "-Dpkgversion=$2" ;;
--enable-png) printf "%s" -Dpng=enabled ;; --enable-png) printf "%s" -Dpng=enabled ;;
--disable-png) printf "%s" -Dpng=disabled ;; --disable-png) printf "%s" -Dpng=disabled ;;
--enable-profiler) printf "%s" -Dprofiler=true ;;
--disable-profiler) printf "%s" -Dprofiler=false ;;
--enable-pvrdma) printf "%s" -Dpvrdma=enabled ;; --enable-pvrdma) printf "%s" -Dpvrdma=enabled ;;
--disable-pvrdma) printf "%s" -Dpvrdma=disabled ;; --disable-pvrdma) printf "%s" -Dpvrdma=disabled ;;
--enable-qcow1) printf "%s" -Dqcow1=enabled ;; --enable-qcow1) printf "%s" -Dqcow1=enabled ;;


@ -727,18 +727,9 @@ static bool main_loop_should_exit(int *status)
int qemu_main_loop(void) int qemu_main_loop(void)
{ {
int status = EXIT_SUCCESS; int status = EXIT_SUCCESS;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
while (!main_loop_should_exit(&status)) { while (!main_loop_should_exit(&status)) {
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
main_loop_wait(false); main_loop_wait(false);
#ifdef CONFIG_PROFILER
dev_time += profile_getclock() - ti;
#endif
} }
return status; return status;


@ -462,8 +462,8 @@ void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
MemTxResult response, uintptr_t retaddr); MemTxResult response, uintptr_t retaddr);
#endif #endif
static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc, static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, vaddr *pc,
target_ulong *cs_base, uint32_t *pflags) uint64_t *cs_base, uint32_t *pflags)
{ {
*pc = env->pc; *pc = env->pc;
*cs_base = 0; *cs_base = 0;


@ -3220,8 +3220,8 @@ static inline bool arm_cpu_bswap_data(CPUARMState *env)
} }
#endif #endif
-void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
-                          target_ulong *cs_base, uint32_t *flags);
+void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
+                          uint64_t *cs_base, uint32_t *flags);
enum { enum {
QEMU_PSCI_CONDUIT_DISABLED = 0, QEMU_PSCI_CONDUIT_DISABLED = 0,

@ -11945,8 +11945,8 @@ static bool mve_no_pred(CPUARMState *env)
return true; return true;
} }
-void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
-                          target_ulong *cs_base, uint32_t *pflags)
+void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
+                          uint64_t *cs_base, uint32_t *pflags)
{ {
CPUARMTBFlags flags; CPUARMTBFlags flags;

@ -190,8 +190,8 @@ enum {
TB_FLAGS_SKIP = 2, TB_FLAGS_SKIP = 2,
}; };
-static inline void cpu_get_tb_cpu_state(CPUAVRState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *pflags)
+static inline void cpu_get_tb_cpu_state(CPUAVRState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *pflags)
{ {
uint32_t flags = 0; uint32_t flags = 0;

@ -266,8 +266,8 @@ static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
#include "exec/cpu-all.h" #include "exec/cpu-all.h"
-static inline void cpu_get_tb_cpu_state(CPUCRISState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUCRISState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
*pc = env->pc; *pc = env->pc;
*cs_base = 0; *cs_base = 0;

@ -153,8 +153,8 @@ struct ArchCPU {
FIELD(TB_FLAGS, IS_TIGHT_LOOP, 0, 1) FIELD(TB_FLAGS, IS_TIGHT_LOOP, 0, 1)
-static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
uint32_t hex_flags = 0; uint32_t hex_flags = 0;
*pc = env->gpr[HEX_REG_PC]; *pc = env->gpr[HEX_REG_PC];

@ -268,9 +268,8 @@ static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
#define TB_FLAG_PRIV_SHIFT 8 #define TB_FLAG_PRIV_SHIFT 8
#define TB_FLAG_UNALIGN 0x400 #define TB_FLAG_UNALIGN 0x400
-static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc,
-                                        target_ulong *cs_base,
-                                        uint32_t *pflags)
+static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *pflags)
{ {
uint32_t flags = env->psw_n * PSW_N; uint32_t flags = env->psw_n * PSW_N;

@ -2275,8 +2275,8 @@ static inline int cpu_mmu_index_kernel(CPUX86State *env)
#include "hw/i386/apic.h" #include "hw/i386/apic.h"
#endif #endif
-static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
*cs_base = env->segs[R_CS].base; *cs_base = env->segs[R_CS].base;
*pc = *cs_base + env->eip; *pc = *cs_base + env->eip;

@ -427,10 +427,8 @@ static inline int cpu_mmu_index(CPULoongArchState *env, bool ifetch)
#define HW_FLAGS_EUEN_FPE 0x04 #define HW_FLAGS_EUEN_FPE 0x04
#define HW_FLAGS_EUEN_SXE 0x08 #define HW_FLAGS_EUEN_SXE 0x08
-static inline void cpu_get_tb_cpu_state(CPULoongArchState *env,
-                                        target_ulong *pc,
-                                        target_ulong *cs_base,
-                                        uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPULoongArchState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
*pc = env->pc; *pc = env->pc;
*cs_base = 0; *cs_base = 0;

@ -601,8 +601,8 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
#define TB_FLAGS_TRACE 16 #define TB_FLAGS_TRACE 16
#define TB_FLAGS_TRACE_BIT (1 << TB_FLAGS_TRACE) #define TB_FLAGS_TRACE_BIT (1 << TB_FLAGS_TRACE)
-static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUM68KState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
*pc = env->pc; *pc = env->pc;
*cs_base = 0; *cs_base = 0;

@ -24,6 +24,9 @@
#include "exec/cpu-defs.h" #include "exec/cpu-defs.h"
#include "qemu/cpu-float.h" #include "qemu/cpu-float.h"
/* MicroBlaze is always in-order. */
#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL
typedef struct CPUArchState CPUMBState; typedef struct CPUArchState CPUMBState;
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
#include "mmu.h" #include "mmu.h"
@ -401,8 +404,8 @@ void mb_tcg_init(void);
/* Ensure there is no overlap between the two masks. */ /* Ensure there is no overlap between the two masks. */
QEMU_BUILD_BUG_ON(MSR_TB_MASK & IFLAGS_TB_MASK); QEMU_BUILD_BUG_ON(MSR_TB_MASK & IFLAGS_TB_MASK);
-static inline void cpu_get_tb_cpu_state(CPUMBState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUMBState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
*pc = env->pc; *pc = env->pc;
*flags = (env->iflags & IFLAGS_TB_MASK) | (env->msr & MSR_TB_MASK); *flags = (env->iflags & IFLAGS_TB_MASK) | (env->msr & MSR_TB_MASK);
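
Defining TCG_GUEST_DEFAULT_MO as TCG_MO_ALL declares that MicroBlaze guests never reorder memory accesses, which lets the common load/store path suppress fences that the guest memory model already implies. The filter looks roughly like this (a sketch of tcg_gen_req_mo(); treat the exact masking as illustrative):

    static void tcg_gen_req_mo(TCGBar type)
    {
    #ifdef TCG_GUEST_DEFAULT_MO
        /* Keep only the barriers the guest memory model actually requires. */
        type &= TCG_GUEST_DEFAULT_MO;
    #endif
        /* Drop barriers the host already provides for free. */
        type &= ~TCG_TARGET_DEFAULT_MO;
        if (type) {
            tcg_gen_mb(type | TCG_BAR_SC);
        }
    }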

@ -1313,8 +1313,8 @@ void itc_reconfigure(struct MIPSITUState *tag);
/* helper.c */ /* helper.c */
target_ulong exception_resume_pc(CPUMIPSState *env); target_ulong exception_resume_pc(CPUMIPSState *env);
-static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
*pc = env->active_tc.PC; *pc = env->active_tc.PC;
*cs_base = 0; *cs_base = 0;

@ -302,8 +302,8 @@ FIELD(TBFLAGS, CRS0, 0, 1) /* Set if CRS == 0. */
FIELD(TBFLAGS, U, 1, 1) /* Overlaps CR_STATUS_U */ FIELD(TBFLAGS, U, 1, 1) /* Overlaps CR_STATUS_U */
FIELD(TBFLAGS, R0_0, 2, 1) /* Set if R0 == 0. */ FIELD(TBFLAGS, R0_0, 2, 1) /* Set if R0 == 0. */
-static inline void cpu_get_tb_cpu_state(CPUNios2State *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUNios2State *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
unsigned crs = FIELD_EX32(env->ctrl[CR_STATUS], CR_STATUS, CRS); unsigned crs = FIELD_EX32(env->ctrl[CR_STATUS], CR_STATUS, CRS);

@ -367,9 +367,8 @@ static inline void cpu_set_gpr(CPUOpenRISCState *env, int i, uint32_t val)
env->shadow_gpr[0][i] = val; env->shadow_gpr[0][i] = val;
} }
-static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
-                                        target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
*pc = env->pc; *pc = env->pc;
*cs_base = 0; *cs_base = 0;

@ -2508,11 +2508,11 @@ void cpu_write_xer(CPUPPCState *env, target_ulong xer);
#define is_book3s_arch2x(ctx) (!!((ctx)->insns_flags & PPC_SEGMENT_64B)) #define is_book3s_arch2x(ctx) (!!((ctx)->insns_flags & PPC_SEGMENT_64B))
#ifdef CONFIG_DEBUG_TCG #ifdef CONFIG_DEBUG_TCG
-void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
-                          target_ulong *cs_base, uint32_t *flags);
+void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
+                          uint64_t *cs_base, uint32_t *flags);
#else #else
-static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
*pc = env->nip; *pc = env->nip;
*cs_base = 0; *cs_base = 0;

@ -218,8 +218,8 @@ void hreg_update_pmu_hflags(CPUPPCState *env)
} }
#ifdef CONFIG_DEBUG_TCG #ifdef CONFIG_DEBUG_TCG
-void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
-                          target_ulong *cs_base, uint32_t *flags)
+void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
+                          uint64_t *cs_base, uint32_t *flags)
{ {
uint32_t hflags_current = env->hflags; uint32_t hflags_current = env->hflags;
uint32_t hflags_rebuilt; uint32_t hflags_rebuilt;
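
The hunk stops at the local declarations; the point of the CONFIG_DEBUG_TCG variant is to recompute the hflags and abort on any divergence from the cached value. A sketch of the overall shape (the recomputation helper name is assumed and not part of this diff):

    void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
                              uint64_t *cs_base, uint32_t *flags)
    {
        uint32_t hflags_current = env->hflags;
        uint32_t hflags_rebuilt = hreg_compute_hflags_value(env); /* assumed name */

        /* Catch any place that forgot to refresh env->hflags. */
        if (unlikely(hflags_current != hflags_rebuilt)) {
            cpu_abort(env_cpu(env), "hflags mismatch %08x != %08x\n",
                      hflags_current, hflags_rebuilt);
        }
        *pc = env->nip;
        *cs_base = 0;
        *flags = hflags_current;
    }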

@ -587,8 +587,8 @@ static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
return cpu->cfg.vlen >> (sew + 3 - lmul); return cpu->cfg.vlen >> (sew + 3 - lmul);
} }
-void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
-                          target_ulong *cs_base, uint32_t *pflags);
+void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
+                          uint64_t *cs_base, uint32_t *pflags);
void riscv_cpu_update_mask(CPURISCVState *env); void riscv_cpu_update_mask(CPURISCVState *env);

@ -61,8 +61,8 @@ int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
#endif #endif
} }
-void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
-                          target_ulong *cs_base, uint32_t *pflags)
+void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
+                          uint64_t *cs_base, uint32_t *pflags)
{ {
CPUState *cs = env_cpu(env); CPUState *cs = env_cpu(env);
RISCVCPU *cpu = RISCV_CPU(cs); RISCVCPU *cpu = RISCV_CPU(cs);

@ -143,8 +143,8 @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
#define RX_CPU_IRQ 0 #define RX_CPU_IRQ 0
#define RX_CPU_FIR 1 #define RX_CPU_FIR 1
-static inline void cpu_get_tb_cpu_state(CPURXState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPURXState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
*pc = env->pc; *pc = env->pc;
*cs_base = 0; *cs_base = 0;

@ -378,8 +378,8 @@ static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch)
#endif #endif
} }
-static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
if (env->psw.addr & 1) { if (env->psw.addr & 1) {
/* /*

@ -368,8 +368,8 @@ static inline void cpu_write_sr(CPUSH4State *env, target_ulong sr)
env->sr = sr & ~((1u << SR_M) | (1u << SR_Q) | (1u << SR_T)); env->sr = sr & ~((1u << SR_M) | (1u << SR_Q) | (1u << SR_T));
} }
-static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUSH4State *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
*pc = env->pc; *pc = env->pc;
/* For a gUSA region, notice the end of the region. */ /* For a gUSA region, notice the end of the region. */

@ -762,8 +762,8 @@ trap_state* cpu_tsptr(CPUSPARCState* env);
#define TB_FLAG_HYPER (1 << 7) #define TB_FLAG_HYPER (1 << 7)
#define TB_FLAG_ASI_SHIFT 24 #define TB_FLAG_ASI_SHIFT 24
-static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *pflags)
+static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *pflags)
{ {
uint32_t flags; uint32_t flags;
*pc = env->pc; *pc = env->pc;

@ -384,8 +384,8 @@ FIELD(TB_FLAGS, PRIV, 0, 2)
void cpu_state_reset(CPUTriCoreState *s); void cpu_state_reset(CPUTriCoreState *s);
void tricore_tcg_init(void); void tricore_tcg_init(void);
-static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
uint32_t new_flags = 0; uint32_t new_flags = 0;
*pc = env->PC; *pc = env->PC;

@ -727,8 +727,8 @@ static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
#include "exec/cpu-all.h" #include "exec/cpu-all.h"
-static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
{ {
*pc = env->pc; *pc = env->pc;
*cs_base = 0; *cs_base = 0;

@ -2774,7 +2774,7 @@ void tcg_gen_gvec_andcs(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_i64 tmp = tcg_temp_ebb_new_i64(); TCGv_i64 tmp = tcg_temp_ebb_new_i64();
tcg_gen_dup_i64(vece, tmp, c); tcg_gen_dup_i64(vece, tmp, c);
-    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g);
+    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &g);
tcg_temp_free_i64(tmp); tcg_temp_free_i64(tmp);
} }
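
The one-argument fix matters for element sizes below 64 bits: tcg_gen_dup_i64() replicates the scalar across each element into tmp, and passing the original c would skip that replication. Annotated form of the corrected sequence:

    TCGv_i64 tmp = tcg_temp_ebb_new_i64();

    /* Replicate the scalar across the element size, e.g. for vece == MO_8
     * a value of 0xff becomes 0xffffffffffffffff. */
    tcg_gen_dup_i64(vece, tmp, c);
    /* The expansion must see the replicated value, not the raw scalar. */
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &g);
    tcg_temp_free_i64(tmp);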

@ -39,7 +39,7 @@ static void check_max_alignment(unsigned a_bits)
* The requested alignment cannot overlap the TLB flags. * The requested alignment cannot overlap the TLB flags.
* FIXME: Must keep the count up-to-date with "exec/cpu-all.h". * FIXME: Must keep the count up-to-date with "exec/cpu-all.h".
*/ */
-    tcg_debug_assert(a_bits + 6 <= tcg_ctx->page_bits);
+    tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
#endif #endif
} }
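
The budget shrinks from 6 to 5 bits because some TLB flags no longer live in the low bits of the address word, so an alignment request has one more bit of headroom before it could overlap the remaining fast-path flags. An illustrative check (page size and MemOp chosen arbitrarily):

    /* MO_ALIGN_16 requests 16-byte alignment, i.e. a_bits == 4, so with
     * 5 flag bits still reserved the assertion holds for any page size
     * of at least 512 bytes. */
    MemOp mop = MO_128 | MO_ALIGN_16 | MO_LE;
    assert(get_alignment_bits(mop) + 5 <= TARGET_PAGE_BITS);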

@ -102,7 +102,19 @@ void tcg_gen_br(TCGLabel *l)
void tcg_gen_mb(TCGBar mb_type) void tcg_gen_mb(TCGBar mb_type)
{ {
-    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {
+#ifdef CONFIG_USER_ONLY
+    bool parallel = tcg_ctx->gen_tb->cflags & CF_PARALLEL;
+#else
+    /*
+     * It is tempting to elide the barrier in a uniprocessor context.
+     * However, even with a single cpu we have i/o threads running in
+     * parallel, and lack of memory order can result in e.g. virtio
+     * queue entries being read incorrectly.
+     */
+    bool parallel = true;
+#endif
+    if (parallel) {
tcg_gen_op1(INDEX_op_mb, mb_type); tcg_gen_op1(INDEX_op_mb, mb_type);
} }
} }
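
The CF_PARALLEL shortcut is now confined to user mode: in system mode a fence requested by the front end always reaches the backend, because I/O and device threads run concurrently with the vCPU even when the guest has a single CPU. A minimal front-end usage sketch (the function name is illustrative):

    /* Emit a full sequentially-consistent fence from a translator,
     * e.g. when decoding a guest memory-barrier instruction. */
    static void gen_full_fence(void)
    {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    }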

tcg/tcg.c

@ -3033,10 +3033,6 @@ void tcg_op_remove(TCGContext *s, TCGOp *op)
QTAILQ_REMOVE(&s->ops, op, link); QTAILQ_REMOVE(&s->ops, op, link);
QTAILQ_INSERT_TAIL(&s->free_ops, op, link); QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
s->nb_ops--; s->nb_ops--;
#ifdef CONFIG_PROFILER
qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
} }
void tcg_remove_ops_after(TCGOp *op) void tcg_remove_ops_after(TCGOp *op)
@ -5906,143 +5902,16 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg); tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
} }
#ifdef CONFIG_PROFILER
/* avoid copy/paste errors */
#define PROF_ADD(to, from, field) \
do { \
(to)->field += qatomic_read(&((from)->field)); \
} while (0)
#define PROF_MAX(to, from, field) \
do { \
typeof((from)->field) val__ = qatomic_read(&((from)->field)); \
if (val__ > (to)->field) { \
(to)->field = val__; \
} \
} while (0)
/* Pass in a zero'ed @prof */
static inline
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
{
unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
unsigned int i;
for (i = 0; i < n_ctxs; i++) {
TCGContext *s = qatomic_read(&tcg_ctxs[i]);
const TCGProfile *orig = &s->prof;
if (counters) {
PROF_ADD(prof, orig, cpu_exec_time);
PROF_ADD(prof, orig, tb_count1);
PROF_ADD(prof, orig, tb_count);
PROF_ADD(prof, orig, op_count);
PROF_MAX(prof, orig, op_count_max);
PROF_ADD(prof, orig, temp_count);
PROF_MAX(prof, orig, temp_count_max);
PROF_ADD(prof, orig, del_op_count);
PROF_ADD(prof, orig, code_in_len);
PROF_ADD(prof, orig, code_out_len);
PROF_ADD(prof, orig, search_out_len);
PROF_ADD(prof, orig, interm_time);
PROF_ADD(prof, orig, code_time);
PROF_ADD(prof, orig, la_time);
PROF_ADD(prof, orig, opt_time);
PROF_ADD(prof, orig, restore_count);
PROF_ADD(prof, orig, restore_time);
}
if (table) {
int i;
for (i = 0; i < NB_OPS; i++) {
PROF_ADD(prof, orig, table_op_count[i]);
}
}
}
}
#undef PROF_ADD
#undef PROF_MAX
static void tcg_profile_snapshot_counters(TCGProfile *prof)
{
tcg_profile_snapshot(prof, true, false);
}
static void tcg_profile_snapshot_table(TCGProfile *prof)
{
tcg_profile_snapshot(prof, false, true);
}
void tcg_dump_op_count(GString *buf)
{
TCGProfile prof = {};
int i;
tcg_profile_snapshot_table(&prof);
for (i = 0; i < NB_OPS; i++) {
g_string_append_printf(buf, "%s %" PRId64 "\n", tcg_op_defs[i].name,
prof.table_op_count[i]);
}
}
int64_t tcg_cpu_exec_time(void)
{
unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
unsigned int i;
int64_t ret = 0;
for (i = 0; i < n_ctxs; i++) {
const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
const TCGProfile *prof = &s->prof;
ret += qatomic_read(&prof->cpu_exec_time);
}
return ret;
}
#else
void tcg_dump_op_count(GString *buf) void tcg_dump_op_count(GString *buf)
{ {
g_string_append_printf(buf, "[TCG profiler not compiled]\n"); g_string_append_printf(buf, "[TCG profiler not compiled]\n");
} }
int64_t tcg_cpu_exec_time(void)
{
error_report("%s: TCG profiler not compiled", __func__);
exit(EXIT_FAILURE);
}
#endif
int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start) int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
{ {
#ifdef CONFIG_PROFILER
TCGProfile *prof = &s->prof;
#endif
int i, start_words, num_insns; int i, start_words, num_insns;
TCGOp *op; TCGOp *op;
#ifdef CONFIG_PROFILER
{
int n = 0;
QTAILQ_FOREACH(op, &s->ops, link) {
n++;
}
qatomic_set(&prof->op_count, prof->op_count + n);
if (n > prof->op_count_max) {
qatomic_set(&prof->op_count_max, n);
}
n = s->nb_temps;
qatomic_set(&prof->temp_count, prof->temp_count + n);
if (n > prof->temp_count_max) {
qatomic_set(&prof->temp_count_max, n);
}
}
#endif
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP) if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
&& qemu_log_in_addr_range(pc_start))) { && qemu_log_in_addr_range(pc_start))) {
FILE *logfile = qemu_log_trylock(); FILE *logfile = qemu_log_trylock();
@ -6071,17 +5940,8 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
} }
#endif #endif
#ifdef CONFIG_PROFILER
qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif
tcg_optimize(s); tcg_optimize(s);
#ifdef CONFIG_PROFILER
qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif
reachable_code_pass(s); reachable_code_pass(s);
liveness_pass_0(s); liveness_pass_0(s);
liveness_pass_1(s); liveness_pass_1(s);
@ -6105,10 +5965,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
} }
} }
#ifdef CONFIG_PROFILER
qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT) if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
&& qemu_log_in_addr_range(pc_start))) { && qemu_log_in_addr_range(pc_start))) {
FILE *logfile = qemu_log_trylock(); FILE *logfile = qemu_log_trylock();
@ -6151,10 +6007,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
QTAILQ_FOREACH(op, &s->ops, link) { QTAILQ_FOREACH(op, &s->ops, link) {
TCGOpcode opc = op->opc; TCGOpcode opc = op->opc;
#ifdef CONFIG_PROFILER
qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif
switch (opc) { switch (opc) {
case INDEX_op_mov_i32: case INDEX_op_mov_i32:
case INDEX_op_mov_i64: case INDEX_op_mov_i64:
@ -6249,76 +6101,10 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
return tcg_current_code_size(s); return tcg_current_code_size(s);
} }
#ifdef CONFIG_PROFILER
void tcg_dump_info(GString *buf)
{
TCGProfile prof = {};
const TCGProfile *s;
int64_t tb_count;
int64_t tb_div_count;
int64_t tot;
tcg_profile_snapshot_counters(&prof);
s = &prof;
tb_count = s->tb_count;
tb_div_count = tb_count ? tb_count : 1;
tot = s->interm_time + s->code_time;
g_string_append_printf(buf, "JIT cycles %" PRId64
" (%0.3f s at 2.4 GHz)\n",
tot, tot / 2.4e9);
g_string_append_printf(buf, "translated TBs %" PRId64
" (aborted=%" PRId64 " %0.1f%%)\n",
tb_count, s->tb_count1 - tb_count,
(double)(s->tb_count1 - s->tb_count)
/ (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
g_string_append_printf(buf, "avg ops/TB %0.1f max=%d\n",
(double)s->op_count / tb_div_count, s->op_count_max);
g_string_append_printf(buf, "deleted ops/TB %0.2f\n",
(double)s->del_op_count / tb_div_count);
g_string_append_printf(buf, "avg temps/TB %0.2f max=%d\n",
(double)s->temp_count / tb_div_count,
s->temp_count_max);
g_string_append_printf(buf, "avg host code/TB %0.1f\n",
(double)s->code_out_len / tb_div_count);
g_string_append_printf(buf, "avg search data/TB %0.1f\n",
(double)s->search_out_len / tb_div_count);
g_string_append_printf(buf, "cycles/op %0.1f\n",
s->op_count ? (double)tot / s->op_count : 0);
g_string_append_printf(buf, "cycles/in byte %0.1f\n",
s->code_in_len ? (double)tot / s->code_in_len : 0);
g_string_append_printf(buf, "cycles/out byte %0.1f\n",
s->code_out_len ? (double)tot / s->code_out_len : 0);
g_string_append_printf(buf, "cycles/search byte %0.1f\n",
s->search_out_len ?
(double)tot / s->search_out_len : 0);
if (tot == 0) {
tot = 1;
}
g_string_append_printf(buf, " gen_interm time %0.1f%%\n",
(double)s->interm_time / tot * 100.0);
g_string_append_printf(buf, " gen_code time %0.1f%%\n",
(double)s->code_time / tot * 100.0);
g_string_append_printf(buf, "optim./code time %0.1f%%\n",
(double)s->opt_time / (s->code_time ?
s->code_time : 1)
* 100.0);
g_string_append_printf(buf, "liveness/code time %0.1f%%\n",
(double)s->la_time / (s->code_time ?
s->code_time : 1) * 100.0);
g_string_append_printf(buf, "cpu_restore count %" PRId64 "\n",
s->restore_count);
g_string_append_printf(buf, " avg cycles %0.1f\n",
s->restore_count ?
(double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(GString *buf) void tcg_dump_info(GString *buf)
{ {
g_string_append_printf(buf, "[TCG profiler not compiled]\n"); g_string_append_printf(buf, "[TCG profiler not compiled]\n");
} }
#endif
#ifdef ELF_HOST_MACHINE #ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things: /* In order to use this feature, the backend needs to do three things:
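
With the profiler gone, the two dump entry points survive only as the former stub branches, so the monitor output that used them now always reports the profiler as unavailable; reconstructed from the context lines above:

    void tcg_dump_op_count(GString *buf)
    {
        g_string_append_printf(buf, "[TCG profiler not compiled]\n");
    }

    void tcg_dump_info(GString *buf)
    {
        g_string_append_printf(buf, "[TCG profiler not compiled]\n");
    }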

@ -19,7 +19,6 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
#define MAX_CPUS 8 /* lets not go nuts */ #define MAX_CPUS 8 /* lets not go nuts */
typedef struct { typedef struct {
uint64_t last_pc;
uint64_t insn_count; uint64_t insn_count;
} InstructionCount; } InstructionCount;
@ -51,13 +50,7 @@ static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata)
{ {
unsigned int i = cpu_index % MAX_CPUS; unsigned int i = cpu_index % MAX_CPUS;
InstructionCount *c = &counts[i]; InstructionCount *c = &counts[i];
uint64_t this_pc = GPOINTER_TO_UINT(udata);
if (this_pc == c->last_pc) {
g_autofree gchar *out = g_strdup_printf("detected repeat execution @ 0x%"
PRIx64 "\n", this_pc);
qemu_plugin_outs(out);
}
c->last_pc = this_pc;
c->insn_count++; c->insn_count++;
} }
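
With the duplicate-execution heuristic removed, the plugin's per-instruction callback reduces to a bare counter; reconstructed from the hunk above:

    static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata)
    {
        unsigned int i = cpu_index % MAX_CPUS;
        InstructionCount *c = &counts[i];

        c->insn_count++;
    }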

@ -46,9 +46,6 @@ static int query_error_class(const char *cmd)
{ "query-balloon", ERROR_CLASS_DEVICE_NOT_ACTIVE }, { "query-balloon", ERROR_CLASS_DEVICE_NOT_ACTIVE },
{ "query-hotpluggable-cpus", ERROR_CLASS_GENERIC_ERROR }, { "query-hotpluggable-cpus", ERROR_CLASS_GENERIC_ERROR },
{ "query-vm-generation-id", ERROR_CLASS_GENERIC_ERROR }, { "query-vm-generation-id", ERROR_CLASS_GENERIC_ERROR },
#ifndef CONFIG_PROFILER
{ "x-query-profile", ERROR_CLASS_GENERIC_ERROR },
#endif
/* Only valid with a USB bus added */ /* Only valid with a USB bus added */
{ "x-query-usb", ERROR_CLASS_GENERIC_ERROR }, { "x-query-usb", ERROR_CLASS_GENERIC_ERROR },
/* Only valid with accel=tcg */ /* Only valid with accel=tcg */

@ -33,14 +33,5 @@ EXTRA_RUNS+=$(MULTIARCH_RUNS)
memory: CFLAGS+=-DCHECK_UNALIGNED=1 memory: CFLAGS+=-DCHECK_UNALIGNED=1
# non-inline runs will trigger the duplicate instruction heuristics in libinsn.so
run-plugin-%-with-libinsn.so:
$(call run-test, $@, \
$(QEMU) -monitor none -display none \
-chardev file$(COMMA)path=$@.out$(COMMA)id=output \
-plugin ../../plugin/libinsn.so$(COMMA)inline=on \
-d plugin -D $*-with-libinsn.so.pout \
$(QEMU_OPTS) $*)
# Running # Running
QEMU_OPTS+=-device isa-debugcon,chardev=output -device isa-debug-exit,iobase=0xf4,iosize=0x4 -kernel QEMU_OPTS+=-device isa-debugcon,chardev=output -device isa-debug-exit,iobase=0xf4,iosize=0x4 -kernel

@ -63,12 +63,6 @@ else
SKIP_I386_TESTS+=test-i386-fprem SKIP_I386_TESTS+=test-i386-fprem
endif endif
# non-inline runs will trigger the duplicate instruction heuristics in libinsn.so
run-plugin-%-with-libinsn.so:
$(call run-test, $@, $(QEMU) $(QEMU_OPTS) \
-plugin ../../plugin/libinsn.so$(COMMA)inline=on \
-d plugin -D $*-with-libinsn.so.pout $*)
# Update TESTS # Update TESTS
I386_TESTS:=$(filter-out $(SKIP_I386_TESTS), $(ALL_X86_TESTS)) I386_TESTS:=$(filter-out $(SKIP_I386_TESTS), $(ALL_X86_TESTS))
TESTS=$(MULTIARCH_TESTS) $(I386_TESTS) TESTS=$(MULTIARCH_TESTS) $(I386_TESTS)

@ -33,14 +33,5 @@ EXTRA_RUNS+=$(MULTIARCH_RUNS)
memory: CFLAGS+=-DCHECK_UNALIGNED=1 memory: CFLAGS+=-DCHECK_UNALIGNED=1
# non-inline runs will trigger the duplicate instruction heuristics in libinsn.so
run-plugin-%-with-libinsn.so:
$(call run-test, $@, \
$(QEMU) -monitor none -display none \
-chardev file$(COMMA)path=$@.out$(COMMA)id=output \
-plugin ../../plugin/libinsn.so$(COMMA)inline=on \
-d plugin -D $*-with-libinsn.so.pout \
$(QEMU_OPTS) $*)
# Running # Running
QEMU_OPTS+=-device isa-debugcon,chardev=output -device isa-debug-exit,iobase=0xf4,iosize=0x4 -kernel QEMU_OPTS+=-device isa-debugcon,chardev=output -device isa-debug-exit,iobase=0xf4,iosize=0x4 -kernel