Merge remote-tracking branch 'remotes/rth-gitlab/tags/pull-tcg-20210721' into staging

Atomic build fixes for clang-12
Breakpoint reorg

# gpg: Signature made Wed 21 Jul 2021 20:57:50 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth-gitlab/tags/pull-tcg-20210721: (27 commits)
  accel/tcg: Record singlestep_enabled in tb->cflags
  accel/tcg: Hoist tb_cflags to a local in translator_loop
  accel/tcg: Remove TranslatorOps.breakpoint_check
  accel/tcg: Move breakpoint recognition outside translation
  accel/tcg: Merge tb_find into its only caller
  target/avr: Implement gdb_adjust_breakpoint
  hw/core: Introduce CPUClass.gdb_adjust_breakpoint
  target/i386: Implement debug_check_breakpoint
  target/arm: Implement debug_check_breakpoint
  hw/core: Introduce TCGCPUOps.debug_check_breakpoint
  accel/tcg: Use CF_NO_GOTO_{TB, PTR} in cpu_exec_step_atomic
  accel/tcg: Handle -singlestep in curr_cflags
  accel/tcg: Drop CF_NO_GOTO_PTR from -d nochain
  accel/tcg: Add CF_NO_GOTO_TB and CF_NO_GOTO_PTR
  target/alpha: Drop goto_tb path in gen_call_pal
  accel/tcg: Move curr_cflags into cpu-exec.c
  accel/tcg: Reduce CF_COUNT_MASK to match TCG_MAX_INSNS
  accel/tcg: Push trace info building into atomic_common.c.inc
  trace: Fold mem-internal.h into mem.h
  accel/tcg: Expand ATOMIC_MMU_LOOKUP_*
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell <peter.maydell@linaro.org>  2021-07-22 14:00:48 +01:00
commit beb1913858
59 changed files with 577 additions and 1216 deletions

accel/tcg/atomic_common.c.inc

@@ -13,42 +13,125 @@
  * See the COPYING file in the top-level directory.
  */
 
-static inline
-void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr, uint16_t info)
+static uint16_t atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
+                                     TCGMemOpIdx oi)
 {
     CPUState *cpu = env_cpu(env);
+    uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
 
     trace_guest_mem_before_exec(cpu, addr, info);
     trace_guest_mem_before_exec(cpu, addr, info | TRACE_MEM_ST);
+
+    return info;
 }
 
-static inline void
-atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, uint16_t info)
+static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
+                                  uint16_t info)
 {
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST);
 }
 
-static inline
-void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr, uint16_t info)
+#if HAVE_ATOMIC128
+static uint16_t atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
+                                    TCGMemOpIdx oi)
 {
+    uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
+
     trace_guest_mem_before_exec(env_cpu(env), addr, info);
+
+    return info;
 }
 
-static inline
-void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, uint16_t info)
+static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
+                                 uint16_t info)
 {
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
 }
 
-static inline
-void atomic_trace_st_pre(CPUArchState *env, target_ulong addr, uint16_t info)
+static uint16_t atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
+                                    TCGMemOpIdx oi)
 {
+    uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), true);
+
     trace_guest_mem_before_exec(env_cpu(env), addr, info);
+
+    return info;
 }
 
-static inline
-void atomic_trace_st_post(CPUArchState *env, target_ulong addr, uint16_t info)
+static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
+                                 uint16_t info)
 {
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
 }
+#endif
/*
* Atomic helpers callable from TCG.
* These have a common interface and all defer to cpu_atomic_*
* using the host return address from GETPC().
*/
#define CMPXCHG_HELPER(OP, TYPE) \
TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr, \
TYPE oldv, TYPE newv, uint32_t oi) \
{ return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }
CMPXCHG_HELPER(cmpxchgb, uint32_t)
CMPXCHG_HELPER(cmpxchgw_be, uint32_t)
CMPXCHG_HELPER(cmpxchgw_le, uint32_t)
CMPXCHG_HELPER(cmpxchgl_be, uint32_t)
CMPXCHG_HELPER(cmpxchgl_le, uint32_t)
#ifdef CONFIG_ATOMIC64
CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
#endif
#undef CMPXCHG_HELPER
#define ATOMIC_HELPER(OP, TYPE) \
TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \
TYPE val, uint32_t oi) \
{ return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }
#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPERS(OP) \
ATOMIC_HELPER(glue(OP,b), uint32_t) \
ATOMIC_HELPER(glue(OP,w_be), uint32_t) \
ATOMIC_HELPER(glue(OP,w_le), uint32_t) \
ATOMIC_HELPER(glue(OP,l_be), uint32_t) \
ATOMIC_HELPER(glue(OP,l_le), uint32_t) \
ATOMIC_HELPER(glue(OP,q_be), uint64_t) \
ATOMIC_HELPER(glue(OP,q_le), uint64_t)
#else
#define GEN_ATOMIC_HELPERS(OP) \
ATOMIC_HELPER(glue(OP,b), uint32_t) \
ATOMIC_HELPER(glue(OP,w_be), uint32_t) \
ATOMIC_HELPER(glue(OP,w_le), uint32_t) \
ATOMIC_HELPER(glue(OP,l_be), uint32_t) \
ATOMIC_HELPER(glue(OP,l_le), uint32_t)
#endif
GEN_ATOMIC_HELPERS(fetch_add)
GEN_ATOMIC_HELPERS(fetch_and)
GEN_ATOMIC_HELPERS(fetch_or)
GEN_ATOMIC_HELPERS(fetch_xor)
GEN_ATOMIC_HELPERS(fetch_smin)
GEN_ATOMIC_HELPERS(fetch_umin)
GEN_ATOMIC_HELPERS(fetch_smax)
GEN_ATOMIC_HELPERS(fetch_umax)
GEN_ATOMIC_HELPERS(add_fetch)
GEN_ATOMIC_HELPERS(and_fetch)
GEN_ATOMIC_HELPERS(or_fetch)
GEN_ATOMIC_HELPERS(xor_fetch)
GEN_ATOMIC_HELPERS(smin_fetch)
GEN_ATOMIC_HELPERS(umin_fetch)
GEN_ATOMIC_HELPERS(smax_fetch)
GEN_ATOMIC_HELPERS(umax_fetch)
GEN_ATOMIC_HELPERS(xchg)
#undef ATOMIC_HELPER
#undef GEN_ATOMIC_HELPERS
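
For illustration, expanding one instance of the new helper-generation macro by hand -- e.g. CMPXCHG_HELPER(cmpxchgl_le, uint32_t) -- yields roughly the following thin wrapper, assuming HELPER() applies its usual helper_ name prefix:

```c
/* Hand expansion of CMPXCHG_HELPER(cmpxchgl_le, uint32_t); illustrative only. */
uint32_t helper_atomic_cmpxchgl_le(CPUArchState *env, target_ulong addr,
                                   uint32_t oldv, uint32_t newv, uint32_t oi)
{
    /* Defer to the cpu_atomic_* entry point, passing the host return
       address so a fault inside the lookup can be attributed to the
       calling translation block. */
    return cpu_atomic_cmpxchgl_le_mmu(env, addr, oldv, newv, oi, GETPC());
}
```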

accel/tcg/atomic_template.h

@@ -28,8 +28,8 @@
 # define SHIFT      4
 #elif DATA_SIZE == 8
 # define SUFFIX     q
-# define DATA_TYPE  uint64_t
-# define SDATA_TYPE int64_t
+# define DATA_TYPE  aligned_uint64_t
+# define SDATA_TYPE aligned_int64_t
 # define BSWAP      bswap64
 # define SHIFT      3
 #elif DATA_SIZE == 4
@@ -71,15 +71,14 @@
 #endif
 
 ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
-                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
+                              ABI_TYPE cmpv, ABI_TYPE newv,
+                              TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_READ | PAGE_WRITE, retaddr);
     DATA_TYPE ret;
-    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
-                                         ATOMIC_MMU_IDX);
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
 
-    atomic_trace_rmw_pre(env, addr, info);
 #if DATA_SIZE == 16
     ret = atomic16_cmpxchg(haddr, cmpv, newv);
 #else
@@ -92,45 +91,41 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 
 #if DATA_SIZE >= 16
 #if HAVE_ATOMIC128
-ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
+ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
+                         TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
-    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
-                                         ATOMIC_MMU_IDX);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_READ, retaddr);
+    DATA_TYPE val;
+    uint16_t info = atomic_trace_ld_pre(env, addr, oi);
 
-    atomic_trace_ld_pre(env, addr, info);
     val = atomic16_read(haddr);
     ATOMIC_MMU_CLEANUP;
     atomic_trace_ld_post(env, addr, info);
     return val;
 }
 
-void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
-                     ABI_TYPE val EXTRA_ARGS)
+void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+                     TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
-    uint16_t info = trace_mem_build_info(SHIFT, false, 0, true,
-                                         ATOMIC_MMU_IDX);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_WRITE, retaddr);
+    uint16_t info = atomic_trace_st_pre(env, addr, oi);
 
-    atomic_trace_st_pre(env, addr, info);
     atomic16_set(haddr, val);
     ATOMIC_MMU_CLEANUP;
     atomic_trace_st_post(env, addr, info);
 }
 #endif
 #else
-ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
-                           ABI_TYPE val EXTRA_ARGS)
+ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+                           TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_READ | PAGE_WRITE, retaddr);
     DATA_TYPE ret;
-    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
-                                         ATOMIC_MMU_IDX);
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
 
-    atomic_trace_rmw_pre(env, addr, info);
     ret = qatomic_xchg__nocheck(haddr, val);
     ATOMIC_MMU_CLEANUP;
     atomic_trace_rmw_post(env, addr, info);
@@ -139,14 +134,12 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
 #define GEN_ATOMIC_HELPER(X) \
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
-                        ABI_TYPE val EXTRA_ARGS) \
+                        ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
 { \
-    ATOMIC_MMU_DECLS; \
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+                                         PAGE_READ | PAGE_WRITE, retaddr); \
     DATA_TYPE ret; \
-    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
-                                         ATOMIC_MMU_IDX); \
-    atomic_trace_rmw_pre(env, addr, info); \
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
     ret = qatomic_##X(haddr, val); \
     ATOMIC_MMU_CLEANUP; \
     atomic_trace_rmw_post(env, addr, info); \
@@ -164,7 +157,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
 #undef GEN_ATOMIC_HELPER
 
-/* These helpers are, as a whole, full barriers.  Within the helper,
+/*
+ * These helpers are, as a whole, full barriers.  Within the helper,
  * the leading barrier is explicit and the trailing barrier is within
  * cmpxchg primitive.
  *
@@ -173,14 +167,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
  */
 #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
-                        ABI_TYPE xval EXTRA_ARGS) \
+                        ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
 { \
-    ATOMIC_MMU_DECLS; \
-    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \
+    XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+                                          PAGE_READ | PAGE_WRITE, retaddr); \
     XDATA_TYPE cmp, old, new, val = xval; \
-    uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
-                                         ATOMIC_MMU_IDX); \
-    atomic_trace_rmw_pre(env, addr, info); \
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
     smp_mb(); \
     cmp = qatomic_read__nocheck(haddr); \
     do { \
@@ -218,15 +210,14 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
 #endif
 
 ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
-                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
+                              ABI_TYPE cmpv, ABI_TYPE newv,
+                              TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_READ | PAGE_WRITE, retaddr);
     DATA_TYPE ret;
-    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
-                                         ATOMIC_MMU_IDX);
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
 
-    atomic_trace_rmw_pre(env, addr, info);
 #if DATA_SIZE == 16
     ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
 #else
@@ -239,30 +230,28 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 
 #if DATA_SIZE >= 16
 #if HAVE_ATOMIC128
-ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
+ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
+                         TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
-    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
-                                         ATOMIC_MMU_IDX);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_READ, retaddr);
+    DATA_TYPE val;
+    uint16_t info = atomic_trace_ld_pre(env, addr, oi);
 
-    atomic_trace_ld_pre(env, addr, info);
     val = atomic16_read(haddr);
     ATOMIC_MMU_CLEANUP;
     atomic_trace_ld_post(env, addr, info);
     return BSWAP(val);
 }
 
-void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
-                     ABI_TYPE val EXTRA_ARGS)
+void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+                     TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
-    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, true,
-                                         ATOMIC_MMU_IDX);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_WRITE, retaddr);
+    uint16_t info = atomic_trace_st_pre(env, addr, oi);
 
     val = BSWAP(val);
-    atomic_trace_st_pre(env, addr, info);
     val = BSWAP(val);
     atomic16_set(haddr, val);
     ATOMIC_MMU_CLEANUP;
@@ -270,16 +259,14 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
 }
 #endif
 #else
-ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
-                           ABI_TYPE val EXTRA_ARGS)
+ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+                           TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    ATOMIC_MMU_DECLS;
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+                                         PAGE_READ | PAGE_WRITE, retaddr);
     ABI_TYPE ret;
-    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
-                                         ATOMIC_MMU_IDX);
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);
 
-    atomic_trace_rmw_pre(env, addr, info);
     ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
     ATOMIC_MMU_CLEANUP;
     atomic_trace_rmw_post(env, addr, info);
@@ -288,14 +275,12 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
 #define GEN_ATOMIC_HELPER(X) \
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
-                        ABI_TYPE val EXTRA_ARGS) \
+                        ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
 { \
-    ATOMIC_MMU_DECLS; \
-    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \
+    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+                                         PAGE_READ | PAGE_WRITE, retaddr); \
     DATA_TYPE ret; \
-    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
-                                         false, ATOMIC_MMU_IDX); \
-    atomic_trace_rmw_pre(env, addr, info); \
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
     ret = qatomic_##X(haddr, BSWAP(val)); \
     ATOMIC_MMU_CLEANUP; \
     atomic_trace_rmw_post(env, addr, info); \
@@ -320,14 +305,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
  */
 #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
-                        ABI_TYPE xval EXTRA_ARGS) \
+                        ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
 { \
-    ATOMIC_MMU_DECLS; \
-    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \
+    XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+                                          PAGE_READ | PAGE_WRITE, retaddr); \
     XDATA_TYPE ldo, ldn, old, new, val = xval; \
-    uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
-                                         false, ATOMIC_MMU_IDX); \
-    atomic_trace_rmw_pre(env, addr, info); \
+    uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
     smp_mb(); \
     ldn = qatomic_read__nocheck(haddr); \
     do { \

accel/tcg/cpu-exec.c

@@ -145,6 +145,28 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
 }
 #endif /* CONFIG USER ONLY */
 
uint32_t curr_cflags(CPUState *cpu)
{
uint32_t cflags = cpu->tcg_cflags;
/*
* Record gdb single-step. We should be exiting the TB by raising
* EXCP_DEBUG, but to simplify other tests, disable chaining too.
*
* For singlestep and -d nochain, suppress goto_tb so that
* we can log -d cpu,exec after every TB.
*/
if (unlikely(cpu->singlestep_enabled)) {
cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
} else if (singlestep) {
cflags |= CF_NO_GOTO_TB | 1;
} else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
cflags |= CF_NO_GOTO_TB;
}
return cflags;
}
 /* Might cause an exception, so have a longjmp destination ready */
 static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
                                           target_ulong cs_base,
@@ -205,6 +227,76 @@ static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
     }
 }
 
static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
uint32_t *cflags)
{
CPUBreakpoint *bp;
bool match_page = false;
if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
return false;
}
/*
* Singlestep overrides breakpoints.
* This requirement is visible in the record-replay tests, where
* we would fail to make forward progress in reverse-continue.
*
* TODO: gdb singlestep should only override gdb breakpoints,
* so that one could (gdb) singlestep into the guest kernel's
* architectural breakpoint handler.
*/
if (cpu->singlestep_enabled) {
return false;
}
QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
/*
* If we have an exact pc match, trigger the breakpoint.
* Otherwise, note matches within the page.
*/
if (pc == bp->pc) {
bool match_bp = false;
if (bp->flags & BP_GDB) {
match_bp = true;
} else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
g_assert_not_reached();
#else
CPUClass *cc = CPU_GET_CLASS(cpu);
assert(cc->tcg_ops->debug_check_breakpoint);
match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
}
if (match_bp) {
cpu->exception_index = EXCP_DEBUG;
return true;
}
} else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
match_page = true;
}
}
/*
* Within the same page as a breakpoint, single-step,
* returning to helper_lookup_tb_ptr after each insn looking
* for the actual breakpoint.
*
* TODO: Perhaps better to record all of the TBs associated
* with a given virtual page that contains a breakpoint, and
* then invalidate them when a new overlapping breakpoint is
* set on the page. Non-overlapping TBs would not be
* invalidated, nor would any TB need to be invalidated as
* breakpoints are removed.
*/
if (match_page) {
*cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
}
return false;
}
 /**
  * helper_lookup_tb_ptr: quick check for next tb
  * @env: current cpu state
@@ -218,11 +310,16 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
     CPUState *cpu = env_cpu(env);
     TranslationBlock *tb;
     target_ulong cs_base, pc;
-    uint32_t flags;
+    uint32_t flags, cflags;
 
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-    tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
+
+    cflags = curr_cflags(cpu);
+    if (check_for_breakpoints(cpu, pc, &cflags)) {
+        cpu_loop_exit(cpu);
+    }
+
+    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
     if (tb == NULL) {
         return tcg_code_gen_epilogue;
     }
@@ -313,8 +410,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
     CPUArchState *env = (CPUArchState *)cpu->env_ptr;
     TranslationBlock *tb;
     target_ulong cs_base, pc;
-    uint32_t flags;
-    uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
+    uint32_t flags, cflags;
     int tb_exit;
 
     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@@ -324,8 +420,20 @@ void cpu_exec_step_atomic(CPUState *cpu)
         cpu->running = true;
 
         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
cflags = curr_cflags(cpu);
/* Execute in a serial context. */
cflags &= ~CF_PARALLEL;
/* After 1 insn, return and release the exclusive lock. */
cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
/*
* No need to check_for_breakpoints here.
* We only arrive in cpu_exec_step_atomic after beginning execution
* of an insn that includes an atomic operation we can't handle.
* Any breakpoint for this insn will have been recognized earlier.
*/
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
         if (tb == NULL) {
             mmap_lock();
             tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
@@ -478,41 +586,6 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
     return;
 }
 
static inline TranslationBlock *tb_find(CPUState *cpu,
TranslationBlock *last_tb,
int tb_exit, uint32_t cflags)
{
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock();
/* We add the TB in the virtual pc hash table for the fast lookup */
qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
}
#ifndef CONFIG_USER_ONLY
/* We don't take care of direct jumps when address mapping changes in
* system emulation. So it's not safe to make a direct jump to a TB
* spanning two pages because the mapping for the second page can change.
*/
if (tb->page_addr[1] != -1) {
last_tb = NULL;
}
#endif
/* See if we can patch the calling TB. */
if (last_tb) {
tb_add_jump(last_tb, tb_exit, tb);
}
return tb;
}
 static inline bool cpu_handle_halt(CPUState *cpu)
 {
     if (cpu->halted) {
@@ -846,22 +919,60 @@ int cpu_exec(CPUState *cpu)
         int tb_exit = 0;
 
         while (!cpu_handle_interrupt(cpu, &last_tb)) {
-            uint32_t cflags = cpu->cflags_next_tb;
             TranslationBlock *tb;
+            target_ulong cs_base, pc;
+            uint32_t flags, cflags;
 
-            /* When requested, use an exact setting for cflags for the next
-               execution.  This is used for icount, precise smc, and stop-
-               after-access watchpoints.  Since this request should never
-               have CF_INVALID set, -1 is a convenient invalid value that
-               does not require tcg headers for cpu_common_reset.  */
+            cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+
+            /*
+             * When requested, use an exact setting for cflags for the next
+             * execution.  This is used for icount, precise smc, and stop-
+             * after-access watchpoints.  Since this request should never
+             * have CF_INVALID set, -1 is a convenient invalid value that
+             * does not require tcg headers for cpu_common_reset.
+             */
+            cflags = cpu->cflags_next_tb;
             if (cflags == -1) {
                 cflags = curr_cflags(cpu);
             } else {
                 cpu->cflags_next_tb = -1;
             }
 
-            tb = tb_find(cpu, last_tb, tb_exit, cflags);
+            if (check_for_breakpoints(cpu, pc, &cflags)) {
+                break;
+            }
+
+            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock();
/*
* We add the TB in the virtual pc hash table
* for the fast lookup
*/
qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
}
#ifndef CONFIG_USER_ONLY
/*
* We don't take care of direct jumps when address mapping
* changes in system emulation. So it's not safe to make a
* direct jump to a TB spanning two pages because the mapping
* for the second page can change.
*/
if (tb->page_addr[1] != -1) {
last_tb = NULL;
}
#endif
/* See if we can patch the calling TB. */
if (last_tb) {
tb_add_jump(last_tb, tb_exit, tb);
}
             cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
             /* Try to align the host and virtual clocks
                if the guest is in advance */
             align_clocks(&sc, cpu);

accel/tcg/cputlb.c

@@ -2686,19 +2686,14 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
     cpu_stq_le_data_ra(env, ptr, val, 0);
 }
 
-/* First set of helpers allows passing in of OI and RETADDR.  This makes
-   them callable from other helpers.  */
+/*
+ * First set of functions passes in OI and RETADDR.
+ * This makes them callable from other helpers.
+ */
 
-#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
 #define ATOMIC_NAME(X) \
-    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_DECLS
-#define ATOMIC_MMU_LOOKUP_RW \
-    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr)
-#define ATOMIC_MMU_LOOKUP_R \
-    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr)
-#define ATOMIC_MMU_LOOKUP_W \
-    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr)
+    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
 #define ATOMIC_MMU_CLEANUP
 #define ATOMIC_MMU_IDX   get_mmuidx(oi)
 
@@ -2723,38 +2718,6 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
 #include "atomic_template.h"
 #endif
 
/* Second set of helpers are directly callable from TCG as helpers. */
#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP_RW
#undef ATOMIC_MMU_LOOKUP_R
#undef ATOMIC_MMU_LOOKUP_W
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP_RW \
atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, GETPC())
#define ATOMIC_MMU_LOOKUP_R \
atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, GETPC())
#define ATOMIC_MMU_LOOKUP_W \
atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, GETPC())
#define DATA_SIZE 1
#include "atomic_template.h"
#define DATA_SIZE 2
#include "atomic_template.h"
#define DATA_SIZE 4
#include "atomic_template.h"
#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
#undef ATOMIC_MMU_IDX
 /* Code access functions. */
 
 static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,

accel/tcg/tcg-runtime.h

@@ -39,8 +39,6 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
 DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr)
 #endif /* IN_HELPER_PROTO */
 
-#ifdef CONFIG_SOFTMMU
-
 DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
                    i32, env, tl, i32, i32, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
@@ -88,50 +86,6 @@ DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
                    TCG_CALL_NO_WG, i32, env, tl, i32, i32)
 #endif /* CONFIG_ATOMIC64 */
 
#else
DEF_HELPER_FLAGS_4(atomic_cmpxchgb, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
DEF_HELPER_FLAGS_4(atomic_cmpxchgw_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
DEF_HELPER_FLAGS_4(atomic_cmpxchgw_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
DEF_HELPER_FLAGS_4(atomic_cmpxchgl_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
DEF_HELPER_FLAGS_4(atomic_cmpxchgl_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
#ifdef CONFIG_ATOMIC64
DEF_HELPER_FLAGS_4(atomic_cmpxchgq_be, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
DEF_HELPER_FLAGS_4(atomic_cmpxchgq_le, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
#endif
#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_le), \
TCG_CALL_NO_WG, i64, env, tl, i64) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_be), \
TCG_CALL_NO_WG, i64, env, tl, i64)
#else
#define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be), \
TCG_CALL_NO_WG, i32, env, tl, i32)
#endif /* CONFIG_ATOMIC64 */
#endif /* CONFIG_SOFTMMU */
 GEN_ATOMIC_HELPERS(fetch_add)
 GEN_ATOMIC_HELPERS(fetch_and)
 GEN_ATOMIC_HELPERS(fetch_or)

accel/tcg/translate-all.c

@@ -1428,14 +1428,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
 
     max_insns = cflags & CF_COUNT_MASK;
     if (max_insns == 0) {
-        max_insns = CF_COUNT_MASK;
-    }
-    if (max_insns > TCG_MAX_INSNS) {
         max_insns = TCG_MAX_INSNS;
     }
-    if (cpu->singlestep_enabled || singlestep) {
-        max_insns = 1;
-    }
+    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
 
  buffer_overflow:
     tb = tcg_tb_alloc(tcg_ctx);

accel/tcg/translator.c

@@ -33,8 +33,8 @@ void translator_loop_temp_check(DisasContextBase *db)
 
 bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
 {
-    /* Suppress goto_tb in the case of single-steping.  */
-    if (db->singlestep_enabled || singlestep) {
+    /* Suppress goto_tb if requested. */
+    if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
         return false;
     }
 
@@ -45,7 +45,7 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
 void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
                      CPUState *cpu, TranslationBlock *tb, int max_insns)
 {
-    int bp_insn = 0;
+    uint32_t cflags = tb_cflags(tb);
     bool plugin_enabled;
 
     /* Initialize DisasContext */
@@ -55,7 +55,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
     db->is_jmp = DISAS_NEXT;
     db->num_insns = 0;
     db->max_insns = max_insns;
-    db->singlestep_enabled = cpu->singlestep_enabled;
+    db->singlestep_enabled = cflags & CF_SINGLE_STEP;
 
     ops->init_disas_context(db, cpu);
     tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */
@@ -68,8 +68,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
     ops->tb_start(db, cpu);
     tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */
 
-    plugin_enabled = plugin_gen_tb_start(cpu, tb,
-                                         tb_cflags(db->tb) & CF_MEMI_ONLY);
+    plugin_enabled = plugin_gen_tb_start(cpu, tb, cflags & CF_MEMI_ONLY);
 
     while (true) {
         db->num_insns++;
@@ -80,39 +79,17 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
             plugin_gen_insn_start(cpu, db);
         }
 
/* Pass breakpoint hits to target for further processing */
if (!db->singlestep_enabled
&& unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
CPUBreakpoint *bp;
QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
if (bp->pc == db->pc_next) {
if (ops->breakpoint_check(db, cpu, bp)) {
bp_insn = 1;
break;
}
}
}
/* The breakpoint_check hook may use DISAS_TOO_MANY to indicate
that only one more instruction is to be executed. Otherwise
it should use DISAS_NORETURN when generating an exception,
but may use a DISAS_TARGET_* value for Something Else. */
if (db->is_jmp > DISAS_TOO_MANY) {
break;
}
}
         /* Disassemble one instruction.  The translate_insn hook should
            update db->pc_next and db->is_jmp to indicate what should be
            done next -- either exiting this loop or locate the start of
            the next instruction.  */
-        if (db->num_insns == db->max_insns
-            && (tb_cflags(db->tb) & CF_LAST_IO)) {
+        if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
             /* Accept I/O on the last instruction.  */
             gen_io_start();
             ops->translate_insn(db, cpu);
         } else {
             /* we should only see CF_MEMI_ONLY for io_recompile */
-            tcg_debug_assert(!(tb_cflags(db->tb) & CF_MEMI_ONLY));
+            tcg_debug_assert(!(cflags & CF_MEMI_ONLY));
             ops->translate_insn(db, cpu);
         }
 
@@ -139,7 +116,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
     /* Emit code to exit the TB, as indicated by db->is_jmp.  */
     ops->tb_stop(db, cpu);
-    gen_tb_end(db->tb, db->num_insns - bp_insn);
+    gen_tb_end(db->tb, db->num_insns);
 
     if (plugin_enabled) {
         plugin_gen_tb_end(cpu);

accel/tcg/user-exec.c

@@ -1221,9 +1221,14 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
     return ret;
 }
 
-/* Do not allow unaligned operations to proceed.  Return the host address.  */
+/*
+ * Do not allow unaligned operations to proceed.  Return the host address.
+ *
+ * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
+ */
 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               int size, uintptr_t retaddr)
+                               TCGMemOpIdx oi, int size, int prot,
+                               uintptr_t retaddr)
 {
     /* Enforce qemu required alignment.  */
     if (unlikely(addr & (size - 1))) {
@@ -1234,19 +1239,18 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     return ret;
 }
 
-/* Macro to call the above, with local variables from the use context.  */
-#define ATOMIC_MMU_DECLS do {} while (0)
-#define ATOMIC_MMU_LOOKUP_RW atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
-#define ATOMIC_MMU_LOOKUP_R  ATOMIC_MMU_LOOKUP_RW
-#define ATOMIC_MMU_LOOKUP_W  ATOMIC_MMU_LOOKUP_RW
+#include "atomic_common.c.inc"
+
+/*
+ * First set of functions passes in OI and RETADDR.
+ * This makes them callable from other helpers.
+ */
+
+#define ATOMIC_NAME(X) \
+    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
 #define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
 #define ATOMIC_MMU_IDX MMU_USER_IDX
 
-#define ATOMIC_NAME(X)   HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define EXTRA_ARGS
-
-#include "atomic_common.c.inc"
-
 #define DATA_SIZE 1
 #include "atomic_template.h"
 
@@ -1261,20 +1265,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 #include "atomic_template.h"
 #endif
 
-/* The following is only callable from other helpers, and matches up
-   with the softmmu version.  */
-
 #if HAVE_ATOMIC128 || HAVE_CMPXCHG128
 
-#undef EXTRA_ARGS
-#undef ATOMIC_NAME
-#undef ATOMIC_MMU_LOOKUP_RW
-
-#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
-#define ATOMIC_NAME(X) \
-    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_LOOKUP_RW atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
-
 #define DATA_SIZE 16
 #include "atomic_template.h"
 #endif

configure

@@ -3991,18 +3991,11 @@ cat > $TMPC << EOF
 int main(void)
 {
   uint64_t x = 0, y = 0;
-#ifdef __ATOMIC_RELAXED
   y = __atomic_load_n(&x, __ATOMIC_RELAXED);
   __atomic_store_n(&x, y, __ATOMIC_RELAXED);
   __atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
   __atomic_exchange_n(&x, y, __ATOMIC_RELAXED);
   __atomic_fetch_add(&x, y, __ATOMIC_RELAXED);
-#else
-  typedef char is_host64[sizeof(void *) >= sizeof(uint64_t) ? 1 : -1];
-  __sync_lock_test_and_set(&x, y);
-  __sync_val_compare_and_swap(&x, y, 0);
-  __sync_fetch_and_add(&x, y);
-#endif
   return 0;
 }
 EOF

cpu.c

@@ -225,11 +225,6 @@ void tb_invalidate_phys_addr(target_ulong addr)
     tb_invalidate_phys_page_range(addr, addr + 1);
     mmap_unlock();
 }
-
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
-{
-    tb_invalidate_phys_addr(pc);
-}
 #else
 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
 {
@@ -250,25 +245,19 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
     ram_addr = memory_region_get_ram_addr(mr) + addr;
     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
 }
-
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
-{
-    /*
-     * There may not be a virtual to physical translation for the pc
-     * right now, but there may exist cached TB for this pc.
-     * Flush the whole TB cache to force re-translation of such TBs.
-     * This is heavyweight, but we're debugging anyway.
-     */
-    tb_flush(cpu);
-}
 #endif
 
 /* Add a breakpoint.  */
 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                           CPUBreakpoint **breakpoint)
 {
+    CPUClass *cc = CPU_GET_CLASS(cpu);
     CPUBreakpoint *bp;
 
+    if (cc->gdb_adjust_breakpoint) {
+        pc = cc->gdb_adjust_breakpoint(cpu, pc);
+    }
+
     bp = g_malloc(sizeof(*bp));
 
     bp->pc = pc;
@@ -281,8 +270,6 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
         QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
     }
 
-    breakpoint_invalidate(cpu, pc);
-
     if (breakpoint) {
         *breakpoint = bp;
     }
@@ -294,8 +281,13 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
 /* Remove a specific breakpoint.  */
 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
 {
+    CPUClass *cc = CPU_GET_CLASS(cpu);
     CPUBreakpoint *bp;
 
+    if (cc->gdb_adjust_breakpoint) {
+        pc = cc->gdb_adjust_breakpoint(cpu, pc);
+    }
+
     QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
         if (bp->pc == pc && bp->flags == flags) {
             cpu_breakpoint_remove_by_ref(cpu, bp);
@@ -310,8 +302,6 @@ void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
 {
     QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);
 
-    breakpoint_invalidate(cpu, bp->pc);
-
     trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
     g_free(bp);
 }
@@ -336,10 +326,6 @@ void cpu_single_step(CPUState *cpu, int enabled)
         cpu->singlestep_enabled = enabled;
         if (kvm_enabled()) {
             kvm_update_guest_debug(cpu, 0);
-        } else {
-            /* must flush all the translated code to avoid inconsistencies */
-            /* XXX: only flush what is necessary */
-            tb_flush(cpu);
         }
         trace_breakpoint_singlestep(cpu->cpu_index, enabled);
     }

include/exec/exec-all.h

@@ -492,13 +492,18 @@ struct TranslationBlock {
     target_ulong cs_base; /* CS base for this block */
     uint32_t flags; /* flags defining in which context the code was generated */
     uint32_t cflags;    /* compile flags */
-#define CF_COUNT_MASK    0x00007fff
-#define CF_LAST_IO       0x00008000 /* Last insn may be an IO access.  */
-#define CF_MEMI_ONLY     0x00010000 /* Only instrument memory ops */
-#define CF_USE_ICOUNT    0x00020000
-#define CF_INVALID       0x00040000 /* TB is stale. Set with @jmp_lock held */
-#define CF_PARALLEL      0x00080000 /* Generate code for a parallel context */
-#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
+
+/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
+#define CF_COUNT_MASK    0x000001ff
+#define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
+#define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
+#define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
+#define CF_LAST_IO       0x00008000 /* Last insn may be an IO access.  */
+#define CF_MEMI_ONLY     0x00010000 /* Only instrument memory ops */
+#define CF_USE_ICOUNT    0x00020000
+#define CF_INVALID       0x00040000 /* TB is stale. Set with @jmp_lock held */
+#define CF_PARALLEL      0x00080000 /* Generate code for a parallel context */
+#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
 #define CF_CLUSTER_SHIFT 24
 
 /* Per-vCPU dynamic tracing state used to generate this TB */
@@ -563,10 +568,7 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
 }
 
 /* current cflags for hashing/comparison */
-static inline uint32_t curr_cflags(CPUState *cpu)
-{
-    return cpu->tcg_cflags;
-}
+uint32_t curr_cflags(CPUState *cpu);
 
 /* TranslationBlock invalidate API */
 #if defined(CONFIG_USER_ONLY)

include/exec/translator.h

@@ -89,15 +89,6 @@ typedef struct DisasContextBase {
  * @insn_start:
  *      Emit the tcg_gen_insn_start opcode.
  *
- * @breakpoint_check:
- *      When called, the breakpoint has already been checked to match the PC,
- *      but the target may decide the breakpoint missed the address
- *      (e.g., due to conditions encoded in their flags).  Return true to
- *      indicate that the breakpoint did hit, in which case no more breakpoints
- *      are checked.  If the breakpoint did hit, emit any code required to
- *      signal the exception, and set db->is_jmp as necessary to terminate
- *      the main loop.
- *
  * @translate_insn:
  *      Disassemble one instruction and set db->pc_next for the start
  *      of the following instruction.  Set db->is_jmp as necessary to
@@ -113,8 +104,6 @@ typedef struct TranslatorOps {
     void (*init_disas_context)(DisasContextBase *db, CPUState *cpu);
     void (*tb_start)(DisasContextBase *db, CPUState *cpu);
     void (*insn_start)(DisasContextBase *db, CPUState *cpu);
-    bool (*breakpoint_check)(DisasContextBase *db, CPUState *cpu,
-                             const CPUBreakpoint *bp);
     void (*translate_insn)(DisasContextBase *db, CPUState *cpu);
     void (*tb_stop)(DisasContextBase *db, CPUState *cpu);
     void (*disas_log)(const DisasContextBase *db, CPUState *cpu);

include/hw/core/cpu.h

@@ -103,6 +103,9 @@ struct SysemuCPUOps;
  *       also implement the synchronize_from_tb hook.
  * @gdb_read_register: Callback for letting GDB read a register.
  * @gdb_write_register: Callback for letting GDB write a register.
+ * @gdb_adjust_breakpoint: Callback for adjusting the address of a
+ *       breakpoint.  Used by AVR to handle a gdb mis-feature with
+ *       its Harvard architecture split code and data.
  * @gdb_num_core_regs: Number of core registers accessible to GDB.
  * @gdb_core_xml_file: File name for core registers GDB XML description.
  * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
@@ -137,6 +140,7 @@ struct CPUClass {
     void (*set_pc)(CPUState *cpu, vaddr value);
     int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
     int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
+    vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);
     const char *gdb_core_xml_file;
     gchar * (*gdb_arch_name)(CPUState *cpu);
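
The new hook is optional. As a rough, hedged sketch of how a target might wire it up (the actual AVR implementation from the commit list is part of the full 59-file diff and is not shown here; the names example_cpu_*, EXAMPLE_GDB_DATA_OFFSET and the adjustment rule are assumptions for illustration only):

```c
/* Hypothetical sketch: not the actual AVR code from this pull request. */
static vaddr example_cpu_gdb_adjust_breakpoint(CPUState *cpu, vaddr addr)
{
    /* Strip an assumed gdb-side data-memory offset so the breakpoint
       lands on the code address the translator actually sees. */
    return addr % EXAMPLE_GDB_DATA_OFFSET;
}

static void example_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    /* Targets that do not need the adjustment simply leave this NULL. */
    cc->gdb_adjust_breakpoint = example_cpu_gdb_adjust_breakpoint;
}
```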

include/hw/core/tcg-cpu-ops.h

@@ -88,6 +88,12 @@ struct TCGCPUOps {
      */
     bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
 
+    /**
+     * @debug_check_breakpoint: return true if the architectural
+     * breakpoint whose PC has matched should really fire.
+     */
+    bool (*debug_check_breakpoint)(CPUState *cpu);
+
     /**
      * @io_recompile_replay_branch: Callback for cpu_io_recompile.
      *

include/qemu/atomic.h

@@ -60,8 +60,9 @@
         (unsigned short)1, \
       (expr)+0))))))
 
-#ifdef __ATOMIC_RELAXED
-/* For C11 atomic ops */
+#ifndef __ATOMIC_RELAXED
+#error "Expecting C11 atomic ops"
+#endif
 
 /* Manual memory barriers
  *
@@ -239,193 +240,8 @@
 #define qatomic_xor(ptr, n) \
     ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
 
#else /* __ATOMIC_RELAXED */
#ifdef __alpha__
#define smp_read_barrier_depends() asm volatile("mb":::"memory")
#endif
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
/*
* Because of the strongly ordered storage model, wmb() and rmb() are nops
* here (a compiler barrier only). QEMU doesn't do accesses to write-combining
* qemu memory or non-temporal load/stores from C code.
*/
#define smp_mb_release() barrier()
#define smp_mb_acquire() barrier()
/*
* __sync_lock_test_and_set() is documented to be an acquire barrier only,
* but it is a full barrier at the hardware level. Add a compiler barrier
* to make it a full barrier also at the compiler level.
*/
#define qatomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))
#elif defined(_ARCH_PPC)
/*
* We use an eieio() for wmb() on powerpc. This assumes we don't
* need to order cacheable and non-cacheable stores with respect to
* each other.
*
* smp_mb has the same problem as on x86 for not-very-new GCC
* (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
*/
#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_mb_release() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#define smp_mb_acquire() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_mb_release() ({ asm volatile("sync" ::: "memory"); (void)0; })
#define smp_mb_acquire() ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif /* _ARCH_PPC */
/*
* For (host) platforms we don't have explicit barrier definitions
* for, we use the gcc __sync_synchronize() primitive to generate a
* full barrier. This should be safe on all platforms, though it may
* be overkill for smp_mb_acquire() and smp_mb_release().
*/
#ifndef smp_mb
#define smp_mb() __sync_synchronize()
#endif
#ifndef smp_mb_acquire
#define smp_mb_acquire() __sync_synchronize()
#endif
#ifndef smp_mb_release
#define smp_mb_release() __sync_synchronize()
#endif
#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends() barrier()
#endif
#ifndef signal_barrier
#define signal_barrier() barrier()
#endif
/* These will only be atomic if the processor does the fetch or store
* in a single issue memory operation
*/
#define qatomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p))
#define qatomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))
#define qatomic_read(ptr) qatomic_read__nocheck(ptr)
#define qatomic_set(ptr, i) qatomic_set__nocheck(ptr,i)
/**
* qatomic_rcu_read - reads a RCU-protected pointer to a local variable
* into a RCU read-side critical section. The pointer can later be safely
* dereferenced within the critical section.
*
* This ensures that the pointer copy is invariant thorough the whole critical
* section.
*
* Inserts memory barriers on architectures that require them (currently only
* Alpha) and documents which pointers are protected by RCU.
*
* qatomic_rcu_read also includes a compiler barrier to ensure that
* value-speculative optimizations (e.g. VSS: Value Speculation
* Scheduling) does not perform the data read before the pointer read
* by speculating the value of the pointer.
*
* Should match qatomic_rcu_set(), qatomic_xchg(), qatomic_cmpxchg().
*/
#define qatomic_rcu_read(ptr) ({ \
typeof(*ptr) _val = qatomic_read(ptr); \
smp_read_barrier_depends(); \
_val; \
})
/**
* qatomic_rcu_set - assigns (publicizes) a pointer to a new data structure
* meant to be read by RCU read-side critical sections.
*
* Documents which pointers will be dereferenced by RCU read-side critical
* sections and adds the required memory barriers on architectures requiring
* them. It also makes sure the compiler does not reorder code initializing the
* data structure before its publication.
*
* Should match qatomic_rcu_read().
*/
#define qatomic_rcu_set(ptr, i) do { \
smp_wmb(); \
qatomic_set(ptr, i); \
} while (0)
#define qatomic_load_acquire(ptr) ({ \
typeof(*ptr) _val = qatomic_read(ptr); \
smp_mb_acquire(); \
_val; \
})
#define qatomic_store_release(ptr, i) do { \
smp_mb_release(); \
qatomic_set(ptr, i); \
} while (0)
#ifndef qatomic_xchg
#if defined(__clang__)
#define qatomic_xchg(ptr, i) __sync_swap(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
#define qatomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif
#define qatomic_xchg__nocheck qatomic_xchg
/* Provide shorter names for GCC atomic builtins. */
#define qatomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1)
#define qatomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1)
#define qatomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
#define qatomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
#define qatomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
#define qatomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n)
#define qatomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)
#define qatomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1)
#define qatomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1)
#define qatomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
#define qatomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
#define qatomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
#define qatomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n)
#define qatomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)
#define qatomic_cmpxchg(ptr, old, new) \
__sync_val_compare_and_swap(ptr, old, new)
#define qatomic_cmpxchg__nocheck(ptr, old, new) qatomic_cmpxchg(ptr, old, new)
/* And even shorter names that return void. */
#define qatomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1))
#define qatomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1))
#define qatomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n))
#define qatomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n))
#define qatomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
#define qatomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))
#define qatomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n))
#endif /* __ATOMIC_RELAXED */
#ifndef smp_wmb
#define smp_wmb() smp_mb_release() #define smp_wmb() smp_mb_release()
#endif
#ifndef smp_rmb
#define smp_rmb() smp_mb_acquire() #define smp_rmb() smp_mb_acquire()
#endif
/* This is more efficient than a store plus a fence. */
#if !defined(__SANITIZE_THREAD__)
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
#define qatomic_mb_set(ptr, i) ((void)qatomic_xchg(ptr, i))
#endif
#endif
/* qatomic_mb_read/set semantics map Java volatile variables. They are /* qatomic_mb_read/set semantics map Java volatile variables. They are
* less expensive on some platforms (notably POWER) than fully * less expensive on some platforms (notably POWER) than fully
@ -435,16 +251,16 @@
* use. See docs/devel/atomics.rst for more discussion. * use. See docs/devel/atomics.rst for more discussion.
*/ */
#ifndef qatomic_mb_read
#define qatomic_mb_read(ptr) \ #define qatomic_mb_read(ptr) \
qatomic_load_acquire(ptr) qatomic_load_acquire(ptr)
#endif
#ifndef qatomic_mb_set #if !defined(__SANITIZE_THREAD__) && \
#define qatomic_mb_set(ptr, i) do { \ (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
qatomic_store_release(ptr, i); \ /* This is more efficient than a store plus a fence. */
smp_mb(); \ # define qatomic_mb_set(ptr, i) ((void)qatomic_xchg(ptr, i))
} while(0) #else
# define qatomic_mb_set(ptr, i) \
({ qatomic_store_release(ptr, i); smp_mb(); })
#endif #endif
 #define qatomic_fetch_inc_nonzero(ptr) ({                              \
@@ -455,28 +271,29 @@
     _oldn;                                                              \
 })
-/* Abstractions to access atomically (i.e. "once") i64/u64 variables */
+/*
+ * Abstractions to access atomically (i.e. "once") i64/u64 variables.
+ *
+ * The i386 abi is odd in that by default members are only aligned to
+ * 4 bytes, which means that 8-byte types can wind up mis-aligned.
+ * Clang will then warn about this, and emit a call into libatomic.
+ *
+ * Use of these types in structures when they will be used with atomic
+ * operations can avoid this.
+ */
+typedef int64_t  aligned_int64_t __attribute__((aligned(8)));
+typedef uint64_t aligned_uint64_t __attribute__((aligned(8)));
 
 #ifdef CONFIG_ATOMIC64
-static inline int64_t qatomic_read_i64(const int64_t *ptr)
-{
-    /* use __nocheck because sizeof(void *) might be < sizeof(u64) */
-    return qatomic_read__nocheck(ptr);
-}
-
-static inline uint64_t qatomic_read_u64(const uint64_t *ptr)
-{
-    return qatomic_read__nocheck(ptr);
-}
-
-static inline void qatomic_set_i64(int64_t *ptr, int64_t val)
-{
-    qatomic_set__nocheck(ptr, val);
-}
-
-static inline void qatomic_set_u64(uint64_t *ptr, uint64_t val)
-{
-    qatomic_set__nocheck(ptr, val);
-}
+/* Use __nocheck because sizeof(void *) might be < sizeof(u64) */
+#define qatomic_read_i64(P) \
+    _Generic(*(P), int64_t: qatomic_read__nocheck(P))
+#define qatomic_read_u64(P) \
+    _Generic(*(P), uint64_t: qatomic_read__nocheck(P))
+#define qatomic_set_i64(P, V) \
+    _Generic(*(P), int64_t: qatomic_set__nocheck(P, V))
+#define qatomic_set_u64(P, V) \
+    _Generic(*(P), uint64_t: qatomic_set__nocheck(P, V))
 
 static inline void qatomic64_init(void)
 {

View File

@@ -21,7 +21,7 @@
 typedef struct Stat64 {
 #ifdef CONFIG_ATOMIC64
-    uint64_t value;
+    aligned_uint64_t value;
 #else
     uint32_t low, high;
     uint32_t lock;
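The Stat64 change above is the new typedefs in action: embedding aligned_uint64_t keeps the member 8-byte aligned even under the i386 ABI, so the qatomic_*_u64() accessors (available under CONFIG_ATOMIC64) stay lock-free and clang no longer falls back to libatomic. A short sketch of the same pattern with a hypothetical struct, not taken from this patch:

/* Illustration only: a 64-bit field that is safe for the "once" accessors
 * even when the containing struct is compiled for a 32-bit i386 host. */
typedef struct ToyCounter {
    uint32_t generation;
    aligned_uint64_t total;   /* a plain uint64_t may be only 4-byte aligned */
} ToyCounter;

static inline uint64_t toy_counter_snapshot(ToyCounter *c)
{
    /* One atomic 64-bit load; no torn read, no libatomic call. */
    return qatomic_read_u64(&c->total);
}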

View File

@@ -1341,31 +1341,32 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
 # define helper_ret_stl_mmu   helper_le_stl_mmu
 # define helper_ret_stq_mmu   helper_le_stq_mmu
 #endif
+#endif /* CONFIG_SOFTMMU */
 
-uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
-                                    uint32_t cmpv, uint32_t newv,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
-                                       uint32_t cmpv, uint32_t newv,
-                                       TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
-                                       uint32_t cmpv, uint32_t newv,
-                                       TCGMemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
-                                       uint64_t cmpv, uint64_t newv,
-                                       TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
-                                       uint32_t cmpv, uint32_t newv,
-                                       TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
-                                       uint32_t cmpv, uint32_t newv,
-                                       TCGMemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
-                                       uint64_t cmpv, uint64_t newv,
-                                       TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
+                                 uint32_t cmpv, uint32_t newv,
+                                 TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
+                                    uint32_t cmpv, uint32_t newv,
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
+                                    uint32_t cmpv, uint32_t newv,
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
+                                    uint64_t cmpv, uint64_t newv,
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
+                                    uint32_t cmpv, uint32_t newv,
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
+                                    uint32_t cmpv, uint32_t newv,
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
+                                    uint64_t cmpv, uint64_t newv,
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
 
 #define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
-    TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu     \
+    TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu        \
     (CPUArchState *env, target_ulong addr, TYPE val,  \
      TCGMemOpIdx oi, uintptr_t retaddr);
@@ -1411,31 +1412,22 @@ GEN_ATOMIC_HELPER_ALL(xchg)
 #undef GEN_ATOMIC_HELPER_ALL
 #undef GEN_ATOMIC_HELPER
-#endif /* CONFIG_SOFTMMU */
 
-/*
- * These aren't really a "proper" helpers because TCG cannot manage Int128.
- * However, use the same format as the others, for use by the backends.
- *
- * The cmpxchg functions are only defined if HAVE_CMPXCHG128;
- * the ld/st functions are only defined if HAVE_ATOMIC128,
- * as defined by <qemu/atomic128.h>.
- */
-Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
-                                     Int128 cmpv, Int128 newv,
-                                     TCGMemOpIdx oi, uintptr_t retaddr);
-Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
-                                     Int128 cmpv, Int128 newv,
-                                     TCGMemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
+                                  Int128 cmpv, Int128 newv,
+                                  TCGMemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
+                                  Int128 cmpv, Int128 newv,
+                                  TCGMemOpIdx oi, uintptr_t retaddr);
 
-Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
-                                TCGMemOpIdx oi, uintptr_t retaddr);
-Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
-                                TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
-                              TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
-                              TCGMemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
+                             TCGMemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
+                             TCGMemOpIdx oi, uintptr_t retaddr);
+void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+                           TCGMemOpIdx oi, uintptr_t retaddr);
+void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+                           TCGMemOpIdx oi, uintptr_t retaddr);
 
 #ifdef CONFIG_DEBUG_TCG
 void tcg_assert_listed_vecop(TCGOpcode);

View File

@@ -82,7 +82,7 @@ static abi_ulong hppa_lws(CPUHPPAState *env)
             o64 = *(uint64_t *)g2h(cs, old);
             n64 = *(uint64_t *)g2h(cs, new);
 #ifdef CONFIG_ATOMIC64
-            r64 = qatomic_cmpxchg__nocheck((uint64_t *)g2h(cs, addr),
-                                           o64, n64);
+            r64 = qatomic_cmpxchg__nocheck((aligned_uint64_t *)g2h(cs, addr),
+                                           o64, n64);
             ret = r64 != o64;
 #else

View File

@@ -27,7 +27,7 @@
 #include "exec/helper-proto.h"
 #include "tcg/tcg.h"
 #include "tcg/tcg-op.h"
-#include "trace/mem-internal.h" /* mem_info macros */
+#include "trace/mem.h"          /* mem_info macros */
 #include "plugin.h"
 #include "qemu/compiler.h"

View File

@@ -47,7 +47,7 @@ typedef struct TimersState {
     int64_t last_delta;
     /* Compensate for varying guest execution speed. */
-    int64_t qemu_icount_bias;
+    aligned_int64_t qemu_icount_bias;
     int64_t vm_clock_warp_start;
     int64_t cpu_clock_offset;

View File

@@ -1207,19 +1207,8 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
                      ? 0x2000 + (palcode - 0x80) * 64
                      : 0x1000 + palcode * 64);
-        /* Since the destination is running in PALmode, we don't really
-           need the page permissions check.  We'll see the existence of
-           the page when we create the TB, and we'll flush all TBs if
-           we change the PAL base register.  */
-        if (!ctx->base.singlestep_enabled) {
-            tcg_gen_goto_tb(0);
-            tcg_gen_movi_i64(cpu_pc, entry);
-            tcg_gen_exit_tb(ctx->base.tb, 0);
-            return DISAS_NORETURN;
-        } else {
-            tcg_gen_movi_i64(cpu_pc, entry);
-            return DISAS_PC_UPDATED;
-        }
+        tcg_gen_movi_i64(cpu_pc, entry);
+        return DISAS_PC_UPDATED;
     }
 #endif
 }
@ -2978,21 +2967,6 @@ static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(dcbase->pc_next); tcg_gen_insn_start(dcbase->pc_next);
} }
static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
const CPUBreakpoint *bp)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order to for it to be
properly cleared -- thus we increment the PC here so that
the logic setting tb->size below does the right thing. */
ctx->base.pc_next += 4;
return true;
}
static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{ {
DisasContext *ctx = container_of(dcbase, DisasContext, base); DisasContext *ctx = container_of(dcbase, DisasContext, base);
@ -3051,7 +3025,6 @@ static const TranslatorOps alpha_tr_ops = {
.init_disas_context = alpha_tr_init_disas_context, .init_disas_context = alpha_tr_init_disas_context,
.tb_start = alpha_tr_tb_start, .tb_start = alpha_tr_tb_start,
.insn_start = alpha_tr_insn_start, .insn_start = alpha_tr_insn_start,
.breakpoint_check = alpha_tr_breakpoint_check,
.translate_insn = alpha_tr_translate_insn, .translate_insn = alpha_tr_translate_insn,
.tb_stop = alpha_tr_tb_stop, .tb_stop = alpha_tr_tb_stop,
.disas_log = alpha_tr_disas_log, .disas_log = alpha_tr_disas_log,

View File

@@ -1984,6 +1984,7 @@ static const struct TCGCPUOps arm_tcg_ops = {
     .do_unaligned_access = arm_cpu_do_unaligned_access,
     .adjust_watchpoint_address = arm_adjust_watchpoint_address,
     .debug_check_watchpoint = arm_debug_check_watchpoint,
+    .debug_check_breakpoint = arm_debug_check_breakpoint,
 #endif /* !CONFIG_USER_ONLY */
 };
 #endif /* CONFIG_TCG */

View File

@@ -911,6 +911,7 @@ static const struct TCGCPUOps arm_v7m_tcg_ops = {
     .do_unaligned_access = arm_cpu_do_unaligned_access,
     .adjust_watchpoint_address = arm_adjust_watchpoint_address,
     .debug_check_watchpoint = arm_debug_check_watchpoint,
+    .debug_check_breakpoint = arm_debug_check_breakpoint,
 #endif /* !CONFIG_USER_ONLY */
 };
 #endif /* CONFIG_TCG */

View File

@@ -216,8 +216,9 @@ static bool check_watchpoints(ARMCPU *cpu)
     return false;
 }
 
-static bool check_breakpoints(ARMCPU *cpu)
+bool arm_debug_check_breakpoint(CPUState *cs)
 {
+    ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
     int n;
 
@@ -238,15 +239,6 @@ static bool check_breakpoints(ARMCPU *cpu)
     return false;
 }
 
-void HELPER(check_breakpoints)(CPUARMState *env)
-{
-    ARMCPU *cpu = env_archcpu(env);
-
-    if (check_breakpoints(cpu)) {
-        HELPER(exception_internal(env, EXCP_DEBUG));
-    }
-}
-
 bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
 {
     /*

View File

@ -564,7 +564,7 @@ uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
cmpv = int128_make128(env->exclusive_val, env->exclusive_high); cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
newv = int128_make128(new_lo, new_hi); newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra); oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
success = int128_eq(oldv, cmpv); success = int128_eq(oldv, cmpv);
return !success; return !success;
@ -638,7 +638,7 @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
*/ */
cmpv = int128_make128(env->exclusive_high, env->exclusive_val); cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
newv = int128_make128(new_hi, new_lo); newv = int128_make128(new_hi, new_lo);
oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
success = int128_eq(oldv, cmpv); success = int128_eq(oldv, cmpv);
return !success; return !success;
@ -660,7 +660,7 @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]); cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
newv = int128_make128(new_lo, new_hi); newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra); oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
env->xregs[rs] = int128_getlo(oldv); env->xregs[rs] = int128_getlo(oldv);
env->xregs[rs + 1] = int128_gethi(oldv); env->xregs[rs + 1] = int128_gethi(oldv);
@ -681,7 +681,7 @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]); cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
newv = int128_make128(new_lo, new_hi); newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
env->xregs[rs + 1] = int128_getlo(oldv); env->xregs[rs + 1] = int128_getlo(oldv);
env->xregs[rs] = int128_gethi(oldv); env->xregs[rs] = int128_gethi(oldv);

View File

@ -54,8 +54,6 @@ DEF_HELPER_1(yield, void, env)
DEF_HELPER_1(pre_hvc, void, env) DEF_HELPER_1(pre_hvc, void, env)
DEF_HELPER_2(pre_smc, void, env, i32) DEF_HELPER_2(pre_smc, void, env, i32)
DEF_HELPER_1(check_breakpoints, void, env)
DEF_HELPER_3(cpsr_write, void, env, i32, i32) DEF_HELPER_3(cpsr_write, void, env, i32, i32)
DEF_HELPER_2(cpsr_write_eret, void, env, i32) DEF_HELPER_2(cpsr_write_eret, void, env, i32)
DEF_HELPER_1(cpsr_read, i32, env) DEF_HELPER_1(cpsr_read, i32, env)

View File

@ -282,6 +282,9 @@ void hw_breakpoint_update(ARMCPU *cpu, int n);
*/ */
void hw_breakpoint_update_all(ARMCPU *cpu); void hw_breakpoint_update_all(ARMCPU *cpu);
/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);
/* Callback function for checking if a watchpoint should trigger. */ /* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp); bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

View File

@ -14844,30 +14844,6 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
dc->insn_start = tcg_last_op(); dc->insn_start = tcg_last_op();
} }
static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
const CPUBreakpoint *bp)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
if (bp->flags & BP_CPU) {
gen_a64_set_pc_im(dc->base.pc_next);
gen_helper_check_breakpoints(cpu_env);
/* End the TB early; it likely won't be executed */
dc->base.is_jmp = DISAS_TOO_MANY;
} else {
gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
/* The address covered by the breakpoint must be
included in [tb->pc, tb->pc + tb->size) in order
to for it to be properly cleared -- thus we
increment the PC here so that the logic setting
tb->size below does the right thing. */
dc->base.pc_next += 4;
dc->base.is_jmp = DISAS_NORETURN;
}
return true;
}
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{ {
DisasContext *dc = container_of(dcbase, DisasContext, base); DisasContext *dc = container_of(dcbase, DisasContext, base);
@ -14982,7 +14958,6 @@ const TranslatorOps aarch64_translator_ops = {
.init_disas_context = aarch64_tr_init_disas_context, .init_disas_context = aarch64_tr_init_disas_context,
.tb_start = aarch64_tr_tb_start, .tb_start = aarch64_tr_tb_start,
.insn_start = aarch64_tr_insn_start, .insn_start = aarch64_tr_insn_start,
.breakpoint_check = aarch64_tr_breakpoint_check,
.translate_insn = aarch64_tr_translate_insn, .translate_insn = aarch64_tr_translate_insn,
.tb_stop = aarch64_tr_tb_stop, .tb_stop = aarch64_tr_tb_stop,
.disas_log = aarch64_tr_disas_log, .disas_log = aarch64_tr_disas_log,

View File

@ -9438,33 +9438,6 @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
dc->insn_start = tcg_last_op(); dc->insn_start = tcg_last_op();
} }
static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
const CPUBreakpoint *bp)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
if (bp->flags & BP_CPU) {
gen_set_condexec(dc);
gen_set_pc_im(dc, dc->base.pc_next);
gen_helper_check_breakpoints(cpu_env);
/* End the TB early; it's likely not going to be executed */
dc->base.is_jmp = DISAS_TOO_MANY;
} else {
gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
/* The address covered by the breakpoint must be
included in [tb->pc, tb->pc + tb->size) in order
to for it to be properly cleared -- thus we
increment the PC here so that the logic setting
tb->size below does the right thing. */
/* TODO: Advance PC by correct instruction length to
* avoid disassembler error messages */
dc->base.pc_next += 2;
dc->base.is_jmp = DISAS_NORETURN;
}
return true;
}
static bool arm_pre_translate_insn(DisasContext *dc) static bool arm_pre_translate_insn(DisasContext *dc)
{ {
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
@ -9827,7 +9800,6 @@ static const TranslatorOps arm_translator_ops = {
.init_disas_context = arm_tr_init_disas_context, .init_disas_context = arm_tr_init_disas_context,
.tb_start = arm_tr_tb_start, .tb_start = arm_tr_tb_start,
.insn_start = arm_tr_insn_start, .insn_start = arm_tr_insn_start,
.breakpoint_check = arm_tr_breakpoint_check,
.translate_insn = arm_tr_translate_insn, .translate_insn = arm_tr_translate_insn,
.tb_stop = arm_tr_tb_stop, .tb_stop = arm_tr_tb_stop,
.disas_log = arm_tr_disas_log, .disas_log = arm_tr_disas_log,
@ -9837,7 +9809,6 @@ static const TranslatorOps thumb_translator_ops = {
.init_disas_context = arm_tr_init_disas_context, .init_disas_context = arm_tr_init_disas_context,
.tb_start = arm_tr_tb_start, .tb_start = arm_tr_tb_start,
.insn_start = arm_tr_insn_start, .insn_start = arm_tr_insn_start,
.breakpoint_check = arm_tr_breakpoint_check,
.translate_insn = thumb_tr_translate_insn, .translate_insn = thumb_tr_translate_insn,
.tb_stop = arm_tr_tb_stop, .tb_stop = arm_tr_tb_stop,
.disas_log = arm_tr_disas_log, .disas_log = arm_tr_disas_log,

View File

@@ -223,6 +223,7 @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
     cc->disas_set_info = avr_cpu_disas_set_info;
     cc->gdb_read_register = avr_cpu_gdb_read_register;
     cc->gdb_write_register = avr_cpu_gdb_write_register;
+    cc->gdb_adjust_breakpoint = avr_cpu_gdb_adjust_breakpoint;
     cc->gdb_num_core_regs = 35;
     cc->gdb_core_xml_file = "avr-cpu.xml";
     cc->tcg_ops = &avr_tcg_ops;

View File

@@ -162,6 +162,7 @@ hwaddr avr_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int avr_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int avr_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 int avr_print_insn(bfd_vma addr, disassemble_info *info);
+vaddr avr_cpu_gdb_adjust_breakpoint(CPUState *cpu, vaddr addr);
 
 static inline int avr_feature(CPUAVRState *env, AVRFeature feature)
 {

View File

@@ -82,3 +82,16 @@ int avr_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
     return 0;
 }
+
+vaddr avr_cpu_gdb_adjust_breakpoint(CPUState *cpu, vaddr addr)
+{
+    /*
+     * This is due to some strange GDB behavior
+     * Let's assume main has address 0x100:
+     *   b main   - sets breakpoint at address 0x00000100 (code)
+     *   b *0x100 - sets breakpoint at address 0x00800100 (data)
+     *
+     * Force all breakpoints into code space.
+     */
+    return addr % OFFSET_DATA;
+}
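Assuming OFFSET_DATA is the base of the AVR data address space (0x00800000 in current QEMU sources; treated as an assumption here), the modulo strips that offset so both GDB spellings land on the same code address. A self-contained sketch of the effect, with hypothetical toy_* names standing in for the real constants:

#include <assert.h>
#include <stdint.h>

#define TOY_OFFSET_DATA 0x00800000u   /* assumed value of OFFSET_DATA */

static uint32_t toy_adjust_breakpoint(uint32_t addr)
{
    /* Map a data-space alias back onto the code address. */
    return addr % TOY_OFFSET_DATA;
}

int main(void)
{
    assert(toy_adjust_breakpoint(0x00000100u) == 0x100u);   /* "b main"   */
    assert(toy_adjust_breakpoint(0x00800100u) == 0x100u);   /* "b *0x100" */
    return 0;
}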

View File

@ -2900,14 +2900,6 @@ static bool canonicalize_skip(DisasContext *ctx)
return true; return true;
} }
static void gen_breakpoint(DisasContext *ctx)
{
canonicalize_skip(ctx);
tcg_gen_movi_tl(cpu_pc, ctx->npc);
gen_helper_debug(cpu_env);
ctx->base.is_jmp = DISAS_NORETURN;
}
static void avr_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) static void avr_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{ {
DisasContext *ctx = container_of(dcbase, DisasContext, base); DisasContext *ctx = container_of(dcbase, DisasContext, base);
@ -2944,34 +2936,11 @@ static void avr_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_insn_start(ctx->npc); tcg_gen_insn_start(ctx->npc);
} }
static bool avr_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
const CPUBreakpoint *bp)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
gen_breakpoint(ctx);
return true;
}
static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{ {
DisasContext *ctx = container_of(dcbase, DisasContext, base); DisasContext *ctx = container_of(dcbase, DisasContext, base);
TCGLabel *skip_label = NULL; TCGLabel *skip_label = NULL;
/*
* This is due to some strange GDB behavior
* Let's assume main has address 0x100:
* b main - sets breakpoint at address 0x00000100 (code)
* b *0x100 - sets breakpoint at address 0x00800100 (data)
*
* The translator driver has already taken care of the code pointer.
*/
if (!ctx->base.singlestep_enabled &&
cpu_breakpoint_test(cs, OFFSET_DATA + ctx->base.pc_next, BP_ANY)) {
gen_breakpoint(ctx);
return;
}
/* Conditionally skip the next instruction, if indicated. */ /* Conditionally skip the next instruction, if indicated. */
if (ctx->skip_cond != TCG_COND_NEVER) { if (ctx->skip_cond != TCG_COND_NEVER) {
skip_label = gen_new_label(); skip_label = gen_new_label();
@ -3069,7 +3038,6 @@ static const TranslatorOps avr_tr_ops = {
.init_disas_context = avr_tr_init_disas_context, .init_disas_context = avr_tr_init_disas_context,
.tb_start = avr_tr_tb_start, .tb_start = avr_tr_tb_start,
.insn_start = avr_tr_insn_start, .insn_start = avr_tr_insn_start,
.breakpoint_check = avr_tr_breakpoint_check,
.translate_insn = avr_tr_translate_insn, .translate_insn = avr_tr_translate_insn,
.tb_stop = avr_tr_tb_stop, .tb_stop = avr_tr_tb_stop,
.disas_log = avr_tr_disas_log, .disas_log = avr_tr_disas_log,

View File

@ -3118,25 +3118,6 @@ static void cris_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(dc->delayed_branch == 1 ? dc->ppc | 1 : dc->pc); tcg_gen_insn_start(dc->delayed_branch == 1 ? dc->ppc | 1 : dc->pc);
} }
static bool cris_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
const CPUBreakpoint *bp)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
cris_evaluate_flags(dc);
tcg_gen_movi_tl(env_pc, dc->pc);
t_gen_raise_exception(EXCP_DEBUG);
dc->base.is_jmp = DISAS_NORETURN;
/*
* The address covered by the breakpoint must be included in
* [tb->pc, tb->pc + tb->size) in order to for it to be
* properly cleared -- thus we increment the PC here so that
* the logic setting tb->size below does the right thing.
*/
dc->pc += 2;
return true;
}
static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{ {
DisasContext *dc = container_of(dcbase, DisasContext, base); DisasContext *dc = container_of(dcbase, DisasContext, base);
@ -3315,7 +3296,6 @@ static const TranslatorOps cris_tr_ops = {
.init_disas_context = cris_tr_init_disas_context, .init_disas_context = cris_tr_init_disas_context,
.tb_start = cris_tr_tb_start, .tb_start = cris_tr_tb_start,
.insn_start = cris_tr_insn_start, .insn_start = cris_tr_insn_start,
.breakpoint_check = cris_tr_breakpoint_check,
.translate_insn = cris_tr_translate_insn, .translate_insn = cris_tr_translate_insn,
.tb_stop = cris_tr_tb_stop, .tb_stop = cris_tr_tb_stop,
.disas_log = cris_tr_disas_log, .disas_log = cris_tr_disas_log,

View File

@ -540,22 +540,6 @@ static void hexagon_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(ctx->base.pc_next); tcg_gen_insn_start(ctx->base.pc_next);
} }
static bool hexagon_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
const CPUBreakpoint *bp)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
gen_exception_end_tb(ctx, EXCP_DEBUG);
/*
* The address covered by the breakpoint must be included in
* [tb->pc, tb->pc + tb->size) in order to for it to be
* properly cleared -- thus we increment the PC here so that
* the logic setting tb->size below does the right thing.
*/
ctx->base.pc_next += 4;
return true;
}
static bool pkt_crosses_page(CPUHexagonState *env, DisasContext *ctx) static bool pkt_crosses_page(CPUHexagonState *env, DisasContext *ctx)
{ {
target_ulong page_start = ctx->base.pc_first & TARGET_PAGE_MASK; target_ulong page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
@ -631,7 +615,6 @@ static const TranslatorOps hexagon_tr_ops = {
.init_disas_context = hexagon_tr_init_disas_context, .init_disas_context = hexagon_tr_init_disas_context,
.tb_start = hexagon_tr_tb_start, .tb_start = hexagon_tr_tb_start,
.insn_start = hexagon_tr_insn_start, .insn_start = hexagon_tr_insn_start,
.breakpoint_check = hexagon_tr_breakpoint_check,
.translate_insn = hexagon_tr_translate_packet, .translate_insn = hexagon_tr_translate_packet,
.tb_stop = hexagon_tr_tb_stop, .tb_stop = hexagon_tr_tb_stop,
.disas_log = hexagon_tr_disas_log, .disas_log = hexagon_tr_disas_log,

View File

@ -4159,16 +4159,6 @@ static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b); tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
} }
static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
const CPUBreakpoint *bp)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
gen_excp(ctx, EXCP_DEBUG);
ctx->base.pc_next += 4;
return true;
}
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{ {
DisasContext *ctx = container_of(dcbase, DisasContext, base); DisasContext *ctx = container_of(dcbase, DisasContext, base);
@ -4330,7 +4320,6 @@ static const TranslatorOps hppa_tr_ops = {
.init_disas_context = hppa_tr_init_disas_context, .init_disas_context = hppa_tr_init_disas_context,
.tb_start = hppa_tr_tb_start, .tb_start = hppa_tr_tb_start,
.insn_start = hppa_tr_insn_start, .insn_start = hppa_tr_insn_start,
.breakpoint_check = hppa_tr_breakpoint_check,
.translate_insn = hppa_tr_translate_insn, .translate_insn = hppa_tr_translate_insn,
.tb_stop = hppa_tr_tb_stop, .tb_stop = hppa_tr_tb_stop,
.disas_log = hppa_tr_disas_log, .disas_log = hppa_tr_disas_log,

View File

@ -64,22 +64,12 @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]); cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]); newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);
#ifdef CONFIG_USER_ONLY
{
uint64_t *haddr = g2h(env_cpu(env), a0);
cmpv = cpu_to_le64(cmpv);
newv = cpu_to_le64(newv);
oldv = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
oldv = le64_to_cpu(oldv);
}
#else
{ {
uintptr_t ra = GETPC(); uintptr_t ra = GETPC();
int mem_idx = cpu_mmu_index(env, false); int mem_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx); TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
oldv = helper_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra); oldv = cpu_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
} }
#endif
if (oldv == cmpv) { if (oldv == cmpv) {
eflags |= CC_Z; eflags |= CC_Z;
@ -147,8 +137,7 @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
int mem_idx = cpu_mmu_index(env, false); int mem_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv, Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra);
newv, oi, ra);
if (int128_eq(oldv, cmpv)) { if (int128_eq(oldv, cmpv)) {
eflags |= CC_Z; eflags |= CC_Z;

View File

@@ -54,6 +54,17 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs,
     cpu->env.eip = tb->pc - tb->cs_base;
 }
 
+#ifndef CONFIG_USER_ONLY
+static bool x86_debug_check_breakpoint(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    /* RF disables all architectural breakpoints. */
+    return !(env->eflags & RF_MASK);
+}
+#endif
+
 #include "hw/core/tcg-cpu-ops.h"
 
 static const struct TCGCPUOps x86_tcg_ops = {
@@ -66,6 +77,7 @@ static const struct TCGCPUOps x86_tcg_ops = {
     .tlb_fill = x86_cpu_tlb_fill,
 #ifndef CONFIG_USER_ONLY
     .debug_excp_handler = breakpoint_handler,
+    .debug_check_breakpoint = x86_debug_check_breakpoint,
 #endif /* !CONFIG_USER_ONLY */
 };
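EFLAGS.RF (the resume flag, bit 16) suppresses instruction breakpoints for one instruction after a debug exception, and the new hook simply reports whether that suppression is in effect. A hedged, self-contained sketch of the same check; TOY_RF_MASK is an illustrative stand-in for QEMU's RF_MASK:

#include <stdbool.h>
#include <stdint.h>

#define TOY_RF_MASK (1u << 16)   /* assumed: x86 EFLAGS.RF */

static bool toy_debug_check_breakpoint(uint32_t eflags)
{
    /* Architectural breakpoints are reported only while RF is clear. */
    return !(eflags & TOY_RF_MASK);
}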

View File

@ -2604,14 +2604,6 @@ static void gen_interrupt(DisasContext *s, int intno,
s->base.is_jmp = DISAS_NORETURN; s->base.is_jmp = DISAS_NORETURN;
} }
static void gen_debug(DisasContext *s)
{
gen_update_cc_op(s);
gen_jmp_im(s, s->base.pc_next - s->cs_base);
gen_helper_debug(cpu_env);
s->base.is_jmp = DISAS_NORETURN;
}
static void gen_set_hflag(DisasContext *s, uint32_t mask) static void gen_set_hflag(DisasContext *s, uint32_t mask)
{ {
if ((s->flags & mask) == 0) { if ((s->flags & mask) == 0) {
@ -8635,25 +8627,6 @@ static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(dc->base.pc_next, dc->cc_op); tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
} }
static bool i386_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
const CPUBreakpoint *bp)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
/* If RF is set, suppress an internally generated breakpoint. */
int flags = dc->base.tb->flags & HF_RF_MASK ? BP_GDB : BP_ANY;
if (bp->flags & flags) {
gen_debug(dc);
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order to for it to be
properly cleared -- thus we increment the PC here so that
the generic logic setting tb->size later does the right thing. */
dc->base.pc_next += 1;
return true;
} else {
return false;
}
}
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{ {
DisasContext *dc = container_of(dcbase, DisasContext, base); DisasContext *dc = container_of(dcbase, DisasContext, base);
@ -8721,7 +8694,6 @@ static const TranslatorOps i386_tr_ops = {
.init_disas_context = i386_tr_init_disas_context, .init_disas_context = i386_tr_init_disas_context,
.tb_start = i386_tr_tb_start, .tb_start = i386_tr_tb_start,
.insn_start = i386_tr_insn_start, .insn_start = i386_tr_insn_start,
.breakpoint_check = i386_tr_breakpoint_check,
.translate_insn = i386_tr_translate_insn, .translate_insn = i386_tr_translate_insn,
.tb_stop = i386_tr_tb_stop, .tb_stop = i386_tr_tb_stop,
.disas_log = i386_tr_disas_log, .disas_log = i386_tr_disas_log,

View File

@ -22,6 +22,7 @@
#include "exec/exec-all.h" #include "exec/exec-all.h"
#include "exec/cpu_ldst.h" #include "exec/cpu_ldst.h"
#include "semihosting/semihost.h" #include "semihosting/semihost.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY) #if defined(CONFIG_USER_ONLY)
@ -782,9 +783,9 @@ static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
uint32_t u2 = env->dregs[Du2]; uint32_t u2 = env->dregs[Du2];
uint32_t l1, l2; uint32_t l1, l2;
uintptr_t ra = GETPC(); uintptr_t ra = GETPC();
#if defined(CONFIG_ATOMIC64) && !defined(CONFIG_USER_ONLY) #if defined(CONFIG_ATOMIC64)
int mmu_idx = cpu_mmu_index(env, 0); int mmu_idx = cpu_mmu_index(env, 0);
TCGMemOpIdx oi; TCGMemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx);
#endif #endif
if (parallel) { if (parallel) {
@ -794,23 +795,13 @@ static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
if ((a1 & 7) == 0 && a2 == a1 + 4) { if ((a1 & 7) == 0 && a2 == a1 + 4) {
c = deposit64(c2, 32, 32, c1); c = deposit64(c2, 32, 32, c1);
u = deposit64(u2, 32, 32, u1); u = deposit64(u2, 32, 32, u1);
#ifdef CONFIG_USER_ONLY l = cpu_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra);
l = helper_atomic_cmpxchgq_be(env, a1, c, u);
#else
oi = make_memop_idx(MO_BEQ, mmu_idx);
l = helper_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra);
#endif
l1 = l >> 32; l1 = l >> 32;
l2 = l; l2 = l;
} else if ((a2 & 7) == 0 && a1 == a2 + 4) { } else if ((a2 & 7) == 0 && a1 == a2 + 4) {
c = deposit64(c1, 32, 32, c2); c = deposit64(c1, 32, 32, c2);
u = deposit64(u1, 32, 32, u2); u = deposit64(u1, 32, 32, u2);
#ifdef CONFIG_USER_ONLY l = cpu_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra);
l = helper_atomic_cmpxchgq_be(env, a2, c, u);
#else
oi = make_memop_idx(MO_BEQ, mmu_idx);
l = helper_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra);
#endif
l2 = l >> 32; l2 = l >> 32;
l1 = l; l1 = l;
} else } else

View File

@ -6208,23 +6208,6 @@ static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(dc->base.pc_next, dc->cc_op); tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
} }
static bool m68k_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
const CPUBreakpoint *bp)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
gen_exception(dc, dc->base.pc_next, EXCP_DEBUG);
/*
* The address covered by the breakpoint must be included in
* [tb->pc, tb->pc + tb->size) in order to for it to be
* properly cleared -- thus we increment the PC here so that
* the logic setting tb->size below does the right thing.
*/
dc->base.pc_next += 2;
return true;
}
static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{ {
DisasContext *dc = container_of(dcbase, DisasContext, base); DisasContext *dc = container_of(dcbase, DisasContext, base);
@ -6310,7 +6293,6 @@ static const TranslatorOps m68k_tr_ops = {
.init_disas_context = m68k_tr_init_disas_context, .init_disas_context = m68k_tr_init_disas_context,
.tb_start = m68k_tr_tb_start, .tb_start = m68k_tr_tb_start,
.insn_start = m68k_tr_insn_start, .insn_start = m68k_tr_insn_start,
.breakpoint_check = m68k_tr_breakpoint_check,
.translate_insn = m68k_tr_translate_insn, .translate_insn = m68k_tr_translate_insn,
.tb_stop = m68k_tr_tb_stop, .tb_stop = m68k_tr_tb_stop,
.disas_log = m68k_tr_disas_log, .disas_log = m68k_tr_disas_log,

View File

@ -1673,23 +1673,6 @@ static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
dc->insn_start = tcg_last_op(); dc->insn_start = tcg_last_op();
} }
static bool mb_tr_breakpoint_check(DisasContextBase *dcb, CPUState *cs,
const CPUBreakpoint *bp)
{
DisasContext *dc = container_of(dcb, DisasContext, base);
gen_raise_exception_sync(dc, EXCP_DEBUG);
/*
* The address covered by the breakpoint must be included in
* [tb->pc, tb->pc + tb->size) in order to for it to be
* properly cleared -- thus we increment the PC here so that
* the logic setting tb->size below does the right thing.
*/
dc->base.pc_next += 4;
return true;
}
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs) static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{ {
DisasContext *dc = container_of(dcb, DisasContext, base); DisasContext *dc = container_of(dcb, DisasContext, base);
@ -1854,7 +1837,6 @@ static const TranslatorOps mb_tr_ops = {
.init_disas_context = mb_tr_init_disas_context, .init_disas_context = mb_tr_init_disas_context,
.tb_start = mb_tr_tb_start, .tb_start = mb_tr_tb_start,
.insn_start = mb_tr_insn_start, .insn_start = mb_tr_insn_start,
.breakpoint_check = mb_tr_breakpoint_check,
.translate_insn = mb_tr_translate_insn, .translate_insn = mb_tr_translate_insn,
.tb_stop = mb_tr_tb_stop, .tb_stop = mb_tr_tb_stop,
.disas_log = mb_tr_disas_log, .disas_log = mb_tr_disas_log,

View File

@ -16178,24 +16178,6 @@ static void mips_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
ctx->btarget); ctx->btarget);
} }
static bool mips_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
const CPUBreakpoint *bp)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
save_cpu_state(ctx, 1);
ctx->base.is_jmp = DISAS_NORETURN;
gen_helper_raise_exception_debug(cpu_env);
/*
* The address covered by the breakpoint must be included in
* [tb->pc, tb->pc + tb->size) in order to for it to be
* properly cleared -- thus we increment the PC here so that
* the logic setting tb->size below does the right thing.
*/
ctx->base.pc_next += 4;
return true;
}
static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{ {
CPUMIPSState *env = cs->env_ptr; CPUMIPSState *env = cs->env_ptr;
@ -16303,7 +16285,6 @@ static const TranslatorOps mips_tr_ops = {
.init_disas_context = mips_tr_init_disas_context, .init_disas_context = mips_tr_init_disas_context,
.tb_start = mips_tr_tb_start, .tb_start = mips_tr_tb_start,
.insn_start = mips_tr_insn_start, .insn_start = mips_tr_insn_start,
.breakpoint_check = mips_tr_breakpoint_check,
.translate_insn = mips_tr_translate_insn, .translate_insn = mips_tr_translate_insn,
.tb_stop = mips_tr_tb_stop, .tb_stop = mips_tr_tb_stop,
.disas_log = mips_tr_disas_log, .disas_log = mips_tr_disas_log,

View File

@ -744,16 +744,6 @@ static const char * const regnames[] = {
#include "exec/gen-icount.h" #include "exec/gen-icount.h"
static void gen_exception(DisasContext *dc, uint32_t excp)
{
TCGv_i32 tmp = tcg_const_i32(excp);
tcg_gen_movi_tl(cpu_R[R_PC], dc->pc);
gen_helper_raise_exception(cpu_env, tmp);
tcg_temp_free_i32(tmp);
dc->base.is_jmp = DISAS_NORETURN;
}
/* generate intermediate code for basic block 'tb'. */ /* generate intermediate code for basic block 'tb'. */
static void nios2_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) static void nios2_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{ {
@ -777,22 +767,6 @@ static void nios2_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_insn_start(dcbase->pc_next); tcg_gen_insn_start(dcbase->pc_next);
} }
static bool nios2_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
const CPUBreakpoint *bp)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
gen_exception(dc, EXCP_DEBUG);
/*
* The address covered by the breakpoint must be included in
* [tb->pc, tb->pc + tb->size) in order to for it to be
* properly cleared -- thus we increment the PC here so that
* the logic setting tb->size below does the right thing.
*/
dc->base.pc_next += 4;
return true;
}
static void nios2_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) static void nios2_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{ {
DisasContext *dc = container_of(dcbase, DisasContext, base); DisasContext *dc = container_of(dcbase, DisasContext, base);
@ -870,7 +844,6 @@ static const TranslatorOps nios2_tr_ops = {
.init_disas_context = nios2_tr_init_disas_context, .init_disas_context = nios2_tr_init_disas_context,
.tb_start = nios2_tr_tb_start, .tb_start = nios2_tr_tb_start,
.insn_start = nios2_tr_insn_start, .insn_start = nios2_tr_insn_start,
.breakpoint_check = nios2_tr_breakpoint_check,
.translate_insn = nios2_tr_translate_insn, .translate_insn = nios2_tr_translate_insn,
.tb_stop = nios2_tr_tb_stop, .tb_stop = nios2_tr_tb_stop,
.disas_log = nios2_tr_disas_log, .disas_log = nios2_tr_disas_log,

View File

@ -1609,22 +1609,6 @@ static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
| (dc->base.num_insns > 1 ? 2 : 0)); | (dc->base.num_insns > 1 ? 2 : 0));
} }
static bool openrisc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
const CPUBreakpoint *bp)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
gen_exception(dc, EXCP_DEBUG);
dc->base.is_jmp = DISAS_NORETURN;
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order to for it to be
properly cleared -- thus we increment the PC here so that
the logic setting tb->size below does the right thing. */
dc->base.pc_next += 4;
return true;
}
static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{ {
DisasContext *dc = container_of(dcbase, DisasContext, base); DisasContext *dc = container_of(dcbase, DisasContext, base);
@ -1727,7 +1711,6 @@ static const TranslatorOps openrisc_tr_ops = {
.init_disas_context = openrisc_tr_init_disas_context, .init_disas_context = openrisc_tr_init_disas_context,
.tb_start = openrisc_tr_tb_start, .tb_start = openrisc_tr_tb_start,
.insn_start = openrisc_tr_insn_start, .insn_start = openrisc_tr_insn_start,
.breakpoint_check = openrisc_tr_breakpoint_check,
.translate_insn = openrisc_tr_translate_insn, .translate_insn = openrisc_tr_translate_insn,
.tb_stop = openrisc_tr_tb_stop, .tb_stop = openrisc_tr_tb_stop,
.disas_log = openrisc_tr_disas_log, .disas_log = openrisc_tr_disas_log,

View File

@ -376,7 +376,7 @@ uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
/* We will have raised EXCP_ATOMIC from the translator. */ /* We will have raised EXCP_ATOMIC from the translator. */
assert(HAVE_ATOMIC128); assert(HAVE_ATOMIC128);
ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC()); ret = cpu_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
env->retxh = int128_gethi(ret); env->retxh = int128_gethi(ret);
return int128_getlo(ret); return int128_getlo(ret);
} }
@ -388,7 +388,7 @@ uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
/* We will have raised EXCP_ATOMIC from the translator. */ /* We will have raised EXCP_ATOMIC from the translator. */
assert(HAVE_ATOMIC128); assert(HAVE_ATOMIC128);
ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC()); ret = cpu_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
env->retxh = int128_gethi(ret); env->retxh = int128_gethi(ret);
return int128_getlo(ret); return int128_getlo(ret);
} }
@ -401,7 +401,7 @@ void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
/* We will have raised EXCP_ATOMIC from the translator. */ /* We will have raised EXCP_ATOMIC from the translator. */
assert(HAVE_ATOMIC128); assert(HAVE_ATOMIC128);
val = int128_make128(lo, hi); val = int128_make128(lo, hi);
helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC()); cpu_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
} }
void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr, void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
@ -412,7 +412,7 @@ void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
/* We will have raised EXCP_ATOMIC from the translator. */ /* We will have raised EXCP_ATOMIC from the translator. */
assert(HAVE_ATOMIC128); assert(HAVE_ATOMIC128);
val = int128_make128(lo, hi); val = int128_make128(lo, hi);
helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC()); cpu_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
} }
uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr, uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
@ -429,8 +429,8 @@ uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
cmpv = int128_make128(env->reserve_val2, env->reserve_val); cmpv = int128_make128(env->reserve_val2, env->reserve_val);
newv = int128_make128(new_lo, new_hi); newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
opidx, GETPC()); opidx, GETPC());
success = int128_eq(oldv, cmpv); success = int128_eq(oldv, cmpv);
} }
env->reserve_addr = -1; env->reserve_addr = -1;
@ -451,8 +451,8 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
cmpv = int128_make128(env->reserve_val2, env->reserve_val); cmpv = int128_make128(env->reserve_val2, env->reserve_val);
newv = int128_make128(new_lo, new_hi); newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
opidx, GETPC()); opidx, GETPC());
success = int128_eq(oldv, cmpv); success = int128_eq(oldv, cmpv);
} }
env->reserve_addr = -1; env->reserve_addr = -1;

View File

@ -8565,23 +8565,6 @@ static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_insn_start(dcbase->pc_next); tcg_gen_insn_start(dcbase->pc_next);
} }
static bool ppc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
const CPUBreakpoint *bp)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
gen_update_nip(ctx, ctx->base.pc_next);
gen_debug_exception(ctx);
/*
* The address covered by the breakpoint must be included in
* [tb->pc, tb->pc + tb->size) in order to for it to be properly
* cleared -- thus we increment the PC here so that the logic
* setting tb->size below does the right thing.
*/
ctx->base.pc_next += 4;
return true;
}
static bool is_prefix_insn(DisasContext *ctx, uint32_t insn) static bool is_prefix_insn(DisasContext *ctx, uint32_t insn)
{ {
REQUIRE_INSNS_FLAGS2(ctx, ISA310); REQUIRE_INSNS_FLAGS2(ctx, ISA310);
@ -8710,7 +8693,6 @@ static const TranslatorOps ppc_tr_ops = {
.init_disas_context = ppc_tr_init_disas_context, .init_disas_context = ppc_tr_init_disas_context,
.tb_start = ppc_tr_tb_start, .tb_start = ppc_tr_tb_start,
.insn_start = ppc_tr_insn_start, .insn_start = ppc_tr_insn_start,
.breakpoint_check = ppc_tr_breakpoint_check,
.translate_insn = ppc_tr_translate_insn, .translate_insn = ppc_tr_translate_insn,
.tb_stop = ppc_tr_tb_stop, .tb_stop = ppc_tr_tb_stop,
.disas_log = ppc_tr_disas_log, .disas_log = ppc_tr_disas_log,

View File

@ -961,22 +961,6 @@ static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_insn_start(ctx->base.pc_next); tcg_gen_insn_start(ctx->base.pc_next);
} }
static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
const CPUBreakpoint *bp)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
ctx->base.is_jmp = DISAS_NORETURN;
gen_exception_debug();
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order to for it to be
properly cleared -- thus we increment the PC here so that
the logic setting tb->size below does the right thing. */
ctx->base.pc_next += 4;
return true;
}
static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{ {
DisasContext *ctx = container_of(dcbase, DisasContext, base); DisasContext *ctx = container_of(dcbase, DisasContext, base);
@ -1029,7 +1013,6 @@ static const TranslatorOps riscv_tr_ops = {
.init_disas_context = riscv_tr_init_disas_context, .init_disas_context = riscv_tr_init_disas_context,
.tb_start = riscv_tr_tb_start, .tb_start = riscv_tr_tb_start,
.insn_start = riscv_tr_insn_start, .insn_start = riscv_tr_insn_start,
.breakpoint_check = riscv_tr_breakpoint_check,
.translate_insn = riscv_tr_translate_insn, .translate_insn = riscv_tr_translate_insn,
.tb_stop = riscv_tr_tb_stop, .tb_stop = riscv_tr_tb_stop,
.disas_log = riscv_tr_disas_log, .disas_log = riscv_tr_disas_log,

View File

@ -2309,19 +2309,6 @@ static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_insn_start(ctx->base.pc_next); tcg_gen_insn_start(ctx->base.pc_next);
} }
static bool rx_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
const CPUBreakpoint *bp)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
/* We have hit a breakpoint - make sure PC is up-to-date */
tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
gen_helper_debug(cpu_env);
ctx->base.is_jmp = DISAS_NORETURN;
ctx->base.pc_next += 1;
return true;
}
static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{ {
DisasContext *ctx = container_of(dcbase, DisasContext, base); DisasContext *ctx = container_of(dcbase, DisasContext, base);
@ -2373,7 +2360,6 @@ static const TranslatorOps rx_tr_ops = {
.init_disas_context = rx_tr_init_disas_context, .init_disas_context = rx_tr_init_disas_context,
.tb_start = rx_tr_tb_start, .tb_start = rx_tr_tb_start,
.insn_start = rx_tr_insn_start, .insn_start = rx_tr_insn_start,
.breakpoint_check = rx_tr_breakpoint_check,
.translate_insn = rx_tr_translate_insn, .translate_insn = rx_tr_translate_insn,
.tb_stop = rx_tr_tb_stop, .tb_stop = rx_tr_tb_stop,
.disas_log = rx_tr_disas_log, .disas_log = rx_tr_disas_log,

View File

@ -1811,7 +1811,7 @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
mem_idx = cpu_mmu_index(env, false); mem_idx = cpu_mmu_index(env, false);
oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
fail = !int128_eq(oldv, cmpv); fail = !int128_eq(oldv, cmpv);
env->cc_op = fail; env->cc_op = fail;
@ -1884,7 +1884,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
ov = qatomic_cmpxchg__nocheck(haddr, cv, nv); ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
#else #else
TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx); TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra); ov = cpu_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
#endif #endif
} else { } else {
ov = cpu_ldl_data_ra(env, a1, ra); ov = cpu_ldl_data_ra(env, a1, ra);
@ -1903,13 +1903,8 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
if (parallel) { if (parallel) {
#ifdef CONFIG_ATOMIC64 #ifdef CONFIG_ATOMIC64
# ifdef CONFIG_USER_ONLY
uint64_t *haddr = g2h(env_cpu(env), a1);
ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
# else
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx); TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra); ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
# endif
#else #else
/* Note that we asserted !parallel above. */ /* Note that we asserted !parallel above. */
g_assert_not_reached(); g_assert_not_reached();
@ -1945,7 +1940,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra); cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
} else if (HAVE_CMPXCHG128) { } else if (HAVE_CMPXCHG128) {
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
cc = !int128_eq(ov, cv); cc = !int128_eq(ov, cv);
} else { } else {
/* Note that we asserted !parallel above. */ /* Note that we asserted !parallel above. */
@ -1985,7 +1980,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
} else if (HAVE_ATOMIC128) { } else if (HAVE_ATOMIC128) {
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
Int128 sv = int128_make128(svl, svh); Int128 sv = int128_make128(svl, svh);
helper_atomic_sto_be_mmu(env, a2, sv, oi, ra); cpu_atomic_sto_be_mmu(env, a2, sv, oi, ra);
} else { } else {
/* Note that we asserted !parallel above. */ /* Note that we asserted !parallel above. */
g_assert_not_reached(); g_assert_not_reached();
@ -2486,7 +2481,7 @@ uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
mem_idx = cpu_mmu_index(env, false); mem_idx = cpu_mmu_index(env, false);
oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
v = helper_atomic_ldo_be_mmu(env, addr, oi, ra); v = cpu_atomic_ldo_be_mmu(env, addr, oi, ra);
hi = int128_gethi(v); hi = int128_gethi(v);
lo = int128_getlo(v); lo = int128_getlo(v);
@ -2518,7 +2513,7 @@ void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
mem_idx = cpu_mmu_index(env, false); mem_idx = cpu_mmu_index(env, false);
oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
v = int128_make128(low, high); v = int128_make128(low, high);
helper_atomic_sto_be_mmu(env, addr, v, oi, ra); cpu_atomic_sto_be_mmu(env, addr, v, oi, ra);
} }
/* Execute instruction. This instruction executes an insn modified with /* Execute instruction. This instruction executes an insn modified with

View File

@ -6552,29 +6552,6 @@ static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{ {
} }
static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
const CPUBreakpoint *bp)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
/*
* Emit an insn_start to accompany the breakpoint exception.
* The ILEN value is a dummy, since this does not result in
* an s390x exception, but an internal qemu exception which
* brings us back to interact with the gdbstub.
*/
tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);
dc->base.is_jmp = DISAS_PC_STALE;
dc->do_debug = true;
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order to for it to be
properly cleared -- thus we increment the PC here so that
the logic setting tb->size does the right thing. */
dc->base.pc_next += 2;
return true;
}
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{ {
CPUS390XState *env = cs->env_ptr; CPUS390XState *env = cs->env_ptr;
@ -6642,7 +6619,6 @@ static const TranslatorOps s390x_tr_ops = {
.init_disas_context = s390x_tr_init_disas_context, .init_disas_context = s390x_tr_init_disas_context,
.tb_start = s390x_tr_tb_start, .tb_start = s390x_tr_tb_start,
.insn_start = s390x_tr_insn_start, .insn_start = s390x_tr_insn_start,
.breakpoint_check = s390x_tr_breakpoint_check,
.translate_insn = s390x_tr_translate_insn, .translate_insn = s390x_tr_translate_insn,
.tb_stop = s390x_tr_tb_stop, .tb_stop = s390x_tr_tb_stop,
.disas_log = s390x_tr_disas_log, .disas_log = s390x_tr_disas_log,

View File

@ -2289,23 +2289,6 @@ static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags); tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
} }
static bool sh4_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
const CPUBreakpoint *bp)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
/* We have hit a breakpoint - make sure PC is up-to-date */
gen_save_cpu_state(ctx, true);
gen_helper_debug(cpu_env);
ctx->base.is_jmp = DISAS_NORETURN;
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order to for it to be
properly cleared -- thus we increment the PC here so that
the logic setting tb->size below does the right thing. */
ctx->base.pc_next += 2;
return true;
}
static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{ {
CPUSH4State *env = cs->env_ptr; CPUSH4State *env = cs->env_ptr;
@@ -2369,7 +2352,6 @@ static const TranslatorOps sh4_tr_ops = {
     .init_disas_context = sh4_tr_init_disas_context,
     .tb_start = sh4_tr_tb_start,
     .insn_start = sh4_tr_insn_start,
-    .breakpoint_check = sh4_tr_breakpoint_check,
     .translate_insn = sh4_tr_translate_insn,
     .tb_stop = sh4_tr_tb_stop,
     .disas_log = sh4_tr_disas_log,


@@ -5854,22 +5854,6 @@ static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
     }
 }
 
-static bool sparc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
-                                      const CPUBreakpoint *bp)
-{
-    DisasContext *dc = container_of(dcbase, DisasContext, base);
-
-    if (dc->pc != dc->base.pc_first) {
-        save_state(dc);
-    }
-    gen_helper_debug(cpu_env);
-    tcg_gen_exit_tb(NULL, 0);
-    dc->base.is_jmp = DISAS_NORETURN;
-    /* update pc_next so that the current instruction is included in tb->size */
-    dc->base.pc_next += 4;
-    return true;
-}
-
 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
@@ -5932,7 +5916,6 @@ static const TranslatorOps sparc_tr_ops = {
     .init_disas_context = sparc_tr_init_disas_context,
     .tb_start = sparc_tr_tb_start,
     .insn_start = sparc_tr_insn_start,
-    .breakpoint_check = sparc_tr_breakpoint_check,
     .translate_insn = sparc_tr_translate_insn,
     .tb_stop = sparc_tr_tb_stop,
     .disas_log = sparc_tr_disas_log,


@@ -8810,21 +8810,6 @@ static void tricore_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
     tcg_gen_insn_start(ctx->base.pc_next);
 }
 
-static bool tricore_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
-                                        const CPUBreakpoint *bp)
-{
-    DisasContext *ctx = container_of(dcbase, DisasContext, base);
-    generate_qemu_excp(ctx, EXCP_DEBUG);
-    /*
-     * The address covered by the breakpoint must be included in
-     * [tb->pc, tb->pc + tb->size) in order to for it to be
-     * properly cleared -- thus we increment the PC here so that
-     * the logic setting tb->size below does the right thing.
-     */
-    ctx->base.pc_next += 4;
-    return true;
-}
-
 static bool insn_crosses_page(CPUTriCoreState *env, DisasContext *ctx)
 {
     /*
@@ -8898,7 +8883,6 @@ static const TranslatorOps tricore_tr_ops = {
     .init_disas_context = tricore_tr_init_disas_context,
     .tb_start = tricore_tr_tb_start,
     .insn_start = tricore_tr_insn_start,
-    .breakpoint_check = tricore_tr_breakpoint_check,
     .translate_insn = tricore_tr_translate_insn,
     .tb_stop = tricore_tr_tb_stop,
     .disas_log = tricore_tr_disas_log,


@@ -1232,22 +1232,6 @@ static void xtensa_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
     tcg_gen_insn_start(dcbase->pc_next);
 }
 
-static bool xtensa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
-                                       const CPUBreakpoint *bp)
-{
-    DisasContext *dc = container_of(dcbase, DisasContext, base);
-
-    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
-    gen_exception(dc, EXCP_DEBUG);
-    dc->base.is_jmp = DISAS_NORETURN;
-    /* The address covered by the breakpoint must be included in
-       [tb->pc, tb->pc + tb->size) in order to for it to be
-       properly cleared -- thus we increment the PC here so that
-       the logic setting tb->size below does the right thing. */
-    dc->base.pc_next += 2;
-    return true;
-}
-
 static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
@@ -1330,7 +1314,6 @@ static const TranslatorOps xtensa_translator_ops = {
    .init_disas_context = xtensa_tr_init_disas_context,
    .tb_start = xtensa_tr_tb_start,
    .insn_start = xtensa_tr_insn_start,
-   .breakpoint_check = xtensa_tr_breakpoint_check,
    .translate_insn = xtensa_tr_translate_insn,
    .tb_stop = xtensa_tr_tb_stop,
    .disas_log = xtensa_tr_disas_log,


@@ -2723,10 +2723,6 @@ void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx)
            seen this numbered exit before, via tcg_gen_goto_tb. */
         tcg_debug_assert(tcg_ctx->goto_tb_issue_mask & (1 << idx));
 #endif
-        /* When not chaining, exit without indicating a link. */
-        if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
-            val = 0;
-        }
     } else {
         /* This is an exit via the exitreq label. */
         tcg_debug_assert(idx == TB_EXIT_REQUESTED);
@@ -2738,6 +2734,8 @@ void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx)
 void tcg_gen_goto_tb(unsigned idx)
 {
+    /* We tested CF_NO_GOTO_TB in translator_use_goto_tb. */
+    tcg_debug_assert(!(tcg_ctx->tb_cflags & CF_NO_GOTO_TB));
     /* We only support two chained exits. */
     tcg_debug_assert(idx <= TB_EXIT_IDXMAX);
 #ifdef CONFIG_DEBUG_TCG
@@ -2746,25 +2744,23 @@ void tcg_gen_goto_tb(unsigned idx)
     tcg_ctx->goto_tb_issue_mask |= 1 << idx;
 #endif
     plugin_gen_disable_mem_helpers();
-    /* When not chaining, we simply fall through to the "fallback" exit. */
-    if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
-        tcg_gen_op1i(INDEX_op_goto_tb, idx);
-    }
+    tcg_gen_op1i(INDEX_op_goto_tb, idx);
 }
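
The new assertion in tcg_gen_goto_tb() holds because front ends are expected to ask translator_use_goto_tb() before emitting a direct TB link, and that helper refuses when CF_NO_GOTO_TB is set. A hedged sketch of that check (the shape introduced by this series; the exact QEMU code may differ in detail):

/* Sketch of translator_use_goto_tb(): refuse direct TB chaining when the
 * translation was requested with CF_NO_GOTO_TB, otherwise allow it only
 * for destinations on the same guest page as the start of the TB. */
bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
{
    if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
        return false;
    }
    return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
}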
 void tcg_gen_lookup_and_goto_ptr(void)
 {
-    if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
-        TCGv_ptr ptr;
+    TCGv_ptr ptr;
 
-        plugin_gen_disable_mem_helpers();
-        ptr = tcg_temp_new_ptr();
-        gen_helper_lookup_tb_ptr(ptr, cpu_env);
-        tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
-        tcg_temp_free_ptr(ptr);
-    } else {
+    if (tcg_ctx->tb_cflags & CF_NO_GOTO_PTR) {
         tcg_gen_exit_tb(NULL, 0);
+        return;
     }
+
+    plugin_gen_disable_mem_helpers();
+    ptr = tcg_temp_new_ptr();
+    gen_helper_lookup_tb_ptr(ptr, cpu_env);
+    tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
+    tcg_temp_free_ptr(ptr);
 }
 
 static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
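
With the -d nochain test gone from code generation, the decision to suppress chaining moves to where cflags are chosen for a translation: -singlestep disables both goto_tb and goto_ptr, while -d nochain only disables the direct goto_tb links. A hedged sketch of curr_cflags() after this series, not the verbatim source:

/* Sketch: per-translation cflags.  -singlestep disables both chaining
 * mechanisms and limits the TB to one insn; -d nochain only disables
 * direct goto_tb links, keeping the indirect goto_ptr lookup. */
uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    if (singlestep) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }
    return cflags;
}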
@@ -3084,7 +3080,6 @@ static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
     }
 }
 
-#ifdef CONFIG_SOFTMMU
 typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv,
                                   TCGv_i32, TCGv_i32, TCGv_i32);
 typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv,
@@ -3093,12 +3088,6 @@ typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv,
                                   TCGv_i32, TCGv_i32);
 typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv,
                                   TCGv_i64, TCGv_i32);
-#else
-typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32);
-typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i64);
-typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32);
-typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64);
-#endif
 
 #ifdef CONFIG_ATOMIC64
 # define WITH_ATOMIC64(X) X,
@@ -3140,18 +3129,13 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
         tcg_temp_free_i32(t1);
     } else {
         gen_atomic_cx_i32 gen;
+        TCGMemOpIdx oi;
 
         gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
         tcg_debug_assert(gen != NULL);
 
-#ifdef CONFIG_SOFTMMU
-        {
-            TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
-            gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
-        }
-#else
-        gen(retv, cpu_env, addr, cmpv, newv);
-#endif
+        oi = make_memop_idx(memop & ~MO_SIGN, idx);
+        gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
 
         if (memop & MO_SIGN) {
             tcg_gen_ext_i32(retv, retv, memop);
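
The CONFIG_SOFTMMU split disappears because the generated helper call now always carries a TCGMemOpIdx, which packs the MemOp and the mmu index into a single 32-bit immediate. A standalone illustration of that packing, mirroring the shape of QEMU's make_memop_idx/get_memop/get_mmuidx with simplified stand-in types and values:

#include <assert.h>
#include <stdint.h>

/* Simplified stand-ins: the MemOp sits in the upper bits and the
 * mmu index in the low 4 bits of the combined index. */
typedef uint32_t TCGMemOpIdx;

static TCGMemOpIdx make_memop_idx(unsigned op, unsigned idx)
{
    assert(idx <= 15);
    return (op << 4) | idx;
}

static unsigned get_memop(TCGMemOpIdx oi)  { return oi >> 4; }
static unsigned get_mmuidx(TCGMemOpIdx oi) { return oi & 15; }

int main(void)
{
    TCGMemOpIdx oi = make_memop_idx(/* illustrative MemOp */ 0x2a, /* mmu_idx */ 3);

    assert(get_memop(oi) == 0x2a);
    assert(get_mmuidx(oi) == 3);
    return 0;
}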
@@ -3184,18 +3168,13 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
     } else if ((memop & MO_SIZE) == MO_64) {
 #ifdef CONFIG_ATOMIC64
         gen_atomic_cx_i64 gen;
+        TCGMemOpIdx oi;
 
         gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
         tcg_debug_assert(gen != NULL);
 
-#ifdef CONFIG_SOFTMMU
-        {
-            TCGMemOpIdx oi = make_memop_idx(memop, idx);
-            gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
-        }
-#else
-        gen(retv, cpu_env, addr, cmpv, newv);
-#endif
+        oi = make_memop_idx(memop, idx);
+        gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
 #else
         gen_helper_exit_atomic(cpu_env);
         /* Produce a result, so that we have a well-formed opcode stream
@@ -3245,20 +3224,15 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
                             TCGArg idx, MemOp memop, void * const table[])
 {
     gen_atomic_op_i32 gen;
+    TCGMemOpIdx oi;
 
     memop = tcg_canonicalize_memop(memop, 0, 0);
 
     gen = table[memop & (MO_SIZE | MO_BSWAP)];
     tcg_debug_assert(gen != NULL);
 
-#ifdef CONFIG_SOFTMMU
-    {
-        TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
-        gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
-    }
-#else
-    gen(ret, cpu_env, addr, val);
-#endif
+    oi = make_memop_idx(memop & ~MO_SIGN, idx);
+    gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
 
     if (memop & MO_SIGN) {
         tcg_gen_ext_i32(ret, ret, memop);
@@ -3292,18 +3266,13 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
     if ((memop & MO_SIZE) == MO_64) {
 #ifdef CONFIG_ATOMIC64
         gen_atomic_op_i64 gen;
+        TCGMemOpIdx oi;
 
         gen = table[memop & (MO_SIZE | MO_BSWAP)];
         tcg_debug_assert(gen != NULL);
 
-#ifdef CONFIG_SOFTMMU
-        {
-            TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
-            gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
-        }
-#else
-        gen(ret, cpu_env, addr, val);
-#endif
+        oi = make_memop_idx(memop & ~MO_SIGN, idx);
+        gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
 #else
         gen_helper_exit_atomic(cpu_env);
         /* Produce a result, so that we have a well-formed opcode stream


@@ -1,50 +0,0 @@
/*
* Helper functions for guest memory tracing
*
* Copyright (C) 2016 Lluís Vilanova <vilanova@ac.upc.edu>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef TRACE__MEM_INTERNAL_H
#define TRACE__MEM_INTERNAL_H
#define TRACE_MEM_SZ_SHIFT_MASK 0xf /* size shift mask */
#define TRACE_MEM_SE (1ULL << 4) /* sign extended (y/n) */
#define TRACE_MEM_BE (1ULL << 5) /* big endian (y/n) */
#define TRACE_MEM_ST (1ULL << 6) /* store (y/n) */
#define TRACE_MEM_MMU_SHIFT 8 /* mmu idx */
static inline uint16_t trace_mem_build_info(
int size_shift, bool sign_extend, MemOp endianness,
bool store, unsigned int mmu_idx)
{
uint16_t res;
res = size_shift & TRACE_MEM_SZ_SHIFT_MASK;
if (sign_extend) {
res |= TRACE_MEM_SE;
}
if (endianness == MO_BE) {
res |= TRACE_MEM_BE;
}
if (store) {
res |= TRACE_MEM_ST;
}
#ifdef CONFIG_SOFTMMU
res |= mmu_idx << TRACE_MEM_MMU_SHIFT;
#endif
return res;
}
static inline uint16_t trace_mem_get_info(MemOp op,
unsigned int mmu_idx,
bool store)
{
return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN),
op & MO_BSWAP, store,
mmu_idx);
}
#endif /* TRACE__MEM_INTERNAL_H */


@@ -12,24 +12,52 @@
 
 #include "tcg/tcg.h"
 
-/**
- * trace_mem_get_info:
- *
- * Return a value for the 'info' argument in guest memory access traces.
- */
-static uint16_t trace_mem_get_info(MemOp op, unsigned int mmu_idx, bool store);
+#define TRACE_MEM_SZ_SHIFT_MASK 0xf /* size shift mask */
+#define TRACE_MEM_SE (1ULL << 4)    /* sign extended (y/n) */
+#define TRACE_MEM_BE (1ULL << 5)    /* big endian (y/n) */
+#define TRACE_MEM_ST (1ULL << 6)    /* store (y/n) */
+#define TRACE_MEM_MMU_SHIFT 8       /* mmu idx */
 
 /**
  * trace_mem_build_info:
  *
  * Return a value for the 'info' argument in guest memory access traces.
  */
-static uint16_t trace_mem_build_info(int size_shift, bool sign_extend,
-                                     MemOp endianness, bool store,
-                                     unsigned int mmuidx);
+static inline uint16_t trace_mem_build_info(int size_shift, bool sign_extend,
+                                            MemOp endianness, bool store,
+                                            unsigned int mmu_idx)
+{
+    uint16_t res;
+
+    res = size_shift & TRACE_MEM_SZ_SHIFT_MASK;
+    if (sign_extend) {
+        res |= TRACE_MEM_SE;
+    }
+    if (endianness == MO_BE) {
+        res |= TRACE_MEM_BE;
+    }
+    if (store) {
+        res |= TRACE_MEM_ST;
+    }
+#ifdef CONFIG_SOFTMMU
+    res |= mmu_idx << TRACE_MEM_MMU_SHIFT;
+#endif
+    return res;
+}
 
-#include "trace/mem-internal.h"
+/**
+ * trace_mem_get_info:
+ *
+ * Return a value for the 'info' argument in guest memory access traces.
+ */
+static inline uint16_t trace_mem_get_info(MemOp op,
+                                          unsigned int mmu_idx,
+                                          bool store)
+{
+    return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN),
+                                op & MO_BSWAP, store,
+                                mmu_idx);
+}
 
 #endif /* TRACE__MEM_H */
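
With trace_mem_build_info() now a visible inline in mem.h, its bit layout is easy to exercise directly. Below is a small standalone reimplementation of the same packing for illustration (the softmmu branch is assumed, so the mmu index is always folded in; MemOp is replaced by a plain bool for endianness):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define TRACE_MEM_SZ_SHIFT_MASK 0xf         /* size shift mask */
#define TRACE_MEM_SE            (1ULL << 4) /* sign extended (y/n) */
#define TRACE_MEM_BE            (1ULL << 5) /* big endian (y/n) */
#define TRACE_MEM_ST            (1ULL << 6) /* store (y/n) */
#define TRACE_MEM_MMU_SHIFT     8           /* mmu idx */

static uint16_t build_info(int size_shift, bool sign_extend, bool big_endian,
                           bool store, unsigned int mmu_idx)
{
    uint16_t res = size_shift & TRACE_MEM_SZ_SHIFT_MASK;

    if (sign_extend) {
        res |= TRACE_MEM_SE;
    }
    if (big_endian) {
        res |= TRACE_MEM_BE;
    }
    if (store) {
        res |= TRACE_MEM_ST;
    }
    res |= mmu_idx << TRACE_MEM_MMU_SHIFT;  /* softmmu-only in QEMU */
    return res;
}

int main(void)
{
    /* 4-byte (size shift 2) big-endian store at mmu index 1. */
    uint16_t info = build_info(2, false, true, true, 1);

    assert(info == (2 | TRACE_MEM_BE | TRACE_MEM_ST | (1 << TRACE_MEM_MMU_SHIFT)));
    return 0;
}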


@@ -83,8 +83,8 @@ typedef struct QSPCallSite QSPCallSite;
 struct QSPEntry {
     void *thread_ptr;
     const QSPCallSite *callsite;
-    uint64_t n_acqs;
-    uint64_t ns;
+    aligned_uint64_t n_acqs;
+    aligned_uint64_t ns;
     unsigned int n_objs; /* count of coalesced objs; only used for reporting */
 };
 typedef struct QSPEntry QSPEntry;
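
The switch from uint64_t to aligned_uint64_t ties into the clang-12 atomic build fixes in this pull: 64-bit fields updated with atomic accesses must be 8-byte aligned even on 32-bit hosts, where a plain uint64_t struct member may only get 4-byte alignment. A hedged sketch of such a typedef and its effect; QEMU's actual definition may differ in detail:

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

/* Force 8-byte alignment so lock-free 64-bit atomics stay legal on
 * 32-bit hosts too; sketch of what aligned_uint64_t provides. */
typedef uint64_t aligned_uint64_t __attribute__((aligned(8)));

struct counters {
    void *p;                 /* on an ILP32 host this ends at offset 4 ...    */
    aligned_uint64_t n_acqs; /* ... but this member still starts at offset 8. */
    aligned_uint64_t ns;
};

int main(void)
{
    printf("alignof(uint64_t)         = %zu\n", alignof(uint64_t));
    printf("alignof(aligned_uint64_t) = %zu\n", alignof(aligned_uint64_t));
    return 0;
}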