
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20211013' into staging

Use MO_128 for 16-byte atomic memory operations.
Add cpu_ld/st_mmu memory primitives.
Move helper_ld/st memory helpers out of tcg.h.
Canonicalize alignment flags in MemOp.

# gpg: Signature made Wed 13 Oct 2021 10:48:45 AM PDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* remotes/rth/tags/pull-tcg-20211013:
  tcg: Canonicalize alignment flags in MemOp
  tcg: Move helper_*_mmu decls to tcg/tcg-ldst.h
  target/arm: Use cpu_*_mmu instead of helper_*_mmu
  target/sparc: Use cpu_*_mmu instead of helper_*_mmu
  target/s390x: Use cpu_*_mmu instead of helper_*_mmu
  target/mips: Use 8-byte memory ops for msa load/store
  target/mips: Use cpu_*_data_ra for msa load/store
  accel/tcg: Move cpu_atomic decls to exec/cpu_ldst.h
  accel/tcg: Add cpu_{ld,st}*_mmu interfaces
  target/hexagon: Implement cpu_mmu_index
  target/s390x: Use MO_128 for 16 byte atomics
  target/ppc: Use MO_128 for 16 byte atomics
  target/i386: Use MO_128 for 16 byte atomics
  target/arm: Use MO_128 for 16 byte atomics
  memory: Log access direction for invalid accesses

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit e5b2333f24
Richard Henderson <richard.henderson@linaro.org>, 2021-10-13 11:43:29 -07:00
21 changed files with 1032 additions and 1206 deletions

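Before the per-file diffs, a quick orientation on the new interface: the cpu_{ld,st}*_mmu primitives added by this pull take a MemOpIdx (a MemOp, optionally including alignment bits, combined with an mmu index) plus a return address, instead of a bare mmu index. A minimal sketch of a caller, using only functions that appear in this merge; the helper name demo_ldq_aligned is hypothetical:

    #include "exec/cpu_ldst.h"

    /* Hypothetical target helper: a 64-bit big-endian load whose
     * alignment requirement is enforced by the cpu_*_mmu path. */
    static uint64_t demo_ldq_aligned(CPUArchState *env, target_ulong addr,
                                     uintptr_t retaddr)
    {
        MemOpIdx oi = make_memop_idx(MO_BEQ | MO_ALIGN,
                                     cpu_mmu_index(env, false));
        return cpu_ldq_be_mmu(env, addr, oi, retaddr);
    }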
@@ -39,6 +39,7 @@
 #ifdef CONFIG_PLUGIN
 #include "qemu/plugin-memory.h"
 #endif
+#include "tcg/tcg-ldst.h"

 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
 /* #define DEBUG_TLB */
@@ -1839,6 +1840,25 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     cpu_loop_exit_atomic(env_cpu(env), retaddr);
 }

+/*
+ * Verify that we have passed the correct MemOp to the correct function.
+ *
+ * In the case of the helper_*_mmu functions, we will have done this by
+ * using the MemOp to look up the helper during code generation.
+ *
+ * In the case of the cpu_*_mmu functions, this is up to the caller.
+ * We could present one function to target code, and dispatch based on
+ * the MemOp, but so far we have worked hard to avoid an indirect function
+ * call along the memory path.
+ */
+static void validate_memop(MemOpIdx oi, MemOp expected)
+{
+#ifdef CONFIG_DEBUG_TCG
+    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
+    assert(have == expected);
+#endif
+}
+
 /*
  * Load Helpers
  *
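Concretely, the check means the MemOpIdx handed to, say, the little-endian 32-bit entry point must carry MO_LEUL in its size/byte-swap bits. A small illustration of the invariant (not from the patch; demo_load is hypothetical):

    static uint32_t demo_load(CPUArchState *env, target_ulong addr,
                              int mmu_idx, uintptr_t ra)
    {
        MemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);

        /* Matching pairing: validate_memop(oi, MO_LEUL) passes. */
        return cpu_ldl_le_mmu(env, addr, oi, ra);

        /* Handing the same oi to cpu_ldl_be_mmu() would trip the
         * assert under CONFIG_DEBUG_TCG: the MO_BSWAP bits differ. */
    }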
@@ -1992,6 +2012,7 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
 static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
 {
+    validate_memop(oi, MO_UB);
     return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
 }
@@ -2004,6 +2025,7 @@ tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
 static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                  MemOpIdx oi, uintptr_t retaddr)
 {
+    validate_memop(oi, MO_LEUW);
     return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
                        full_le_lduw_mmu);
 }
@@ -2017,6 +2039,7 @@ tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
 static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                  MemOpIdx oi, uintptr_t retaddr)
 {
+    validate_memop(oi, MO_BEUW);
     return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
                        full_be_lduw_mmu);
 }
@@ -2030,6 +2053,7 @@ tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
 static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                  MemOpIdx oi, uintptr_t retaddr)
 {
+    validate_memop(oi, MO_LEUL);
     return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
                        full_le_ldul_mmu);
 }
@@ -2043,6 +2067,7 @@ tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
 static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                  MemOpIdx oi, uintptr_t retaddr)
 {
+    validate_memop(oi, MO_BEUL);
     return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
                        full_be_ldul_mmu);
 }
@@ -2056,6 +2081,7 @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t retaddr)
 {
+    validate_memop(oi, MO_LEQ);
     return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
                        helper_le_ldq_mmu);
 }
@@ -2063,6 +2089,7 @@ uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t retaddr)
 {
+    validate_memop(oi, MO_BEQ);
     return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
                        helper_be_ldq_mmu);
 }
@@ -2108,186 +2135,56 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
  */

 static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
-                                       int mmu_idx, uintptr_t retaddr,
-                                       MemOp op, FullLoadHelper *full_load)
+                                       MemOpIdx oi, uintptr_t retaddr,
+                                       FullLoadHelper *full_load)
 {
-    MemOpIdx oi = make_memop_idx(op, mmu_idx);
     uint64_t ret;

     trace_guest_ld_before_exec(env_cpu(env), addr, oi);
     ret = full_load(env, addr, oi, retaddr);
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
     return ret;
 }

-uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
-                            int mmu_idx, uintptr_t ra)
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
 {
-    return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
+    return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
 }

-int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
-                       int mmu_idx, uintptr_t ra)
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
+                        MemOpIdx oi, uintptr_t ra)
 {
-    return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
+    return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
 }

-uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
-                               int mmu_idx, uintptr_t ra)
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
+                        MemOpIdx oi, uintptr_t ra)
 {
-    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu);
+    return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
 }

-int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
-                          int mmu_idx, uintptr_t ra)
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
+                        MemOpIdx oi, uintptr_t ra)
 {
-    return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
+    return cpu_load_helper(env, addr, oi, MO_BEQ, helper_be_ldq_mmu);
 }

-uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
-                              int mmu_idx, uintptr_t ra)
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
+                        MemOpIdx oi, uintptr_t ra)
 {
-    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu);
+    return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
 }

-uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
-                              int mmu_idx, uintptr_t ra)
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
+                        MemOpIdx oi, uintptr_t ra)
 {
-    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu);
+    return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
 }

-uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
-                               int mmu_idx, uintptr_t ra)
+uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
+                        MemOpIdx oi, uintptr_t ra)
 {
-    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu);
+    return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
 }
-
-int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
-                          int mmu_idx, uintptr_t ra)
-{
-    return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
-}
-
-uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
-                              int mmu_idx, uintptr_t ra)
-{
-    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu);
-}
-
-uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
-                              int mmu_idx, uintptr_t ra)
-{
-    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu);
-}
-
-uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
-                          uintptr_t retaddr)
-{
-    return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
-{
-    return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr,
-                             uintptr_t retaddr)
-{
-    return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
-{
-    return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr,
-                            uintptr_t retaddr)
-{
-    return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr,
-                            uintptr_t retaddr)
-{
-    return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr,
-                             uintptr_t retaddr)
-{
-    return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
-{
-    return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr,
-                            uintptr_t retaddr)
-{
-    return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr,
-                            uintptr_t retaddr)
-{
-    return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
-{
-    return cpu_ldub_data_ra(env, ptr, 0);
-}
-
-int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
-{
-    return cpu_ldsb_data_ra(env, ptr, 0);
-}
-
-uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr)
-{
-    return cpu_lduw_be_data_ra(env, ptr, 0);
-}
-
-int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr)
-{
-    return cpu_ldsw_be_data_ra(env, ptr, 0);
-}
-
-uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr)
-{
-    return cpu_ldl_be_data_ra(env, ptr, 0);
-}
-
-uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr)
-{
-    return cpu_ldq_be_data_ra(env, ptr, 0);
-}
-
-uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr)
-{
-    return cpu_lduw_le_data_ra(env, ptr, 0);
-}
-
-int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr)
-{
-    return cpu_ldsw_le_data_ra(env, ptr, 0);
-}
-
-uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr)
-{
-    return cpu_ldl_le_data_ra(env, ptr, 0);
-}
-
-uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr)
-{
-    return cpu_ldq_le_data_ra(env, ptr, 0);
-}

 /*
@@ -2324,6 +2221,9 @@ store_memop(void *haddr, uint64_t val, MemOp op)
     }
 }

+static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                         MemOpIdx oi, uintptr_t retaddr);
+
 static void __attribute__((noinline))
 store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
                        uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
@@ -2387,13 +2287,13 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
         for (i = 0; i < size; ++i) {
             /* Big-endian extract.  */
             uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
-            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
+            full_stb_mmu(env, addr + i, val8, oi, retaddr);
         }
     } else {
         for (i = 0; i < size; ++i) {
             /* Little-endian extract.  */
             uint8_t val8 = val >> (i * 8);
-            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
+            full_stb_mmu(env, addr + i, val8, oi, retaddr);
         }
     }
 }
@@ -2496,46 +2396,83 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
     store_memop(haddr, val, op);
 }

-void __attribute__((noinline))
-helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
-                   MemOpIdx oi, uintptr_t retaddr)
+static void __attribute__((noinline))
+full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+             MemOpIdx oi, uintptr_t retaddr)
 {
+    validate_memop(oi, MO_UB);
     store_helper(env, addr, val, oi, retaddr, MO_UB);
 }

+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+                        MemOpIdx oi, uintptr_t retaddr)
+{
+    full_stb_mmu(env, addr, val, oi, retaddr);
+}
+
+static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                            MemOpIdx oi, uintptr_t retaddr)
+{
+    validate_memop(oi, MO_LEUW);
+    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
+}
+
 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                        MemOpIdx oi, uintptr_t retaddr)
 {
-    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
+    full_le_stw_mmu(env, addr, val, oi, retaddr);
+}
+
+static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                            MemOpIdx oi, uintptr_t retaddr)
+{
+    validate_memop(oi, MO_BEUW);
+    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
 }

 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                        MemOpIdx oi, uintptr_t retaddr)
 {
-    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
+    full_be_stw_mmu(env, addr, val, oi, retaddr);
+}
+
+static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                            MemOpIdx oi, uintptr_t retaddr)
+{
+    validate_memop(oi, MO_LEUL);
+    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
 }

 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                        MemOpIdx oi, uintptr_t retaddr)
 {
-    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
+    full_le_stl_mmu(env, addr, val, oi, retaddr);
+}
+
+static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                            MemOpIdx oi, uintptr_t retaddr)
+{
+    validate_memop(oi, MO_BEUL);
+    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
 }

 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                        MemOpIdx oi, uintptr_t retaddr)
 {
-    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
+    full_be_stl_mmu(env, addr, val, oi, retaddr);
 }

 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                        MemOpIdx oi, uintptr_t retaddr)
 {
+    validate_memop(oi, MO_LEQ);
     store_helper(env, addr, val, oi, retaddr, MO_LEQ);
 }

 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                        MemOpIdx oi, uintptr_t retaddr)
 {
+    validate_memop(oi, MO_BEQ);
     store_helper(env, addr, val, oi, retaddr, MO_BEQ);
 }
@@ -2543,137 +2480,61 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
  * Store Helpers for cpu_ldst.h
  */

-static inline void QEMU_ALWAYS_INLINE
-cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
-                 int mmu_idx, uintptr_t retaddr, MemOp op)
+typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
+                             uint64_t val, MemOpIdx oi, uintptr_t retaddr);
+
+static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
+                                    uint64_t val, MemOpIdx oi, uintptr_t ra,
+                                    FullStoreHelper *full_store)
 {
-    MemOpIdx oi = make_memop_idx(op, mmu_idx);
-
     trace_guest_st_before_exec(env_cpu(env), addr, oi);
-
-    store_helper(env, addr, val, oi, retaddr, op);
-
+    full_store(env, addr, val, oi, ra);
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }

-void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
-                       int mmu_idx, uintptr_t retaddr)
+void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
 {
-    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
+    cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
 }

-void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
-                          int mmu_idx, uintptr_t retaddr)
+void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
 {
-    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW);
+    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
 }

-void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
-                          int mmu_idx, uintptr_t retaddr)
+void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
 {
-    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL);
+    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
 }

-void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
-                          int mmu_idx, uintptr_t retaddr)
+void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
 {
-    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ);
+    cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
 }

-void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
-                          int mmu_idx, uintptr_t retaddr)
+void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
 {
-    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW);
+    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
 }

-void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
-                          int mmu_idx, uintptr_t retaddr)
+void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
 {
-    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL);
+    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
 }

-void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
-                          int mmu_idx, uintptr_t retaddr)
+void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
 {
-    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ);
+    cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
 }

-void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
-                     uint32_t val, uintptr_t retaddr)
-{
-    cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr,
-                        uint32_t val, uintptr_t retaddr)
-{
-    cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr,
-                        uint32_t val, uintptr_t retaddr)
-{
-    cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr,
-                        uint64_t val, uintptr_t retaddr)
-{
-    cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr,
-                        uint32_t val, uintptr_t retaddr)
-{
-    cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr,
-                        uint32_t val, uintptr_t retaddr)
-{
-    cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr,
-                        uint64_t val, uintptr_t retaddr)
-{
-    cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
-{
-    cpu_stb_data_ra(env, ptr, val, 0);
-}
-
-void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
-{
-    cpu_stw_be_data_ra(env, ptr, val, 0);
-}
-
-void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
-{
-    cpu_stl_be_data_ra(env, ptr, val, 0);
-}
-
-void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val)
-{
-    cpu_stq_be_data_ra(env, ptr, val, 0);
-}
-
-void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
-{
-    cpu_stw_le_data_ra(env, ptr, val, 0);
-}
-
-void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
-{
-    cpu_stl_le_data_ra(env, ptr, val, 0);
-}
-
-void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
-{
-    cpu_stq_le_data_ra(env, ptr, val, 0);
-}
+#include "ldst_common.c.inc"

 /*
  * First set of functions passes in OI and RETADDR.

accel/tcg/ldst_common.c.inc (new file, 307 lines)

@@ -0,0 +1,307 @@
/*
* Routines common to user and system emulation of load/store.
*
* Copyright (c) 2003 Fabrice Bellard
*
* SPDX-License-Identifier: GPL-2.0-or-later
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
return cpu_ldb_mmu(env, addr, oi, ra);
}
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
}
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
return cpu_ldw_be_mmu(env, addr, oi, ra);
}
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
}
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
return cpu_ldl_be_mmu(env, addr, oi, ra);
}
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
return cpu_ldq_be_mmu(env, addr, oi, ra);
}
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
return cpu_ldw_le_mmu(env, addr, oi, ra);
}
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
}
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
return cpu_ldl_le_mmu(env, addr, oi, ra);
}
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
return cpu_ldq_le_mmu(env, addr, oi, ra);
}
void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
cpu_stb_mmu(env, addr, val, oi, ra);
}
void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
cpu_stw_be_mmu(env, addr, val, oi, ra);
}
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
cpu_stl_be_mmu(env, addr, val, oi, ra);
}
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
cpu_stq_be_mmu(env, addr, val, oi, ra);
}
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
cpu_stw_le_mmu(env, addr, val, oi, ra);
}
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
cpu_stl_le_mmu(env, addr, val, oi, ra);
}
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
cpu_stq_le_mmu(env, addr, val, oi, ra);
}
/*--------------------------*/
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldub_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return (int8_t)cpu_ldub_data_ra(env, addr, ra);
}
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return (int16_t)cpu_lduw_be_data_ra(env, addr, ra);
}
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return (int16_t)cpu_lduw_le_data_ra(env, addr, ra);
}
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr,
uint64_t val, uintptr_t ra)
{
cpu_stq_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, uintptr_t ra)
{
cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr,
uint64_t val, uintptr_t ra)
{
cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
}
/*--------------------------*/
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr)
{
return cpu_ldub_data_ra(env, addr, 0);
}
int cpu_ldsb_data(CPUArchState *env, abi_ptr addr)
{
return (int8_t)cpu_ldub_data(env, addr);
}
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr)
{
return cpu_lduw_be_data_ra(env, addr, 0);
}
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr)
{
return (int16_t)cpu_lduw_be_data(env, addr);
}
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr)
{
return cpu_ldl_be_data_ra(env, addr, 0);
}
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr)
{
return cpu_ldq_be_data_ra(env, addr, 0);
}
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr)
{
return cpu_lduw_le_data_ra(env, addr, 0);
}
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr)
{
return (int16_t)cpu_lduw_le_data(env, addr);
}
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr)
{
return cpu_ldl_le_data_ra(env, addr, 0);
}
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr)
{
return cpu_ldq_le_data_ra(env, addr, 0);
}
void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
cpu_stb_data_ra(env, addr, val, 0);
}
void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
cpu_stw_be_data_ra(env, addr, val, 0);
}
void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
cpu_stl_be_data_ra(env, addr, val, 0);
}
void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val)
{
cpu_stq_be_data_ra(env, addr, val, 0);
}
void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
cpu_stw_le_data_ra(env, addr, val, 0);
}
void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
{
cpu_stl_le_data_ra(env, addr, val, 0);
}
void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val)
{
cpu_stq_le_data_ra(env, addr, val, 0);
}

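The new ldst_common.c.inc above makes the wrapper layering explicit: the _data forms supply ra == 0, the _data_ra forms supply the default mmu index, and the _mmuidx_ra forms build an unaligned MemOpIdx and call the _mmu primitives. Tracing one load down that stack (a sketch; demo_trace is not in the tree):

    static uint32_t demo_trace(CPUArchState *env, abi_ptr addr)
    {
        /* cpu_ldl_be_data(env, addr) is equivalent to... */
        uint32_t a = cpu_ldl_be_data_ra(env, addr, 0);

        /* ...which is equivalent to... */
        uint32_t b = cpu_ldl_be_mmuidx_ra(env, addr,
                                          cpu_mmu_index(env, false), 0);

        /* ...which bottoms out in the full primitive: */
        MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN,
                                     cpu_mmu_index(env, false));
        uint32_t c = cpu_ldl_be_mmu(env, addr, oi, 0);

        return a & b & c;   /* three loads of the same address */
    }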
@@ -886,300 +886,227 @@ int cpu_signal_handler(int host_signum, void *pinfo,
 /* The softmmu versions of these helpers are in cputlb.c.  */

-uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
-{
-    MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
-    uint32_t ret;
-
-    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
-    ret = ldub_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
-{
-    return (int8_t)cpu_ldub_data(env, ptr);
-}
-
-uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
-{
-    MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
-    uint32_t ret;
-
-    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
-    ret = lduw_be_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
-{
-    return (int16_t)cpu_lduw_be_data(env, ptr);
-}
-
-uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
-{
-    MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
-    uint32_t ret;
-
-    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
-    ret = ldl_be_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
-{
-    MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
-    uint64_t ret;
-
-    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
-    ret = ldq_be_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
-{
-    MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
-    uint32_t ret;
-
-    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
-    ret = lduw_le_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
-{
-    return (int16_t)cpu_lduw_le_data(env, ptr);
-}
-
-uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
-{
-    MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
-    uint32_t ret;
-
-    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
-    ret = ldl_le_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
-{
-    MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
-    uint64_t ret;
-
-    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
-    ret = ldq_le_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
-{
-    uint32_t ret;
-
-    set_helper_retaddr(retaddr);
-    ret = cpu_ldub_data(env, ptr);
-    clear_helper_retaddr();
-    return ret;
-}
-
-int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
-{
-    return (int8_t)cpu_ldub_data_ra(env, ptr, retaddr);
-}
-
-uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
-{
-    uint32_t ret;
-
-    set_helper_retaddr(retaddr);
-    ret = cpu_lduw_be_data(env, ptr);
-    clear_helper_retaddr();
-    return ret;
-}
-
-int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
-{
-    return (int16_t)cpu_lduw_be_data_ra(env, ptr, retaddr);
-}
-
-uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
-{
-    uint32_t ret;
-
-    set_helper_retaddr(retaddr);
-    ret = cpu_ldl_be_data(env, ptr);
-    clear_helper_retaddr();
-    return ret;
-}
-
-uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
-{
-    uint64_t ret;
-
-    set_helper_retaddr(retaddr);
-    ret = cpu_ldq_be_data(env, ptr);
-    clear_helper_retaddr();
-    return ret;
-}
-
-uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
-{
-    uint32_t ret;
-
-    set_helper_retaddr(retaddr);
-    ret = cpu_lduw_le_data(env, ptr);
-    clear_helper_retaddr();
-    return ret;
-}
-
-int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
-{
-    return (int16_t)cpu_lduw_le_data_ra(env, ptr, retaddr);
-}
-
-uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
-{
-    uint32_t ret;
-
-    set_helper_retaddr(retaddr);
-    ret = cpu_ldl_le_data(env, ptr);
-    clear_helper_retaddr();
-    return ret;
-}
-
-uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
-{
-    uint64_t ret;
-
-    set_helper_retaddr(retaddr);
-    ret = cpu_ldq_le_data(env, ptr);
-    clear_helper_retaddr();
-    return ret;
-}
-
-void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
-{
-    MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
-
-    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
-    stb_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
-{
-    MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
-
-    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
-    stw_be_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
-{
-    MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
-
-    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
-    stl_be_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
-{
-    MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
-
-    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
-    stq_be_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
-{
-    MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
-
-    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
-    stw_le_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
-{
-    MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
-
-    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
-    stl_le_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
-{
-    MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
-
-    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
-    stq_le_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
-                     uint32_t val, uintptr_t retaddr)
-{
-    set_helper_retaddr(retaddr);
-    cpu_stb_data(env, ptr, val);
-    clear_helper_retaddr();
-}
-
-void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
-                        uint32_t val, uintptr_t retaddr)
-{
-    set_helper_retaddr(retaddr);
-    cpu_stw_be_data(env, ptr, val);
-    clear_helper_retaddr();
-}
-
-void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
-                        uint32_t val, uintptr_t retaddr)
-{
-    set_helper_retaddr(retaddr);
-    cpu_stl_be_data(env, ptr, val);
-    clear_helper_retaddr();
-}
-
-void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
-                        uint64_t val, uintptr_t retaddr)
-{
-    set_helper_retaddr(retaddr);
-    cpu_stq_be_data(env, ptr, val);
-    clear_helper_retaddr();
-}
-
-void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
-                        uint32_t val, uintptr_t retaddr)
-{
-    set_helper_retaddr(retaddr);
-    cpu_stw_le_data(env, ptr, val);
-    clear_helper_retaddr();
-}
-
-void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
-                        uint32_t val, uintptr_t retaddr)
-{
-    set_helper_retaddr(retaddr);
-    cpu_stl_le_data(env, ptr, val);
-    clear_helper_retaddr();
-}
-
-void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
-                        uint64_t val, uintptr_t retaddr)
-{
-    set_helper_retaddr(retaddr);
-    cpu_stq_le_data(env, ptr, val);
-    clear_helper_retaddr();
-}
+/*
+ * Verify that we have passed the correct MemOp to the correct function.
+ *
+ * We could present one function to target code, and dispatch based on
+ * the MemOp, but so far we have worked hard to avoid an indirect function
+ * call along the memory path.
+ */
+static void validate_memop(MemOpIdx oi, MemOp expected)
+{
+#ifdef CONFIG_DEBUG_TCG
+    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
+    assert(have == expected);
+#endif
+}
+
+static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
+                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
+{
+    void *ret;
+
+    /* TODO: Enforce guest required alignment.  */
+
+    ret = g2h(env_cpu(env), addr);
+    set_helper_retaddr(ra);
+    return ret;
+}
+
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
+                    MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint8_t ret;
+
+    validate_memop(oi, MO_UB);
+    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+    ret = ldub_p(haddr);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    return ret;
+}
+
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
+                        MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint16_t ret;
+
+    validate_memop(oi, MO_BEUW);
+    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+    ret = lduw_be_p(haddr);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    return ret;
+}
+
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
+                        MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint32_t ret;
+
+    validate_memop(oi, MO_BEUL);
+    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+    ret = ldl_be_p(haddr);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    return ret;
+}
+
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
+                        MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint64_t ret;
+
+    validate_memop(oi, MO_BEQ);
+    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+    ret = ldq_be_p(haddr);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    return ret;
+}
+
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
+                        MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint16_t ret;
+
+    validate_memop(oi, MO_LEUW);
+    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+    ret = lduw_le_p(haddr);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    return ret;
+}
+
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
+                        MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint32_t ret;
+
+    validate_memop(oi, MO_LEUL);
+    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+    ret = ldl_le_p(haddr);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    return ret;
+}
+
+uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
+                        MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint64_t ret;
+
+    validate_memop(oi, MO_LEQ);
+    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+    ret = ldq_le_p(haddr);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    return ret;
+}
+
+void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
+                 MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+
+    validate_memop(oi, MO_UB);
+    trace_guest_st_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+    stb_p(haddr, val);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+                    MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+
+    validate_memop(oi, MO_BEUW);
+    trace_guest_st_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+    stw_be_p(haddr, val);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+
+    validate_memop(oi, MO_BEUL);
+    trace_guest_st_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+    stl_be_p(haddr, val);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+                    MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+
+    validate_memop(oi, MO_BEQ);
+    trace_guest_st_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+    stq_be_p(haddr, val);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+                    MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+
+    validate_memop(oi, MO_LEUW);
+    trace_guest_st_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+    stw_le_p(haddr, val);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+
+    validate_memop(oi, MO_LEUL);
+    trace_guest_st_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+    stl_le_p(haddr, val);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+                    MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+
+    validate_memop(oi, MO_LEQ);
+    trace_guest_st_before_exec(env_cpu(env), addr, oi);
+    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+    stq_le_p(haddr, val);
+    clear_helper_retaddr();
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}

 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
@@ -1222,6 +1149,8 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
     return ret;
 }

+#include "ldst_common.c.inc"
+
 /*
  * Do not allow unaligned operations to proceed.  Return the host address.
  *

@@ -68,15 +68,19 @@ Regexes for git grep
 - ``\<ldn_\([hbl]e\)?_p\>``
 - ``\<stn_\([hbl]e\)?_p\>``

-``cpu_{ld,st}*_mmuidx_ra``
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+``cpu_{ld,st}*_mmu``
+~~~~~~~~~~~~~~~~~~~~

-These functions operate on a guest virtual address plus a context,
-known as a "mmu index" or ``mmuidx``, which controls how that virtual
-address is translated.  The meaning of the indexes are target specific,
-but specifying a particular index might be necessary if, for instance,
-the helper requires an "always as non-privileged" access rather that
-the default access for the current state of the guest CPU.
+These functions operate on a guest virtual address, plus a context
+known as a "mmu index" which controls how that virtual address is
+translated, plus a ``MemOp`` which contains alignment requirements
+among other things.  The ``MemOp`` and mmu index are combined into
+a single argument of type ``MemOpIdx``.
+
+The meaning of the indexes are target specific, but specifying a
+particular index might be necessary if, for instance, the helper
+requires a "always as non-privileged" access rather than the
+default access for the current state of the guest CPU.

 These functions may cause a guest CPU exception to be taken
 (e.g. for an alignment fault or MMU fault) which will result in
@@ -99,6 +103,35 @@ function, which is a return address into the generated code [#gpc]_.

 Function names follow the pattern:

+load: ``cpu_ld{size}{end}_mmu(env, ptr, oi, retaddr)``
+
+store: ``cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)``
+
+``size``
+ - ``b`` : 8 bits
+ - ``w`` : 16 bits
+ - ``l`` : 32 bits
+ - ``q`` : 64 bits
+
+``end``
+ - (empty) : for target endian, or 8 bit sizes
+ - ``_be`` : big endian
+ - ``_le`` : little endian
+
+Regexes for git grep:
+ - ``\<cpu_ld[bwlq](_[bl]e)\?_mmu\>``
+ - ``\<cpu_st[bwlq](_[bl]e)\?_mmu\>``
+
+
+``cpu_{ld,st}*_mmuidx_ra``
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+These functions work like the ``cpu_{ld,st}_mmu`` functions except
+that the ``mmuidx`` parameter is not combined with a ``MemOp``,
+and therefore there is no required alignment supplied or enforced.
+
+Function names follow the pattern:
+
 load: ``cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmuidx, retaddr)``

 store: ``cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmuidx, retaddr)``

@@ -132,7 +165,8 @@ of the guest CPU, as determined by ``cpu_mmu_index(env, false)``.

 These are generally the preferred way to do accesses by guest
 virtual address from helper functions, unless the access should
-be performed with a context other than the default.
+be performed with a context other than the default, or alignment
+should be enforced for the access.

 Function names follow the pattern:

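Putting the documented naming pattern together: a 16-bit little-endian store with enforced alignment through the new interface would be written as below (illustrative only; demo_store is not from the tree):

    static void demo_store(CPUArchState *env, target_ulong ptr,
                           uint16_t val, uintptr_t retaddr)
    {
        /* size = w (16 bits), end = _le; MO_ALIGN requests alignment. */
        MemOpIdx oi = make_memop_idx(MO_LEUW | MO_ALIGN,
                                     cpu_mmu_index(env, false));
        cpu_stw_le_mmu(env, ptr, val, oi, retaddr);
    }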
@@ -28,10 +28,12 @@
  * load:  cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
  *        cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
  *        cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
+ *        cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
  *
  * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
  *        cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
  *        cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
+ *        cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
  *
  * sign is:
  * (empty): for 32 and 64 bit sizes
@@ -53,10 +55,16 @@
  * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
  * the index to use; the "data" and "code" suffixes take the index from
  * cpu_mmu_index().
+ *
+ * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
+ * MemOp including alignment requirements.  The alignment will be enforced.
  */
 #ifndef CPU_LDST_H
 #define CPU_LDST_H

+#include "exec/memopidx.h"
+#include "qemu/int128.h"
+
 #if defined(CONFIG_USER_ONLY)
 /* sparc32plus has 64bit long but 32bit space address
  * this can make bad result with g2h() and h2g()
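As the new comment says, the "mmu" suffix carries the full MemOpIdx. A sketch of the encoding round trip, assuming the accessors declared in exec/memopidx.h (the constants chosen are arbitrary):

    #include <assert.h>
    #include "exec/memopidx.h"

    static void demo_memopidx(void)
    {
        MemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN, 1);

        /* Both halves are recoverable from the packed value. */
        assert(get_mmuidx(oi) == 1);
        assert((get_memop(oi) & (MO_SIZE | MO_BSWAP)) == MO_LEQ);
    }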
@@ -118,12 +126,10 @@ typedef target_ulong abi_ptr;
 uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
 int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
 uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
 int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
 uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
 uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
 uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
 int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
 uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
@@ -131,37 +137,31 @@ uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);
 uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
 int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
 uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
 int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
 uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
 uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
 uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
 int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
 uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
 uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

 void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
 void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
 void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
 void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
 void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
 void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
 void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

 void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                      uint32_t val, uintptr_t ra);
 void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                         uint32_t val, uintptr_t ra);
 void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                         uint32_t val, uintptr_t ra);
 void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                         uint64_t val, uintptr_t ra);
 void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                         uint32_t val, uintptr_t ra);
 void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
@@ -169,6 +169,157 @@ void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
 void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                         uint64_t val, uintptr_t ra);
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
int mmu_idx, uintptr_t ra);
void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
int mmu_idx, uintptr_t ra);
void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
int mmu_idx, uintptr_t ra);
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
int mmu_idx, uintptr_t ra);
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
int mmu_idx, uintptr_t ra);
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
int mmu_idx, uintptr_t ra);
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
int mmu_idx, uintptr_t ra);
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
int mmu_idx, uintptr_t ra);
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr ptr,
MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr ptr,
MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr ptr,
MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr ptr,
MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr ptr,
MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr ptr,
MemOpIdx oi, uintptr_t ra);
void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
MemOpIdx oi, uintptr_t ra);
void cpu_stw_be_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
MemOpIdx oi, uintptr_t ra);
void cpu_stl_be_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
MemOpIdx oi, uintptr_t ra);
void cpu_stq_be_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
MemOpIdx oi, uintptr_t ra);
void cpu_stw_le_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
MemOpIdx oi, uintptr_t ra);
void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
MemOpIdx oi, uintptr_t ra);
void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
MemOpIdx oi, uintptr_t ra);
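/*
 * Editor's sketch (not part of the patch): target helpers pair these
 * accessors with make_memop_idx().  For example, an aligned little-endian
 * 8-byte read-modify-write at the current mmu index could look like:
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN,
 *                                  cpu_mmu_index(env, false));
 *     uint64_t v = cpu_ldq_le_mmu(env, addr, oi, GETPC());
 *     cpu_stq_le_mmu(env, addr, v + 1, oi, GETPC());
 */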
uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
uint64_t cmpv, uint64_t newv,
MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
uint64_t cmpv, uint64_t newv,
MemOpIdx oi, uintptr_t retaddr);
#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
(CPUArchState *env, target_ulong addr, TYPE val, \
MemOpIdx oi, uintptr_t retaddr);
#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME) \
GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME) \
GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif
GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)
GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)
GEN_ATOMIC_HELPER_ALL(xchg)
#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
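/*
 * Editor's note: each GEN_ATOMIC_HELPER_ALL(NAME) above fans out to one
 * declaration per size/endianness.  For instance
 * GEN_ATOMIC_HELPER(fetch_add, uint32_t, l_le) expands to:
 *
 *     uint32_t cpu_atomic_fetch_addl_le_mmu(CPUArchState *env,
 *                                           target_ulong addr, uint32_t val,
 *                                           MemOpIdx oi, uintptr_t retaddr);
 */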
Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv,
MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv,
MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
MemOpIdx oi, uintptr_t retaddr);
void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
MemOpIdx oi, uintptr_t retaddr);
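/*
 * Editor's sketch (hypothetical locals): a 16-byte compare-and-swap
 * through the declarations above, in the shape the i386 and s390x hunks
 * below use it:
 *
 *     MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
 *     Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
 *     bool success = int128_eq(oldv, cmpv);
 */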
#if defined(CONFIG_USER_ONLY)
extern __thread uintptr_t helper_retaddr;

@@ -193,119 +344,6 @@ static inline void clear_helper_retaddr(void)
helper_retaddr = 0;
}
/*
* Provide the same *_mmuidx_ra interface as for softmmu.
* The mmu_idx argument is ignored.
*/
static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_ldub_data_ra(env, addr, ra);
}
static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_ldsb_data_ra(env, addr, ra);
}
static inline uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_lduw_be_data_ra(env, addr, ra);
}
static inline int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_ldsw_be_data_ra(env, addr, ra);
}
static inline uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_ldl_be_data_ra(env, addr, ra);
}
static inline uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_ldq_be_data_ra(env, addr, ra);
}
static inline uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_lduw_le_data_ra(env, addr, ra);
}
static inline int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_ldsw_le_data_ra(env, addr, ra);
}
static inline uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_ldl_le_data_ra(env, addr, ra);
}
static inline uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_ldq_le_data_ra(env, addr, ra);
}
static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, int mmu_idx, uintptr_t ra)
{
cpu_stb_data_ra(env, addr, val, ra);
}
static inline void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, int mmu_idx,
uintptr_t ra)
{
cpu_stw_be_data_ra(env, addr, val, ra);
}
static inline void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, int mmu_idx,
uintptr_t ra)
{
cpu_stl_be_data_ra(env, addr, val, ra);
}
static inline void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uint64_t val, int mmu_idx,
uintptr_t ra)
{
cpu_stq_be_data_ra(env, addr, val, ra);
}
static inline void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, int mmu_idx,
uintptr_t ra)
{
cpu_stw_le_data_ra(env, addr, val, ra);
}
static inline void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, int mmu_idx,
uintptr_t ra)
{
cpu_stl_le_data_ra(env, addr, val, ra);
}
static inline void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uint64_t val, int mmu_idx,
uintptr_t ra)
{
cpu_stq_le_data_ra(env, addr, val, ra);
}
#else

/* Needed for TCG_OVERSIZED_GUEST */

@@ -336,46 +374,6 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
}
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t retaddr);
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t retaddr);
#endif /* defined(CONFIG_USER_ONLY) */

#ifdef TARGET_WORDS_BIGENDIAN

@@ -391,6 +389,9 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
# define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra
# define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra
# define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra
# define cpu_ldw_mmu cpu_ldw_be_mmu
# define cpu_ldl_mmu cpu_ldl_be_mmu
# define cpu_ldq_mmu cpu_ldq_be_mmu
# define cpu_stw_data cpu_stw_be_data
# define cpu_stl_data cpu_stl_be_data
# define cpu_stq_data cpu_stq_be_data

@@ -400,6 +401,9 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
# define cpu_stw_mmuidx_ra cpu_stw_be_mmuidx_ra
# define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra
# define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra
# define cpu_stw_mmu cpu_stw_be_mmu
# define cpu_stl_mmu cpu_stl_be_mmu
# define cpu_stq_mmu cpu_stq_be_mmu
#else
# define cpu_lduw_data cpu_lduw_le_data
# define cpu_ldsw_data cpu_ldsw_le_data

@@ -413,6 +417,9 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
# define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra
# define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra
# define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra
# define cpu_ldw_mmu cpu_ldw_le_mmu
# define cpu_ldl_mmu cpu_ldl_le_mmu
# define cpu_ldq_mmu cpu_ldq_le_mmu
# define cpu_stw_data cpu_stw_le_data
# define cpu_stl_data cpu_stl_le_data
# define cpu_stq_data cpu_stq_le_data

@@ -422,6 +429,9 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
# define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra
# define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra
# define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra
# define cpu_stw_mmu cpu_stw_le_mmu
# define cpu_stl_mmu cpu_stl_le_mmu
# define cpu_stq_mmu cpu_stq_le_mmu
#endif

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
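/*
 * Editor's note: via the aliases above, target code can write e.g.
 *
 *     uint64_t v = cpu_ldq_mmu(env, addr, oi, GETPC());
 *
 * and resolve to the cpu_ldq_be_mmu or cpu_ldq_le_mmu flavour matching
 * TARGET_WORDS_BIGENDIAN.
 */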

include/tcg/tcg-ldst.h (new file)

@@ -0,0 +1,74 @@
/*
* Memory helpers that will be used by TCG generated code.
*
* Copyright (c) 2008 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef TCG_LDST_H
#define TCG_LDST_H 1
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size. */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
/* Value sign-extended to tcg register size. */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */
#endif /* TCG_LDST_H */


@@ -33,7 +33,6 @@
#include "qemu/queue.h"
#include "tcg/tcg-mo.h"
#include "tcg-target.h"
-#include "qemu/int128.h"
#include "tcg/tcg-cond.h"

/* XXX: make safe guess about sizes */

@@ -1241,163 +1240,6 @@ uint64_t dup_const(unsigned vece, uint64_t c);
: (target_long)dup_const(VECE, C))
#endif
/*
* Memory helpers that will be used by TCG generated code.
*/
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size. */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
/* Value sign-extended to tcg register size. */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr);
/* Temporary aliases until backends are converted. */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu helper_be_ldsw_mmu
# define helper_ret_lduw_mmu helper_be_lduw_mmu
# define helper_ret_ldsl_mmu helper_be_ldsl_mmu
# define helper_ret_ldul_mmu helper_be_ldul_mmu
# define helper_ret_ldl_mmu helper_be_ldul_mmu
# define helper_ret_ldq_mmu helper_be_ldq_mmu
# define helper_ret_stw_mmu helper_be_stw_mmu
# define helper_ret_stl_mmu helper_be_stl_mmu
# define helper_ret_stq_mmu helper_be_stq_mmu
#else
# define helper_ret_ldsw_mmu helper_le_ldsw_mmu
# define helper_ret_lduw_mmu helper_le_lduw_mmu
# define helper_ret_ldsl_mmu helper_le_ldsl_mmu
# define helper_ret_ldul_mmu helper_le_ldul_mmu
# define helper_ret_ldl_mmu helper_le_ldul_mmu
# define helper_ret_ldq_mmu helper_le_ldq_mmu
# define helper_ret_stw_mmu helper_le_stw_mmu
# define helper_ret_stl_mmu helper_le_stl_mmu
# define helper_ret_stq_mmu helper_le_stq_mmu
#endif
#endif /* CONFIG_SOFTMMU */
uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
uint64_t cmpv, uint64_t newv,
MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
uint64_t cmpv, uint64_t newv,
MemOpIdx oi, uintptr_t retaddr);
#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
(CPUArchState *env, target_ulong addr, TYPE val, \
MemOpIdx oi, uintptr_t retaddr);
#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME) \
GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME) \
GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif
GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)
GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)
GEN_ATOMIC_HELPER_ALL(xchg)
#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv,
MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv,
MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);
void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
MemOpIdx oi, uintptr_t retaddr);
void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
MemOpIdx oi, uintptr_t retaddr);
#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode);
#else


@@ -1378,17 +1378,17 @@ bool memory_region_access_valid(MemoryRegion *mr,
{
if (mr->ops->valid.accepts
    && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
-        qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
-                      "0x%" HWADDR_PRIX ", size %u, "
-                      "region '%s', reason: rejected\n",
-                      addr, size, memory_region_name(mr));
+        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
+                      ", size %u, region '%s', reason: rejected\n",
+                      is_write ? "write" : "read",
+                      addr, size, memory_region_name(mr));
return false;
}

if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
-        qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
-                      "0x%" HWADDR_PRIX ", size %u, "
-                      "region '%s', reason: unaligned\n",
-                      addr, size, memory_region_name(mr));
+        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
+                      ", size %u, region '%s', reason: unaligned\n",
+                      is_write ? "write" : "read",
+                      addr, size, memory_region_name(mr));
return false;
}

@@ -1400,10 +1400,10 @@ bool memory_region_access_valid(MemoryRegion *mr,
if (size > mr->ops->valid.max_access_size
    || size < mr->ops->valid.min_access_size) {
-        qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
-                      "0x%" HWADDR_PRIX ", size %u, "
-                      "region '%s', reason: invalid size "
-                      "(min:%u max:%u)\n",
-                      addr, size, memory_region_name(mr),
+        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
+                      ", size %u, region '%s', reason: invalid size "
+                      "(min:%u max:%u)\n",
+                      is_write ? "write" : "read",
+                      addr, size, memory_region_name(mr),
mr->ops->valid.min_access_size,
mr->ops->valid.max_access_size);
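/*
 * Editor's illustration (hypothetical values): a rejected 4-byte guest
 * write to a region named "foo" now logs as
 *
 *     Invalid write at addr 0x10, size 4, region 'foo', reason: rejected
 *
 * where the message previously began with the direction-less
 * "Invalid access at addr ...".
 */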


@@ -32,7 +32,6 @@
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
-#include "tcg/tcg.h"
#include "fpu/softfloat.h"

#include <zlib.h> /* For crc32 */
@@ -513,37 +512,19 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
uintptr_t ra = GETPC();
uint64_t o0, o1;
bool success;
#ifdef CONFIG_USER_ONLY
/* ??? Enforce alignment. */
uint64_t *haddr = g2h(env_cpu(env), addr);
set_helper_retaddr(ra);
o0 = ldq_le_p(haddr + 0);
o1 = ldq_le_p(haddr + 1);
oldv = int128_make128(o0, o1);
success = int128_eq(oldv, cmpv);
if (success) {
stq_le_p(haddr + 0, int128_getlo(newv));
stq_le_p(haddr + 1, int128_gethi(newv));
}
clear_helper_retaddr();
#else
int mem_idx = cpu_mmu_index(env, false);
MemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
MemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);

-    o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra);
-    o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra);
+    o0 = cpu_ldq_le_mmu(env, addr + 0, oi0, ra);
+    o1 = cpu_ldq_le_mmu(env, addr + 8, oi1, ra);
oldv = int128_make128(o0, o1);
success = int128_eq(oldv, cmpv);
if (success) {
-        helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
-        helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
+        cpu_stq_le_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
+        cpu_stq_le_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
}
-#endif

return !success;
}
@@ -560,7 +541,7 @@ uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
assert(HAVE_CMPXCHG128);

mem_idx = cpu_mmu_index(env, false);
-    oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+    oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);

cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
newv = int128_make128(new_lo, new_hi);

@@ -583,37 +564,19 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
uintptr_t ra = GETPC();
uint64_t o0, o1;
bool success;
#ifdef CONFIG_USER_ONLY
/* ??? Enforce alignment. */
uint64_t *haddr = g2h(env_cpu(env), addr);
set_helper_retaddr(ra);
o1 = ldq_be_p(haddr + 0);
o0 = ldq_be_p(haddr + 1);
oldv = int128_make128(o0, o1);
success = int128_eq(oldv, cmpv);
if (success) {
stq_be_p(haddr + 0, int128_gethi(newv));
stq_be_p(haddr + 1, int128_getlo(newv));
}
clear_helper_retaddr();
#else
int mem_idx = cpu_mmu_index(env, false);
MemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
MemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);

-    o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra);
-    o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra);
+    o1 = cpu_ldq_be_mmu(env, addr + 0, oi0, ra);
+    o0 = cpu_ldq_be_mmu(env, addr + 8, oi1, ra);
oldv = int128_make128(o0, o1);
success = int128_eq(oldv, cmpv);
if (success) {
-        helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
-        helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
+        cpu_stq_be_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
+        cpu_stq_be_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
}
-#endif

return !success;
}

@@ -630,7 +593,7 @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
assert(HAVE_CMPXCHG128);

mem_idx = cpu_mmu_index(env, false);
-    oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
+    oi = make_memop_idx(MO_BE | MO_128 | MO_ALIGN, mem_idx);

/*
 * High and low need to be switched here because this is not actually a

@@ -656,7 +619,7 @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
assert(HAVE_CMPXCHG128);

mem_idx = cpu_mmu_index(env, false);
-    oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+    oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);

cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
newv = int128_make128(new_lo, new_hi);

@@ -677,7 +640,7 @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
assert(HAVE_CMPXCHG128);

mem_idx = cpu_mmu_index(env, false);
-    oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+    oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);

cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
newv = int128_make128(new_lo, new_hi);
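/*
 * Editor's note: the old MO_LEQ | MO_ALIGN_16 encoded an 8-byte (MO_Q)
 * access that merely carried a 16-byte alignment flag; MO_LE | MO_128 |
 * MO_ALIGN describes the full 16-byte access with natural alignment,
 * which is what these cmpxchg128-backed helpers actually perform.
 */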


@@ -1947,9 +1947,9 @@ static bool do_v7m_function_return(ARMCPU *cpu)
 * do them as secure, so work out what MMU index that is.
 */
mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
-    oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
-    newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
-    newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
+    oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
+    newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0);
+    newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0);

/* Consistency checks on new IPSR */
newpsr_exc = newpsr & XPSR_EXCP;


@@ -141,6 +141,15 @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, target_ulong *pc,
#endif
}
static inline int cpu_mmu_index(CPUHexagonState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
return MMU_USER_IDX;
#else
#error System mode not supported on Hexagon yet
#endif
}
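/*
 * Editor's note (not part of the patch): this makes the generic
 * mmu-index-based accessors usable on Hexagon, e.g.
 *
 *     int idx = cpu_mmu_index(env, false);
 *     uint32_t w = cpu_ldl_le_mmuidx_ra(env, addr, idx, GETPC());
 *
 * In a user-only build the index is always MMU_USER_IDX.
 */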
typedef struct CPUHexagonState CPUArchState;
typedef HexagonCPU ArchCPU;


@@ -136,7 +136,7 @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);

int mem_idx = cpu_mmu_index(env, false);
-        MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+        MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra);

if (int128_eq(oldv, cmpv)) {


@@ -22,7 +22,6 @@
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "semihosting/semihost.h"
-#include "tcg/tcg.h"

#if !defined(CONFIG_USER_ONLY)


@@ -8218,178 +8218,86 @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
#define MEMOP_IDX(DF)
#endif
#ifdef TARGET_WORDS_BIGENDIAN
static inline uint64_t bswap16x4(uint64_t x)
{
uint64_t m = 0x00ff00ff00ff00ffull;
return ((x & m) << 8) | ((x >> 8) & m);
}
static inline uint64_t bswap32x2(uint64_t x)
{
return ror64(bswap64(x), 32);
}
#endif
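/*
 * Editor's illustration of the swap primitives above:
 *
 *     bswap16x4(0x0011223344556677) == 0x1100332255447766  // per-halfword
 *     bswap32x2(0x0011223344556677) == 0x3322110077665544  // per-word
 */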
void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
                     target_ulong addr)
{
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+    uintptr_t ra = GETPC();
+    uint64_t d0, d1;
+
+    /* Load 8 bytes at a time.  Vector element ordering makes this LE. */
+    d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
+    d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
+    pwd->d[0] = d0;
+    pwd->d[1] = d1;
-    MEMOP_IDX(DF_BYTE)
-#if !defined(CONFIG_USER_ONLY)
-#if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->b[0] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC());
-    pwd->b[1] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC());
-    pwd->b[2] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC());
-    pwd->b[3] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC());
-    pwd->b[4] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC());
pwd->b[5] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC());
pwd->b[6] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC());
pwd->b[7] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC());
pwd->b[8] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC());
pwd->b[9] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC());
pwd->b[10] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
pwd->b[11] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
pwd->b[12] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
pwd->b[13] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
pwd->b[14] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
pwd->b[15] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
#else
pwd->b[0] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC());
pwd->b[1] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC());
pwd->b[2] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC());
pwd->b[3] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC());
pwd->b[4] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC());
pwd->b[5] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC());
pwd->b[6] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC());
pwd->b[7] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC());
pwd->b[8] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
pwd->b[9] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
pwd->b[10] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
pwd->b[11] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
pwd->b[12] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
pwd->b[13] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
pwd->b[14] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC());
pwd->b[15] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC());
#endif
#else
#if !defined(HOST_WORDS_BIGENDIAN)
pwd->b[0] = cpu_ldub_data(env, addr + (0 << DF_BYTE));
pwd->b[1] = cpu_ldub_data(env, addr + (1 << DF_BYTE));
pwd->b[2] = cpu_ldub_data(env, addr + (2 << DF_BYTE));
pwd->b[3] = cpu_ldub_data(env, addr + (3 << DF_BYTE));
pwd->b[4] = cpu_ldub_data(env, addr + (4 << DF_BYTE));
pwd->b[5] = cpu_ldub_data(env, addr + (5 << DF_BYTE));
pwd->b[6] = cpu_ldub_data(env, addr + (6 << DF_BYTE));
pwd->b[7] = cpu_ldub_data(env, addr + (7 << DF_BYTE));
pwd->b[8] = cpu_ldub_data(env, addr + (8 << DF_BYTE));
pwd->b[9] = cpu_ldub_data(env, addr + (9 << DF_BYTE));
pwd->b[10] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
pwd->b[11] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
pwd->b[12] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
pwd->b[13] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
pwd->b[14] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
pwd->b[15] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
#else
pwd->b[0] = cpu_ldub_data(env, addr + (7 << DF_BYTE));
pwd->b[1] = cpu_ldub_data(env, addr + (6 << DF_BYTE));
pwd->b[2] = cpu_ldub_data(env, addr + (5 << DF_BYTE));
pwd->b[3] = cpu_ldub_data(env, addr + (4 << DF_BYTE));
pwd->b[4] = cpu_ldub_data(env, addr + (3 << DF_BYTE));
pwd->b[5] = cpu_ldub_data(env, addr + (2 << DF_BYTE));
pwd->b[6] = cpu_ldub_data(env, addr + (1 << DF_BYTE));
pwd->b[7] = cpu_ldub_data(env, addr + (0 << DF_BYTE));
pwd->b[8] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
pwd->b[9] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
pwd->b[10] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
pwd->b[11] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
pwd->b[12] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
pwd->b[13] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
pwd->b[14] = cpu_ldub_data(env, addr + (9 << DF_BYTE));
pwd->b[15] = cpu_ldub_data(env, addr + (8 << DF_BYTE));
#endif
#endif
}
void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
                     target_ulong addr)
{
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+    uintptr_t ra = GETPC();
+    uint64_t d0, d1;
+
+    /*
+     * Load 8 bytes at a time.  Use little-endian load, then for
+     * big-endian target, we must then swap the four halfwords.
+     */
+    d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
+    d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
+#ifdef TARGET_WORDS_BIGENDIAN
+    d0 = bswap16x4(d0);
+    d1 = bswap16x4(d1);
-    MEMOP_IDX(DF_HALF)
-#if !defined(CONFIG_USER_ONLY)
-#if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->h[0] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
-    pwd->h[1] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
-    pwd->h[2] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
-    pwd->h[3] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
-    pwd->h[4] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
-    pwd->h[5] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
-    pwd->h[6] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
-    pwd->h[7] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
-#else
pwd->h[0] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
pwd->h[1] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
pwd->h[2] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
pwd->h[3] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
pwd->h[4] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
pwd->h[5] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
pwd->h[6] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
pwd->h[7] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
#endif
#else
#if !defined(HOST_WORDS_BIGENDIAN)
pwd->h[0] = cpu_lduw_data(env, addr + (0 << DF_HALF));
pwd->h[1] = cpu_lduw_data(env, addr + (1 << DF_HALF));
pwd->h[2] = cpu_lduw_data(env, addr + (2 << DF_HALF));
pwd->h[3] = cpu_lduw_data(env, addr + (3 << DF_HALF));
pwd->h[4] = cpu_lduw_data(env, addr + (4 << DF_HALF));
pwd->h[5] = cpu_lduw_data(env, addr + (5 << DF_HALF));
pwd->h[6] = cpu_lduw_data(env, addr + (6 << DF_HALF));
pwd->h[7] = cpu_lduw_data(env, addr + (7 << DF_HALF));
#else
pwd->h[0] = cpu_lduw_data(env, addr + (3 << DF_HALF));
pwd->h[1] = cpu_lduw_data(env, addr + (2 << DF_HALF));
pwd->h[2] = cpu_lduw_data(env, addr + (1 << DF_HALF));
pwd->h[3] = cpu_lduw_data(env, addr + (0 << DF_HALF));
pwd->h[4] = cpu_lduw_data(env, addr + (7 << DF_HALF));
pwd->h[5] = cpu_lduw_data(env, addr + (6 << DF_HALF));
pwd->h[6] = cpu_lduw_data(env, addr + (5 << DF_HALF));
pwd->h[7] = cpu_lduw_data(env, addr + (4 << DF_HALF));
#endif
-#endif
+#endif
+    pwd->d[0] = d0;
+    pwd->d[1] = d1;
}
void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
                     target_ulong addr)
{
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+    uintptr_t ra = GETPC();
+    uint64_t d0, d1;
+
+    /*
+     * Load 8 bytes at a time.  Use little-endian load, then for
+     * big-endian target, we must then bswap the two words.
+     */
+    d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
+    d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
+#ifdef TARGET_WORDS_BIGENDIAN
+    d0 = bswap32x2(d0);
+    d1 = bswap32x2(d1);
-    MEMOP_IDX(DF_WORD)
-#if !defined(CONFIG_USER_ONLY)
-#if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->w[0] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
-    pwd->w[1] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
-    pwd->w[2] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
-    pwd->w[3] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
-#else
-    pwd->w[0] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
-    pwd->w[1] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
-    pwd->w[2] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
-    pwd->w[3] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
#endif
#else
#if !defined(HOST_WORDS_BIGENDIAN)
pwd->w[0] = cpu_ldl_data(env, addr + (0 << DF_WORD));
pwd->w[1] = cpu_ldl_data(env, addr + (1 << DF_WORD));
pwd->w[2] = cpu_ldl_data(env, addr + (2 << DF_WORD));
pwd->w[3] = cpu_ldl_data(env, addr + (3 << DF_WORD));
#else
pwd->w[0] = cpu_ldl_data(env, addr + (1 << DF_WORD));
pwd->w[1] = cpu_ldl_data(env, addr + (0 << DF_WORD));
pwd->w[2] = cpu_ldl_data(env, addr + (3 << DF_WORD));
pwd->w[3] = cpu_ldl_data(env, addr + (2 << DF_WORD));
#endif
-#endif
+#endif
+    pwd->d[0] = d0;
+    pwd->d[1] = d1;
}
void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
                     target_ulong addr)
{
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+    uintptr_t ra = GETPC();
+    uint64_t d0, d1;
+
+    d0 = cpu_ldq_data_ra(env, addr + 0, ra);
+    d1 = cpu_ldq_data_ra(env, addr + 8, ra);
+    pwd->d[0] = d0;
+    pwd->d[1] = d1;
-    MEMOP_IDX(DF_DOUBLE)
-#if !defined(CONFIG_USER_ONLY)
-    pwd->d[0] = helper_ret_ldq_mmu(env, addr + (0 << DF_DOUBLE), oi, GETPC());
-    pwd->d[1] = helper_ret_ldq_mmu(env, addr + (1 << DF_DOUBLE), oi, GETPC());
-#else
-    pwd->d[0] = cpu_ldq_data(env, addr + (0 << DF_DOUBLE));
-    pwd->d[1] = cpu_ldq_data(env, addr + (1 << DF_DOUBLE));
-#endif
}
#define MSA_PAGESPAN(x) \

@@ -8415,82 +8323,13 @@ void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,
{
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
int mmu_idx = cpu_mmu_index(env, false);
+    uintptr_t ra = GETPC();
+
+    ensure_writable_pages(env, addr, mmu_idx, ra);
+
+    /* Store 8 bytes at a time.  Vector element ordering makes this LE. */
+    cpu_stq_le_data_ra(env, addr + 0, pwd->d[0], ra);
+    cpu_stq_le_data_ra(env, addr + 8, pwd->d[1], ra);
-    MEMOP_IDX(DF_BYTE)
-    ensure_writable_pages(env, addr, mmu_idx, GETPC());
-#if !defined(CONFIG_USER_ONLY)
-#if !defined(HOST_WORDS_BIGENDIAN)
-    helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[0], oi, GETPC());
helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[1], oi, GETPC());
helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[2], oi, GETPC());
helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[3], oi, GETPC());
helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[4], oi, GETPC());
helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[5], oi, GETPC());
helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[6], oi, GETPC());
helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[7], oi, GETPC());
helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[8], oi, GETPC());
helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[9], oi, GETPC());
helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[10], oi, GETPC());
helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[11], oi, GETPC());
helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[12], oi, GETPC());
helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[13], oi, GETPC());
helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[14], oi, GETPC());
helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[15], oi, GETPC());
#else
helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[0], oi, GETPC());
helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[1], oi, GETPC());
helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[2], oi, GETPC());
helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[3], oi, GETPC());
helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[4], oi, GETPC());
helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[5], oi, GETPC());
helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[6], oi, GETPC());
helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[7], oi, GETPC());
helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[8], oi, GETPC());
helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[9], oi, GETPC());
helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[10], oi, GETPC());
helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[11], oi, GETPC());
helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[12], oi, GETPC());
helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[13], oi, GETPC());
helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[14], oi, GETPC());
helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[15], oi, GETPC());
#endif
#else
#if !defined(HOST_WORDS_BIGENDIAN)
cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[0]);
cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[1]);
cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[2]);
cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[3]);
cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[4]);
cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[5]);
cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[6]);
cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[7]);
cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[8]);
cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[9]);
cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[10]);
cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[11]);
cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[12]);
cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[13]);
cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[14]);
cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[15]);
#else
cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[0]);
cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[1]);
cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[2]);
cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[3]);
cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[4]);
cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[5]);
cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[6]);
cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[7]);
cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[8]);
cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[9]);
cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[10]);
cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[11]);
cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[12]);
cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[13]);
cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[14]);
cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[15]);
#endif
#endif
}
void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,

@@ -8498,50 +8337,20 @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
{
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
int mmu_idx = cpu_mmu_index(env, false);
+    uintptr_t ra = GETPC();
+    uint64_t d0, d1;
+
+    ensure_writable_pages(env, addr, mmu_idx, ra);
+
+    /* Store 8 bytes at a time.  See helper_msa_ld_h. */
+    d0 = pwd->d[0];
+    d1 = pwd->d[1];
+#ifdef TARGET_WORDS_BIGENDIAN
+    d0 = bswap16x4(d0);
+    d1 = bswap16x4(d1);
-    MEMOP_IDX(DF_HALF)
-    ensure_writable_pages(env, addr, mmu_idx, GETPC());
-#if !defined(CONFIG_USER_ONLY)
-#if !defined(HOST_WORDS_BIGENDIAN)
-    helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[0], oi, GETPC());
-    helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[1], oi, GETPC());
-    helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[2], oi, GETPC());
-    helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[3], oi, GETPC());
helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[4], oi, GETPC());
helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[5], oi, GETPC());
helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[6], oi, GETPC());
helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[7], oi, GETPC());
#else
helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[0], oi, GETPC());
helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[1], oi, GETPC());
helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[2], oi, GETPC());
helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[3], oi, GETPC());
helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[4], oi, GETPC());
helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[5], oi, GETPC());
helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[6], oi, GETPC());
helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[7], oi, GETPC());
#endif
#else
#if !defined(HOST_WORDS_BIGENDIAN)
cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[0]);
cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[1]);
cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[2]);
cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[3]);
cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[4]);
cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[5]);
cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[6]);
cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[7]);
#else
cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[0]);
cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[1]);
cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[2]);
cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[3]);
cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[4]);
cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[5]);
cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[6]);
cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[7]);
#endif
-#endif
+#endif
+    cpu_stq_le_data_ra(env, addr + 0, d0, ra);
+    cpu_stq_le_data_ra(env, addr + 8, d1, ra);
}
void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,

@@ -8549,34 +8358,20 @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
{
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
int mmu_idx = cpu_mmu_index(env, false);
+    uintptr_t ra = GETPC();
+    uint64_t d0, d1;
+
+    ensure_writable_pages(env, addr, mmu_idx, ra);
+
+    /* Store 8 bytes at a time.  See helper_msa_ld_w. */
+    d0 = pwd->d[0];
+    d1 = pwd->d[1];
+#ifdef TARGET_WORDS_BIGENDIAN
+    d0 = bswap32x2(d0);
+    d1 = bswap32x2(d1);
-    MEMOP_IDX(DF_WORD)
-    ensure_writable_pages(env, addr, mmu_idx, GETPC());
-#if !defined(CONFIG_USER_ONLY)
-#if !defined(HOST_WORDS_BIGENDIAN)
-    helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[0], oi, GETPC());
-    helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[1], oi, GETPC());
-    helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[2], oi, GETPC());
-    helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[3], oi, GETPC());
#else
helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[0], oi, GETPC());
helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[1], oi, GETPC());
helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[2], oi, GETPC());
helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[3], oi, GETPC());
#endif
#else
#if !defined(HOST_WORDS_BIGENDIAN)
cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[0]);
cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[1]);
cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[2]);
cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[3]);
#else
cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[0]);
cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[1]);
cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[2]);
cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[3]);
#endif
-#endif
+#endif
+    cpu_stq_le_data_ra(env, addr + 0, d0, ra);
+    cpu_stq_le_data_ra(env, addr + 8, d1, ra);
}
void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,

@@ -8584,14 +8379,10 @@ void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
{
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
int mmu_idx = cpu_mmu_index(env, false);
+    uintptr_t ra = GETPC();
-    MEMOP_IDX(DF_DOUBLE)
ensure_writable_pages(env, addr, mmu_idx, GETPC());
+
+    cpu_stq_data_ra(env, addr + 0, pwd->d[0], ra);
+    cpu_stq_data_ra(env, addr + 8, pwd->d[1], ra);
-#if !defined(CONFIG_USER_ONLY)
-    helper_ret_stq_mmu(env, addr + (0 << DF_DOUBLE), pwd->d[0], oi, GETPC());
-    helper_ret_stq_mmu(env, addr + (1 << DF_DOUBLE), pwd->d[1], oi, GETPC());
-#else
-    cpu_stq_data(env, addr + (0 << DF_DOUBLE), pwd->d[0]);
-    cpu_stq_data(env, addr + (1 << DF_DOUBLE), pwd->d[1]);
-#endif
}


@@ -25,7 +25,6 @@
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
-#include "tcg/tcg.h"
#include "internal.h"
#include "qemu/atomic128.h"


@@ -3462,10 +3462,12 @@ static void gen_std(DisasContext *ctx)
if (HAVE_ATOMIC128) {
TCGv_i32 oi = tcg_temp_new_i32();
if (ctx->le_mode) {
-                tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
+                tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128,
+                                                    ctx->mem_idx));
gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
} else {
-                tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
+                tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128,
+                                                    ctx->mem_idx));
gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
}
tcg_temp_free_i32(oi);
@@ -4067,11 +4069,11 @@ static void gen_lqarx(DisasContext *ctx)
if (HAVE_ATOMIC128) {
TCGv_i32 oi = tcg_temp_new_i32();
if (ctx->le_mode) {
-                tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16,
-                                                    ctx->mem_idx));
+                tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128 | MO_ALIGN,
+                                                    ctx->mem_idx));
gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
} else {
-                tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16,
-                                                    ctx->mem_idx));
+                tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128 | MO_ALIGN,
+                                                    ctx->mem_idx));
gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
}
@@ -4122,7 +4124,7 @@ static void gen_stqcx_(DisasContext *ctx)
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
if (HAVE_CMPXCHG128) {
-            TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16);
+            TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_128) | MO_ALIGN);
if (ctx->le_mode) {
gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env,
                             EA, lo, hi, oi);
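/*
 * Editor's note: DEF_MEMOP() folds in the context endianness, so
 * DEF_MEMOP(MO_128) | MO_ALIGN is the 16-byte, naturally-aligned
 * analogue of the old DEF_MEMOP(MO_Q) | MO_ALIGN_16.
 */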


@@ -27,7 +27,6 @@
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
-#include "tcg/tcg.h"
#include "trace.h"

#if !defined(CONFIG_USER_ONLY)
@@ -250,13 +249,13 @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
 * page. This is especially relevant to speed up TLB_NOTDIRTY.
 */
g_assert(size > 0);
-    helper_ret_stb_mmu(env, vaddr, byte, oi, ra);
+    cpu_stb_mmu(env, vaddr, byte, oi, ra);
haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
if (likely(haddr)) {
memset(haddr + 1, byte, size - 1);
} else {
for (i = 1; i < size; i++) {
-            helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra);
+            cpu_stb_mmu(env, vaddr + i, byte, oi, ra);
}
}
}
@@ -292,7 +291,7 @@ static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
 * Do a single access and test if we can then get access to the
 * page. This is especially relevant to speed up TLB_NOTDIRTY.
 */
-    byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra);
+    byte = cpu_ldb_mmu(env, vaddr + offset, oi, ra);
*haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
return byte;
#endif
@@ -326,7 +325,7 @@ static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
 * Do a single access and test if we can then get access to the
 * page. This is especially relevant to speed up TLB_NOTDIRTY.
 */
-    helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra);
+    cpu_stb_mmu(env, vaddr + offset, byte, oi, ra);
*haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
#endif
}
@@ -1811,7 +1810,7 @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
assert(HAVE_CMPXCHG128);

mem_idx = cpu_mmu_index(env, false);
-    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+    oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
fail = !int128_eq(oldv, cmpv);

@@ -1940,7 +1939,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
} else if (HAVE_CMPXCHG128) {
-            MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+            MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
cc = !int128_eq(ov, cv);
} else {


@@ -1333,27 +1333,27 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
oi = make_memop_idx(memop, idx);
switch (size) {
case 1:
-        ret = helper_ret_ldub_mmu(env, addr, oi, GETPC());
+        ret = cpu_ldb_mmu(env, addr, oi, GETPC());
break;
case 2:
if (asi & 8) {
-            ret = helper_le_lduw_mmu(env, addr, oi, GETPC());
+            ret = cpu_ldw_le_mmu(env, addr, oi, GETPC());
} else {
-            ret = helper_be_lduw_mmu(env, addr, oi, GETPC());
+            ret = cpu_ldw_be_mmu(env, addr, oi, GETPC());
}
break;
case 4:
if (asi & 8) {
-            ret = helper_le_ldul_mmu(env, addr, oi, GETPC());
+            ret = cpu_ldl_le_mmu(env, addr, oi, GETPC());
} else {
-            ret = helper_be_ldul_mmu(env, addr, oi, GETPC());
+            ret = cpu_ldl_be_mmu(env, addr, oi, GETPC());
}
break;
case 8:
if (asi & 8) {
-            ret = helper_le_ldq_mmu(env, addr, oi, GETPC());
+            ret = cpu_ldq_le_mmu(env, addr, oi, GETPC());
} else {
-            ret = helper_be_ldq_mmu(env, addr, oi, GETPC());
+            ret = cpu_ldq_be_mmu(env, addr, oi, GETPC());
}
break;
default:


@@ -2765,7 +2765,12 @@ void tcg_gen_lookup_and_goto_ptr(void)
static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
/* Trigger the asserts within as early as possible. */
-    (void)get_alignment_bits(op);
+    unsigned a_bits = get_alignment_bits(op);
+
/* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */
if (a_bits == (op & MO_SIZE)) {
op = (op & ~MO_AMASK) | MO_ALIGN;
}
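/*
 * Editor's example: MO_32 | MO_ALIGN_4 requests 4-byte alignment for a
 * 4-byte access, i.e. natural alignment, so it canonicalizes to
 * MO_32 | MO_ALIGN here; MO_32 | MO_ALIGN_2 is left unchanged.
 */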
switch (op & MO_SIZE) {
case MO_8:


@@ -58,6 +58,7 @@
#include "elf.h"
#include "exec/log.h"
+#include "tcg/tcg-ldst.h"
#include "tcg-internal.h"

#ifdef CONFIG_TCG_INTERPRETER


@@ -22,6 +22,7 @@
#include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
+#include "tcg/tcg-ldst.h"
#include "qemu/compiler.h"
#include <ffi.h>