Merge tag 'pull-tcg-20230715' of https://gitlab.com/rth7680/qemu into staging

tcg: Use HAVE_CMPXCHG128 instead of CONFIG_CMPXCHG128
accel/tcg: Introduce page_check_range_empty
accel/tcg: Introduce page_find_range_empty
accel/tcg: Accept more page flags in page_check_range
accel/tcg: Return bool from page_check_range
accel/tcg: Always lock pages before translation
linux-user: Use abi_* types for target structures in syscall_defs.h
linux-user: Fix abi_llong alignment for microblaze and nios2
linux-user: Fix do_shmat type errors
linux-user: Implement execve without execveat
linux-user: Make sure initial brk is aligned
linux-user: Use a mask with strace flags
linux-user: Implement MAP_FIXED_NOREPLACE
linux-user: Widen target_mmap offset argument to off_t
linux-user: Use page_find_range_empty for mmap_find_vma_reserved
linux-user: Use 'last' instead of 'end' in target_mmap and subroutines
linux-user: Remove can_passthrough_madvise
linux-user: Simplify target_madvise
linux-user: Drop uint and ulong types
linux-user/arm: Do not allocate a commpage at all for M-profile CPUs
bsd-user: Use page_check_range_empty for MAP_EXCL
bsd-user: Use page_find_range_empty for mmap_find_vma_reserved

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmSypEYdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9VzQf/RMRK4SQDJiJEbQ6K
# 5U1i955Rl4MMLT8PrkbT/UDA9soyIlSVjUenW8ThJJg6SLbSvkXZsWn165PFu+yW
# nYkeCYxkJtAjWmmFlZ44J+VLEZZ6LkWrIvPZHvKohelpi6uT/fuQaAZjKuH2prI/
# 7bdP5YdLUMpCztERHYfxmroEX4wJR6knsRpt5rYchADxEfkWk82PanneCw7grQ6V
# VNg1pRGplp0jMkpOOBvMD1ENkmoipklMe9P1gQdCHobg2/kqpozhT1oQp/gHNkP5
# 66Cjzv8o0nnPjJetm74pnP06iNhuMjDesD7f+Vq/DALgMobwjvhDW5GD+Ccto85B
# hqvwHA==
# =vm0t
# -----END PGP SIGNATURE-----
# gpg: Signature made Sat 15 Jul 2023 02:51:02 PM BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* tag 'pull-tcg-20230715' of https://gitlab.com/rth7680/qemu: (47 commits)
  tcg: Use HAVE_CMPXCHG128 instead of CONFIG_CMPXCHG128
  accel/tcg: Always lock pages before translation
  linux-user/arm: Do not allocate a commpage at all for M-profile CPUs
  linux-user: Drop uint and ulong
  linux-user: Simplify target_madvise
  linux-user: Remove can_passthrough_madvise
  accel/tcg: Return bool from page_check_range
  accel/tcg: Accept more page flags in page_check_range
  linux-user: Simplify target_munmap
  linux-user: Rename mmap_reserve to mmap_reserve_or_unmap
  linux-user: Rewrite mmap_reserve
  linux-user: Use 'last' instead of 'end' in target_mmap
  linux-user: Use page_find_range_empty for mmap_find_vma_reserved
  bsd-user: Use page_find_range_empty for mmap_find_vma_reserved
  accel/tcg: Introduce page_find_range_empty
  linux-user: Rewrite mmap_frag
  linux-user: Rewrite target_mprotect
  linux-user: Widen target_mmap offset argument to off_t
  linux-user: Split out target_to_host_prot
  linux-user: Implement MAP_FIXED_NOREPLACE
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson <richard.henderson@linaro.org>  2023-07-15 14:53:37 +01:00
commit d7be40e138
36 changed files with 2043 additions and 1824 deletions

@@ -41,7 +41,7 @@ CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
 CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
 #endif
 
-#ifdef CONFIG_CMPXCHG128
+#if HAVE_CMPXCHG128
 CMPXCHG_HELPER(cmpxchgo_be, Int128)
 CMPXCHG_HELPER(cmpxchgo_le, Int128)
 #endif
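
Note: unlike CONFIG_* symbols, which the build system either defines or leaves undefined, HAVE_CMPXCHG128 is always defined, to 0 or 1, once qemu/atomic128.h is included (the helper-proto-common.h hunk below says as much). That is why every test in this series flips from #ifdef to #if. A standalone illustration of the difference (not QEMU code):

    #include <stdio.h>

    #define HAVE_FEATURE 0   /* always defined, possibly to 0 */

    int main(void)
    {
    #ifdef HAVE_FEATURE
        puts("ifdef: taken even though the feature is absent");
    #endif
    #if HAVE_FEATURE
        puts("if: correctly skipped when the value is 0");
    #endif
        return 0;
    }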

@@ -526,6 +526,43 @@ static void cpu_exec_exit(CPUState *cpu)
     }
 }
 
+static void cpu_exec_longjmp_cleanup(CPUState *cpu)
+{
+    /* Non-buggy compilers preserve this; assert the correct value. */
+    g_assert(cpu == current_cpu);
+
+#ifdef CONFIG_USER_ONLY
+    clear_helper_retaddr();
+    if (have_mmap_lock()) {
+        mmap_unlock();
+    }
+#else
+    /*
+     * For softmmu, a tlb_fill fault during translation will land here,
+     * and we need to release any page locks held.  In system mode we
+     * have one tcg_ctx per thread, so we know it was this cpu doing
+     * the translation.
+     *
+     * Alternative 1: Install a cleanup to be called via an exception
+     * handling safe longjmp.  It seems plausible that all our hosts
+     * support such a thing.  We'd have to properly register unwind info
+     * for the JIT for EH, rather than just for GDB.
+     *
+     * Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
+     * capture the cpu_loop_exit longjmp, perform the cleanup, and
+     * jump again to arrive here.
+     */
+    if (tcg_ctx->gen_tb) {
+        tb_unlock_pages(tcg_ctx->gen_tb);
+        tcg_ctx->gen_tb = NULL;
+    }
+#endif
+    if (qemu_mutex_iothread_locked()) {
+        qemu_mutex_unlock_iothread();
+    }
+    assert_no_pages_locked();
+}
+
 void cpu_exec_step_atomic(CPUState *cpu)
 {
     CPUArchState *env = cpu->env_ptr;
@@ -568,16 +605,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
         cpu_tb_exec(cpu, tb, &tb_exit);
         cpu_exec_exit(cpu);
     } else {
-#ifdef CONFIG_USER_ONLY
-        clear_helper_retaddr();
-        if (have_mmap_lock()) {
-            mmap_unlock();
-        }
-#endif
-        if (qemu_mutex_iothread_locked()) {
-            qemu_mutex_unlock_iothread();
-        }
-        assert_no_pages_locked();
+        cpu_exec_longjmp_cleanup(cpu);
     }
 
     /*
@@ -1023,20 +1051,7 @@ static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
 {
     /* Prepare setjmp context for exception handling. */
     if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
-        /* Non-buggy compilers preserve this; assert the correct value. */
-        g_assert(cpu == current_cpu);
-
-#ifdef CONFIG_USER_ONLY
-        clear_helper_retaddr();
-        if (have_mmap_lock()) {
-            mmap_unlock();
-        }
-#endif
-        if (qemu_mutex_iothread_locked()) {
-            qemu_mutex_unlock_iothread();
-        }
-        assert_no_pages_locked();
+        cpu_exec_longjmp_cleanup(cpu);
     }
 
     return cpu_exec_loop(cpu, sc);
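
The refactoring above funnels both sigsetjmp recovery paths through one helper, so the new page-unlock step cannot be forgotten at one of the sites. A minimal standalone sketch of the pattern (not QEMU code):

    #include <setjmp.h>
    #include <stdio.h>

    static sigjmp_buf env;

    static void longjmp_cleanup(void)
    {
        /* release locks, reset per-thread state, etc., in one place */
        printf("cleanup after longjmp\n");
    }

    static void fault(void)
    {
        siglongjmp(env, 1);   /* stands in for cpu_loop_exit() */
    }

    int main(void)
    {
        if (sigsetjmp(env, 0) != 0) {
            longjmp_cleanup();
            return 0;
        }
        fault();
        return 1;             /* not reached */
    }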

@@ -3105,7 +3105,7 @@ void cpu_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val,
 #include "atomic_template.h"
 #endif
 
-#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
+#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
 #define DATA_SIZE 16
 #include "atomic_template.h"
 #endif

@@ -10,6 +10,7 @@
 #define ACCEL_TCG_INTERNAL_H
 
 #include "exec/exec-all.h"
+#include "exec/translate-all.h"
 
 /*
  * Access to the various translations structures need to be serialised
@@ -35,6 +36,32 @@ static inline void page_table_config_init(void) { }
 void page_table_config_init(void);
 #endif
 
+#ifdef CONFIG_USER_ONLY
+/*
+ * For user-only, page_protect sets the page read-only.
+ * Since most execution is already on read-only pages, and we'd need to
+ * account for other TBs on the same page, defer undoing any page protection
+ * until we receive the write fault.
+ */
+static inline void tb_lock_page0(tb_page_addr_t p0)
+{
+    page_protect(p0);
+}
+
+static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
+{
+    page_protect(p1);
+}
+
+static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
+static inline void tb_unlock_pages(TranslationBlock *tb) { }
+#else
+void tb_lock_page0(tb_page_addr_t);
+void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
+void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
+void tb_unlock_pages(TranslationBlock *);
+#endif
+
 #ifdef CONFIG_SOFTMMU
 void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                    unsigned size,
@@ -48,8 +75,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
 void page_init(void);
 void tb_htable_init(void);
 void tb_reset_jump(TranslationBlock *tb, int n);
-TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
-                               tb_page_addr_t phys_page2);
+TranslationBlock *tb_link_page(TranslationBlock *tb);
 bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
 void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                uintptr_t host_pc);

@@ -159,7 +159,7 @@ static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
      * another process, because the fallback start_exclusive solution
      * provides no protection across processes.
      */
-    if (!page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
+    if (page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
         uint64_t *p = __builtin_assume_aligned(pv, 8);
         return *p;
     }
@@ -194,7 +194,7 @@ static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
      * another process, because the fallback start_exclusive solution
      * provides no protection across processes.
      */
-    if (!page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
+    if (page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
         return *p;
     }
 #endif

@@ -70,17 +70,7 @@ typedef struct PageDesc PageDesc;
  */
 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
 
-static inline void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
-                                  PageDesc **ret_p2, tb_page_addr_t phys2,
-                                  bool alloc)
-{
-    *ret_p1 = NULL;
-    *ret_p2 = NULL;
-}
-
-static inline void page_unlock(PageDesc *pd) { }
-static inline void page_lock_tb(const TranslationBlock *tb) { }
-static inline void page_unlock_tb(const TranslationBlock *tb) { }
+static inline void tb_lock_pages(const TranslationBlock *tb) { }
 
 /*
  * For user-only, since we are protecting all of memory with a single lock,
@@ -96,7 +86,7 @@ static void tb_remove_all(void)
 }
 
 /* Call with mmap_lock held. */
-static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
+static void tb_record(TranslationBlock *tb)
 {
     vaddr addr;
     int flags;
@@ -391,12 +381,108 @@ static void page_lock(PageDesc *pd)
     qemu_spin_lock(&pd->lock);
 }
 
+/* Like qemu_spin_trylock, returns false on success */
+static bool page_trylock(PageDesc *pd)
+{
+    bool busy = qemu_spin_trylock(&pd->lock);
+
+    if (!busy) {
+        page_lock__debug(pd);
+    }
+    return busy;
+}
+
 static void page_unlock(PageDesc *pd)
 {
     qemu_spin_unlock(&pd->lock);
     page_unlock__debug(pd);
 }
 
+void tb_lock_page0(tb_page_addr_t paddr)
+{
+    page_lock(page_find_alloc(paddr >> TARGET_PAGE_BITS, true));
+}
+
+void tb_lock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
+{
+    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+    PageDesc *pd0, *pd1;
+
+    if (pindex0 == pindex1) {
+        /* Identical pages, and the first page is already locked. */
+        return;
+    }
+
+    pd1 = page_find_alloc(pindex1, true);
+    if (pindex0 < pindex1) {
+        /* Correct locking order, we may block. */
+        page_lock(pd1);
+        return;
+    }
+
+    /* Incorrect locking order, we cannot block lest we deadlock. */
+    if (!page_trylock(pd1)) {
+        return;
+    }
+
+    /*
+     * Drop the lock on page0 and get both page locks in the right order.
+     * Restart translation via longjmp.
+     */
+    pd0 = page_find_alloc(pindex0, false);
+    page_unlock(pd0);
+    page_lock(pd1);
+    page_lock(pd0);
+    siglongjmp(tcg_ctx->jmp_trans, -3);
+}
+
+void tb_unlock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
+{
+    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+    if (pindex0 != pindex1) {
+        page_unlock(page_find_alloc(pindex1, false));
+    }
+}
+
+static void tb_lock_pages(TranslationBlock *tb)
+{
+    tb_page_addr_t paddr0 = tb_page_addr0(tb);
+    tb_page_addr_t paddr1 = tb_page_addr1(tb);
+    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+    if (unlikely(paddr0 == -1)) {
+        return;
+    }
+    if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
+        if (pindex0 < pindex1) {
+            page_lock(page_find_alloc(pindex0, true));
+            page_lock(page_find_alloc(pindex1, true));
+            return;
+        }
+        page_lock(page_find_alloc(pindex1, true));
+    }
+    page_lock(page_find_alloc(pindex0, true));
+}
+
+void tb_unlock_pages(TranslationBlock *tb)
+{
+    tb_page_addr_t paddr0 = tb_page_addr0(tb);
+    tb_page_addr_t paddr1 = tb_page_addr1(tb);
+    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+    if (unlikely(paddr0 == -1)) {
+        return;
+    }
+    if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
+        page_unlock(page_find_alloc(pindex1, false));
+    }
+    page_unlock(page_find_alloc(pindex0, false));
+}
+
 static inline struct page_entry *
 page_entry_new(PageDesc *pd, tb_page_addr_t index)
 {
@@ -420,13 +506,10 @@ static void page_entry_destroy(gpointer p)
 /* returns false on success */
 static bool page_entry_trylock(struct page_entry *pe)
 {
-    bool busy;
+    bool busy = page_trylock(pe->pd);
 
-    busy = qemu_spin_trylock(&pe->pd->lock);
     if (!busy) {
         g_assert(!pe->locked);
         pe->locked = true;
-        page_lock__debug(pe->pd);
     }
-
     return busy;
 }
@@ -604,8 +687,7 @@ static void tb_remove_all(void)
  * Add the tb in the target page and protect it if necessary.
  * Called with @p->lock held.
  */
-static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
-                               unsigned int n)
+static void tb_page_add(PageDesc *p, TranslationBlock *tb, unsigned int n)
 {
     bool page_already_protected;
@@ -625,15 +707,21 @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
     }
 }
 
-static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
+static void tb_record(TranslationBlock *tb)
 {
-    tb_page_add(p1, tb, 0);
-    if (unlikely(p2)) {
-        tb_page_add(p2, tb, 1);
+    tb_page_addr_t paddr0 = tb_page_addr0(tb);
+    tb_page_addr_t paddr1 = tb_page_addr1(tb);
+    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+    assert(paddr0 != -1);
+    if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
+        tb_page_add(page_find_alloc(pindex1, false), tb, 1);
     }
+    tb_page_add(page_find_alloc(pindex0, false), tb, 0);
 }
 
-static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
+static void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
 {
     TranslationBlock *tb1;
     uintptr_t *pprev;
@@ -653,74 +741,16 @@ static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
 
 static void tb_remove(TranslationBlock *tb)
 {
-    PageDesc *pd;
-
-    pd = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
-    tb_page_remove(pd, tb);
-    if (unlikely(tb->page_addr[1] != -1)) {
-        pd = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
-        tb_page_remove(pd, tb);
-    }
-}
-
-static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
-                           PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
-{
-    PageDesc *p1, *p2;
-    tb_page_addr_t page1;
-    tb_page_addr_t page2;
-
-    assert_memory_lock();
-    g_assert(phys1 != -1);
-
-    page1 = phys1 >> TARGET_PAGE_BITS;
-    page2 = phys2 >> TARGET_PAGE_BITS;
-
-    p1 = page_find_alloc(page1, alloc);
-    if (ret_p1) {
-        *ret_p1 = p1;
-    }
-    if (likely(phys2 == -1)) {
-        page_lock(p1);
-        return;
-    } else if (page1 == page2) {
-        page_lock(p1);
-        if (ret_p2) {
-            *ret_p2 = p1;
-        }
-        return;
-    }
-
-    p2 = page_find_alloc(page2, alloc);
-    if (ret_p2) {
-        *ret_p2 = p2;
-    }
-
-    if (page1 < page2) {
-        page_lock(p1);
-        page_lock(p2);
-    } else {
-        page_lock(p2);
-        page_lock(p1);
-    }
-}
-
-/* lock the page(s) of a TB in the correct acquisition order */
-static void page_lock_tb(const TranslationBlock *tb)
-{
-    page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);
-}
-
-static void page_unlock_tb(const TranslationBlock *tb)
-{
-    PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);
-
-    page_unlock(p1);
-    if (unlikely(tb_page_addr1(tb) != -1)) {
-        PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);
-
-        if (p2 != p1) {
-            page_unlock(p2);
-        }
-    }
+    tb_page_addr_t paddr0 = tb_page_addr0(tb);
+    tb_page_addr_t paddr1 = tb_page_addr1(tb);
+    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+    assert(paddr0 != -1);
+    if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
+        tb_page_remove(page_find_alloc(pindex1, false), tb);
+    }
+    tb_page_remove(page_find_alloc(pindex0, false), tb);
 }
 
 #endif /* CONFIG_USER_ONLY */
@@ -925,18 +955,16 @@ static void tb_phys_invalidate__locked(TranslationBlock *tb)
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
 {
     if (page_addr == -1 && tb_page_addr0(tb) != -1) {
-        page_lock_tb(tb);
+        tb_lock_pages(tb);
         do_tb_phys_invalidate(tb, true);
-        page_unlock_tb(tb);
+        tb_unlock_pages(tb);
     } else {
         do_tb_phys_invalidate(tb, false);
     }
 }
 
 /*
- * Add a new TB and link it to the physical page tables. phys_page2 is
- * (-1) to indicate that only one page contains the TB.
- *
+ * Add a new TB and link it to the physical page tables.
  * Called with mmap_lock held for user-mode emulation.
  *
  * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
@@ -944,43 +972,29 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
  * for the same block of guest code that @tb corresponds to. In that case,
  * the caller should discard the original @tb, and use instead the returned TB.
  */
-TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
-                               tb_page_addr_t phys_page2)
+TranslationBlock *tb_link_page(TranslationBlock *tb)
 {
-    PageDesc *p;
-    PageDesc *p2 = NULL;
     void *existing_tb = NULL;
     uint32_t h;
 
     assert_memory_lock();
     tcg_debug_assert(!(tb->cflags & CF_INVALID));
 
-    /*
-     * Add the TB to the page list, acquiring first the pages's locks.
-     * We keep the locks held until after inserting the TB in the hash table,
-     * so that if the insertion fails we know for sure that the TBs are still
-     * in the page descriptors.
-     * Note that inserting into the hash table first isn't an option, since
-     * we can only insert TBs that are fully initialized.
-     */
-    page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
-    tb_record(tb, p, p2);
+    tb_record(tb);
 
     /* add in the hash table */
-    h = tb_hash_func(phys_pc, (tb->cflags & CF_PCREL ? 0 : tb->pc),
+    h = tb_hash_func(tb_page_addr0(tb), (tb->cflags & CF_PCREL ? 0 : tb->pc),
                      tb->flags, tb->cs_base, tb->cflags);
     qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
 
     /* remove TB from the page(s) if we couldn't insert it */
     if (unlikely(existing_tb)) {
         tb_remove(tb);
-        tb = existing_tb;
+        tb_unlock_pages(tb);
+        return existing_tb;
     }
 
-    if (p2 && p2 != p) {
-        page_unlock(p2);
-    }
-    page_unlock(p);
+    tb_unlock_pages(tb);
     return tb;
 }
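
The lock-ordering discipline introduced above: pages are always locked in ascending page-index order; when the second page sorts below the one already held, only a trylock is safe, and on contention everything is dropped, reacquired in order, and translation restarts (the siglongjmp with -3). A self-contained sketch of that discipline using pthreads (illustrative only; QEMU uses its own spinlocks and restarts via longjmp instead of returning):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        pthread_mutex_t lock;
        unsigned long index;     /* page index: defines the global lock order */
    } Page;

    /* Returns false if the caller must restart: locks were reacquired in order. */
    static bool lock_second(Page *held, Page *want)
    {
        if (want->index == held->index) {
            return true;                      /* same page, already locked */
        }
        if (want->index > held->index) {
            pthread_mutex_lock(&want->lock);  /* correct order: may block */
            return true;
        }
        if (pthread_mutex_trylock(&want->lock) == 0) {
            return true;                      /* wrong order, but uncontended */
        }
        /* Wrong order and contended: drop, relock in order, signal a restart. */
        pthread_mutex_unlock(&held->lock);
        pthread_mutex_lock(&want->lock);
        pthread_mutex_lock(&held->lock);
        return false;
    }

    int main(void)
    {
        Page p0 = { PTHREAD_MUTEX_INITIALIZER, 2 };
        Page p1 = { PTHREAD_MUTEX_INITIALIZER, 1 };

        pthread_mutex_lock(&p0.lock);
        if (!lock_second(&p0, &p1)) {
            printf("restart translation\n");  /* tb-maint.c: siglongjmp(-3) */
        }
        return 0;
    }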

@@ -58,7 +58,7 @@ DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
 DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
                    i64, env, i64, i64, i64, i32)
 #endif
-#ifdef CONFIG_CMPXCHG128
+#if HAVE_CMPXCHG128
 DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG,
                    i128, env, i64, i128, i128, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG,

@@ -290,7 +290,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
 {
     CPUArchState *env = cpu->env_ptr;
     TranslationBlock *tb, *existing_tb;
-    tb_page_addr_t phys_pc;
+    tb_page_addr_t phys_pc, phys_p2;
     tcg_insn_unit *gen_code_buf;
     int gen_code_size, search_size, max_insns;
     int64_t ti;
@@ -313,6 +313,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
 
 buffer_overflow:
+    assert_no_pages_locked();
     tb = tcg_tb_alloc(tcg_ctx);
     if (unlikely(!tb)) {
         /* flush must be done */
@@ -333,6 +334,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tb->cflags = cflags;
     tb_set_page_addr0(tb, phys_pc);
     tb_set_page_addr1(tb, -1);
+    if (phys_pc != -1) {
+        tb_lock_page0(phys_pc);
+    }
+
     tcg_ctx->gen_tb = tb;
     tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
 #ifdef CONFIG_SOFTMMU
@@ -349,8 +354,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tcg_ctx->guest_mo = TCG_MO_ALL;
 #endif
 
- tb_overflow:
-
+ restart_translate:
     trace_translate_block(tb, pc, tb->tc.ptr);
 
     gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
@@ -369,6 +373,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
             qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                           "Restarting code generation for "
                           "code_gen_buffer overflow\n");
+            tb_unlock_pages(tb);
             goto buffer_overflow;
 
         case -2:
@@ -387,14 +392,39 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
                           "Restarting code generation with "
                           "smaller translation block (max %d insns)\n",
                           max_insns);
-            goto tb_overflow;
+
+            /*
+             * The half-sized TB may not cross pages.
+             * TODO: Fix all targets that cross pages except with
+             * the first insn, at which point this can't be reached.
+             */
+            phys_p2 = tb_page_addr1(tb);
+            if (unlikely(phys_p2 != -1)) {
+                tb_unlock_page1(phys_pc, phys_p2);
+                tb_set_page_addr1(tb, -1);
+            }
+            goto restart_translate;
+
+        case -3:
+            /*
+             * We had a page lock ordering problem.  In order to avoid
+             * deadlock we had to drop the lock on page0, which means
+             * that everything we translated so far is compromised.
+             * Restart with locks held on both pages.
+             */
+            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
+                          "Restarting code generation with re-locked pages");
+            goto restart_translate;
 
         default:
             g_assert_not_reached();
         }
     }
+    tcg_ctx->gen_tb = NULL;
+
     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
     if (unlikely(search_size < 0)) {
+        tb_unlock_pages(tb);
         goto buffer_overflow;
     }
     tb->tc.size = gen_code_size;
@@ -504,6 +534,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
      * before attempting to link to other TBs or add to the lookup table.
      */
     if (tb_page_addr0(tb) == -1) {
+        assert_no_pages_locked();
         return tb;
     }
 
@@ -518,7 +549,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
      * No explicit memory barrier is required -- tb_link_page() makes the
      * TB visible in a consistent state.
      */
-    existing_tb = tb_link_page(tb, tb_page_addr0(tb), tb_page_addr1(tb));
+    existing_tb = tb_link_page(tb);
+    assert_no_pages_locked();
+
     /* if the TB already exists, discard what we just translated */
     if (unlikely(existing_tb != tb)) {
         uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

@@ -12,9 +12,9 @@
 #include "qemu/error-report.h"
 #include "exec/exec-all.h"
 #include "exec/translator.h"
-#include "exec/translate-all.h"
 #include "exec/plugin-gen.h"
 #include "tcg/tcg-op-common.h"
+#include "internal.h"
 
 static void gen_io_start(void)
 {
@@ -147,10 +147,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
     db->host_addr[0] = host_pc;
     db->host_addr[1] = NULL;
 
-#ifdef CONFIG_USER_ONLY
-    page_protect(pc);
-#endif
-
     ops->init_disas_context(db, cpu);
     tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */
@@ -256,22 +252,36 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
         host = db->host_addr[1];
         base = TARGET_PAGE_ALIGN(db->pc_first);
         if (host == NULL) {
-            tb_page_addr_t phys_page =
-                get_page_addr_code_hostp(env, base, &db->host_addr[1]);
+            tb_page_addr_t page0, old_page1, new_page1;
+
+            new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]);
 
             /*
              * If the second page is MMIO, treat as if the first page
              * was MMIO as well, so that we do not cache the TB.
              */
-            if (unlikely(phys_page == -1)) {
+            if (unlikely(new_page1 == -1)) {
+                tb_unlock_pages(tb);
                 tb_set_page_addr0(tb, -1);
                 return NULL;
             }
 
-            tb_set_page_addr1(tb, phys_page);
-#ifdef CONFIG_USER_ONLY
-            page_protect(end);
-#endif
+            /*
+             * If this is not the first time around, and page1 matches,
+             * then we already have the page locked.  Alternately, we're
+             * not doing anything to prevent the PTE from changing, so
+             * we might wind up with a different page, requiring us to
+             * re-do the locking.
+             */
+            old_page1 = tb_page_addr1(tb);
+            if (likely(new_page1 != old_page1)) {
+                page0 = tb_page_addr0(tb);
+                if (unlikely(old_page1 != -1)) {
+                    tb_unlock_page1(page0, old_page1);
+                }
+                tb_set_page_addr1(tb, new_page1);
+                tb_lock_page1(page0, new_page1);
+            }
             host = db->host_addr[1];
         }

@@ -520,19 +520,19 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
     }
 }
 
-int page_check_range(target_ulong start, target_ulong len, int flags)
+bool page_check_range(target_ulong start, target_ulong len, int flags)
 {
     target_ulong last;
     int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
-    int ret;
+    bool ret;
 
     if (len == 0) {
-        return 0;  /* trivial length */
+        return true;  /* trivial length */
     }
 
     last = start + len - 1;
     if (last < start) {
-        return -1; /* wrap around */
+        return false; /* wrap around */
     }
 
     locked = have_mmap_lock();
@@ -551,33 +551,33 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
                 p = pageflags_find(start, last);
             }
             if (!p) {
-                ret = -1; /* entire region invalid */
+                ret = false; /* entire region invalid */
                 break;
             }
         }
         if (start < p->itree.start) {
-            ret = -1; /* initial bytes invalid */
+            ret = false; /* initial bytes invalid */
             break;
         }
 
         missing = flags & ~p->flags;
-        if (missing & PAGE_READ) {
-            ret = -1; /* page not readable */
+        if (missing & ~PAGE_WRITE) {
+            ret = false; /* page doesn't match */
             break;
         }
         if (missing & PAGE_WRITE) {
             if (!(p->flags & PAGE_WRITE_ORG)) {
-                ret = -1; /* page not writable */
+                ret = false; /* page not writable */
                 break;
             }
             /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
-                ret = -1;
+                ret = false;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
-                ret = 0; /* ok */
+                ret = true; /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
@@ -585,7 +585,7 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
         }
 
         if (last <= p->itree.last) {
-            ret = 0; /* ok */
+            ret = true; /* ok */
             break;
         }
         start = p->itree.last + 1;
@@ -598,6 +598,54 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
     return ret;
 }
 
+bool page_check_range_empty(target_ulong start, target_ulong last)
+{
+    assert(last >= start);
+    assert_memory_lock();
+    return pageflags_find(start, last) == NULL;
+}
+
+target_ulong page_find_range_empty(target_ulong min, target_ulong max,
+                                   target_ulong len, target_ulong align)
+{
+    target_ulong len_m1, align_m1;
+
+    assert(min <= max);
+    assert(max <= GUEST_ADDR_MAX);
+    assert(len != 0);
+    assert(is_power_of_2(align));
+    assert_memory_lock();
+
+    len_m1 = len - 1;
+    align_m1 = align - 1;
+
+    /* Iteratively narrow the search region. */
+    while (1) {
+        PageFlagsNode *p;
+
+        /* Align min and double-check there's enough space remaining. */
+        min = (min + align_m1) & ~align_m1;
+        if (min > max) {
+            return -1;
+        }
+        if (len_m1 > max - min) {
+            return -1;
+        }
+
+        p = pageflags_find(min, min + len_m1);
+        if (p == NULL) {
+            /* Found! */
+            return min;
+        }
+        if (max <= p->itree.last) {
+            /* Existing allocation fills the remainder of the search region. */
+            return -1;
+        }
+
+        /* Skip across existing allocation. */
+        min = p->itree.last + 1;
+    }
+}
+
 void page_protect(tb_page_addr_t address)
 {
     PageFlagsNode *p;
@@ -1385,7 +1433,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
 #include "atomic_template.h"
 #endif
 
-#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
+#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
 #define DATA_SIZE 16
 #include "atomic_template.h"
 #endif

@@ -222,50 +222,16 @@ unsigned long last_brk;
 static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                         abi_ulong alignment)
 {
-    abi_ulong addr;
-    abi_ulong end_addr;
-    int prot;
-    int looped = 0;
-
-    if (size > reserved_va) {
-        return (abi_ulong)-1;
-    }
-
-    size = HOST_PAGE_ALIGN(size) + alignment;
-    end_addr = start + size;
-    if (end_addr > reserved_va) {
-        end_addr = reserved_va + 1;
-    }
-    addr = end_addr - qemu_host_page_size;
-
-    while (1) {
-        if (addr > end_addr) {
-            if (looped) {
-                return (abi_ulong)-1;
-            }
-            end_addr = reserved_va + 1;
-            addr = end_addr - qemu_host_page_size;
-            looped = 1;
-            continue;
-        }
-        prot = page_get_flags(addr);
-        if (prot) {
-            end_addr = addr;
-        }
-        if (end_addr - addr >= size) {
-            break;
-        }
-        addr -= qemu_host_page_size;
-    }
-
-    if (start == mmap_next_start) {
-        mmap_next_start = addr;
-    }
-    /* addr is sufficiently low to align it up */
-    if (alignment != 0) {
-        addr = (addr + alignment) & ~(alignment - 1);
-    }
-    return addr;
+    abi_ulong ret;
+
+    ret = page_find_range_empty(start, reserved_va, size, alignment);
+    if (ret == -1 && start > TARGET_PAGE_SIZE) {
+        /* Restart at the beginning of the address space. */
+        ret = page_find_range_empty(TARGET_PAGE_SIZE, start - 1,
+                                    size, alignment);
+    }
+
+    return ret;
 }
 
 /*
@@ -609,7 +575,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
     }
 
     /* Reject the mapping if any page within the range is mapped */
-    if ((flags & MAP_EXCL) && page_check_range(start, len, 0) < 0) {
+    if ((flags & MAP_EXCL) && !page_check_range_empty(start, end - 1)) {
         errno = EINVAL;
         goto fail;
     }

@@ -267,7 +267,7 @@ abi_long do_freebsd_sysarch(void *cpu_env, abi_long arg1, abi_long arg2);
 
 static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
 {
-    return page_check_range((target_ulong)addr, size, type) == 0;
+    return page_check_range((target_ulong)addr, size, type);
 }
 
 /*

@@ -222,7 +222,45 @@ int walk_memory_regions(void *, walk_memory_regions_fn);
 int page_get_flags(target_ulong address);
 void page_set_flags(target_ulong start, target_ulong last, int flags);
 void page_reset_target_data(target_ulong start, target_ulong last);
-int page_check_range(target_ulong start, target_ulong len, int flags);
+
+/**
+ * page_check_range
+ * @start: first byte of range
+ * @len: length of range
+ * @flags: flags required for each page
+ *
+ * Return true if every page in [@start, @start+@len) has @flags set.
+ * Return false if any page is unmapped.  Thus testing flags == 0 is
+ * equivalent to testing for flags == PAGE_VALID.
+ */
+bool page_check_range(target_ulong start, target_ulong len, int flags);
+
+/**
+ * page_check_range_empty:
+ * @start: first byte of range
+ * @last: last byte of range
+ * Context: holding mmap lock
+ *
+ * Return true if the entire range [@start, @last] is unmapped.
+ * The memory lock must be held so that the caller can ensure
+ * the result stays true until a new mapping can be installed.
+ */
+bool page_check_range_empty(target_ulong start, target_ulong last);
+
+/**
+ * page_find_range_empty
+ * @min: first byte of search range
+ * @max: last byte of search range
+ * @len: size of the hole required
+ * @align: alignment of the hole required (power of 2)
+ *
+ * If there is a range [x, x+@len) within [@min, @max] such that
+ * x % @align == 0, then return x.  Otherwise return -1.
+ * The memory lock must be held, as the caller will want to ensure
+ * the returned range stays empty until a new mapping can be installed.
+ */
+target_ulong page_find_range_empty(target_ulong min, target_ulong max,
+                                   target_ulong len, target_ulong align);
 
 /**
  * page_get_target_data(address)
@@ -7,6 +7,8 @@
 #ifndef HELPER_PROTO_COMMON_H
 #define HELPER_PROTO_COMMON_H
 
+#include "qemu/atomic128.h"  /* for HAVE_CMPXCHG128 */
+
 #define HELPER_H "accel/tcg/tcg-runtime.h"
 #include "exec/helper-proto.h.inc"
 #undef HELPER_H

@@ -15,7 +15,10 @@
 #define ABI_LLONG_ALIGNMENT 2
 #endif
 
-#if (defined(TARGET_I386) && !defined(TARGET_X86_64)) || defined(TARGET_SH4)
+#if (defined(TARGET_I386) && !defined(TARGET_X86_64)) \
+    || defined(TARGET_SH4) \
+    || defined(TARGET_MICROBLAZE) \
+    || defined(TARGET_NIOS2)
 #define ABI_LLONG_ALIGNMENT 4
 #endif
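
How such an alignment macro is typically consumed (compare include/exec/user/abitypes.h; the exact form there may differ): in GNU C, an aligned attribute on a typedef pins the type's alignment, so guest structure layout stops depending on the host ABI. A compilable sketch under that assumption:

    #include <stdint.h>

    #define ABI_LLONG_ALIGNMENT 4   /* e.g. 32-bit x86, sh4, microblaze, nios2 */

    typedef int64_t abi_llong __attribute__((aligned(ABI_LLONG_ALIGNMENT)));

    struct target_pair {            /* mirrors a 4-byte-aligned guest layout */
        int32_t   a;
        abi_llong b;                /* lands at offset 4 here, not 8 */
    };

    _Static_assert(sizeof(struct target_pair) == 12, "guest layout preserved");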

@@ -1 +1,9 @@
+#ifndef AARCH64_TARGET_MMAN_H
+#define AARCH64_TARGET_MMAN_H
+
+#define TARGET_PROT_BTI         0x10
+#define TARGET_PROT_MTE         0x20
+
 #include "../generic/target_mman.h"
+
+#endif

@@ -1,6 +1,19 @@
 #ifndef ALPHA_TARGET_MMAN_H
 #define ALPHA_TARGET_MMAN_H
 
+#define TARGET_MAP_ANONYMOUS            0x10
+#define TARGET_MAP_FIXED                0x100
+#define TARGET_MAP_GROWSDOWN            0x01000
+#define TARGET_MAP_DENYWRITE            0x02000
+#define TARGET_MAP_EXECUTABLE           0x04000
+#define TARGET_MAP_LOCKED               0x08000
+#define TARGET_MAP_NORESERVE            0x10000
+#define TARGET_MAP_POPULATE             0x20000
+#define TARGET_MAP_NONBLOCK             0x40000
+#define TARGET_MAP_STACK                0x80000
+#define TARGET_MAP_HUGETLB              0x100000
+#define TARGET_MAP_FIXED_NOREPLACE      0x200000
+
 #define TARGET_MADV_DONTNEED 6
 
 #define TARGET_MS_ASYNC 1

@@ -424,10 +424,23 @@ enum {
 
 static bool init_guest_commpage(void)
 {
-    abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size;
-    void *want = g2h_untagged(commpage);
-    void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
-                      MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
+    ARMCPU *cpu = ARM_CPU(thread_cpu);
+    abi_ptr commpage;
+    void *want;
+    void *addr;
+
+    /*
+     * M-profile allocates maximum of 2GB address space, so can never
+     * allocate the commpage.  Skip it.
+     */
+    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
+        return true;
+    }
+
+    commpage = HI_COMMPAGE & -qemu_host_page_size;
+    want = g2h_untagged(commpage);
+    addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
+                MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
 
     if (addr == MAP_FAILED) {
         perror("Allocating guest commpage");

@@ -1,6 +1,64 @@
 #ifndef LINUX_USER_TARGET_MMAN_H
 #define LINUX_USER_TARGET_MMAN_H
 
+/* These are defined in linux/mmap.h */
+#define TARGET_MAP_SHARED               0x01
+#define TARGET_MAP_PRIVATE              0x02
+#define TARGET_MAP_SHARED_VALIDATE      0x03
+
+/* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */
+#ifndef TARGET_MAP_GROWSDOWN
+#define TARGET_MAP_GROWSDOWN            0x0100
+#endif
+#ifndef TARGET_MAP_DENYWRITE
+#define TARGET_MAP_DENYWRITE            0x0800
+#endif
+#ifndef TARGET_MAP_EXECUTABLE
+#define TARGET_MAP_EXECUTABLE           0x1000
+#endif
+#ifndef TARGET_MAP_LOCKED
+#define TARGET_MAP_LOCKED               0x2000
+#endif
+#ifndef TARGET_MAP_NORESERVE
+#define TARGET_MAP_NORESERVE            0x4000
+#endif
+
+/* Defined in asm-generic/mman-common.h */
+#ifndef TARGET_PROT_SEM
+#define TARGET_PROT_SEM                 0x08
+#endif
+
+#ifndef TARGET_MAP_TYPE
+#define TARGET_MAP_TYPE                 0x0f
+#endif
+#ifndef TARGET_MAP_FIXED
+#define TARGET_MAP_FIXED                0x10
+#endif
+#ifndef TARGET_MAP_ANONYMOUS
+#define TARGET_MAP_ANONYMOUS            0x20
+#endif
+#ifndef TARGET_MAP_POPULATE
+#define TARGET_MAP_POPULATE             0x008000
+#endif
+#ifndef TARGET_MAP_NONBLOCK
+#define TARGET_MAP_NONBLOCK             0x010000
+#endif
+#ifndef TARGET_MAP_STACK
+#define TARGET_MAP_STACK                0x020000
+#endif
+#ifndef TARGET_MAP_HUGETLB
+#define TARGET_MAP_HUGETLB              0x040000
+#endif
+#ifndef TARGET_MAP_SYNC
+#define TARGET_MAP_SYNC                 0x080000
+#endif
+#ifndef TARGET_MAP_FIXED_NOREPLACE
+#define TARGET_MAP_FIXED_NOREPLACE      0x100000
+#endif
+#ifndef TARGET_MAP_UNINITIALIZED
+#define TARGET_MAP_UNINITIALIZED        0x4000000
+#endif
+
 #ifndef TARGET_MADV_NORMAL
 #define TARGET_MADV_NORMAL 0
 #endif

@@ -1,6 +1,16 @@
 #ifndef HPPA_TARGET_MMAN_H
 #define HPPA_TARGET_MMAN_H
 
+#define TARGET_MAP_TYPE                 0x2b
+#define TARGET_MAP_FIXED                0x04
+#define TARGET_MAP_ANONYMOUS            0x10
+#define TARGET_MAP_GROWSDOWN            0x8000
+#define TARGET_MAP_POPULATE             0x10000
+#define TARGET_MAP_NONBLOCK             0x20000
+#define TARGET_MAP_STACK                0x40000
+#define TARGET_MAP_HUGETLB              0x80000
+#define TARGET_MAP_UNINITIALIZED        0
+
 #define TARGET_MADV_MERGEABLE 65
 #define TARGET_MADV_UNMERGEABLE 66
 #define TARGET_MADV_HUGEPAGE 67

@@ -1 +1,19 @@
+#ifndef MIPS_TARGET_MMAN_H
+#define MIPS_TARGET_MMAN_H
+
+#define TARGET_PROT_SEM                 0x10
+
+#define TARGET_MAP_NORESERVE            0x0400
+#define TARGET_MAP_ANONYMOUS            0x0800
+#define TARGET_MAP_GROWSDOWN            0x1000
+#define TARGET_MAP_DENYWRITE            0x2000
+#define TARGET_MAP_EXECUTABLE           0x4000
+#define TARGET_MAP_LOCKED               0x8000
+#define TARGET_MAP_POPULATE             0x10000
+#define TARGET_MAP_NONBLOCK             0x20000
+#define TARGET_MAP_STACK                0x40000
+#define TARGET_MAP_HUGETLB              0x80000
+
 #include "../generic/target_mman.h"
+
+#endif

@@ -1 +1 @@
-#include "../generic/target_mman.h"
+#include "../mips/target_mman.h"

File diff suppressed because it is too large.

@@ -1 +1,9 @@
+#ifndef PPC_TARGET_MMAN_H
+#define PPC_TARGET_MMAN_H
+
+#define TARGET_MAP_NORESERVE            0x40
+#define TARGET_MAP_LOCKED               0x80
+
 #include "../generic/target_mman.h"
+
+#endif

@@ -182,7 +182,7 @@ static inline bool access_ok_untagged(int type, abi_ulong addr, abi_ulong size)
         : !guest_range_valid_untagged(addr, size)) {
         return false;
     }
-    return page_check_range((target_ulong)addr, size, type) == 0;
+    return page_check_range((target_ulong)addr, size, type);
 }
 
 static inline bool access_ok(CPUState *cpu, int type,

@@ -1 +1,10 @@
+#ifndef SPARC_TARGET_MMAN_H
+#define SPARC_TARGET_MMAN_H
+
+#define TARGET_MAP_NORESERVE            0x40
+#define TARGET_MAP_LOCKED               0x100
+#define TARGET_MAP_GROWSDOWN            0x0200
+
 #include "../generic/target_mman.h"
+
+#endif

@@ -46,15 +46,21 @@ struct syscallname {
  */
 struct flags {
     abi_long    f_value;  /* flag */
+    abi_long    f_mask;   /* mask */
     const char  *f_string; /* stringified flag */
 };
 
+/* No 'struct flags' element should have a zero mask. */
+#define FLAG_BASIC(V, M, N) { V, M | QEMU_BUILD_BUG_ON_ZERO(!(M)), N }
+
 /* common flags for all architectures */
-#define FLAG_GENERIC(name) { name, #name }
+#define FLAG_GENERIC_MASK(V, M) FLAG_BASIC(V, M, #V)
+#define FLAG_GENERIC(V)         FLAG_BASIC(V, V, #V)
 /* target specific flags (syscall_defs.h has TARGET_<flag>) */
-#define FLAG_TARGET(name) { TARGET_ ## name, #name }
+#define FLAG_TARGET_MASK(V, M)  FLAG_BASIC(TARGET_##V, TARGET_##M, #V)
+#define FLAG_TARGET(V)          FLAG_BASIC(TARGET_##V, TARGET_##V, #V)
 /* end of flags array */
-#define FLAG_END { 0, NULL }
+#define FLAG_END { 0, 0, NULL }
 
 /* Structure used to translate enumerated values into strings */
 struct enums {
@@ -963,7 +969,7 @@ print_syscall_ret_ioctl(CPUArchState *cpu_env, const struct syscallname *name,
 #endif
 
 UNUSED static const struct flags access_flags[] = {
-    FLAG_GENERIC(F_OK),
+    FLAG_GENERIC_MASK(F_OK, R_OK | W_OK | X_OK),
     FLAG_GENERIC(R_OK),
     FLAG_GENERIC(W_OK),
     FLAG_GENERIC(X_OK),
@@ -999,9 +1005,9 @@ UNUSED static const struct flags mode_flags[] = {
 };
 
 UNUSED static const struct flags open_access_flags[] = {
-    FLAG_TARGET(O_RDONLY),
-    FLAG_TARGET(O_WRONLY),
-    FLAG_TARGET(O_RDWR),
+    FLAG_TARGET_MASK(O_RDONLY, O_ACCMODE),
+    FLAG_TARGET_MASK(O_WRONLY, O_ACCMODE),
+    FLAG_TARGET_MASK(O_RDWR, O_ACCMODE),
     FLAG_END,
 };
@@ -1010,7 +1016,9 @@ UNUSED static const struct flags open_flags[] = {
     FLAG_TARGET(O_CREAT),
    FLAG_TARGET(O_DIRECTORY),
    FLAG_TARGET(O_EXCL),
+#if TARGET_O_LARGEFILE != 0
    FLAG_TARGET(O_LARGEFILE),
+#endif
    FLAG_TARGET(O_NOCTTY),
    FLAG_TARGET(O_NOFOLLOW),
    FLAG_TARGET(O_NONBLOCK),      /* also O_NDELAY */
@@ -1075,7 +1083,7 @@ UNUSED static const struct flags umount2_flags[] = {
 };
 
 UNUSED static const struct flags mmap_prot_flags[] = {
-    FLAG_GENERIC(PROT_NONE),
+    FLAG_GENERIC_MASK(PROT_NONE, PROT_READ | PROT_WRITE | PROT_EXEC),
     FLAG_GENERIC(PROT_EXEC),
     FLAG_GENERIC(PROT_READ),
     FLAG_GENERIC(PROT_WRITE),
@@ -1086,28 +1094,25 @@ UNUSED static const struct flags mmap_prot_flags[] = {
 };
 
 UNUSED static const struct flags mmap_flags[] = {
-    FLAG_TARGET(MAP_SHARED),
-    FLAG_TARGET(MAP_PRIVATE),
+    FLAG_TARGET_MASK(MAP_SHARED, MAP_TYPE),
+    FLAG_TARGET_MASK(MAP_PRIVATE, MAP_TYPE),
+    FLAG_TARGET_MASK(MAP_SHARED_VALIDATE, MAP_TYPE),
     FLAG_TARGET(MAP_ANONYMOUS),
     FLAG_TARGET(MAP_DENYWRITE),
-    FLAG_TARGET(MAP_FIXED),
-    FLAG_TARGET(MAP_GROWSDOWN),
     FLAG_TARGET(MAP_EXECUTABLE),
-#ifdef MAP_LOCKED
+    FLAG_TARGET(MAP_FIXED),
+    FLAG_TARGET(MAP_FIXED_NOREPLACE),
+    FLAG_TARGET(MAP_GROWSDOWN),
+    FLAG_TARGET(MAP_HUGETLB),
     FLAG_TARGET(MAP_LOCKED),
-#endif
-#ifdef MAP_NONBLOCK
     FLAG_TARGET(MAP_NONBLOCK),
-#endif
     FLAG_TARGET(MAP_NORESERVE),
-#ifdef MAP_POPULATE
     FLAG_TARGET(MAP_POPULATE),
-#endif
-#ifdef TARGET_MAP_UNINITIALIZED
+    FLAG_TARGET(MAP_STACK),
+    FLAG_TARGET(MAP_SYNC),
+#if TARGET_MAP_UNINITIALIZED != 0
     FLAG_TARGET(MAP_UNINITIALIZED),
 #endif
-    FLAG_TARGET(MAP_HUGETLB),
-    FLAG_TARGET(MAP_STACK),
     FLAG_END,
 };
@@ -1201,13 +1206,13 @@ UNUSED static const struct flags statx_flags[] = {
     FLAG_GENERIC(AT_SYMLINK_NOFOLLOW),
 #endif
 #ifdef AT_STATX_SYNC_AS_STAT
-    FLAG_GENERIC(AT_STATX_SYNC_AS_STAT),
+    FLAG_GENERIC_MASK(AT_STATX_SYNC_AS_STAT, AT_STATX_SYNC_TYPE),
 #endif
 #ifdef AT_STATX_FORCE_SYNC
-    FLAG_GENERIC(AT_STATX_FORCE_SYNC),
+    FLAG_GENERIC_MASK(AT_STATX_FORCE_SYNC, AT_STATX_SYNC_TYPE),
 #endif
 #ifdef AT_STATX_DONT_SYNC
-    FLAG_GENERIC(AT_STATX_DONT_SYNC),
+    FLAG_GENERIC_MASK(AT_STATX_DONT_SYNC, AT_STATX_SYNC_TYPE),
 #endif
     FLAG_END,
 };
@@ -1481,14 +1486,10 @@ print_flags(const struct flags *f, abi_long flags, int last)
     const char *sep = "";
     int n;
 
-    if ((flags == 0) && (f->f_value == 0)) {
-        qemu_log("%s%s", f->f_string, get_comma(last));
-        return;
-    }
-
     for (n = 0; f->f_string != NULL; f++) {
-        if ((f->f_value != 0) && ((flags & f->f_value) == f->f_value)) {
+        if ((flags & f->f_mask) == f->f_value) {
             qemu_log("%s%s", sep, f->f_string);
-            flags &= ~f->f_value;
+            flags &= ~f->f_mask;
             sep = "|";
             n++;
         }
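
Why a mask, in miniature: F_OK and PROT_NONE are zero-valued names, and O_RDONLY/O_WRONLY/O_RDWR form a multi-bit field, so matching on value alone either never fires or fires spuriously. A standalone demonstration (not QEMU code):

    #include <stdio.h>
    #include <unistd.h>

    struct flag { int value, mask; const char *name; };

    static const struct flag access_bits[] = {
        { F_OK, R_OK | W_OK | X_OK, "F_OK" },   /* F_OK == 0 needs a mask */
        { R_OK, R_OK, "R_OK" },
        { W_OK, W_OK, "W_OK" },
        { X_OK, X_OK, "X_OK" },
        { 0, 0, NULL },
    };

    int main(void)
    {
        int flags = F_OK;   /* prints "F_OK" now; the value-only test never could */
        for (const struct flag *f = access_bits; f->name; f++) {
            if ((flags & f->mask) == f->value) {
                printf("%s\n", f->name);
                flags &= ~f->mask;
            }
        }
        return 0;
    }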

@@ -309,16 +309,16 @@ _syscall0(int, sys_gettid)
 #endif
 
 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
-_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
+_syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
 #endif
 #if (defined(TARGET_NR_getdents) && \
     !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
-_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
+_syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
 #endif
 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
-_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
-          loff_t *, res, uint, wh);
+_syscall5(int, _llseek, unsigned int, fd, unsigned long, hi, unsigned long, lo,
+          loff_t *, res, unsigned int, wh);
 #endif
 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
@@ -659,6 +659,7 @@ safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
 #endif
 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
               int, options, struct rusage *, rusage)
+safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
               char **, argv, char **, envp, int, flags)
 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
@@ -805,7 +806,7 @@ static abi_ulong brk_page;
 
 void target_set_brk(abi_ulong new_brk)
 {
-    target_brk = new_brk;
+    target_brk = TARGET_PAGE_ALIGN(new_brk);
     brk_page = HOST_PAGE_ALIGN(target_brk);
 }
@@ -4538,14 +4539,14 @@ static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 }
 #endif
 
-static inline abi_ulong do_shmat(CPUArchState *cpu_env,
-                                 int shmid, abi_ulong shmaddr, int shmflg)
+static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
+                          abi_ulong shmaddr, int shmflg)
 {
     CPUState *cpu = env_cpu(cpu_env);
-    abi_long raddr;
+    abi_ulong raddr;
     void *host_raddr;
     struct shmid_ds shm_info;
-    int i,ret;
+    int i, ret;
     abi_ulong shmlba;
 
     /* shmat pointers are always untagged */
@@ -4601,9 +4602,9 @@ static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
 
     if (host_raddr == (void *)-1) {
         mmap_unlock();
-        return get_errno((long)host_raddr);
+        return get_errno((intptr_t)host_raddr);
     }
-    raddr=h2g((unsigned long)host_raddr);
+    raddr = h2g((uintptr_t)host_raddr);
 
     page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                    PAGE_VALID | PAGE_RESET | PAGE_READ |
@@ -4620,7 +4621,6 @@ static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
 
     mmap_unlock();
     return raddr;
-
 }
 
 static inline abi_long do_shmdt(abi_ulong shmaddr)
@@ -6012,9 +6012,19 @@ static const StructEntry struct_termios_def = {
     .print = print_termios,
 };
 
+/* If the host does not provide these bits, they may be safely discarded. */
+#ifndef MAP_SYNC
+#define MAP_SYNC 0
+#endif
+#ifndef MAP_UNINITIALIZED
+#define MAP_UNINITIALIZED 0
+#endif
+
 static const bitmask_transtbl mmap_flags_tbl[] = {
-    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
-    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
+    { TARGET_MAP_TYPE, TARGET_MAP_SHARED, MAP_TYPE, MAP_SHARED },
+    { TARGET_MAP_TYPE, TARGET_MAP_PRIVATE, MAP_TYPE, MAP_PRIVATE },
+    { TARGET_MAP_TYPE, TARGET_MAP_SHARED_VALIDATE,
+      MAP_TYPE, MAP_SHARED_VALIDATE },
     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
       MAP_ANONYMOUS, MAP_ANONYMOUS },
@@ -6032,6 +6042,13 @@ static const bitmask_transtbl mmap_flags_tbl[] = {
        Recognize it for the target insofar as we do not want to pass
        it through to the host.  */
     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
+    { TARGET_MAP_SYNC, TARGET_MAP_SYNC, MAP_SYNC, MAP_SYNC },
+    { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
+    { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
+    { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
+      MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
+    { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
+      MAP_UNINITIALIZED, MAP_UNINITIALIZED },
     { 0, 0, 0, 0 }
 };
@@ -8105,7 +8122,7 @@ static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
         max = h2g_valid(max - 1) ?
             max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
 
-        if (page_check_range(h2g(min), max - min, flags) == -1) {
+        if (!page_check_range(h2g(min), max - min, flags)) {
             continue;
         }
@@ -8629,9 +8646,9 @@ ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
     return ret;
 }
 
-static int do_execveat(CPUArchState *cpu_env, int dirfd,
-                       abi_long pathname, abi_long guest_argp,
-                       abi_long guest_envp, int flags)
+static int do_execv(CPUArchState *cpu_env, int dirfd,
+                    abi_long pathname, abi_long guest_argp,
+                    abi_long guest_envp, int flags, bool is_execveat)
 {
     int ret;
     char **argp, **envp;
@@ -8710,11 +8727,14 @@ static int do_execv(CPUArchState *cpu_env, int dirfd,
         goto execve_efault;
     }
 
+    const char *exe = p;
     if (is_proc_myself(p, "exe")) {
-        ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
-    } else {
-        ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
+        exe = exec_path;
     }
+    ret = is_execveat
+        ? safe_execveat(dirfd, exe, argp, envp, flags)
+        : safe_execve(exe, argp, envp);
+    ret = get_errno(ret);
 
     unlock_user(p, pathname, 0);
@@ -9406,9 +9426,9 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
         return ret;
 #endif
     case TARGET_NR_execveat:
-        return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
+        return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
     case TARGET_NR_execve:
-        return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
+        return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
     case TARGET_NR_chdir:
         if (!(p = lock_user_string(arg1)))
             return -TARGET_EFAULT;
@@ -10571,7 +10591,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
 #endif
         ret = target_mmap(arg1, arg2, arg3,
                           target_to_host_bitmask(arg4, mmap_flags_tbl),
-                          arg5, arg6 << MMAP_SHIFT);
+                          arg5, (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
         return get_errno(ret);
 #endif
     case TARGET_NR_munmap:
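
The mmap2 change above widens the page-index argument before shifting. A standalone illustration of the failure mode (not QEMU code): shifting while the value is still 32 bits wraps large offsets to garbage; converting to a 64-bit off_t first preserves them:

    #include <stdint.h>
    #include <stdio.h>

    #define MMAP_SHIFT 12

    int main(void)
    {
        uint32_t arg6 = 0x00300000;            /* guest page index: a 12 GiB offset */

        int64_t bad  = arg6 << MMAP_SHIFT;     /* shifted in 32 bits: wraps to 0 */
        int64_t good = (int64_t)arg6 << MMAP_SHIFT;

        printf("bad=0x%llx good=0x%llx\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
    }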

File diff suppressed because it is too large.

@@ -20,7 +20,7 @@
 
 int target_mprotect(abi_ulong start, abi_ulong len, int prot);
 abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
-                     int flags, int fd, abi_ulong offset);
+                     int flags, int fd, off_t offset);
 int target_munmap(abi_ulong start, abi_ulong len);
 abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                        abi_ulong new_size, unsigned long flags,

@@ -1 +1,19 @@
+#ifndef XTENSA_TARGET_MMAN_H
+#define XTENSA_TARGET_MMAN_H
+
+#define TARGET_PROT_SEM                 0x10
+
+#define TARGET_MAP_NORESERVE            0x0400
+#define TARGET_MAP_ANONYMOUS            0x0800
+#define TARGET_MAP_GROWSDOWN            0x1000
+#define TARGET_MAP_DENYWRITE            0x2000
+#define TARGET_MAP_EXECUTABLE           0x4000
+#define TARGET_MAP_LOCKED               0x8000
+#define TARGET_MAP_POPULATE             0x10000
+#define TARGET_MAP_NONBLOCK             0x20000
+#define TARGET_MAP_STACK                0x40000
+#define TARGET_MAP_HUGETLB              0x80000
+
 #include "../generic/target_mman.h"
+
+#endif

@@ -168,7 +168,7 @@ target_ureg HELPER(probe)(CPUHPPAState *env, target_ulong addr,
                           uint32_t level, uint32_t want)
 {
 #ifdef CONFIG_USER_ONLY
-    return (page_check_range(addr, 1, want) == 0) ? 1 : 0;
+    return page_check_range(addr, 1, want);
 #else
     int prot, excp;
     hwaddr phys;

@@ -583,7 +583,7 @@ vext_ldff(void *vd, void *v0, target_ulong base,
                          cpu_mmu_index(env, false));
             if (host) {
 #ifdef CONFIG_USER_ONLY
-                if (page_check_range(addr, offset, PAGE_READ) < 0) {
+                if (!page_check_range(addr, offset, PAGE_READ)) {
                     vl = i;
                     goto ProbeSuccess;
                 }

@@ -1191,7 +1191,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
     case ASI_PNFL: /* Primary no-fault LE */
     case ASI_SNF:  /* Secondary no-fault */
     case ASI_SNFL: /* Secondary no-fault LE */
-        if (page_check_range(addr, size, PAGE_READ) == -1) {
+        if (!page_check_range(addr, size, PAGE_READ)) {
             ret = 0;
             break;
         }

@@ -778,7 +778,7 @@ typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64,
 #else
 # define WITH_ATOMIC64(X)
 #endif
-#ifdef CONFIG_CMPXCHG128
+#if HAVE_CMPXCHG128
 # define WITH_ATOMIC128(X) X,
 #else
 # define WITH_ATOMIC128(X)

@@ -1083,7 +1083,7 @@ static void layout_arg_by_ref(TCGCumulativeArgs *cum, TCGHelperInfo *info)
             .ref_slot = cum->ref_slot + i,
         };
     }
-    cum->info_in_idx += n;
+    cum->info_in_idx += n - 1;  /* i=0 accounted for in layout_arg_1 */
     cum->ref_slot += n;
 }