linux-user: Use WITH_MMAP_LOCK_GUARD in target_{shmat,shmdt}

Move the CF_PARALLEL setting outside of the mmap lock.

Tested-by: Helge Deller <deller@gmx.de>
Reviewed-by: Helge Deller <deller@gmx.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 69fa2708a2
parent 225a206c44
Richard Henderson <richard.henderson@linaro.org>, 2023-08-20 10:08:44 -07:00

linux-user/mmap.c
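Note: WITH_MMAP_LOCK_GUARD confines the mmap lock to a lexical scope, so the
lock is released on every exit path out of the block, including the early
returns added inside the guarded region below, without the explicit
mmap_unlock() calls that the old code needed on each path. A minimal sketch
of the general pattern, using hypothetical EXAMPLE_* names and assuming the
GCC/Clang cleanup attribute (this is not QEMU's actual definition):

    extern void mmap_lock(void);
    extern void mmap_unlock(void);

    /* Called when the guard variable leaves scope, even via return. */
    static inline void example_unlock(int *unused)
    {
        mmap_unlock();
    }

    /* One-iteration for loop: take the lock, run the body once, and let
       the cleanup attribute release the lock on any scope exit. */
    #define EXAMPLE_MMAP_LOCK_GUARD() \
        for (int _guard __attribute__((cleanup(example_unlock))) \
                 = (mmap_lock(), 0); !_guard; _guard = 1)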

@@ -1017,9 +1017,8 @@ abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
 {
     CPUState *cpu = env_cpu(cpu_env);
     abi_ulong raddr;
-    void *host_raddr;
     struct shmid_ds shm_info;
-    int i, ret;
+    int ret;
     abi_ulong shmlba;
 
     /* shmat pointers are always untagged */
@@ -1044,7 +1043,43 @@ abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
         return -TARGET_EINVAL;
     }
 
-    mmap_lock();
+    WITH_MMAP_LOCK_GUARD() {
+        void *host_raddr;
+
+        if (shmaddr) {
+            host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
+        } else {
+            abi_ulong mmap_start;
+
+            /* In order to use the host shmat, we need to honor host SHMLBA. */
+            mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
+                                       MAX(SHMLBA, shmlba));
+
+            if (mmap_start == -1) {
+                return -TARGET_ENOMEM;
+            }
+            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
+                               shmflg | SHM_REMAP);
+        }
+
+        if (host_raddr == (void *)-1) {
+            return get_errno(-1);
+        }
+        raddr = h2g(host_raddr);
+
+        page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
+                       PAGE_VALID | PAGE_RESET | PAGE_READ |
+                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
+
+        for (int i = 0; i < N_SHM_REGIONS; i++) {
+            if (!shm_regions[i].in_use) {
+                shm_regions[i].in_use = true;
+                shm_regions[i].start = raddr;
+                shm_regions[i].size = shm_info.shm_segsz;
+                break;
+            }
+        }
+    }
 
     /*
      * We're mapping shared memory, so ensure we generate code for parallel
@@ -1057,56 +1092,17 @@ abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
         tb_flush(cpu);
     }
 
-    if (shmaddr) {
-        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
-    } else {
-        abi_ulong mmap_start;
-
-        /* In order to use the host shmat, we need to honor host SHMLBA. */
-        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
-
-        if (mmap_start == -1) {
-            errno = ENOMEM;
-            host_raddr = (void *)-1;
-        } else {
-            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
-                               shmflg | SHM_REMAP);
-        }
-    }
-
-    if (host_raddr == (void *)-1) {
-        mmap_unlock();
-        return get_errno((intptr_t)host_raddr);
-    }
-    raddr = h2g((uintptr_t)host_raddr);
-
-    page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
-                   PAGE_VALID | PAGE_RESET | PAGE_READ |
-                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
-
-    for (i = 0; i < N_SHM_REGIONS; i++) {
-        if (!shm_regions[i].in_use) {
-            shm_regions[i].in_use = true;
-            shm_regions[i].start = raddr;
-            shm_regions[i].size = shm_info.shm_segsz;
-            break;
-        }
-    }
-
-    mmap_unlock();
     return raddr;
 }
 
 abi_long target_shmdt(abi_ulong shmaddr)
 {
-    int i;
     abi_long rv;
 
     /* shmdt pointers are always untagged */
 
-    mmap_lock();
-
-    for (i = 0; i < N_SHM_REGIONS; ++i) {
+    WITH_MMAP_LOCK_GUARD() {
+        for (int i = 0; i < N_SHM_REGIONS; ++i) {
             if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
                 shm_regions[i].in_use = false;
                 page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
@@ -1114,8 +1110,6 @@ abi_long target_shmdt(abi_ulong shmaddr)
             }
         }
         rv = get_errno(shmdt(g2h_untagged(shmaddr)));
-
-    mmap_unlock();
-
+    }
     return rv;
 }
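Two properties of the result are worth spelling out. First, the early returns
inside the guarded block (return -TARGET_ENOMEM and return get_errno(-1)) are
safe because the guard releases the lock on any scope exit; with the
illustrative EXAMPLE_MMAP_LOCK_GUARD sketched above, the behavior looks like
this toy function (hypothetical, for demonstration only):

    static long attach_example(int fail)
    {
        EXAMPLE_MMAP_LOCK_GUARD() {
            if (fail) {
                return -1;  /* cleanup still runs: the lock is released */
            }
        }
        return 0;           /* lock was released when the block closed */
    }

Second, the CF_PARALLEL update and its tb_flush(cpu) now sit after the guarded
block closes, which is the "Move the CF_PARALLEL setting outside of the mmap
lock" noted in the commit message: the translation-block flush no longer runs
while the mmap lock is held.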