target/s390x: Implement s390_cpu_record_sigsegv
Move the masking of the address from cpu_loop into s390_cpu_record_sigsegv -- this is governed by the hardware, not Linux. This does mean we have to raise our own exception, rather than return to the fallback.

Use maperr to choose between PGM_PROTECTION and PGM_ADDRESSING. Use the appropriate si_code for each in cpu_loop.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit c8e7fef102
parent db9aab5783
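In short, the new user-only fault path works roughly as sketched below (a condensed illustration assembled from the hunks that follow, not the literal diff):

    /* Sketch: record the fault the way the hardware would report it. */
    void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
                                 MMUAccessType access_type,
                                 bool maperr, uintptr_t retaddr)
    {
        S390CPU *cpu = S390_CPU(cs);

        /* maperr selects the program interruption code: unmapped page vs. protection. */
        trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
        /* The machine reports only the faulting page, so mask the address here. */
        cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
        cpu_loop_exit_restore(cs, retaddr);
    }

    /*
     * cpu_loop then maps the interruption code to the matching si_code:
     *   PGM_PROTECTION -> force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_ACCERR, env->__excp_addr)
     *   PGM_ADDRESSING -> force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, env->__excp_addr)
     */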
@@ -24,8 +24,6 @@
 #include "cpu_loop-common.h"
 #include "signal-common.h"
 
-/* s390x masks the fault address it reports in si_addr for SIGSEGV and SIGBUS */
-#define S390X_FAIL_ADDR_MASK -4096LL
 
 static int get_pgm_data_si_code(int dxc_code)
 {
@@ -111,12 +109,13 @@ void cpu_loop(CPUS390XState *env)
                 n = TARGET_ILL_ILLOPC;
                 goto do_signal_pc;
             case PGM_PROTECTION:
+                force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_ACCERR,
+                                env->__excp_addr);
+                break;
             case PGM_ADDRESSING:
-                sig = TARGET_SIGSEGV;
-                /* XXX: check env->error_code */
-                n = TARGET_SEGV_MAPERR;
-                addr = env->__excp_addr & S390X_FAIL_ADDR_MASK;
-                goto do_signal;
+                force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR,
+                                env->__excp_addr);
+                break;
             case PGM_EXECUTE:
             case PGM_SPECIFICATION:
             case PGM_SPECIAL_OP:
@@ -266,9 +266,11 @@ static void s390_cpu_reset_full(DeviceState *dev)
 
 static const struct TCGCPUOps s390_tcg_ops = {
     .initialize = s390x_translate_init,
-    .tlb_fill = s390_cpu_tlb_fill,
 
-#if !defined(CONFIG_USER_ONLY)
+#ifdef CONFIG_USER_ONLY
+    .record_sigsegv = s390_cpu_record_sigsegv,
+#else
+    .tlb_fill = s390_cpu_tlb_fill,
     .cpu_exec_interrupt = s390_cpu_exec_interrupt,
     .do_interrupt = s390_cpu_do_interrupt,
     .debug_excp_handler = s390x_cpu_debug_excp_handler,
@@ -270,13 +270,20 @@ ObjectClass *s390_cpu_class_by_name(const char *name);
 void s390x_cpu_debug_excp_handler(CPUState *cs);
 void s390_cpu_do_interrupt(CPUState *cpu);
 bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req);
-bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                       MMUAccessType access_type, int mmu_idx,
-                       bool probe, uintptr_t retaddr);
 void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                    MMUAccessType access_type, int mmu_idx,
                                    uintptr_t retaddr) QEMU_NORETURN;
 
+#ifdef CONFIG_USER_ONLY
+void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
+                             MMUAccessType access_type,
+                             bool maperr, uintptr_t retaddr);
+#else
+bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                       MMUAccessType access_type, int mmu_idx,
+                       bool probe, uintptr_t retaddr);
+#endif
+
 /* fpu_helper.c */
 uint32_t set_cc_nz_f32(float32 v);
 uint32_t set_cc_nz_f64(float64 v);
@@ -89,16 +89,20 @@ void s390_cpu_do_interrupt(CPUState *cs)
     cs->exception_index = -1;
 }
 
-bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                       MMUAccessType access_type, int mmu_idx,
-                       bool probe, uintptr_t retaddr)
+void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
+                             MMUAccessType access_type,
+                             bool maperr, uintptr_t retaddr)
 {
     S390CPU *cpu = S390_CPU(cs);
 
-    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING);
-    /* On real machines this value is dropped into LowMem. Since this
-       is userland, simply put this someplace that cpu_loop can find it. */
-    cpu->env.__excp_addr = address;
+    trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
+    /*
+     * On real machines this value is dropped into LowMem. Since this
+     * is userland, simply put this someplace that cpu_loop can find it.
+     * S390 only gives the page of the fault, not the exact address.
+     * C.f. the construction of TEC in mmu_translate().
+     */
+    cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
     cpu_loop_exit_restore(cs, retaddr);
 }
 