target/ppc: Restrict ATTN / SCV / PMINSN helpers to TCG

Move helper_attn(), helper_scv() and helper_pminsn() to
tcg-excp_helper.c.

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Message-ID: <20250127102620.39159-15-philmd@linaro.org>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Philippe Mathieu-Daudé, 2025-01-27 11:26:18 +01:00, committed by Nicholas Piggin
commit c2c687013d, parent 92c787de34
3 changed files with 421 additions and 439 deletions
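At a glance, the net effect of the move is the following condensed sketch (paraphrased from the hunks below, not the complete files; CPUPPCState, G_NORETURN and the helper prototypes come from QEMU's existing headers): the header keeps only the *_ra variant as an extern entry point, while the short-form wrappers become static to the TCG-only file.

/* Header (first file below): only the _ra variant remains extern. */
G_NORETURN void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                                       uint32_t error_code, uintptr_t raddr);

/* tcg-excp_helper.c: the short forms become local, TCG-only wrappers. */
static G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception,
                                           uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

static G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

helper_attn() and helper_scv() are moved verbatim and keep calling raise_exception_err() unchanged; only its linkage moves.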


@@ -2755,9 +2755,6 @@ static inline void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
}
#endif
G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception);
G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception,
uint32_t error_code);
G_NORETURN void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
uint32_t error_code, uintptr_t raddr);


@@ -30,11 +30,6 @@
#include "trace.h"
#ifdef CONFIG_TCG
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#endif
/*****************************************************************************/
/* Exception processing */
#ifndef CONFIG_USER_ONLY
@@ -399,21 +394,6 @@ static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector,
env->reserve_addr = -1;
}
#ifdef CONFIG_TCG
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
void helper_attn(CPUPPCState *env)
{
/* POWER attn is unprivileged when enabled by HID, otherwise illegal */
if ((*env->check_attn)(env)) {
powerpc_checkstop(env, "host executed attn");
} else {
raise_exception_err(env, POWERPC_EXCP_HV_EMU,
POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
}
}
#endif
#endif /* CONFIG_TCG */
static void powerpc_mcheck_checkstop(CPUPPCState *env)
{
/* KVM guests always have MSR[ME] enabled */
@@ -2503,417 +2483,3 @@ bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
#endif /* !CONFIG_USER_ONLY */
#ifdef CONFIG_TCG
#ifndef CONFIG_USER_ONLY
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
uint32_t excp = hreg_store_msr(env, val, 0);
if (excp != 0) {
cpu_interrupt_exittb(env_cpu(env));
raise_exception(env, excp);
}
}
void helper_ppc_maybe_interrupt(CPUPPCState *env)
{
ppc_maybe_interrupt(env);
}
#ifdef TARGET_PPC64
void helper_scv(CPUPPCState *env, uint32_t lev)
{
if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
} else {
raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
}
}
void helper_pminsn(CPUPPCState *env, uint32_t insn)
{
CPUState *cs = env_cpu(env);
cs->halted = 1;
/* Condition for waking up at 0x100 */
env->resume_as_sreset = (insn != PPC_PM_STOP) ||
(env->spr[SPR_PSSCR] & PSSCR_EC);
/* HDECR is not to wake from PM state, it may have already fired */
if (env->resume_as_sreset) {
PowerPCCPU *cpu = env_archcpu(env);
ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
}
ppc_maybe_interrupt(env);
}
#endif /* TARGET_PPC64 */
static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
/* MSR:POW cannot be set by any form of rfi */
msr &= ~(1ULL << MSR_POW);
/* MSR:TGPR cannot be set by any form of rfi */
if (env->flags & POWERPC_FLAG_TGPR) {
msr &= ~(1ULL << MSR_TGPR);
}
#ifdef TARGET_PPC64
/* Switching to 32-bit ? Crop the nip */
if (!msr_is_64bit(env, msr)) {
nip = (uint32_t)nip;
}
#else
nip = (uint32_t)nip;
#endif
/* XXX: beware: this is false if VLE is supported */
env->nip = nip & ~((target_ulong)0x00000003);
hreg_store_msr(env, msr, 1);
trace_ppc_excp_rfi(env->nip, env->msr);
/*
* No need to raise an exception here, as rfi is always the last
* insn of a TB
*/
cpu_interrupt_exittb(env_cpu(env));
/* Reset the reservation */
env->reserve_addr = -1;
/* Context synchronizing: check if TCG TLB needs flush */
check_tlb_flush(env, false);
}
void helper_rfi(CPUPPCState *env)
{
do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}
#ifdef TARGET_PPC64
void helper_rfid(CPUPPCState *env)
{
/*
* The architecture defines a number of rules for which bits can
* change but in practice, we handle this in hreg_store_msr()
* which will be called by do_rfi(), so there is no need to filter
* here
*/
do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}
void helper_rfscv(CPUPPCState *env)
{
do_rfi(env, env->lr, env->ctr);
}
void helper_hrfid(CPUPPCState *env)
{
do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
void helper_rfebb(CPUPPCState *env, target_ulong s)
{
target_ulong msr = env->msr;
/*
* Handling of BESCR bits 32:33 according to PowerISA v3.1:
*
* "If BESCR 32:33 != 0b00 the instruction is treated as if
* the instruction form were invalid."
*/
if (env->spr[SPR_BESCR] & BESCR_INVALID) {
raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
}
env->nip = env->spr[SPR_EBBRR];
/* Switching to 32-bit ? Crop the nip */
if (!msr_is_64bit(env, msr)) {
env->nip = (uint32_t)env->spr[SPR_EBBRR];
}
if (s) {
env->spr[SPR_BESCR] |= BESCR_GE;
} else {
env->spr[SPR_BESCR] &= ~BESCR_GE;
}
}
/*
* Triggers or queues an 'ebb_excp' EBB exception. All checks
* but FSCR, HFSCR and msr_pr must be done beforehand.
*
* PowerISA v3.1 isn't clear about whether an EBB should be
* postponed or cancelled if the EBB facility is unavailable.
* Our assumption here is that the EBB is cancelled if both
* FSCR and HFSCR EBB facilities aren't available.
*/
static void do_ebb(CPUPPCState *env, int ebb_excp)
{
PowerPCCPU *cpu = env_archcpu(env);
/*
* FSCR_EBB and FSCR_IC_EBB are the same bits used with
* HFSCR.
*/
helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
env->spr[SPR_BESCR] |= BESCR_PMEO;
} else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
env->spr[SPR_BESCR] |= BESCR_EEO;
}
if (FIELD_EX64(env->msr, MSR, PR)) {
powerpc_excp(cpu, ebb_excp);
} else {
ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
}
}
void raise_ebb_perfm_exception(CPUPPCState *env)
{
bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
env->spr[SPR_BESCR] & BESCR_PME &&
env->spr[SPR_BESCR] & BESCR_GE;
if (!perfm_ebb_enabled) {
return;
}
do_ebb(env, POWERPC_EXCP_PERFM_EBB);
}
#endif /* TARGET_PPC64 */
/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}
void helper_rfci(CPUPPCState *env)
{
do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}
void helper_rfdi(CPUPPCState *env)
{
/* FIXME: choose CSRR1 or DSRR1 based on cpu type */
do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}
void helper_rfmci(CPUPPCState *env)
{
/* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
/* Embedded.Processor Control */
static int dbell2irq(target_ulong rb)
{
int msg = rb & DBELL_TYPE_MASK;
int irq = -1;
switch (msg) {
case DBELL_TYPE_DBELL:
irq = PPC_INTERRUPT_DOORBELL;
break;
case DBELL_TYPE_DBELL_CRIT:
irq = PPC_INTERRUPT_CDOORBELL;
break;
case DBELL_TYPE_G_DBELL:
case DBELL_TYPE_G_DBELL_CRIT:
case DBELL_TYPE_G_DBELL_MC:
/* XXX implement */
default:
break;
}
return irq;
}
void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
int irq = dbell2irq(rb);
if (irq < 0) {
return;
}
ppc_set_irq(env_archcpu(env), irq, 0);
}
void helper_msgsnd(target_ulong rb)
{
int irq = dbell2irq(rb);
int pir = rb & DBELL_PIRTAG_MASK;
CPUState *cs;
if (irq < 0) {
return;
}
bql_lock();
CPU_FOREACH(cs) {
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *cenv = &cpu->env;
if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
ppc_set_irq(cpu, irq, 1);
}
}
bql_unlock();
}
/* Server Processor Control */
static bool dbell_type_server(target_ulong rb)
{
/*
* A Directed Hypervisor Doorbell message is sent only if the
* message type is 5. All other types are reserved and the
* instruction is a no-op
*/
return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}
static inline bool dbell_bcast_core(target_ulong rb)
{
return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
}
static inline bool dbell_bcast_subproc(target_ulong rb)
{
return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
}
/*
* Send an interrupt to a thread in the same core as env).
*/
static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq)
{
PowerPCCPU *cpu = env_archcpu(env);
CPUState *cs = env_cpu(env);
if (ppc_cpu_lpar_single_threaded(cs)) {
if (target_tir == 0) {
ppc_set_irq(cpu, irq, 1);
}
} else {
CPUState *ccs;
/* Does iothread need to be locked for walking CPU list? */
bql_lock();
THREAD_SIBLING_FOREACH(cs, ccs) {
PowerPCCPU *ccpu = POWERPC_CPU(ccs);
if (target_tir == ppc_cpu_tir(ccpu)) {
ppc_set_irq(ccpu, irq, 1);
break;
}
}
bql_unlock();
}
}
void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
if (!dbell_type_server(rb)) {
return;
}
ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
}
void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
{
int pir = rb & DBELL_PROCIDTAG_MASK;
bool brdcast = false;
CPUState *cs, *ccs;
PowerPCCPU *cpu;
if (!dbell_type_server(rb)) {
return;
}
/* POWER8 msgsnd is like msgsndp (targets a thread within core) */
if (!(env->insns_flags2 & PPC2_ISA300)) {
msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL);
return;
}
/* POWER9 and later msgsnd is a global (targets any thread) */
cpu = ppc_get_vcpu_by_pir(pir);
if (!cpu) {
return;
}
cs = CPU(cpu);
if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
(env->flags & POWERPC_FLAG_SMT_1LPAR))) {
brdcast = true;
}
if (ppc_cpu_core_single_threaded(cs) || !brdcast) {
ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
return;
}
/*
* Why is bql needed for walking CPU list? Answer seems to be because ppc
* irq handling needs it, but ppc_set_irq takes the lock itself if needed,
* so could this be removed?
*/
bql_lock();
THREAD_SIBLING_FOREACH(cs, ccs) {
ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
}
bql_unlock();
}
#ifdef TARGET_PPC64
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
if (!dbell_type_server(rb)) {
return;
}
ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
}
/*
* sends a message to another thread on the same
* multi-threaded processor
*/
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
if (!dbell_type_server(rb)) {
return;
}
msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL);
}
#endif /* TARGET_PPC64 */
/* Single-step tracing */
void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
{
uint32_t error_code = 0;
if (env->insns_flags2 & PPC2_ISA207S) {
/* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
env->spr[SPR_POWER_SIAR] = prev_ip;
error_code = PPC_BIT(33);
}
raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
}
#endif /* !CONFIG_USER_ONLY */
#endif /* CONFIG_TCG */


@@ -17,6 +17,7 @@
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
@@ -55,13 +56,13 @@ void helper_raise_exception(CPUPPCState *env, uint32_t exception)
#ifndef CONFIG_USER_ONLY
void raise_exception_err(CPUPPCState *env, uint32_t exception,
uint32_t error_code)
static G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception,
uint32_t error_code)
{
raise_exception_err_ra(env, exception, error_code, 0);
}
void raise_exception(CPUPPCState *env, uint32_t exception)
static G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception)
{
raise_exception_err_ra(env, exception, 0, 0);
}
@@ -426,4 +427,422 @@ uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
return insn;
}
#if defined(TARGET_PPC64)
void helper_attn(CPUPPCState *env)
{
/* POWER attn is unprivileged when enabled by HID, otherwise illegal */
if ((*env->check_attn)(env)) {
powerpc_checkstop(env, "host executed attn");
} else {
raise_exception_err(env, POWERPC_EXCP_HV_EMU,
POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
}
}
void helper_scv(CPUPPCState *env, uint32_t lev)
{
if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
} else {
raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
}
}
void helper_pminsn(CPUPPCState *env, uint32_t insn)
{
CPUState *cs = env_cpu(env);
cs->halted = 1;
/* Condition for waking up at 0x100 */
env->resume_as_sreset = (insn != PPC_PM_STOP) ||
(env->spr[SPR_PSSCR] & PSSCR_EC);
/* HDECR is not to wake from PM state, it may have already fired */
if (env->resume_as_sreset) {
PowerPCCPU *cpu = env_archcpu(env);
ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
}
ppc_maybe_interrupt(env);
}
#endif /* TARGET_PPC64 */
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
uint32_t excp = hreg_store_msr(env, val, 0);
if (excp != 0) {
cpu_interrupt_exittb(env_cpu(env));
raise_exception(env, excp);
}
}
void helper_ppc_maybe_interrupt(CPUPPCState *env)
{
ppc_maybe_interrupt(env);
}
static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
/* MSR:POW cannot be set by any form of rfi */
msr &= ~(1ULL << MSR_POW);
/* MSR:TGPR cannot be set by any form of rfi */
if (env->flags & POWERPC_FLAG_TGPR) {
msr &= ~(1ULL << MSR_TGPR);
}
#ifdef TARGET_PPC64
/* Switching to 32-bit ? Crop the nip */
if (!msr_is_64bit(env, msr)) {
nip = (uint32_t)nip;
}
#else
nip = (uint32_t)nip;
#endif
/* XXX: beware: this is false if VLE is supported */
env->nip = nip & ~((target_ulong)0x00000003);
hreg_store_msr(env, msr, 1);
trace_ppc_excp_rfi(env->nip, env->msr);
/*
* No need to raise an exception here, as rfi is always the last
* insn of a TB
*/
cpu_interrupt_exittb(env_cpu(env));
/* Reset the reservation */
env->reserve_addr = -1;
/* Context synchronizing: check if TCG TLB needs flush */
check_tlb_flush(env, false);
}
void helper_rfi(CPUPPCState *env)
{
do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}
#ifdef TARGET_PPC64
void helper_rfid(CPUPPCState *env)
{
/*
* The architecture defines a number of rules for which bits can
* change but in practice, we handle this in hreg_store_msr()
* which will be called by do_rfi(), so there is no need to filter
* here
*/
do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}
void helper_rfscv(CPUPPCState *env)
{
do_rfi(env, env->lr, env->ctr);
}
void helper_hrfid(CPUPPCState *env)
{
do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
void helper_rfebb(CPUPPCState *env, target_ulong s)
{
target_ulong msr = env->msr;
/*
* Handling of BESCR bits 32:33 according to PowerISA v3.1:
*
* "If BESCR 32:33 != 0b00 the instruction is treated as if
* the instruction form were invalid."
*/
if (env->spr[SPR_BESCR] & BESCR_INVALID) {
raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
}
env->nip = env->spr[SPR_EBBRR];
/* Switching to 32-bit ? Crop the nip */
if (!msr_is_64bit(env, msr)) {
env->nip = (uint32_t)env->spr[SPR_EBBRR];
}
if (s) {
env->spr[SPR_BESCR] |= BESCR_GE;
} else {
env->spr[SPR_BESCR] &= ~BESCR_GE;
}
}
/*
* Triggers or queues an 'ebb_excp' EBB exception. All checks
* but FSCR, HFSCR and msr_pr must be done beforehand.
*
* PowerISA v3.1 isn't clear about whether an EBB should be
* postponed or cancelled if the EBB facility is unavailable.
* Our assumption here is that the EBB is cancelled if both
* FSCR and HFSCR EBB facilities aren't available.
*/
static void do_ebb(CPUPPCState *env, int ebb_excp)
{
PowerPCCPU *cpu = env_archcpu(env);
/*
* FSCR_EBB and FSCR_IC_EBB are the same bits used with
* HFSCR.
*/
helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
env->spr[SPR_BESCR] |= BESCR_PMEO;
} else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
env->spr[SPR_BESCR] |= BESCR_EEO;
}
if (FIELD_EX64(env->msr, MSR, PR)) {
powerpc_excp(cpu, ebb_excp);
} else {
ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
}
}
void raise_ebb_perfm_exception(CPUPPCState *env)
{
bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
env->spr[SPR_BESCR] & BESCR_PME &&
env->spr[SPR_BESCR] & BESCR_GE;
if (!perfm_ebb_enabled) {
return;
}
do_ebb(env, POWERPC_EXCP_PERFM_EBB);
}
#endif /* TARGET_PPC64 */
/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}
void helper_rfci(CPUPPCState *env)
{
do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}
void helper_rfdi(CPUPPCState *env)
{
/* FIXME: choose CSRR1 or DSRR1 based on cpu type */
do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}
void helper_rfmci(CPUPPCState *env)
{
/* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
/* Embedded.Processor Control */
static int dbell2irq(target_ulong rb)
{
int msg = rb & DBELL_TYPE_MASK;
int irq = -1;
switch (msg) {
case DBELL_TYPE_DBELL:
irq = PPC_INTERRUPT_DOORBELL;
break;
case DBELL_TYPE_DBELL_CRIT:
irq = PPC_INTERRUPT_CDOORBELL;
break;
case DBELL_TYPE_G_DBELL:
case DBELL_TYPE_G_DBELL_CRIT:
case DBELL_TYPE_G_DBELL_MC:
/* XXX implement */
default:
break;
}
return irq;
}
void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
int irq = dbell2irq(rb);
if (irq < 0) {
return;
}
ppc_set_irq(env_archcpu(env), irq, 0);
}
void helper_msgsnd(target_ulong rb)
{
int irq = dbell2irq(rb);
int pir = rb & DBELL_PIRTAG_MASK;
CPUState *cs;
if (irq < 0) {
return;
}
bql_lock();
CPU_FOREACH(cs) {
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *cenv = &cpu->env;
if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
ppc_set_irq(cpu, irq, 1);
}
}
bql_unlock();
}
/* Server Processor Control */
static bool dbell_type_server(target_ulong rb)
{
/*
* A Directed Hypervisor Doorbell message is sent only if the
* message type is 5. All other types are reserved and the
* instruction is a no-op
*/
return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}
static inline bool dbell_bcast_core(target_ulong rb)
{
return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
}
static inline bool dbell_bcast_subproc(target_ulong rb)
{
return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
}
/*
* Send an interrupt to a thread in the same core as env).
*/
static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq)
{
PowerPCCPU *cpu = env_archcpu(env);
CPUState *cs = env_cpu(env);
if (ppc_cpu_lpar_single_threaded(cs)) {
if (target_tir == 0) {
ppc_set_irq(cpu, irq, 1);
}
} else {
CPUState *ccs;
/* Does iothread need to be locked for walking CPU list? */
bql_lock();
THREAD_SIBLING_FOREACH(cs, ccs) {
PowerPCCPU *ccpu = POWERPC_CPU(ccs);
if (target_tir == ppc_cpu_tir(ccpu)) {
ppc_set_irq(ccpu, irq, 1);
break;
}
}
bql_unlock();
}
}
void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
if (!dbell_type_server(rb)) {
return;
}
ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
}
void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
{
int pir = rb & DBELL_PROCIDTAG_MASK;
bool brdcast = false;
CPUState *cs, *ccs;
PowerPCCPU *cpu;
if (!dbell_type_server(rb)) {
return;
}
/* POWER8 msgsnd is like msgsndp (targets a thread within core) */
if (!(env->insns_flags2 & PPC2_ISA300)) {
msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL);
return;
}
/* POWER9 and later msgsnd is a global (targets any thread) */
cpu = ppc_get_vcpu_by_pir(pir);
if (!cpu) {
return;
}
cs = CPU(cpu);
if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
(env->flags & POWERPC_FLAG_SMT_1LPAR))) {
brdcast = true;
}
if (ppc_cpu_core_single_threaded(cs) || !brdcast) {
ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
return;
}
/*
* Why is bql needed for walking CPU list? Answer seems to be because ppc
* irq handling needs it, but ppc_set_irq takes the lock itself if needed,
* so could this be removed?
*/
bql_lock();
THREAD_SIBLING_FOREACH(cs, ccs) {
ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
}
bql_unlock();
}
#ifdef TARGET_PPC64
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
if (!dbell_type_server(rb)) {
return;
}
ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
}
/*
* sends a message to another thread on the same
* multi-threaded processor
*/
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
if (!dbell_type_server(rb)) {
return;
}
msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL);
}
#endif /* TARGET_PPC64 */
/* Single-step tracing */
void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
{
uint32_t error_code = 0;
if (env->insns_flags2 & PPC2_ISA207S) {
/* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
env->spr[SPR_POWER_SIAR] = prev_ip;
error_code = PPC_BIT(33);
}
raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
}
#endif /* !CONFIG_USER_ONLY */