s390x/kvm: cleanup partial register handling

The partial register handling (introduced with commits
420840e58b85f7f4e5493dca3f273566f261090a and
3474b679486caa8f6448bae974e131370f360c13) aimed to improve intercept
handling performance.

It made the code more complicated, though. During development of live
migration/init/reset etc. it turned out that this might cause several
hard-to-debug programming errors. With the introduction of ioeventfd
(and future irqfd patches) the QEMU intercept handlers are no longer
on the hot path, and therefore the partial register handling can be
removed to simplify the code.

Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
CC: Jason J. Herne <jjherne@us.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Alexander Graf <agraf@suse.de>
This commit is contained in:
Dominik Dingel 2013-10-01 16:28:23 +02:00 committed by Christian Borntraeger
parent 234eef51a1
commit 44c68de044
2 changed files with 69 additions and 116 deletions

View File

@ -78,11 +78,6 @@ typedef struct MchkQueue {
uint16_t type; uint16_t type;
} MchkQueue; } MchkQueue;
/* Defined values for CPUS390XState.runtime_reg_dirty_mask */
#define KVM_S390_RUNTIME_DIRTY_NONE 0
#define KVM_S390_RUNTIME_DIRTY_PARTIAL 1
#define KVM_S390_RUNTIME_DIRTY_FULL 2
typedef struct CPUS390XState { typedef struct CPUS390XState {
uint64_t regs[16]; /* GP registers */ uint64_t regs[16]; /* GP registers */
CPU_DoubleU fregs[16]; /* FP registers */ CPU_DoubleU fregs[16]; /* FP registers */
@ -126,13 +121,6 @@ typedef struct CPUS390XState {
uint64_t cputm; uint64_t cputm;
uint32_t todpr; uint32_t todpr;
/* on S390 the runtime register set has two dirty states:
* a partial dirty state in which only the registers that
* are needed all the time are fetched. And a fully dirty
* state in which all runtime registers are fetched.
*/
uint32_t runtime_reg_dirty_mask;
CPU_COMMON CPU_COMMON
/* reset does memset(0) up to here */ /* reset does memset(0) up to here */
@ -1076,7 +1064,6 @@ void kvm_s390_io_interrupt(S390CPU *cpu, uint16_t subchannel_id,
uint32_t io_int_word); uint32_t io_int_word);
void kvm_s390_crw_mchk(S390CPU *cpu); void kvm_s390_crw_mchk(S390CPU *cpu);
void kvm_s390_enable_css_support(S390CPU *cpu); void kvm_s390_enable_css_support(S390CPU *cpu);
int kvm_s390_get_registers_partial(CPUState *cpu);
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch, int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
int vq, bool assign); int vq, bool assign);
int kvm_s390_cpu_restart(S390CPU *cpu); int kvm_s390_cpu_restart(S390CPU *cpu);
@ -1094,10 +1081,6 @@ static inline void kvm_s390_crw_mchk(S390CPU *cpu)
static inline void kvm_s390_enable_css_support(S390CPU *cpu) static inline void kvm_s390_enable_css_support(S390CPU *cpu)
{ {
} }
static inline int kvm_s390_get_registers_partial(CPUState *cpu)
{
return -ENOSYS;
}
static inline int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, static inline int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier,
uint32_t sch, int vq, uint32_t sch, int vq,
bool assign) bool assign)

View File

@ -152,35 +152,32 @@ int kvm_arch_put_registers(CPUState *cs, int level)
} }
} }
if (env->runtime_reg_dirty_mask == KVM_S390_RUNTIME_DIRTY_FULL) {
reg.id = KVM_REG_S390_CPU_TIMER;
reg.addr = (__u64)&(env->cputm);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
return ret;
}
reg.id = KVM_REG_S390_CLOCK_COMP;
reg.addr = (__u64)&(env->ckc);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
return ret;
}
reg.id = KVM_REG_S390_TODPR;
reg.addr = (__u64)&(env->todpr);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
return ret;
}
}
env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_NONE;
/* Do we need to save more than that? */ /* Do we need to save more than that? */
if (level == KVM_PUT_RUNTIME_STATE) { if (level == KVM_PUT_RUNTIME_STATE) {
return 0; return 0;
} }
reg.id = KVM_REG_S390_CPU_TIMER;
reg.addr = (__u64)&(env->cputm);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
return ret;
}
reg.id = KVM_REG_S390_CLOCK_COMP;
reg.addr = (__u64)&(env->ckc);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
return ret;
}
reg.id = KVM_REG_S390_TODPR;
reg.addr = (__u64)&(env->todpr);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
return ret;
}
if (cap_sync_regs && if (cap_sync_regs &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS && cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) { cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
@ -216,13 +213,54 @@ int kvm_arch_get_registers(CPUState *cs)
S390CPU *cpu = S390_CPU(cs); S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env; CPUS390XState *env = &cpu->env;
struct kvm_one_reg reg; struct kvm_one_reg reg;
int r; struct kvm_sregs sregs;
struct kvm_regs regs;
int i, r;
r = kvm_s390_get_registers_partial(cs); /* get the PSW */
if (r < 0) { env->psw.addr = cs->kvm_run->psw_addr;
return r; env->psw.mask = cs->kvm_run->psw_mask;
/* the GPRS */
if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
for (i = 0; i < 16; i++) {
env->regs[i] = cs->kvm_run->s.regs.gprs[i];
}
} else {
r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
if (r < 0) {
return r;
}
for (i = 0; i < 16; i++) {
env->regs[i] = regs.gprs[i];
}
} }
/* The ACRS and CRS */
if (cap_sync_regs &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
for (i = 0; i < 16; i++) {
env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
env->cregs[i] = cs->kvm_run->s.regs.crs[i];
}
} else {
r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
if (r < 0) {
return r;
}
for (i = 0; i < 16; i++) {
env->aregs[i] = sregs.acrs[i];
env->cregs[i] = sregs.crs[i];
}
}
/* The prefix */
if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
env->psa = cs->kvm_run->s.regs.prefix;
}
/* One Regs */
reg.id = KVM_REG_S390_CPU_TIMER; reg.id = KVM_REG_S390_CPU_TIMER;
reg.addr = (__u64)&(env->cputm); reg.addr = (__u64)&(env->cputm);
r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
@ -244,69 +282,6 @@ int kvm_arch_get_registers(CPUState *cs)
return r; return r;
} }
env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_FULL;
return 0;
}
int kvm_s390_get_registers_partial(CPUState *cs)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
struct kvm_sregs sregs;
struct kvm_regs regs;
int ret;
int i;
if (env->runtime_reg_dirty_mask) {
return 0;
}
/* get the PSW */
env->psw.addr = cs->kvm_run->psw_addr;
env->psw.mask = cs->kvm_run->psw_mask;
/* the GPRS */
if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
for (i = 0; i < 16; i++) {
env->regs[i] = cs->kvm_run->s.regs.gprs[i];
}
} else {
ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
if (ret < 0) {
return ret;
}
for (i = 0; i < 16; i++) {
env->regs[i] = regs.gprs[i];
}
}
/* The ACRS and CRS */
if (cap_sync_regs &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
for (i = 0; i < 16; i++) {
env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
env->cregs[i] = cs->kvm_run->s.regs.crs[i];
}
} else {
ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
if (ret < 0) {
return ret;
}
for (i = 0; i < 16; i++) {
env->aregs[i] = sregs.acrs[i];
env->cregs[i] = sregs.crs[i];
}
}
/* Finally the prefix */
if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
env->psa = cs->kvm_run->s.regs.prefix;
} else {
/* no prefix without sync regs */
}
env->runtime_reg_dirty_mask = KVM_S390_RUNTIME_DIRTY_PARTIAL;
return 0; return 0;
} }
@ -442,15 +417,13 @@ static int kvm_handle_css_inst(S390CPU *cpu, struct kvm_run *run,
uint8_t ipa0, uint8_t ipa1, uint8_t ipb) uint8_t ipa0, uint8_t ipa1, uint8_t ipb)
{ {
CPUS390XState *env = &cpu->env; CPUS390XState *env = &cpu->env;
CPUState *cs = CPU(cpu);
if (ipa0 != 0xb2) { if (ipa0 != 0xb2) {
/* Not handled for now. */ /* Not handled for now. */
return -1; return -1;
} }
kvm_s390_get_registers_partial(cs); cpu_synchronize_state(CPU(cpu));
cs->kvm_vcpu_dirty = true;
switch (ipa1) { switch (ipa1) {
case PRIV_XSCH: case PRIV_XSCH:
@ -537,11 +510,9 @@ static int handle_priv(S390CPU *cpu, struct kvm_run *run,
static int handle_hypercall(S390CPU *cpu, struct kvm_run *run) static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{ {
CPUState *cs = CPU(cpu);
CPUS390XState *env = &cpu->env; CPUS390XState *env = &cpu->env;
kvm_s390_get_registers_partial(cs); cpu_synchronize_state(CPU(cpu));
cs->kvm_vcpu_dirty = true;
env->regs[2] = s390_virtio_hypercall(env); env->regs[2] = s390_virtio_hypercall(env);
return 0; return 0;
@ -767,8 +738,7 @@ static int handle_tsch(S390CPU *cpu)
struct kvm_run *run = cs->kvm_run; struct kvm_run *run = cs->kvm_run;
int ret; int ret;
kvm_s390_get_registers_partial(cs); cpu_synchronize_state(cs);
cs->kvm_vcpu_dirty = true;
ret = ioinst_handle_tsch(env, env->regs[1], run->s390_tsch.ipb); ret = ioinst_handle_tsch(env, env->regs[1], run->s390_tsch.ipb);
if (ret >= 0) { if (ret >= 0) {