target-i386: add support for SMBASE MSR and SMIs
Apart from the MSR, the smi field of struct kvm_vcpu_events has to be translated into the corresponding CPUX86State fields. Also, memory transaction flags depend on SMM state, so pull it from struct kvm_run on every exit from KVM to userspace.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
		
							parent
							
								
									afd6895b45
								
							
						
					
					
						commit
						fc12d72e10
					
				@@ -314,6 +314,7 @@
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
#define MSR_P6_PERFCTR0                 0xc1
 | 
					#define MSR_P6_PERFCTR0                 0xc1
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					#define MSR_IA32_SMBASE                 0x9e
 | 
				
			||||||
#define MSR_MTRRcap                     0xfe
 | 
					#define MSR_MTRRcap                     0xfe
 | 
				
			||||||
#define MSR_MTRRcap_VCNT                8
 | 
					#define MSR_MTRRcap_VCNT                8
 | 
				
			||||||
#define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
 | 
					#define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
 | 
				
			||||||
 | 
				
			|||||||
@@ -73,6 +73,7 @@ static bool has_msr_feature_control;
 | 
				
			|||||||
static bool has_msr_async_pf_en;
 | 
					static bool has_msr_async_pf_en;
 | 
				
			||||||
static bool has_msr_pv_eoi_en;
 | 
					static bool has_msr_pv_eoi_en;
 | 
				
			||||||
static bool has_msr_misc_enable;
 | 
					static bool has_msr_misc_enable;
 | 
				
			||||||
 | 
					static bool has_msr_smbase;
 | 
				
			||||||
static bool has_msr_bndcfgs;
 | 
					static bool has_msr_bndcfgs;
 | 
				
			||||||
static bool has_msr_kvm_steal_time;
 | 
					static bool has_msr_kvm_steal_time;
 | 
				
			||||||
static int lm_capable_kernel;
 | 
					static int lm_capable_kernel;
 | 
				
			||||||
@@ -819,6 +820,10 @@ static int kvm_get_supported_msrs(KVMState *s)
 | 
				
			|||||||
                    has_msr_tsc_deadline = true;
 | 
					                    has_msr_tsc_deadline = true;
 | 
				
			||||||
                    continue;
 | 
					                    continue;
 | 
				
			||||||
                }
 | 
					                }
 | 
				
			||||||
 | 
					                if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
 | 
				
			||||||
 | 
					                    has_msr_smbase = true;
 | 
				
			||||||
 | 
					                    continue;
 | 
				
			||||||
 | 
					                }
 | 
				
			||||||
                if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
 | 
					                if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
 | 
				
			||||||
                    has_msr_misc_enable = true;
 | 
					                    has_msr_misc_enable = true;
 | 
				
			||||||
                    continue;
 | 
					                    continue;
 | 
				
			||||||
@@ -1245,6 +1250,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
 | 
				
			|||||||
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
 | 
					        kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
 | 
				
			||||||
                          env->msr_ia32_misc_enable);
 | 
					                          env->msr_ia32_misc_enable);
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
 | 
					    if (has_msr_smbase) {
 | 
				
			||||||
 | 
					        kvm_msr_entry_set(&msrs[n++], MSR_IA32_SMBASE, env->smbase);
 | 
				
			||||||
 | 
					    }
 | 
				
			||||||
    if (has_msr_bndcfgs) {
 | 
					    if (has_msr_bndcfgs) {
 | 
				
			||||||
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
 | 
					        kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
@@ -1606,6 +1614,9 @@ static int kvm_get_msrs(X86CPU *cpu)
 | 
				
			|||||||
    if (has_msr_misc_enable) {
 | 
					    if (has_msr_misc_enable) {
 | 
				
			||||||
        msrs[n++].index = MSR_IA32_MISC_ENABLE;
 | 
					        msrs[n++].index = MSR_IA32_MISC_ENABLE;
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
 | 
					    if (has_msr_smbase) {
 | 
				
			||||||
 | 
					        msrs[n++].index = MSR_IA32_SMBASE;
 | 
				
			||||||
 | 
					    }
 | 
				
			||||||
    if (has_msr_feature_control) {
 | 
					    if (has_msr_feature_control) {
 | 
				
			||||||
        msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
 | 
					        msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
@@ -1760,6 +1771,9 @@ static int kvm_get_msrs(X86CPU *cpu)
 | 
				
			|||||||
        case MSR_IA32_MISC_ENABLE:
 | 
					        case MSR_IA32_MISC_ENABLE:
 | 
				
			||||||
            env->msr_ia32_misc_enable = msrs[i].data;
 | 
					            env->msr_ia32_misc_enable = msrs[i].data;
 | 
				
			||||||
            break;
 | 
					            break;
 | 
				
			||||||
 | 
					        case MSR_IA32_SMBASE:
 | 
				
			||||||
 | 
					            env->smbase = msrs[i].data;
 | 
				
			||||||
 | 
					            break;
 | 
				
			||||||
        case MSR_IA32_FEATURE_CONTROL:
 | 
					        case MSR_IA32_FEATURE_CONTROL:
 | 
				
			||||||
            env->msr_ia32_feature_control = msrs[i].data;
 | 
					            env->msr_ia32_feature_control = msrs[i].data;
 | 
				
			||||||
            break;
 | 
					            break;
 | 
				
			||||||
@@ -1923,6 +1937,7 @@ static int kvm_put_apic(X86CPU *cpu)
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
 | 
					static int kvm_put_vcpu_events(X86CPU *cpu, int level)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
 | 
					    CPUState *cs = CPU(cpu);
 | 
				
			||||||
    CPUX86State *env = &cpu->env;
 | 
					    CPUX86State *env = &cpu->env;
 | 
				
			||||||
    struct kvm_vcpu_events events = {};
 | 
					    struct kvm_vcpu_events events = {};
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -1947,6 +1962,24 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
    events.sipi_vector = env->sipi_vector;
 | 
					    events.sipi_vector = env->sipi_vector;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    if (has_msr_smbase) {
 | 
				
			||||||
 | 
					        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
 | 
				
			||||||
 | 
					        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
 | 
				
			||||||
 | 
					        if (kvm_irqchip_in_kernel()) {
 | 
				
			||||||
 | 
					            /* As soon as these are moved to the kernel, remove them
 | 
				
			||||||
 | 
					             * from cs->interrupt_request.
 | 
				
			||||||
 | 
					             */
 | 
				
			||||||
 | 
					            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
 | 
				
			||||||
 | 
					            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
 | 
				
			||||||
 | 
					            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
 | 
				
			||||||
 | 
					        } else {
 | 
				
			||||||
 | 
					            /* Keep these in cs->interrupt_request.  */
 | 
				
			||||||
 | 
					            events.smi.pending = 0;
 | 
				
			||||||
 | 
					            events.smi.latched_init = 0;
 | 
				
			||||||
 | 
					        }
 | 
				
			||||||
 | 
					        events.flags |= KVM_VCPUEVENT_VALID_SMM;
 | 
				
			||||||
 | 
					    }
 | 
				
			||||||
 | 
					
 | 
				
			||||||
    events.flags = 0;
 | 
					    events.flags = 0;
 | 
				
			||||||
    if (level >= KVM_PUT_RESET_STATE) {
 | 
					    if (level >= KVM_PUT_RESET_STATE) {
 | 
				
			||||||
        events.flags |=
 | 
					        events.flags |=
 | 
				
			||||||
@@ -1966,6 +1999,7 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
 | 
				
			|||||||
        return 0;
 | 
					        return 0;
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    memset(&events, 0, sizeof(events));
 | 
				
			||||||
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
 | 
					    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
 | 
				
			||||||
    if (ret < 0) {
 | 
					    if (ret < 0) {
 | 
				
			||||||
       return ret;
 | 
					       return ret;
 | 
				
			||||||
@@ -1987,6 +2021,29 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
 | 
				
			|||||||
        env->hflags2 &= ~HF2_NMI_MASK;
 | 
					        env->hflags2 &= ~HF2_NMI_MASK;
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
 | 
				
			||||||
 | 
					        if (events.smi.smm) {
 | 
				
			||||||
 | 
					            env->hflags |= HF_SMM_MASK;
 | 
				
			||||||
 | 
					        } else {
 | 
				
			||||||
 | 
					            env->hflags &= ~HF_SMM_MASK;
 | 
				
			||||||
 | 
					        }
 | 
				
			||||||
 | 
					        if (events.smi.pending) {
 | 
				
			||||||
 | 
					            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
 | 
				
			||||||
 | 
					        } else {
 | 
				
			||||||
 | 
					            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
 | 
				
			||||||
 | 
					        }
 | 
				
			||||||
 | 
					        if (events.smi.smm_inside_nmi) {
 | 
				
			||||||
 | 
					            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
 | 
				
			||||||
 | 
					        } else {
 | 
				
			||||||
 | 
					            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
 | 
				
			||||||
 | 
					        }
 | 
				
			||||||
 | 
					        if (events.smi.latched_init) {
 | 
				
			||||||
 | 
					            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
 | 
				
			||||||
 | 
					        } else {
 | 
				
			||||||
 | 
					            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
 | 
				
			||||||
 | 
					        }
 | 
				
			||||||
 | 
					    }
 | 
				
			||||||
 | 
					
 | 
				
			||||||
    env->sipi_vector = events.sipi_vector;
 | 
					    env->sipi_vector = events.sipi_vector;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
    return 0;
 | 
					    return 0;
 | 
				
			||||||
@@ -2190,11 +2247,11 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
 | 
				
			|||||||
    int ret;
 | 
					    int ret;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
    /* Inject NMI */
 | 
					    /* Inject NMI */
 | 
				
			||||||
 | 
					    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
 | 
				
			||||||
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
 | 
					        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
 | 
				
			||||||
            qemu_mutex_lock_iothread();
 | 
					            qemu_mutex_lock_iothread();
 | 
				
			||||||
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
 | 
					            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
 | 
				
			||||||
            qemu_mutex_unlock_iothread();
 | 
					            qemu_mutex_unlock_iothread();
 | 
				
			||||||
 | 
					 | 
				
			||||||
            DPRINTF("injected NMI\n");
 | 
					            DPRINTF("injected NMI\n");
 | 
				
			||||||
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
 | 
					            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
 | 
				
			||||||
            if (ret < 0) {
 | 
					            if (ret < 0) {
 | 
				
			||||||
@@ -2202,6 +2259,18 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
 | 
				
			|||||||
                        strerror(-ret));
 | 
					                        strerror(-ret));
 | 
				
			||||||
            }
 | 
					            }
 | 
				
			||||||
        }
 | 
					        }
 | 
				
			||||||
 | 
					        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
 | 
				
			||||||
 | 
					            qemu_mutex_lock_iothread();
 | 
				
			||||||
 | 
					            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
 | 
				
			||||||
 | 
					            qemu_mutex_unlock_iothread();
 | 
				
			||||||
 | 
					            DPRINTF("injected SMI\n");
 | 
				
			||||||
 | 
					            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
 | 
				
			||||||
 | 
					            if (ret < 0) {
 | 
				
			||||||
 | 
					                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
 | 
				
			||||||
 | 
					                        strerror(-ret));
 | 
				
			||||||
 | 
					            }
 | 
				
			||||||
 | 
					        }
 | 
				
			||||||
 | 
					    }
 | 
				
			||||||
 | 
					
 | 
				
			||||||
    if (!kvm_irqchip_in_kernel()) {
 | 
					    if (!kvm_irqchip_in_kernel()) {
 | 
				
			||||||
        qemu_mutex_lock_iothread();
 | 
					        qemu_mutex_lock_iothread();
 | 
				
			||||||
@@ -2212,8 +2281,14 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
 | 
				
			|||||||
     * pending TPR access reports.
 | 
					     * pending TPR access reports.
 | 
				
			||||||
     */
 | 
					     */
 | 
				
			||||||
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
 | 
					    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
 | 
				
			||||||
 | 
					        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
 | 
				
			||||||
 | 
					            !(env->hflags & HF_SMM_MASK)) {
 | 
				
			||||||
            cpu->exit_request = 1;
 | 
					            cpu->exit_request = 1;
 | 
				
			||||||
        }
 | 
					        }
 | 
				
			||||||
 | 
					        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
 | 
				
			||||||
 | 
					            cpu->exit_request = 1;
 | 
				
			||||||
 | 
					        }
 | 
				
			||||||
 | 
					    }
 | 
				
			||||||
 | 
					
 | 
				
			||||||
    if (!kvm_irqchip_in_kernel()) {
 | 
					    if (!kvm_irqchip_in_kernel()) {
 | 
				
			||||||
        /* Try to inject an interrupt if the guest can accept it */
 | 
					        /* Try to inject an interrupt if the guest can accept it */
 | 
				
			||||||
@@ -2260,6 +2335,11 @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
 | 
				
			|||||||
    X86CPU *x86_cpu = X86_CPU(cpu);
 | 
					    X86CPU *x86_cpu = X86_CPU(cpu);
 | 
				
			||||||
    CPUX86State *env = &x86_cpu->env;
 | 
					    CPUX86State *env = &x86_cpu->env;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    if (run->flags & KVM_RUN_X86_SMM) {
 | 
				
			||||||
 | 
					        env->hflags |= HF_SMM_MASK;
 | 
				
			||||||
 | 
					    } else {
 | 
				
			||||||
 | 
 | 
					        env->hflags &= ~HF_SMM_MASK;
 | 
				
			||||||
 | 
					    }
 | 
				
			||||||
    if (run->if_flag) {
 | 
					    if (run->if_flag) {
 | 
				
			||||||
        env->eflags |= IF_MASK;
 | 
					        env->eflags |= IF_MASK;
 | 
				
			||||||
    } else {
 | 
					    } else {
 | 
				
			||||||
@@ -2307,7 +2387,8 @@ int kvm_arch_process_async_events(CPUState *cs)
 | 
				
			|||||||
        }
 | 
					        }
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
 | 
					
 | 
				
			||||||
    if (cs->interrupt_request & CPU_INTERRUPT_INIT) {
 | 
					    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
 | 
				
			||||||
 | 
					        !(env->hflags & HF_SMM_MASK)) {
 | 
				
			||||||
        kvm_cpu_synchronize_state(cs);
 | 
					        kvm_cpu_synchronize_state(cs);
 | 
				
			||||||
        do_cpu_init(cpu);
 | 
					        do_cpu_init(cpu);
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
 | 
				
			|||||||
		Loading…
	
	
			
			x
			
			
		
	
		Reference in New Issue
	
	Block a user