target/i386/hvf: fix a typo in a type name

The prefix x68 is wrong. Change it to x86.

Signed-off-by: Wei Liu <liuwe@linux.microsoft.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Link: https://lore.kernel.org/r/1740126987-8483-2-git-send-email-liuwe@linux.microsoft.com
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Wei Liu 2025-02-21 00:36:09 -08:00 committed by Paolo Bonzini
parent d50ea7f0e6
commit 2540917285
7 changed files with 26 additions and 26 deletions

View File

@@ -674,7 +674,7 @@ int hvf_vcpu_exec(CPUState *cpu)
        }
        case EXIT_REASON_TASK_SWITCH: {
            uint64_t vinfo = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO);
-            x68_segment_selector sel = {.sel = exit_qual & 0xffff};
+            x86_segment_selector sel = {.sel = exit_qual & 0xffff};
            vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
             vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo
             & VMCS_INTR_T_MASK);

View File

@@ -48,7 +48,7 @@
 bool x86_read_segment_descriptor(CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
-                                 x68_segment_selector sel)
+                                 x86_segment_selector sel)
 {
    target_ulong base;
    uint32_t limit;
@@ -78,7 +78,7 @@ bool x86_read_segment_descriptor(CPUState *cpu,
 bool x86_write_segment_descriptor(CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
-                                  x68_segment_selector sel)
+                                  x86_segment_selector sel)
 {
    target_ulong base;
    uint32_t limit;

View File

@@ -183,7 +183,7 @@ static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
 #define GDT_SEL     0
 #define LDT_SEL     1
-typedef struct x68_segment_selector {
+typedef struct x86_segment_selector {
    union {
        uint16_t sel;
        struct {
@@ -192,7 +192,7 @@ typedef struct x86_segment_selector {
            uint16_t index:13;
        };
    };
-} __attribute__ ((__packed__)) x68_segment_selector;
+} __attribute__ ((__packed__)) x86_segment_selector;
 /* useful register access macros */
 #define x86_reg(cpu, reg) ((x86_register *) &cpu->regs[reg])
@@ -250,10 +250,10 @@ typedef struct x86_segment_selector {
 /* deal with GDT/LDT descriptors in memory */
 bool x86_read_segment_descriptor(CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
-                                 x68_segment_selector sel);
+                                 x86_segment_selector sel);
 bool x86_write_segment_descriptor(CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
-                                  x68_segment_selector sel);
+                                  x86_segment_selector sel);
 bool x86_read_call_gate(CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate);

View File

@@ -60,14 +60,14 @@ uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg)
    return rvmcs(cpu->accel->fd, vmx_segment_fields[seg].base);
 }
-x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
+x86_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
 {
-    x68_segment_selector sel;
+    x86_segment_selector sel;
    sel.sel = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector);
    return sel;
 }
-void vmx_write_segment_selector(CPUState *cpu, x68_segment_selector selector, X86Seg seg)
+void vmx_write_segment_selector(CPUState *cpu, x86_segment_selector selector, X86Seg seg)
 {
    wvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector, selector.sel);
 }
@@ -90,7 +90,7 @@ void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Se
    wvmcs(cpu->accel->fd, sf->ar_bytes, desc->ar);
 }
-void x86_segment_descriptor_to_vmx(CPUState *cpu, x68_segment_selector selector,
+void x86_segment_descriptor_to_vmx(CPUState *cpu, x86_segment_selector selector,
                                   struct x86_segment_descriptor *desc,
                                   struct vmx_segment *vmx_desc)
 {

View File

@@ -34,10 +34,10 @@ void vmx_read_segment_descriptor(CPUState *cpu,
 void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc,
                                  enum X86Seg seg);
-x68_segment_selector vmx_read_segment_selector(CPUState *cpu,
+x86_segment_selector vmx_read_segment_selector(CPUState *cpu,
                                               enum X86Seg seg);
 void vmx_write_segment_selector(CPUState *cpu,
-                                x68_segment_selector selector,
+                                x86_segment_selector selector,
                                enum X86Seg seg);
 uint64_t vmx_read_segment_base(CPUState *cpu, enum X86Seg seg);
@@ -45,7 +45,7 @@ void vmx_write_segment_base(CPUState *cpu, enum X86Seg seg,
                            uint64_t base);
 void x86_segment_descriptor_to_vmx(CPUState *cpu,
-                                   x68_segment_selector selector,
+                                   x86_segment_selector selector,
                                   struct x86_segment_descriptor *desc,
                                   struct vmx_segment *vmx_desc);

View File

@@ -76,16 +76,16 @@ static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
    RSI(env) = tss->esi;
    RDI(env) = tss->edi;
-    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, R_LDTR);
-    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, R_ES);
-    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, R_CS);
-    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, R_SS);
-    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, R_DS);
-    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, R_FS);
-    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, R_GS);
+    vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ldt}}, R_LDTR);
+    vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->es}}, R_ES);
+    vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->cs}}, R_CS);
+    vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ss}}, R_SS);
+    vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ds}}, R_DS);
+    vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->fs}}, R_FS);
+    vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->gs}}, R_GS);
 }
-static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel,
+static int task_switch_32(CPUState *cpu, x86_segment_selector tss_sel, x86_segment_selector old_tss_sel,
                          uint64_t old_tss_base, struct x86_segment_descriptor *new_desc)
 {
    struct x86_tss_segment32 tss_seg;
@@ -108,7 +108,7 @@ static int task_switch_32(CPUState *cpu, x86_segment_selector tss_sel, x86_segme
    return 0;
 }
-void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
+void vmx_handle_task_switch(CPUState *cpu, x86_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
 {
    uint64_t rip = rreg(cpu->accel->fd, HV_X86_RIP);
    if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
@@ -122,7 +122,7 @@ void vmx_handle_task_switch(CPUState *cpu, x86_segment_selector tss_sel, int rea
    load_regs(cpu);
    struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
-    x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
+    x86_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
    uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR);
    uint32_t desc_limit;
    struct x86_call_gate task_gate_desc;
@@ -140,7 +140,7 @@ void vmx_handle_task_switch(CPUState *cpu, x86_segment_selector tss_sel, int rea
        x86_read_call_gate(cpu, &task_gate_desc, gate);
        dpl = task_gate_desc.dpl;
-        x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
+        x86_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
        if (tss_sel.rpl > dpl || cs.rpl > dpl)
            ;//DPRINTF("emulate_gp");
    }

View File

@@ -15,6 +15,6 @@
 #ifndef HVF_X86_TASK_H
 #define HVF_X86_TASK_H
-void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,
+void vmx_handle_task_switch(CPUState *cpu, x86_segment_selector tss_sel,
                            int reason, bool gate_valid, uint8_t gate, uint64_t gate_type);
 #endif