hvf: Use standard CR0 and CR4 register definitions

No need to have our own definitions of these registers.

Signed-off-by: Cameron Esfahani <dirty@apple.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Author:    Cameron Esfahani
Date:      2021-10-28 18:33:15 -07:00
Committer: Philippe Mathieu-Daudé
Parent:    004900acbc
Commit:    704afe34d8
5 changed files with 15 additions and 47 deletions
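
For reference, the "standard definitions" this patch switches to are the CR0_*_MASK / CR4_*_MASK macros that target/i386/cpu.h already provides. A representative subset is sketched below for orientation; this is an abbreviated illustration, not a quote of the header, though the bit positions are the architectural ones and match the HVF-local enums removed further down.

    /*
     * Representative subset of the standard control-register bit
     * definitions in target/i386/cpu.h (abbreviated for illustration;
     * see the header for the complete list).
     */
    #define CR0_PE_MASK   (1U << 0)   /* replaces the HVF-local CR0_PE   */
    #define CR0_TS_MASK   (1U << 3)   /* replaces the HVF-local CR0_TS   */
    #define CR0_ET_MASK   (1U << 4)   /* replaces the HVF-local CR0_ET   */
    #define CR0_NE_MASK   (1U << 5)   /* replaces the HVF-local CR0_NE   */
    #define CR0_WP_MASK   (1U << 16)  /* replaces the HVF-local CR0_WP   */
    #define CR0_NW_MASK   (1U << 29)  /* replaces the HVF-local CR0_NW   */
    #define CR0_CD_MASK   (1U << 30)  /* replaces the HVF-local CR0_CD   */
    #define CR0_PG_MASK   (1U << 31)  /* replaces the HVF-local CR0_PG   */

    #define CR4_PAE_MASK  (1U << 5)   /* replaces the HVF-local CR4_PAE  */
    #define CR4_VMXE_MASK (1U << 13)  /* replaces the HVF-local CR4_VMXE */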

target/i386/hvf/vmx.h

@@ -124,10 +124,11 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
     uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
     uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
     uint64_t changed_cr0 = old_cr0 ^ cr0;
-    uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET;
+    uint64_t mask = CR0_PG_MASK | CR0_CD_MASK | CR0_NW_MASK |
+                    CR0_NE_MASK | CR0_ET_MASK;
     uint64_t entry_ctls;
 
-    if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
+    if ((cr0 & CR0_PG_MASK) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE_MASK) &&
         !(efer & MSR_EFER_LME)) {
         address_space_read(&address_space_memory,
                            rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
@@ -142,8 +143,8 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
     wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);
 
     if (efer & MSR_EFER_LME) {
-        if (changed_cr0 & CR0_PG) {
-            if (cr0 & CR0_PG) {
+        if (changed_cr0 & CR0_PG_MASK) {
+            if (cr0 & CR0_PG_MASK) {
                 enter_long_mode(vcpu, cr0, efer);
             } else {
                 exit_long_mode(vcpu, cr0, efer);
@@ -155,8 +156,8 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
     }
 
     /* Filter new CR0 after we are finished examining it above. */
-    cr0 = (cr0 & ~(mask & ~CR0_PG));
-    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);
+    cr0 = (cr0 & ~(mask & ~CR0_PG_MASK));
+    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE_MASK | CR0_ET_MASK);
 
     hv_vcpu_invalidate_tlb(vcpu);
     hv_vcpu_flush(vcpu);
@@ -164,11 +165,11 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
 
 static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
 {
-    uint64_t guest_cr4 = cr4 | CR4_VMXE;
+    uint64_t guest_cr4 = cr4 | CR4_VMXE_MASK;
 
     wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
     wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);
-    wvmcs(vcpu, VMCS_CR4_MASK, CR4_VMXE);
+    wvmcs(vcpu, VMCS_CR4_MASK, CR4_VMXE_MASK);
 
     hv_vcpu_invalidate_tlb(vcpu);
     hv_vcpu_flush(vcpu);
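
Aside, not part of the patch: the VMCS_CR0_MASK/VMCS_CR0_SHADOW and VMCS_CR4_MASK/VMCS_CR4_SHADOW pairs written above implement VMX control-register shadowing. Bits set in the guest/host mask are host-owned, and a guest read of those bits returns the shadow value rather than the real guest register. A minimal illustrative sketch of what a guest read observes, under that assumption:

    /*
     * Illustrative sketch only (not QEMU code): what a guest MOV-from-CR0
     * observes under VMX control-register shadowing.  Bits covered by the
     * guest/host mask are taken from the read shadow; the remaining bits
     * come from the real guest CR0 in the VMCS.
     */
    static uint64_t guest_visible_cr0(uint64_t guest_cr0, uint64_t cr0_mask,
                                      uint64_t cr0_shadow)
    {
        return (cr0_shadow & cr0_mask) | (guest_cr0 & ~cr0_mask);
    }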

target/i386/hvf/x86.c

@@ -119,7 +119,7 @@ bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
 bool x86_is_protected(struct CPUState *cpu)
 {
     uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
-    return cr0 & CR0_PE;
+    return cr0 & CR0_PE_MASK;
 }
 
 bool x86_is_real(struct CPUState *cpu)
@@ -150,13 +150,13 @@ bool x86_is_long64_mode(struct CPUState *cpu)
 bool x86_is_paging_mode(struct CPUState *cpu)
 {
     uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
-    return cr0 & CR0_PG;
+    return cr0 & CR0_PG_MASK;
 }
 
 bool x86_is_pae_enabled(struct CPUState *cpu)
 {
     uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
-    return cr4 & CR4_PAE;
+    return cr4 & CR4_PAE_MASK;
 }
 
 target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, X86Seg seg)

target/i386/hvf/x86.h

@@ -42,40 +42,6 @@ typedef struct x86_register {
     };
 } __attribute__ ((__packed__)) x86_register;
 
-typedef enum x86_reg_cr0 {
-    CR0_PE = (1L << 0),
-    CR0_MP = (1L << 1),
-    CR0_EM = (1L << 2),
-    CR0_TS = (1L << 3),
-    CR0_ET = (1L << 4),
-    CR0_NE = (1L << 5),
-    CR0_WP = (1L << 16),
-    CR0_AM = (1L << 18),
-    CR0_NW = (1L << 29),
-    CR0_CD = (1L << 30),
-    CR0_PG = (1L << 31),
-} x86_reg_cr0;
-
-typedef enum x86_reg_cr4 {
-    CR4_VME = (1L << 0),
-    CR4_PVI = (1L << 1),
-    CR4_TSD = (1L << 2),
-    CR4_DE = (1L << 3),
-    CR4_PSE = (1L << 4),
-    CR4_PAE = (1L << 5),
-    CR4_MSE = (1L << 6),
-    CR4_PGE = (1L << 7),
-    CR4_PCE = (1L << 8),
-    CR4_OSFXSR = (1L << 9),
-    CR4_OSXMMEXCPT = (1L << 10),
-    CR4_VMXE = (1L << 13),
-    CR4_SMXE = (1L << 14),
-    CR4_FSGSBASE = (1L << 16),
-    CR4_PCIDE = (1L << 17),
-    CR4_OSXSAVE = (1L << 18),
-    CR4_SMEP = (1L << 20),
-} x86_reg_cr4;
-
 /* 16 bit Task State Segment */
 typedef struct x86_tss_segment16 {
     uint16_t link;

target/i386/hvf/x86_mmu.c

@@ -129,7 +129,7 @@ static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
     uint32_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
 
     /* check protection */
-    if (cr0 & CR0_WP) {
+    if (cr0 & CR0_WP_MASK) {
         if (pt->write_access && !pte_write_access(pte)) {
             return false;
         }

target/i386/hvf/x86_task.c

@@ -174,7 +174,8 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
         //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
         VM_PANIC("task_switch_16");
 
-    macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | CR0_TS);
+    macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) |
+                  CR0_TS_MASK);
 
     x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
     vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);