target/i386: VMRUN and VMLOAD canonicalizations
APM2 requires that VMRUN and VMLOAD canonicalize (sign extend to 63 from 48/57) all base addresses in the segment registers that have been respectively loaded. Signed-off-by: Lara Lazier <laramglazier@gmail.com> Message-Id: <20210804113058.45186-1-laramglazier@gmail.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
69e3895f9d
commit
97afb47e15
@ -5115,6 +5115,15 @@ static void x86_register_cpudef_types(const X86CPUDefinition *def)
|
||||
|
||||
}
|
||||
|
||||
/*
 * Width in bits of the guest's virtual address space: 57 when 5-level
 * paging (LA57) is advertised in CPUID leaf 7, 48 otherwise.
 */
uint32_t cpu_x86_virtual_addr_width(CPUX86State *env)
{
    bool la57 = env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57;

    return la57 ? 57 : 48;
}
|
||||
|
||||
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
uint32_t *eax, uint32_t *ebx,
|
||||
uint32_t *ecx, uint32_t *edx)
|
||||
@ -5517,16 +5526,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
break;
|
||||
case 0x80000008:
|
||||
/* virtual & phys address size in low 2 bytes. */
|
||||
*eax = cpu->phys_bits;
|
||||
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
|
||||
/* 64 bit processor */
|
||||
*eax = cpu->phys_bits; /* configurable physical bits */
|
||||
if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
|
||||
*eax |= 0x00003900; /* 57 bits virtual */
|
||||
} else {
|
||||
*eax |= 0x00003000; /* 48 bits virtual */
|
||||
}
|
||||
} else {
|
||||
*eax = cpu->phys_bits;
|
||||
*eax |= (cpu_x86_virtual_addr_width(env) << 8);
|
||||
}
|
||||
*ebx = env->features[FEAT_8000_0008_EBX];
|
||||
if (cs->nr_cores * cs->nr_threads > 1) {
|
||||
|
@ -1955,6 +1955,8 @@ typedef struct PropValue {
|
||||
} PropValue;
|
||||
void x86_cpu_apply_props(X86CPU *cpu, PropValue *props);
|
||||
|
||||
uint32_t cpu_x86_virtual_addr_width(CPUX86State *env);
|
||||
|
||||
/* cpu.c other functions (cpuid) */
|
||||
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
uint32_t *eax, uint32_t *ebx,
|
||||
|
@ -41,6 +41,16 @@ static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
|
||||
((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
|
||||
}
|
||||
|
||||
/*
|
||||
* VMRUN and VMLOAD canonicalizes (i.e., sign-extend to bit 63) all base
|
||||
* addresses in the segment registers that have been loaded.
|
||||
*/
|
||||
static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
|
||||
{
|
||||
uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
|
||||
*seg_base = ((((long) *seg_base) << shift_amt) >> shift_amt);
|
||||
}
|
||||
|
||||
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
|
||||
SegmentCache *sc)
|
||||
{
|
||||
@ -53,6 +63,7 @@ static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
|
||||
sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
|
||||
flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
|
||||
sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
|
||||
svm_canonicalization(env, &sc->base);
|
||||
}
|
||||
|
||||
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
|
||||
@ -245,16 +256,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
|
||||
env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
|
||||
offsetof(struct vmcb, control.tsc_offset));
|
||||
|
||||
env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
|
||||
save.gdtr.base));
|
||||
env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
|
||||
save.gdtr.limit));
|
||||
|
||||
env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
|
||||
save.idtr.base));
|
||||
env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
|
||||
save.idtr.limit));
|
||||
|
||||
new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
|
||||
if (new_cr0 & SVM_CR0_RESERVED_MASK) {
|
||||
cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
|
||||
@ -308,6 +309,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
|
||||
R_SS);
|
||||
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
|
||||
R_DS);
|
||||
svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.idtr),
|
||||
&env->idt);
|
||||
svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.gdtr),
|
||||
&env->gdt);
|
||||
|
||||
env->eip = x86_ldq_phys(cs,
|
||||
env->vm_vmcb + offsetof(struct vmcb, save.rip));
|
||||
@ -446,6 +451,7 @@ void helper_vmload(CPUX86State *env, int aflag)
|
||||
env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
|
||||
env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
|
||||
env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
|
||||
svm_canonicalization(env, &env->kernelgsbase);
|
||||
#endif
|
||||
env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
|
||||
env->sysenter_cs = x86_ldq_phys(cs,
|
||||
@ -454,6 +460,7 @@ void helper_vmload(CPUX86State *env, int aflag)
|
||||
save.sysenter_esp));
|
||||
env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
|
||||
save.sysenter_eip));
|
||||
|
||||
}
|
||||
|
||||
void helper_vmsave(CPUX86State *env, int aflag)
|
||||
|
Loading…
Reference in New Issue
Block a user