target/i386: Use MMU_NESTED_IDX for vmload/vmsave
Use MMU_NESTED_IDX for each memory access, rather than just a
single translation to physical.  Adjust svm_save_seg and
svm_load_seg to pass in mmu_idx.

This removes the last use of get_hphys, so remove it.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20221002172956.265735-7-richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 726ea33531
parent 98281984a3
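In short: instead of translating the vmcb address to a host-physical address once up front, each vmcb access now carries an MMU index, so every load and store is translated (and can fault) individually. A condensed sketch of the new call pattern, assembled from the hunks below:

    int mmu_idx = MMU_PHYS_IDX;
    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;   /* vmcb accesses go through nested translation */
    }
    svm_load_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.tr), &env->tr);
    env->lstar = cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                                   mmu_idx, 0);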
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -2387,8 +2387,6 @@ static inline bool ctl_has_irq(CPUX86State *env)
     return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
 }
 
-hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
-                 int *prot);
 #if defined(TARGET_X86_64) && \
     defined(CONFIG_USER_ONLY) && \
     defined(CONFIG_LINUX)
--- a/target/i386/tcg/sysemu/excp_helper.c
+++ b/target/i386/tcg/sysemu/excp_helper.c
@@ -413,37 +413,6 @@ static G_NORETURN void raise_stage2(CPUX86State *env, TranslateFault *err,
     cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, retaddr);
 }
 
-hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
-                 int *prot)
-{
-    CPUX86State *env = cs->env_ptr;
-
-    if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
-        return gphys;
-    } else {
-        TranslateParams in = {
-            .addr = gphys,
-            .cr3 = env->nested_cr3,
-            .pg_mode = env->nested_pg_mode,
-            .mmu_idx = MMU_USER_IDX,
-            .access_type = access_type,
-            .use_stage2 = false,
-        };
-        TranslateResult out;
-        TranslateFault err;
-
-        if (!mmu_translate(env, &in, &out, &err)) {
-            err.stage2 = prot ? SVM_NPTEXIT_GPA : SVM_NPTEXIT_GPT;
-            raise_stage2(env, &err, env->retaddr);
-        }
-
-        if (prot) {
-            *prot &= out.prot;
-        }
-        return out.paddr;
-    }
-}
-
 static bool get_physical_address(CPUX86State *env, vaddr addr,
                                  MMUAccessType access_type, int mmu_idx,
                                  TranslateResult *out, TranslateFault *err)
--- a/target/i386/tcg/sysemu/svm_helper.c
+++ b/target/i386/tcg/sysemu/svm_helper.c
@@ -27,19 +27,19 @@
 
 /* Secure Virtual Machine helpers */
 
-static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
-                                const SegmentCache *sc)
+static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
+                         const SegmentCache *sc)
 {
-    CPUState *cs = env_cpu(env);
-
-    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
-             sc->selector);
-    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
-             sc->base);
-    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
-             sc->limit);
-    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
-             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
+    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
+                      sc->selector, mmu_idx, 0);
+    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
+                      sc->base, mmu_idx, 0);
+    cpu_stl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
+                      sc->limit, mmu_idx, 0);
+    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
+                      ((sc->flags >> 8) & 0xff)
+                      | ((sc->flags >> 12) & 0x0f00),
+                      mmu_idx, 0);
 }
 
 /*
@@ -52,29 +52,36 @@ static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base
     *seg_base = ((((long) *seg_base) << shift_amt) >> shift_amt);
 }
 
-static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
-                                SegmentCache *sc)
+static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
+                         SegmentCache *sc)
 {
-    CPUState *cs = env_cpu(env);
     unsigned int flags;
 
-    sc->selector = x86_lduw_phys(cs,
-                                 addr + offsetof(struct vmcb_seg, selector));
-    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
-    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
-    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
+    sc->selector =
+        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
+                           mmu_idx, 0);
+    sc->base =
+        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
+                          mmu_idx, 0);
+    sc->limit =
+        cpu_ldl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
+                          mmu_idx, 0);
+    flags =
+        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
+                           mmu_idx, 0);
     sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
+
     svm_canonicalization(env, &sc->base);
 }
 
-static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
-                                      int seg_reg)
+static void svm_load_seg_cache(CPUX86State *env, int mmu_idx,
+                               hwaddr addr, int seg_reg)
 {
-    SegmentCache sc1, *sc = &sc1;
+    SegmentCache sc;
 
-    svm_load_seg(env, addr, sc);
-    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
-                           sc->base, sc->limit, sc->flags);
+    svm_load_seg(env, mmu_idx, addr, &sc);
+    cpu_x86_load_seg_cache(env, seg_reg, sc.selector,
+                           sc.base, sc.limit, sc.flags);
 }
 
 static inline bool is_efer_invalid_state (CPUX86State *env)
@@ -199,13 +206,17 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
                  env->vm_hsave + offsetof(struct vmcb, save.rflags),
                  cpu_compute_eflags(env));
 
-    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
+    svm_save_seg(env, MMU_PHYS_IDX,
+                 env->vm_hsave + offsetof(struct vmcb, save.es),
                  &env->segs[R_ES]);
-    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
+    svm_save_seg(env, MMU_PHYS_IDX,
+                 env->vm_hsave + offsetof(struct vmcb, save.cs),
                  &env->segs[R_CS]);
-    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
+    svm_save_seg(env, MMU_PHYS_IDX,
+                 env->vm_hsave + offsetof(struct vmcb, save.ss),
                  &env->segs[R_SS]);
-    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
+    svm_save_seg(env, MMU_PHYS_IDX,
+                 env->vm_hsave + offsetof(struct vmcb, save.ds),
                  &env->segs[R_DS]);
 
     x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
@@ -325,18 +336,18 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
                                                           save.rflags)),
                     ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
 
-    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
-                       R_ES);
-    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
-                       R_CS);
-    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
-                       R_SS);
-    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
-                       R_DS);
-    svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.idtr),
-                 &env->idt);
-    svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.gdtr),
-                 &env->gdt);
+    svm_load_seg_cache(env, MMU_PHYS_IDX,
+                       env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES);
+    svm_load_seg_cache(env, MMU_PHYS_IDX,
+                       env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS);
+    svm_load_seg_cache(env, MMU_PHYS_IDX,
+                       env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS);
+    svm_load_seg_cache(env, MMU_PHYS_IDX,
+                       env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS);
+    svm_load_seg(env, MMU_PHYS_IDX,
+                 env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt);
+    svm_load_seg(env, MMU_PHYS_IDX,
+                 env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt);
 
     env->eip = x86_ldq_phys(cs,
                             env->vm_vmcb + offsetof(struct vmcb, save.rip));
@@ -451,9 +462,8 @@ void helper_vmmcall(CPUX86State *env)
 
 void helper_vmload(CPUX86State *env, int aflag)
 {
-    CPUState *cs = env_cpu(env);
+    int mmu_idx = MMU_PHYS_IDX;
     target_ulong addr;
-    int prot;
 
     cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());
 
@@ -464,43 +474,52 @@ void helper_vmload(CPUX86State *env, int aflag)
     }
 
     if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
-        addr = get_hphys(cs, addr, MMU_DATA_LOAD, &prot);
+        mmu_idx = MMU_NESTED_IDX;
     }
 
-    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
-                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
-                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
-                                                          save.fs.base)),
-                  env->segs[R_FS].base);
-
-    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
-    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
-    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
-    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);
+    svm_load_seg_cache(env, mmu_idx,
+                       addr + offsetof(struct vmcb, save.fs), R_FS);
+    svm_load_seg_cache(env, mmu_idx,
+                       addr + offsetof(struct vmcb, save.gs), R_GS);
+    svm_load_seg(env, mmu_idx,
+                 addr + offsetof(struct vmcb, save.tr), &env->tr);
+    svm_load_seg(env, mmu_idx,
+                 addr + offsetof(struct vmcb, save.ldtr), &env->ldt);
 
 #ifdef TARGET_X86_64
-    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
-                                                 save.kernel_gs_base));
-    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
-    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
-    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
+    env->kernelgsbase =
+        cpu_ldq_mmuidx_ra(env,
+                          addr + offsetof(struct vmcb, save.kernel_gs_base),
+                          mmu_idx, 0);
+    env->lstar =
+        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
+                          mmu_idx, 0);
+    env->cstar =
+        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
+                          mmu_idx, 0);
+    env->fmask =
+        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
+                          mmu_idx, 0);
     svm_canonicalization(env, &env->kernelgsbase);
 #endif
-    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
-    env->sysenter_cs = x86_ldq_phys(cs,
-                                   addr + offsetof(struct vmcb, save.sysenter_cs));
-    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
-                                                 save.sysenter_esp));
-    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
-                                                 save.sysenter_eip));
+    env->star =
+        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
+                          mmu_idx, 0);
+    env->sysenter_cs =
+        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
+                          mmu_idx, 0);
+    env->sysenter_esp =
+        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
+                          mmu_idx, 0);
+    env->sysenter_eip =
+        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
+                          mmu_idx, 0);
 }
 
 void helper_vmsave(CPUX86State *env, int aflag)
 {
-    CPUState *cs = env_cpu(env);
+    int mmu_idx = MMU_PHYS_IDX;
     target_ulong addr;
-    int prot;
 
     cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());
 
@@ -511,38 +530,36 @@ void helper_vmsave(CPUX86State *env, int aflag)
     }
 
     if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
-        addr = get_hphys(cs, addr, MMU_DATA_STORE, &prot);
+        mmu_idx = MMU_NESTED_IDX;
     }
 
-    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
-                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
-                  addr, x86_ldq_phys(cs,
-                                     addr + offsetof(struct vmcb, save.fs.base)),
-                  env->segs[R_FS].base);
-
-    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
+    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.fs),
                  &env->segs[R_FS]);
-    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
+    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.gs),
                  &env->segs[R_GS]);
-    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
+    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.tr),
                  &env->tr);
-    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
+    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.ldtr),
                  &env->ldt);
 
 #ifdef TARGET_X86_64
-    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
-             env->kernelgsbase);
-    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
-    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
-    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
+    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.kernel_gs_base),
+                      env->kernelgsbase, mmu_idx, 0);
+    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
+                      env->lstar, mmu_idx, 0);
+    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
+                      env->cstar, mmu_idx, 0);
+    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
+                      env->fmask, mmu_idx, 0);
 #endif
-    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
-    x86_stq_phys(cs,
-                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
-    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
-                 env->sysenter_esp);
-    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
-                 env->sysenter_eip);
+    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
+                      env->star, mmu_idx, 0);
+    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
+                      env->sysenter_cs, mmu_idx, 0);
+    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
+                      env->sysenter_esp, mmu_idx, 0);
+    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
+                      env->sysenter_eip, mmu_idx, 0);
 }
 
 void helper_stgi(CPUX86State *env)
@@ -725,13 +742,17 @@ void do_vmexit(CPUX86State *env)
     tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);
 
     /* Save the VM state in the vmcb */
-    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
+    svm_save_seg(env, MMU_PHYS_IDX,
+                 env->vm_vmcb + offsetof(struct vmcb, save.es),
                  &env->segs[R_ES]);
-    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
+    svm_save_seg(env, MMU_PHYS_IDX,
+                 env->vm_vmcb + offsetof(struct vmcb, save.cs),
                  &env->segs[R_CS]);
-    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
+    svm_save_seg(env, MMU_PHYS_IDX,
+                 env->vm_vmcb + offsetof(struct vmcb, save.ss),
                  &env->segs[R_SS]);
-    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
+    svm_save_seg(env, MMU_PHYS_IDX,
+                 env->vm_vmcb + offsetof(struct vmcb, save.ds),
                  &env->segs[R_DS]);
 
     x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
@@ -812,14 +833,14 @@ void do_vmexit(CPUX86State *env)
                     ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                       VM_MASK));
 
-    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
-                       R_ES);
-    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
-                       R_CS);
-    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
-                       R_SS);
-    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
-                       R_DS);
+    svm_load_seg_cache(env, MMU_PHYS_IDX,
+                       env->vm_hsave + offsetof(struct vmcb, save.es), R_ES);
+    svm_load_seg_cache(env, MMU_PHYS_IDX,
+                       env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS);
+    svm_load_seg_cache(env, MMU_PHYS_IDX,
+                       env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS);
+    svm_load_seg_cache(env, MMU_PHYS_IDX,
+                       env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS);
 
     env->eip = x86_ldq_phys(cs,
                             env->vm_hsave + offsetof(struct vmcb, save.rip));
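For reference, the vmcb_seg attrib packing preserved by svm_save_seg and svm_load_seg above, written out as standalone helpers (hypothetical names, sketching the same bit manipulation as the hunk at -27,19):

    #include <stdint.h>

    /* QEMU segment flags bits 8..15 -> VMCB attrib bits 0..7,
       flags bits 20..23 -> attrib bits 8..11 (as in svm_save_seg). */
    static uint16_t vmcb_attrib_from_flags(uint32_t flags)
    {
        return ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
    }

    /* Inverse mapping (as in svm_load_seg). */
    static uint32_t flags_from_vmcb_attrib(uint16_t attrib)
    {
        return ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);
    }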
|
Loading…
Reference in New Issue
Block a user