target/i386: Added Virtual VMLOAD and VMSAVE feature

The feature allows the VMSAVE and VMLOAD instructions to execute in guest mode without
causing a VMEXIT. (APM2 15.33.1)

Signed-off-by: Lara Lazier <laramglazier@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
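
Reading aid (not part of this commit): the conditions under which a non-intercepted guest-mode VMLOAD/VMSAVE is now handled in place, rather than exiting, can be condensed as below. The function and parameter names are illustrative only; the authoritative check is virtual_vm_load_save_enabled() in the svm_helper.c hunk further down.

/* Illustrative condensation, not QEMU code; parameter names are made up. */
#include <stdbool.h>
#include <stdint.h>

#define V_VMLOAD_VMSAVE_ENABLED_MASK (1 << 1)   /* VMCB lbr_ctl field, bit 1 */

static bool vmload_vmsave_uses_nested_translation(bool nested_paging_active,
                                                  bool long_mode_active,
                                                  bool cpu_has_v_vmsave_vmload,
                                                  uint32_t vmcb_lbr_ctl)
{
    /*
     * Without nested paging and long mode the feature cannot apply;
     * the commit forces a #VMEXIT in that case.
     */
    if (!nested_paging_active || !long_mode_active) {
        return false;
    }
    /*
     * Otherwise both the CPUID bit (CPUID_SVM_V_VMSAVE_VMLOAD) and the
     * VMCB enable bit must be set for the instruction to stay in the
     * guest, with its operand address translated through nested paging.
     */
    return cpu_has_v_vmsave_vmload &&
           (vmcb_lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
}

When this holds, helper_vmload()/helper_vmsave() below translate the operand address with get_hphys() before touching the VMCB fields, instead of raising SVM_EXIT_VMLOAD/SVM_EXIT_VMSAVE.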


@@ -2261,6 +2261,8 @@ static inline bool ctl_has_irq(CPUX86State *env)
     return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
 }
 
+hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
+                 int *prot);
 #if defined(TARGET_X86_64) && \
     defined(CONFIG_USER_ONLY) && \
     defined(CONFIG_LINUX)


@@ -24,6 +24,8 @@
 #define V_INTR_MASKING_SHIFT 24
 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
 
+#define V_VMLOAD_VMSAVE_ENABLED_MASK (1 << 1)
+
 #define SVM_INTERRUPT_SHADOW_MASK 1
 
 #define SVM_IOIO_STR_SHIFT 2


@@ -358,7 +358,7 @@ do_check_protect_pse36:
     return error_code;
 }
 
-static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
+hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                         int *prot)
 {
     CPUX86State *env = &X86_CPU(cs)->env;


@@ -120,6 +120,25 @@ static inline bool virtual_gif_enabled(CPUX86State *env)
     return false;
 }
 
+static inline bool virtual_vm_load_save_enabled(CPUX86State *env, uint32_t exit_code, uintptr_t retaddr)
+{
+    uint64_t lbr_ctl;
+
+    if (likely(env->hflags & HF_GUEST_MASK)) {
+        if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) {
+            cpu_vmexit(env, exit_code, 0, retaddr);
+        }
+
+        lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb,
+                                              control.lbr_ctl));
+        return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
+                && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
+
+    }
+
+    return false;
+}
+
 static inline bool virtual_gif_set(CPUX86State *env)
 {
     return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
@@ -431,6 +450,7 @@ void helper_vmload(CPUX86State *env, int aflag)
 {
     CPUState *cs = env_cpu(env);
     target_ulong addr;
+    int prot;
 
     cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());
 
@@ -440,6 +460,10 @@ void helper_vmload(CPUX86State *env, int aflag)
         addr = (uint32_t)env->regs[R_EAX];
     }
 
+    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
+        addr = get_hphys(cs, addr, MMU_DATA_LOAD, &prot);
+    }
+
     qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                   "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                   addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
@@ -473,6 +497,7 @@ void helper_vmsave(CPUX86State *env, int aflag)
 {
     CPUState *cs = env_cpu(env);
     target_ulong addr;
+    int prot;
 
     cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());
 
@@ -482,6 +507,10 @@ void helper_vmsave(CPUX86State *env, int aflag)
         addr = (uint32_t)env->regs[R_EAX];
     }
 
+    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
+        addr = get_hphys(cs, addr, MMU_DATA_STORE, &prot);
+    }
+
     qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                   "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                   addr, x86_ldq_phys(cs,
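
For context, a hypothetical sketch of the guest (L1) hypervisor side that this commit emulates: probe the CPUID feature bit and set the enable bit in its nested VMCB while leaving the VMLOAD/VMSAVE intercepts clear. This is not part of the commit; the leaf, bit position, and VMCB offset below are taken from the AMD APM and should be double-checked against it.

/* Hypothetical guest-hypervisor code, for illustration only. */
#include <stdbool.h>
#include <stdint.h>
#include <cpuid.h>

#define CPUID_SVM_FEATURES        0x8000000a
#define CPUID_EDX_V_VMSAVE_VMLOAD (1u << 15)  /* "Virtualized VMSAVE/VMLOAD" */
#define VMCB_LBR_CTL_OFFSET       0x090       /* LBR virtualization control dword */
#define V_VMLOAD_VMSAVE_ENABLED   (1u << 1)

static bool enable_virtual_vmload_vmsave(uint8_t *vmcb)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(CPUID_SVM_FEATURES, &eax, &ebx, &ecx, &edx) ||
        !(edx & CPUID_EDX_V_VMSAVE_VMLOAD)) {
        return false;                 /* feature not advertised */
    }
    /* Set bit 1 of the control field; the VMLOAD/VMSAVE intercepts stay clear. */
    *(uint32_t *)(vmcb + VMCB_LBR_CTL_OFFSET) |= V_VMLOAD_VMSAVE_ENABLED;
    return true;
}

On the QEMU side the CPUID bit is only visible to the guest when the selected CPU model or explicit -cpu flags include the corresponding FEAT_SVM feature.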