target/i386: Moved int_ctl into CPUX86State structure

Moved int_ctl into the CPUX86State structure. This removes some
unnecessary stores and loads, and prepares for tracking the vIRQ
state even when it is masked by vGIF.

Signed-off-by: Lara Lazier <laramglazier@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Authored by Lara Lazier on 2021-08-14 09:51:00 +02:00, committed by Paolo Bonzini
commit e3126a5c92 (parent 900eeca579)
6 changed files with 41 additions and 38 deletions
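To make the data flow concrete: after this change, int_ctl is read from the VMCB once in helper_vmrun(), queried and updated in place by helpers such as ctl_has_irq(), helper_stgi()/helper_clgi() and x86_cpu_exec_interrupt(), and written back to the VMCB in do_vmexit(). The minimal standalone sketch below mirrors that pattern; only the V_* bit layout follows the VMCB int_ctl field (as in target/i386/svm.h), while ToyX86State, toy_vmrun() and toy_vmexit() are illustrative stand-ins for CPUX86State and the x86_ldl_phys()/x86_stl_phys() accesses, not QEMU code.

/* Sketch of a cached int_ctl: load at VMRUN, test bits in place, write back at #VMEXIT. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define V_TPR_MASK         0x0f
#define V_IRQ_MASK         (1 << 8)
#define V_INTR_PRIO_SHIFT  16
#define V_INTR_PRIO_MASK   (0x0f << V_INTR_PRIO_SHIFT)

typedef struct ToyX86State {
    uint32_t int_ctl;   /* cached copy of VMCB control.int_ctl */
} ToyX86State;

/* Same predicate as ctl_has_irq() after the change: no VMCB load is needed,
 * the pending-vIRQ bit and the priority/TPR comparison come from the cache. */
static bool ctl_has_irq(const ToyX86State *env)
{
    uint32_t int_prio = (env->int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
    uint32_t tpr = env->int_ctl & V_TPR_MASK;

    return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
}

/* VMRUN: fill the cache once from the guest VMCB (stand-in for x86_ldl_phys). */
static void toy_vmrun(ToyX86State *env, uint32_t vmcb_int_ctl)
{
    env->int_ctl = vmcb_int_ctl;
}

/* #VMEXIT: hand back the possibly updated value (stand-in for x86_stl_phys),
 * then clear the cache as do_vmexit() does. */
static uint32_t toy_vmexit(ToyX86State *env)
{
    uint32_t out = env->int_ctl;
    env->int_ctl = 0;
    return out;
}

int main(void)
{
    ToyX86State env = { 0 };

    /* vIRQ pending at priority 2, TPR = 1: deliverable. */
    toy_vmrun(&env, V_IRQ_MASK | (2 << V_INTR_PRIO_SHIFT) | 1);
    printf("deliverable: %d\n", ctl_has_irq(&env));

    env.int_ctl &= ~V_IRQ_MASK;   /* vIRQ serviced, as in x86_cpu_exec_interrupt */
    printf("vmcb int_ctl at exit: 0x%08x\n", (unsigned)toy_vmexit(&env));
    return 0;
}

Built with any C99 compiler, this prints that the vIRQ is deliverable (priority 2 >= TPR 1) and then the int_ctl value that would be written back to the VMCB at #VMEXIT.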


@@ -5655,7 +5655,7 @@ static void x86_cpu_reset(DeviceState *dev)
     env->old_exception = -1;
     /* init to reset state */
+    env->int_ctl = 0;
     env->hflags2 |= HF2_GIF_MASK;
     env->hflags &= ~HF_GUEST_MASK;


@@ -1578,6 +1578,7 @@ typedef struct CPUX86State {
     uint64_t nested_cr3;
     uint32_t nested_pg_mode;
     uint8_t v_tpr;
+    uint32_t int_ctl;
     /* KVM states, automatically cleared on reset */
     uint8_t nmi_injected;


@@ -203,7 +203,7 @@ static int cpu_pre_save(void *opaque)
     X86CPU *cpu = opaque;
     CPUX86State *env = &cpu->env;
     int i;
+    env->v_tpr = env->int_ctl & V_TPR_MASK;
     /* FPU */
     env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
     env->fptag_vmstate = 0;
@@ -1356,6 +1356,25 @@ static const VMStateDescription vmstate_svm_npt = {
     }
 };
+static bool svm_guest_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    return tcg_enabled() && env->int_ctl;
+}
+static const VMStateDescription vmstate_svm_guest = {
+    .name = "cpu/svm_guest",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = svm_guest_needed,
+    .fields = (VMStateField[]){
+        VMSTATE_UINT32(env.int_ctl, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
 #ifndef TARGET_X86_64
 static bool intel_efer32_needed(void *opaque)
 {
@@ -1524,6 +1543,7 @@ const VMStateDescription vmstate_x86_cpu = {
         &vmstate_msr_intel_pt,
         &vmstate_msr_virt_ssbd,
         &vmstate_svm_npt,
+        &vmstate_svm_guest,
 #ifndef TARGET_X86_64
         &vmstate_efer32,
 #endif


@@ -1166,7 +1166,6 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
             break;
 #if !defined(CONFIG_USER_ONLY)
         case CPU_INTERRUPT_VIRQ:
-            /* FIXME: this should respect TPR */
             cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
             intno = x86_ldl_phys(cs, env->vm_vmcb
                                  + offsetof(struct vmcb, control.int_vector));
@@ -1174,6 +1173,7 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
                           "Servicing virtual hardware INT=0x%02x\n", intno);
             do_interrupt_x86_hardirq(env, intno, 1);
             cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+            env->int_ctl &= ~V_IRQ_MASK;
             break;
 #endif
         }


@@ -73,7 +73,7 @@ target_ulong helper_read_crN(CPUX86State *env, int reg)
         if (!(env->hflags2 & HF2_VINTR_MASK)) {
             val = cpu_get_apic_tpr(env_archcpu(env)->apic_state);
         } else {
-            val = env->v_tpr;
+            val = env->int_ctl & V_TPR_MASK;
         }
         break;
     }
@@ -121,7 +121,7 @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
             cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
             qemu_mutex_unlock_iothread();
         }
-        env->v_tpr = t0 & 0x0f;
+        env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK);
         break;
     default:
         env->cr[reg] = t0;


@@ -76,14 +76,14 @@ static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                        sc->base, sc->limit, sc->flags);
 }
-static inline bool ctl_has_irq(uint32_t int_ctl)
+static inline bool ctl_has_irq(CPUX86State *env)
 {
     uint32_t int_prio;
     uint32_t tpr;
-    int_prio = (int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
-    tpr = int_ctl & V_TPR_MASK;
-    return (int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
+    int_prio = (env->int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
+    tpr = env->int_ctl & V_TPR_MASK;
+    return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
 }
 static inline bool is_efer_invalid_state (CPUX86State *env)
@@ -121,13 +121,11 @@ static inline bool is_efer_invalid_state (CPUX86State *env)
     return false;
 }
-static inline bool virtual_gif_enabled(CPUX86State *env, uint32_t *int_ctl)
+static inline bool virtual_gif_enabled(CPUX86State *env)
 {
     if (likely(env->hflags & HF_GUEST_MASK)) {
-        *int_ctl = x86_ldl_phys(env_cpu(env),
-                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
         return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
-                    && (*int_ctl & V_GIF_ENABLED_MASK);
+                    && (env->int_ctl & V_GIF_ENABLED_MASK);
     }
     return false;
 }
@@ -139,7 +137,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     target_ulong addr;
     uint64_t nested_ctl;
     uint32_t event_inj;
-    uint32_t int_ctl;
     uint32_t asid;
     uint64_t new_cr0;
     uint64_t new_cr3;
@@ -292,11 +289,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     cpu_x86_update_cr3(env, new_cr3);
     env->cr[2] = x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.cr2));
-    int_ctl = x86_ldl_phys(cs,
+    env->int_ctl = x86_ldl_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
     env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
-    if (int_ctl & V_INTR_MASKING_MASK) {
-        env->v_tpr = int_ctl & V_TPR_MASK;
+    if (env->int_ctl & V_INTR_MASKING_MASK) {
         env->hflags2 |= HF2_VINTR_MASK;
         if (env->eflags & IF_MASK) {
             env->hflags2 |= HF2_HIF_MASK;
@@ -362,7 +358,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     env->hflags2 |= HF2_GIF_MASK;
-    if (ctl_has_irq(int_ctl)) {
+    if (ctl_has_irq(env)) {
         CPUState *cs = env_cpu(env);
         cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
@@ -522,11 +518,8 @@ void helper_stgi(CPUX86State *env)
 {
     cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
-    CPUState *cs = env_cpu(env);
-    uint32_t int_ctl;
-    if (virtual_gif_enabled(env, &int_ctl)) {
-        x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
-                     int_ctl | V_GIF_MASK);
+    if (virtual_gif_enabled(env)) {
+        env->int_ctl |= V_GIF_MASK;
     } else {
         env->hflags2 |= HF2_GIF_MASK;
     }
@@ -536,11 +529,8 @@ void helper_clgi(CPUX86State *env)
 {
     cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
-    CPUState *cs = env_cpu(env);
-    uint32_t int_ctl;
-    if (virtual_gif_enabled(env, &int_ctl)) {
-        x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
-                     int_ctl & ~V_GIF_MASK);
+    if (virtual_gif_enabled(env)) {
+        env->int_ctl &= ~V_GIF_MASK;
     } else {
         env->hflags2 &= ~HF2_GIF_MASK;
     }
@@ -688,7 +678,6 @@ void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
 void do_vmexit(CPUX86State *env)
 {
     CPUState *cs = env_cpu(env);
-    uint32_t int_ctl;
     if (env->hflags & HF_INHIBIT_IRQ_MASK) {
         x86_stl_phys(cs,
@@ -731,16 +720,8 @@ void do_vmexit(CPUX86State *env)
                env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
     x86_stq_phys(cs,
                env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
-    int_ctl = x86_ldl_phys(cs,
-                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
-    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
-    int_ctl |= env->v_tpr & V_TPR_MASK;
-    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
-        int_ctl |= V_IRQ_MASK;
-    }
     x86_stl_phys(cs,
-                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
+                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl);
     x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                  cpu_compute_eflags(env));
@@ -763,6 +744,7 @@ void do_vmexit(CPUX86State *env)
     env->intercept = 0;
     env->intercept_exceptions = 0;
     cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+    env->int_ctl = 0;
     env->tsc_offset = 0;
     env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,