diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index aeda902b3e..e39a810a4e 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -2524,10 +2524,6 @@ static int kvm_init(MachineState *ms)
 #ifdef KVM_CAP_VCPU_EVENTS
     s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
 #endif
-
-    s->robust_singlestep =
-        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
-
     s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
 
     s->irq_set_ioctl = KVM_IRQ_LINE;
@@ -3143,11 +3139,6 @@ int kvm_has_vcpu_events(void)
     return kvm_state->vcpu_events;
 }
 
-int kvm_has_robust_singlestep(void)
-{
-    return kvm_state->robust_singlestep;
-}
-
 int kvm_max_nested_state_length(void)
 {
     return kvm_state->max_nested_state_len;
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index 02c031d1f2..80b69d88f6 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -192,7 +192,6 @@ unsigned int kvm_get_max_memslots(void);
 unsigned int kvm_get_free_memslots(void);
 bool kvm_has_sync_mmu(void);
 int kvm_has_vcpu_events(void);
-int kvm_has_robust_singlestep(void);
 int kvm_max_nested_state_length(void);
 int kvm_has_gsi_routing(void);
 
diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
index 151ecc8423..fd846394be 100644
--- a/include/sysemu/kvm_int.h
+++ b/include/sysemu/kvm_int.h
@@ -78,7 +78,6 @@ struct KVMState
     struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
     bool coalesced_flush_in_progress;
     int vcpu_events;
-    int robust_singlestep;
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
 #endif
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 513a90f630..4a244174a0 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -95,6 +95,8 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
     KVM_CAP_INFO(IRQ_ROUTING),
     KVM_CAP_INFO(DEBUGREGS),
     KVM_CAP_INFO(XSAVE),
+    KVM_CAP_INFO(VCPU_EVENTS),
+    KVM_CAP_INFO(X86_ROBUST_SINGLESTEP),
     KVM_CAP_LAST_INFO
 };
 
@@ -690,15 +692,6 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
     emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
 }
 
-static void kvm_reset_exception(CPUX86State *env)
-{
-    env->exception_nr = -1;
-    env->exception_pending = 0;
-    env->exception_injected = 0;
-    env->exception_has_payload = false;
-    env->exception_payload = 0;
-}
-
 static void kvm_queue_exception(CPUX86State *env,
                                 int32_t exception_nr,
                                 uint8_t exception_has_payload,
@@ -731,38 +724,6 @@ static void kvm_queue_exception(CPUX86State *env,
     }
 }
 
-static int kvm_inject_mce_oldstyle(X86CPU *cpu)
-{
-    CPUX86State *env = &cpu->env;
-
-    if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
-        unsigned int bank, bank_num = env->mcg_cap & 0xff;
-        struct kvm_x86_mce mce;
-
-        kvm_reset_exception(env);
-
-        /*
-         * There must be at least one bank in use if an MCE is pending.
-         * Find it and use its values for the event injection.
-         */
-        for (bank = 0; bank < bank_num; bank++) {
-            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
-                break;
-            }
-        }
-        assert(bank < bank_num);
-
-        mce.bank = bank;
-        mce.status = env->mce_banks[bank * 4 + 1];
-        mce.mcg_status = env->mcg_status;
-        mce.addr = env->mce_banks[bank * 4 + 2];
-        mce.misc = env->mce_banks[bank * 4 + 3];
-
-        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
-    }
-    return 0;
-}
-
 static void cpu_update_state(void *opaque, bool running, RunState state)
 {
     CPUX86State *env = opaque;
@@ -4359,10 +4320,6 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
     CPUX86State *env = &cpu->env;
     struct kvm_vcpu_events events = {};
 
-    if (!kvm_has_vcpu_events()) {
-        return 0;
-    }
-
     events.flags = 0;
 
     if (has_exception_payload) {
@@ -4430,10 +4387,6 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
     struct kvm_vcpu_events events;
     int ret;
 
-    if (!kvm_has_vcpu_events()) {
-        return 0;
-    }
-
     memset(&events, 0, sizeof(events));
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
     if (ret < 0) {
@@ -4499,37 +4452,6 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
     return 0;
 }
 
-static int kvm_guest_debug_workarounds(X86CPU *cpu)
-{
-    CPUState *cs = CPU(cpu);
-    CPUX86State *env = &cpu->env;
-    int ret = 0;
-    unsigned long reinject_trap = 0;
-
-    if (!kvm_has_vcpu_events()) {
-        if (env->exception_nr == EXCP01_DB) {
-            reinject_trap = KVM_GUESTDBG_INJECT_DB;
-        } else if (env->exception_injected == EXCP03_INT3) {
-            reinject_trap = KVM_GUESTDBG_INJECT_BP;
-        }
-        kvm_reset_exception(env);
-    }
-
-    /*
-     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
-     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
-     * by updating the debug state once again if single-stepping is on.
-     * Another reason to call kvm_update_guest_debug here is a pending debug
-     * trap raise by the guest. On kernels without SET_VCPU_EVENTS we have to
-     * reinject them via SET_GUEST_DEBUG.
-     */
-    if (reinject_trap ||
-        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
-        ret = kvm_update_guest_debug(cs, reinject_trap);
-    }
-    return ret;
-}
-
 static int kvm_put_debugregs(X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
@@ -4702,11 +4624,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
     if (ret < 0) {
         return ret;
     }
-    /* must be before kvm_put_msrs */
-    ret = kvm_inject_mce_oldstyle(x86_cpu);
-    if (ret < 0) {
-        return ret;
-    }
     ret = kvm_put_msrs(x86_cpu, level);
     if (ret < 0) {
         return ret;
@@ -4730,11 +4647,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
     if (ret < 0) {
         return ret;
    }
-    /* must be last */
-    ret = kvm_guest_debug_workarounds(x86_cpu);
-    if (ret < 0) {
-        return ret;
-    }
     return 0;
 }
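
Background for reviewers: listing VCPU_EVENTS and X86_ROBUST_SINGLESTEP in kvm_arch_required_capabilities means kvm_init() refuses to start on kernels that lack them, which is what turns the deleted runtime fallbacks (kvm_inject_mce_oldstyle, kvm_guest_debug_workarounds, the kvm_has_vcpu_events() early returns) into dead code. The standalone C sketch below illustrates how such a sentinel-terminated capability table is walked at init time; it mirrors the shape of QEMU's KVMCapabilityInfo/kvm_check_extension_list() logic but uses stand-in names and illustrative capability numbers, and is not part of the patch.

/*
 * Sketch: walking a required-capability table at init time.
 * Names, the stub probe, and the numeric values are illustrative only.
 */
#include <stdio.h>

typedef struct CapInfo {
    const char *name;
    int value;              /* KVM_CAP_* number probed via KVM_CHECK_EXTENSION */
} CapInfo;

/* Stand-in for kvm_check_extension(); a real build asks the kernel. */
static int check_extension_stub(int cap)
{
    (void)cap;
    return 1;
}

/* Return the first missing capability, or NULL if all are present. */
static const CapInfo *check_required_caps(const CapInfo *list)
{
    while (list->name) {
        if (!check_extension_stub(list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

int main(void)
{
    /* Per-arch table, terminated by a zeroed sentinel entry. */
    static const CapInfo required[] = {
        { "KVM_CAP_VCPU_EVENTS", 41 },             /* illustrative numbers */
        { "KVM_CAP_X86_ROBUST_SINGLESTEP", 51 },
        { NULL, 0 },
    };

    const CapInfo *missing = check_required_caps(required);
    if (missing) {
        fprintf(stderr, "kvm version too old: missing %s\n", missing->name);
        return 1;
    }
    return 0;
}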