From 9121923c457d1d8667a6e3a67302c29e5c5add6b Mon Sep 17 00:00:00 2001
From: Jim Mattson
Date: Thu, 24 Oct 2019 16:03:26 -0700
Subject: [PATCH 1/3] kvm: Allocate memslots and buses before calling
 kvm_arch_init_vm

This reorganization will allow us to call kvm_arch_destroy_vm in the
event that kvm_create_vm fails after calling kvm_arch_init_vm.

Suggested-by: Junaid Shahid
Signed-off-by: Jim Mattson
Reviewed-by: Junaid Shahid
Signed-off-by: Paolo Bonzini
---
 virt/kvm/kvm_main.c | 40 +++++++++++++++++++++-------------------
 1 file changed, 21 insertions(+), 19 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 67ef3f2e19e8..ec14dae2f538 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -627,8 +627,9 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 
 static struct kvm *kvm_create_vm(unsigned long type)
 {
-	int r, i;
 	struct kvm *kvm = kvm_arch_alloc_vm();
+	int r = -ENOMEM;
+	int i;
 
 	if (!kvm)
 		return ERR_PTR(-ENOMEM);
@@ -643,6 +644,25 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	refcount_set(&kvm->users_count, 1);
 	INIT_LIST_HEAD(&kvm->devices);
 
+	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
+
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		struct kvm_memslots *slots = kvm_alloc_memslots();
+
+		if (!slots)
+			goto out_err_no_disable;
+		/* Generations must be different for each address space. */
+		slots->generation = i;
+		rcu_assign_pointer(kvm->memslots[i], slots);
+	}
+
+	for (i = 0; i < KVM_NR_BUSES; i++) {
+		rcu_assign_pointer(kvm->buses[i],
+			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
+		if (!kvm->buses[i])
+			goto out_err_no_disable;
+	}
+
 	r = kvm_arch_init_vm(kvm, type);
 	if (r)
 		goto out_err_no_disable;
@@ -655,28 +675,10 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
 #endif
 
-	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
-
-	r = -ENOMEM;
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-		struct kvm_memslots *slots = kvm_alloc_memslots();
-		if (!slots)
-			goto out_err_no_srcu;
-		/* Generations must be different for each address space. */
-		slots->generation = i;
-		rcu_assign_pointer(kvm->memslots[i], slots);
-	}
-
 	if (init_srcu_struct(&kvm->srcu))
 		goto out_err_no_srcu;
 	if (init_srcu_struct(&kvm->irq_srcu))
 		goto out_err_no_irq_srcu;
-	for (i = 0; i < KVM_NR_BUSES; i++) {
-		rcu_assign_pointer(kvm->buses[i],
-			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
-		if (!kvm->buses[i])
-			goto out_err;
-	}
 
 	r = kvm_init_mmu_notifier(kvm);
 	if (r)
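The point of the reordering above is that kvm_arch_init_vm() becomes the
last fallible step before the SRCU and notifier setup: a failure in one
of the plain allocations never needs arch-level cleanup, and the next
patch can then undo kvm_arch_init_vm() from a single label. Below is a
minimal, self-contained C sketch of that unwind idiom; setup_buffers(),
arch_init() and enable_hw() are hypothetical stand-ins for
kvm_alloc_memslots(), kvm_arch_init_vm() and hardware_enable_all(), not
the real kernel functions.

#include <errno.h>

static int setup_buffers(void) { return 0; }
static void free_buffers(void) { }
static int arch_init(void)     { return 0; }
static void arch_destroy(void) { }
static int enable_hw(void)     { return -ENODEV; } /* force the error path */

static int create(void)
{
	int r = setup_buffers();	/* plain allocations come first... */

	if (r)
		goto out_no_arch_destroy;	/* ...no arch cleanup needed */

	r = arch_init();		/* the arch call is made last... */
	if (r)
		goto out_no_arch_destroy;

	r = enable_hw();		/* ...so any later failure... */
	if (r)
		goto out;		/* ...unwinds it unconditionally */

	return 0;

out:
	arch_destroy();
out_no_arch_destroy:
	free_buffers();			/* safe even for never-filled slots */
	return r;
}

int main(void)
{
	return create() ? 1 : 0;
}

The labels unwind in strict reverse order of setup, which is what makes
the single arch_destroy() call safe regardless of where the failure hit.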
From a97b0e773e492ae319a7e981e98962a1060215f9 Mon Sep 17 00:00:00 2001
From: Jim Mattson
Date: Fri, 25 Oct 2019 13:34:58 +0200
Subject: [PATCH 2/3] kvm: call kvm_arch_destroy_vm if vm creation fails

In kvm_create_vm(), if we've successfully called kvm_arch_init_vm(), but
then fail later in the function, we need to call kvm_arch_destroy_vm()
so that it can do any necessary cleanup (like freeing memory).

Fixes: 44a95dae1d229a ("KVM: x86: Detect and Initialize AVIC support")
Signed-off-by: John Sperbeck
Signed-off-by: Jim Mattson
Reviewed-by: Junaid Shahid
[Remove dependency on "kvm: Don't clear reference count on
 kvm_create_vm() error path" which was not committed. - Paolo]
Signed-off-by: Paolo Bonzini
---
 virt/kvm/kvm_main.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ec14dae2f538..d6f0696d98ef 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -641,7 +641,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	mutex_init(&kvm->lock);
 	mutex_init(&kvm->irq_lock);
 	mutex_init(&kvm->slots_lock);
-	refcount_set(&kvm->users_count, 1);
 	INIT_LIST_HEAD(&kvm->devices);
 
 	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
@@ -650,7 +649,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
 		struct kvm_memslots *slots = kvm_alloc_memslots();
 
 		if (!slots)
-			goto out_err_no_disable;
+			goto out_err_no_arch_destroy_vm;
 		/* Generations must be different for each address space. */
 		slots->generation = i;
 		rcu_assign_pointer(kvm->memslots[i], slots);
@@ -660,12 +659,13 @@ static struct kvm *kvm_create_vm(unsigned long type)
 		rcu_assign_pointer(kvm->buses[i],
 			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
 		if (!kvm->buses[i])
-			goto out_err_no_disable;
+			goto out_err_no_arch_destroy_vm;
 	}
 
+	refcount_set(&kvm->users_count, 1);
 	r = kvm_arch_init_vm(kvm, type);
 	if (r)
-		goto out_err_no_disable;
+		goto out_err_no_arch_destroy_vm;
 
 	r = hardware_enable_all();
 	if (r)
@@ -699,7 +699,9 @@ out_err_no_irq_srcu:
 out_err_no_srcu:
 	hardware_disable_all();
 out_err_no_disable:
-	refcount_set(&kvm->users_count, 0);
+	kvm_arch_destroy_vm(kvm);
+	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
+out_err_no_arch_destroy_vm:
	for (i = 0; i < KVM_NR_BUSES; i++)
 		kfree(kvm_get_bus(kvm, i));
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
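Two details of this error path are worth spelling out. The
refcount_set(&kvm->users_count, 1) now happens immediately before
kvm_arch_init_vm(), so whenever kvm_arch_destroy_vm() can run the count
is known to be 1; and the old refcount_set(..., 0) is replaced by
WARN_ON_ONCE(!refcount_dec_and_test(...)), which drops the creator's
reference and warns if anything else still held one during the partial
creation. A toy sketch of that assert-on-last-put pattern using C11
atomics follows; the kernel's refcount_t additionally saturates on
overflow and underflow, which this stand-in omits.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Toy stand-in for refcount_t; no saturation checking. */
typedef struct { atomic_int refs; } refcount;

static void refcount_set_(refcount *r, int n)
{
	atomic_store(&r->refs, n);
}

/* Returns true if this put dropped the last reference. */
static bool refcount_dec_and_test_(refcount *r)
{
	return atomic_fetch_sub(&r->refs, 1) == 1;
}

int main(void)
{
	refcount users;

	refcount_set_(&users, 1);	/* the creator's own reference */

	/* ...creation fails somewhere after this point... */

	/*
	 * Dropping our reference must leave the count at zero; if it
	 * does not, something leaked a reference during the partially
	 * completed creation, which is exactly what the WARN_ON_ONCE
	 * in the patch flags.
	 */
	assert(refcount_dec_and_test_(&users));
	return 0;
}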
From 9167ab79936206118cc60e47dcb926c3489f3bd5 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Sun, 27 Oct 2019 16:23:23 +0100
Subject: [PATCH 3/3] KVM: vmx, svm: always run with EFER.NXE=1 when shadow
 paging is active

VMX already does so if the host has SMEP, in order to support the
combination of CR0.WP=1 and CR4.SMEP=1. However, it is perfectly safe
to always do so, and in fact VMX already ends up running with
EFER.NXE=1 on old processors that lack the "load EFER" controls,
because it may help avoid a slow MSR write. Removing all the
conditionals simplifies the code.

SVM does not have similar code, but it should since recent AMD
processors do support SMEP. So this patch also makes the code for the
two vendors more similar while fixing NPT=0, CR0.WP=1 and CR4.SMEP=1
on AMD processors.

Cc: stable@vger.kernel.org
Cc: Joerg Roedel
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/svm.c     | 10 ++++++++--
 arch/x86/kvm/vmx/vmx.c | 14 +++-----------
 2 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ca200b50cde4..c5673bda4b66 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -734,8 +734,14 @@ static int get_npt_level(struct kvm_vcpu *vcpu)
 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	vcpu->arch.efer = efer;
-	if (!npt_enabled && !(efer & EFER_LMA))
-		efer &= ~EFER_LME;
+
+	if (!npt_enabled) {
+		/* Shadow paging assumes NX to be available. */
+		efer |= EFER_NX;
+
+		if (!(efer & EFER_LMA))
+			efer &= ~EFER_LME;
+	}
 
 	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
 	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 8f01019295a1..5d21a4ab28cf 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -969,17 +969,9 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 	u64 guest_efer = vmx->vcpu.arch.efer;
 	u64 ignore_bits = 0;
 
-	if (!enable_ept) {
-		/*
-		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
-		 * host CPUID is more efficient than testing guest CPUID
-		 * or CR4. Host SMEP is anyway a requirement for guest SMEP.
-		 */
-		if (boot_cpu_has(X86_FEATURE_SMEP))
-			guest_efer |= EFER_NX;
-		else if (!(guest_efer & EFER_NX))
-			ignore_bits |= EFER_NX;
-	}
+	/* Shadow paging assumes NX to be available. */
+	if (!enable_ept)
+		guest_efer |= EFER_NX;
 
 	/*
 	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
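The one-line comment both hunks now share compresses the reasoning from
the deleted VMX comment: with shadow paging the MMU builds PTEs on the
guest's behalf, and it needs the NX bit to handle CR0.WP=1 and
CR4.SMEP=1, which is only architecturally valid when EFER.NXE=1. Below
is a standalone sketch of the resulting EFER fixup; effective_efer() is
a hypothetical mirror of the new svm_set_efer() logic, with the EFER bit
positions as architecturally defined, not KVM code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* EFER bit positions per the x86 architecture. */
#define EFER_SCE (1ULL << 0)
#define EFER_LME (1ULL << 8)
#define EFER_LMA (1ULL << 10)
#define EFER_NX  (1ULL << 11)

/*
 * When two-dimensional paging (NPT/EPT) is disabled, i.e. shadow
 * paging is active, force NXE on regardless of what the guest set.
 */
static uint64_t effective_efer(uint64_t guest_efer, bool tdp_enabled)
{
	uint64_t efer = guest_efer;

	if (!tdp_enabled) {
		/* Shadow paging assumes NX to be available. */
		efer |= EFER_NX;

		if (!(efer & EFER_LMA))
			efer &= ~EFER_LME;
	}
	return efer;
}

int main(void)
{
	/* Guest leaves NX clear, shadow paging active: NX is forced on. */
	printf("%#llx\n",
	       (unsigned long long)effective_efer(EFER_SCE, false));
	return 0;
}

Forcing the bit unconditionally is also why the ignore_bits dance could
be dropped on VMX: the guest-visible EFER.NXE no longer influences the
value loaded into hardware when shadow paging is in use.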