x86/KVM: Clean up host's steal time structure

commit a6bd811f12 upstream.

Now that we are mapping kvm_steal_time from the guest directly we
don't need to keep a copy of it in kvm_vcpu_arch.st. The same is true
for the stime field.

This is part of CVE-2019-3016.
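
For reference, the direct-map access pattern that the earlier patches in
this series put in place, and that makes the cached copy redundant, looks
roughly like this. A simplified sketch based on the hunks below, not a
verbatim quote of the kernel code:

	struct kvm_host_map map;
	struct kvm_steal_time *st;

	/* Map the guest's steal-time page through the per-vCPU pfn cache. */
	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
			&vcpu->arch.st.cache, false))
		return;

	st = map.hva +
		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);

	/* ... update the guest-visible kvm_steal_time in place ... */

	/* Write back and mark the page dirty. */
	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);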

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/include/asm/kvm_host.h |  3 +--
 arch/x86/kvm/x86.c              | 11 +++--------
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h

@@ -667,10 +667,9 @@ struct kvm_vcpu_arch {
 	bool pvclock_set_guest_stopped_request;
 
 	struct {
+		u8 preempted;
 		u64 msr_val;
 		u64 last_steal;
-		struct gfn_to_hva_cache stime;
-		struct kvm_steal_time steal;
 		struct gfn_to_pfn_cache cache;
 	} st;

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c

@@ -2616,7 +2616,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
 		kvm_vcpu_flush_tlb(vcpu, false);
 
-	vcpu->arch.st.steal.preempted = 0;
+	vcpu->arch.st.preempted = 0;
 
 	if (st->version & 1)
 		st->version += 1;  /* first time write, random junk */
@@ -2786,11 +2786,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (data & KVM_STEAL_RESERVED_MASK)
 			return 1;
 
-		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-						data & KVM_STEAL_VALID_BITS,
-						sizeof(struct kvm_steal_time)))
-			return 1;
-
 		vcpu->arch.st.msr_val = data;
 
 		if (!(data & KVM_MSR_ENABLED))
@@ -3504,7 +3499,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
-	if (vcpu->arch.st.steal.preempted)
+	if (vcpu->arch.st.preempted)
 		return;
 
 	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
@@ -3514,7 +3509,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	st = map.hva +
 		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
 
-	st->preempted = vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
+	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
 
 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
 }
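
Note that the u8 preempted field is the one piece of steal-time state that
stays host-side after this cleanup: kvm_steal_time_set_preempted() checks it
to skip the map-and-write when the flag has already been published to the
guest, and record_steal_time() clears both copies on the next guest entry.
A toy userspace illustration of that shadow-flag pattern (hypothetical names,
not kernel code):

#include <stdint.h>
#include <stdatomic.h>
#include <stdio.h>

/* Stands in for the guest-visible kvm_steal_time page. */
struct guest_steal { _Atomic uint8_t preempted; };

struct host_state {
	uint8_t preempted;          /* shadow, like vcpu->arch.st.preempted */
	struct guest_steal *mapped; /* stands in for the kvm_map_gfn() result */
};

/* Sched-out path: publish "preempted" at most once per cycle. */
static void set_preempted(struct host_state *h)
{
	if (h->preempted)           /* already published, skip the write */
		return;
	atomic_store(&h->mapped->preempted, 1);
	h->preempted = 1;
}

/* vCPU-entry path: consume the flag and reset both copies. */
static void record_steal(struct host_state *h)
{
	uint8_t old = atomic_exchange(&h->mapped->preempted, 0);
	h->preempted = 0;
	if (old)
		printf("guest was marked preempted since last run\n");
}

int main(void)
{
	struct guest_steal g = { 0 };
	struct host_state h = { .preempted = 0, .mapped = &g };

	set_preempted(&h);   /* vCPU scheduled out */
	set_preempted(&h);   /* no-op: shadow says it is already set */
	record_steal(&h);    /* vCPU re-entered: clears both copies */
	return 0;
}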