x86/kvm: Cache gfn to pfn translation
commit 917248144d upstream.
__kvm_map_gfn()'s call to gfn_to_pfn_memslot() is
* relatively expensive
* in certain cases (such as when done from atomic context) cannot be called
Stashing gfn-to-pfn mapping should help with both cases.
This is part of CVE-2019-3016.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d71eef9fcc
commit f7c1a6c67f
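For context, a minimal caller-side sketch of how the cache-aware API added below is intended to be used. This is not part of the patch: the later CVE-2019-3016 fixes rework record_steal_time() along these lines, but the helper name and its gfn/offset parameters here are hypothetical; only kvm_map_gfn(), kvm_unmap_gfn() and vcpu->arch.st.cache come from this commit.

/*
 * Hypothetical caller: map the guest's steal-time page through the
 * per-vCPU gfn_to_pfn_cache, update it, then unmap.  atomic=false lets
 * the cache be (re)filled on a miss; the pfn stays cached (and the page
 * pinned) across calls, so repeated updates avoid gfn_to_pfn_memslot().
 */
static void update_steal_time_cached(struct kvm_vcpu *vcpu, gfn_t gfn,
				     unsigned int offset)
{
	struct kvm_host_map map;
	struct kvm_steal_time *st;

	if (kvm_map_gfn(vcpu, gfn, &map, &vcpu->arch.st.cache, false))
		return;

	st = map.hva + offset;
	st->steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;

	/* dirty=true marks the guest page dirty; the pfn stays cached. */
	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
}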
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -671,6 +671,7 @@ struct kvm_vcpu_arch {
 		u64 last_steal;
 		struct gfn_to_hva_cache stime;
 		struct kvm_steal_time steal;
+		struct gfn_to_pfn_cache cache;
 	} st;
 
 	u64 tsc_offset;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9081,6 +9081,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+	struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
+
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
 
 	kvmclock_reset(vcpu);
@@ -9745,11 +9748,18 @@ out_free:
 
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
+	struct kvm_vcpu *vcpu;
+	int i;
+
 	/*
 	 * memslots->generation has been incremented.
 	 * mmio generation may have reached its maximum value.
 	 */
 	kvm_mmu_invalidate_mmio_sptes(kvm, gen);
+
+	/* Force re-initialization of steal_time cache */
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_vcpu_kick(vcpu);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -728,6 +728,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 void kvm_get_pfn(kvm_pfn_t pfn);
 
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
@@ -758,10 +759,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+		struct gfn_to_pfn_cache *cache, bool atomic);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -18,7 +18,7 @@ struct kvm_memslots;
 
 enum kvm_mr_change;
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 /*
  * Address types:
@@ -49,4 +49,11 @@ struct gfn_to_hva_cache {
	struct kvm_memory_slot *memslot;
 };
 
+struct gfn_to_pfn_cache {
+	u64 generation;
+	gfn_t gfn;
+	kvm_pfn_t pfn;
+	bool dirty;
+};
+
 #endif /* __KVM_TYPES_H__ */
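A cached translation is usable only if it holds a pinned pfn, it is for the gfn being asked for, and it was filled under the current memslots generation; the memslot update path bumps slots->generation, and kvm_arch_memslots_updated() above kicks every vCPU so the steal-time cache gets revalidated on next use. A condensed restatement of the check __kvm_map_gfn() performs below; the helper name is made up for illustration:

/*
 * Illustrative restatement (hypothetical helper, not in the patch) of
 * the validity check __kvm_map_gfn() applies to a gfn_to_pfn_cache.
 */
static bool gfn_to_pfn_cache_hit(struct gfn_to_pfn_cache *cache,
				 gfn_t gfn, u64 slots_generation)
{
	return cache->pfn && cache->gfn == gfn &&
	       cache->generation == slots_generation;
}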
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1809,27 +1809,72 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+{
+	if (pfn == 0)
+		return;
+
+	if (cache)
+		cache->pfn = cache->gfn = 0;
+
+	if (dirty)
+		kvm_release_pfn_dirty(pfn);
+	else
+		kvm_release_pfn_clean(pfn);
+}
+
+static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
+				 struct gfn_to_pfn_cache *cache, u64 gen)
+{
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
+
+	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
+	cache->gfn = gfn;
+	cache->dirty = false;
+	cache->generation = gen;
+}
+
 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
-			 struct kvm_host_map *map)
+			 struct kvm_host_map *map,
+			 struct gfn_to_pfn_cache *cache,
+			 bool atomic)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
 	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
+	u64 gen = slots->generation;
 
 	if (!map)
 		return -EINVAL;
 
-	pfn = gfn_to_pfn_memslot(slot, gfn);
+	if (cache) {
+		if (!cache->pfn || cache->gfn != gfn ||
+			cache->generation != gen) {
+			if (atomic)
+				return -EAGAIN;
+			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
+		}
+		pfn = cache->pfn;
+	} else {
+		if (atomic)
+			return -EAGAIN;
+		pfn = gfn_to_pfn_memslot(slot, gfn);
+	}
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
 
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		hva = kmap(page);
+		if (atomic)
+			hva = kmap_atomic(page);
+		else
+			hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-	} else {
+	} else if (!atomic) {
 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+	} else {
+		return -EINVAL;
 #endif
 	}
 
@@ -1844,20 +1889,25 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 	return 0;
 }
 
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+		struct gfn_to_pfn_cache *cache, bool atomic)
 {
-	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
+	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
+			cache, atomic);
 }
 EXPORT_SYMBOL_GPL(kvm_map_gfn);
 
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
-	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
+	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
+		NULL, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
 static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
-			struct kvm_host_map *map, bool dirty)
+			struct kvm_host_map *map,
+			struct gfn_to_pfn_cache *cache,
+			bool dirty, bool atomic)
 {
 	if (!map)
 		return;
@@ -1865,34 +1915,44 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
 	if (!map->hva)
 		return;
 
-	if (map->page != KVM_UNMAPPED_PAGE)
-		kunmap(map->page);
+	if (map->page != KVM_UNMAPPED_PAGE) {
+		if (atomic)
+			kunmap_atomic(map->hva);
+		else
+			kunmap(map->page);
+	}
 #ifdef CONFIG_HAS_IOMEM
-	else
+	else if (!atomic)
 		memunmap(map->hva);
+	else
+		WARN_ONCE(1, "Unexpected unmapping in atomic context");
 #endif
 
-	if (dirty) {
+	if (dirty)
 		mark_page_dirty_in_slot(memslot, map->gfn);
-		kvm_release_pfn_dirty(map->pfn);
-	} else {
-		kvm_release_pfn_clean(map->pfn);
-	}
+
+	if (cache)
+		cache->dirty |= dirty;
+	else
+		kvm_release_pfn(map->pfn, dirty, NULL);
 
 	map->hva = NULL;
 	map->page = NULL;
 }
 
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
 {
-	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
+	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
+			cache, dirty, atomic);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
 
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 {
-	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
+	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
+			dirty, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
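Note the asymmetry the atomic flag introduces: with atomic=true, __kvm_map_gfn() only ever consumes an already-valid cache entry and returns -EAGAIN on a miss rather than sleeping in gfn_to_pfn_memslot(), kmap() or memremap(). A hypothetical caller running with preemption disabled would use the return value to defer the work to a context where the cache can be refilled:

/*
 * Hypothetical atomic-context user: try the cached mapping; on failure
 * (-EAGAIN means the cache is cold or stale) report that the update
 * must be retried later with atomic == false so the cache can be
 * refilled.
 */
static bool try_update_guest_page_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_host_map map;

	if (kvm_map_gfn(vcpu, gfn, &map, &vcpu->arch.st.cache, true))
		return false;

	/* ... write to the guest page through map.hva ... */

	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
	return true;
}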