powerpc: Disable relocation on exceptions whenever PR KVM is active

For PR KVM we allow userspace to map 0xc000000000000000. Because
transitioning from userspace to the guest kernel may use the relocated
exception vectors, we have to disable relocation on exceptions whenever
PR KVM is active, as we cannot trust that address.

This issue does not apply to HV KVM, since changing from a guest to the
hypervisor will never use the relocated exception vectors.

Currently the hypervisor interface only allows us to toggle relocation
on exceptions at a partition-wide scope, so we need to globally disable
relocation on exceptions when the first PR KVM instance is started and
only re-enable it once all PR KVM instances have been destroyed.

It's a bit heavy handed, but until the hypervisor gives us a lightweight
way to toggle relocation on exceptions on a single thread, it's the only
real option we have.

Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
commit a413f474a0 (parent 96f013fe1b)
Author:    Ian Munsie, 2012-12-03 18:36:13 +0000
Committer: Benjamin Herrenschmidt
3 changed files with 32 additions and 3 deletions
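
The "partition-wide scope" the commit message refers to is the H_SET_MODE
hypercall. For context, the pSeries helpers exported below sit on top of
pseries hcall wrappers roughly shaped like the sketch that follows;
plpar_set_mode() is the existing wrapper around H_SET_MODE, but the numeric
mflags/resource values shown here are illustrative assumptions, not part of
this diff.

/*
 * Rough sketch only -- resource 3 is assumed to be the H_SET_MODE
 * address-translation-mode (AIL) resource; check the PAPR definition
 * before relying on these constants.
 */
static inline long enable_reloc_on_exceptions(void)
{
	/* mflags = 3: deliver exceptions with relocation on */
	return plpar_set_mode(3, 3, 0, 0);
}

static inline long disable_reloc_on_exceptions(void)
{
	/* mflags = 0: fall back to the real-mode vectors at 0x0 */
	return plpar_set_mode(0, 3, 0, 0);
}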

arch/powerpc/include/asm/hvcall.h

@@ -395,6 +395,15 @@ static inline unsigned long cmo_get_page_size(void)
{
return CMO_PageSize;
}
extern long pSeries_enable_reloc_on_exc(void);
extern long pSeries_disable_reloc_on_exc(void);
#else
#define pSeries_enable_reloc_on_exc() do {} while (0)
#define pSeries_disable_reloc_on_exc() do {} while (0)
#endif /* CONFIG_PPC_PSERIES */
#endif /* __ASSEMBLY__ */
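
The empty do {} while (0) stubs above let common code call these helpers
unconditionally: on non-pseries configs the calls compile away, and on
pseries the caller still gates them on the firmware feature at run time. A
minimal sketch of the intended calling pattern (the function name here is
made up for illustration; the real caller is kvmppc_core_init_vm() in the
next hunk):

static void example_on_first_pr_vm(void)
{
	/* No-op on !CONFIG_PPC_PSERIES; skipped on hypervisors without H_SET_MODE. */
	if (firmware_has_feature(FW_FEATURE_SET_MODE))
		pSeries_disable_reloc_on_exc();
}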

arch/powerpc/kvm/book3s_pr.c

@@ -34,6 +34,7 @@
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
@@ -1284,12 +1285,21 @@ void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);
int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
#endif
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
spin_lock(&kvm_global_user_count_lock);
if (++kvm_global_user_count == 1)
pSeries_disable_reloc_on_exc();
spin_unlock(&kvm_global_user_count_lock);
}
return 0;
}
@@ -1298,6 +1308,14 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
#ifdef CONFIG_PPC64
WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
spin_lock(&kvm_global_user_count_lock);
BUG_ON(kvm_global_user_count == 0);
if (--kvm_global_user_count == 0)
pSeries_enable_reloc_on_exc();
spin_unlock(&kvm_global_user_count_lock);
}
}
static int kvmppc_book3s_init(void)
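
Distilled from the two hunks above: the enable/disable pairing is driven by
a global, lock-protected count of live PR VMs. The names in this restatement
are illustrative, not the patch's, but the shape is the same; keeping the
count transition and the hcall under one spinlock is presumably what rules
out two VMs racing to be "first" (or "last") and leaving the partition in
the wrong mode.

static unsigned int pr_vm_count;		/* illustrative names */
static DEFINE_SPINLOCK(pr_vm_count_lock);

static void pr_vm_get(void)
{
	spin_lock(&pr_vm_count_lock);
	if (++pr_vm_count == 1)			/* first PR VM appears */
		pSeries_disable_reloc_on_exc();
	spin_unlock(&pr_vm_count_lock);
}

static void pr_vm_put(void)
{
	spin_lock(&pr_vm_count_lock);
	BUG_ON(pr_vm_count == 0);
	if (--pr_vm_count == 0)			/* last PR VM gone */
		pSeries_enable_reloc_on_exc();
	spin_unlock(&pr_vm_count_lock);
}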

arch/powerpc/platforms/pseries/setup.c

@@ -375,7 +375,7 @@ static void pSeries_idle(void)
* to ever be a problem in practice we can move this into a kernel thread to
* finish off the process later in boot.
*/
static int __init pSeries_enable_reloc_on_exc(void)
long pSeries_enable_reloc_on_exc(void)
{
long rc;
unsigned int delay, total_delay = 0;
@@ -397,9 +397,9 @@ static int __init pSeries_enable_reloc_on_exc(void)
mdelay(delay);
}
}
EXPORT_SYMBOL(pSeries_enable_reloc_on_exc);
#ifdef CONFIG_KEXEC
static long pSeries_disable_reloc_on_exc(void)
long pSeries_disable_reloc_on_exc(void)
{
long rc;
@@ -410,7 +410,9 @@ static long pSeries_disable_reloc_on_exc(void)
mdelay(get_longbusy_msecs(rc));
}
}
EXPORT_SYMBOL(pSeries_disable_reloc_on_exc);
#ifdef CONFIG_KEXEC
static void pSeries_machine_kexec(struct kimage *image)
{
long rc;
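
The setup.c hunks above only show fragments of the retry loops around the
hcall. For readability, the overall shape of pSeries_enable_reloc_on_exc()
is roughly the reconstruction below; the 1000 ms give-up threshold, the
warning text and the enable_reloc_on_exceptions() wrapper name are assumed
for illustration rather than taken from this diff.

long pSeries_enable_reloc_on_exc(void)
{
	long rc;
	unsigned int delay, total_delay = 0;

	while (1) {
		rc = enable_reloc_on_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			return rc;

		/* H_SET_MODE is partition wide and may ask us to retry later. */
		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > 1000) {	/* assumed give-up cap */
			pr_warn("Giving up waiting to enable relocation on exceptions (%u msec)\n",
				total_delay);
			return rc;
		}

		mdelay(delay);
	}
}

Judging by the fragments shown, pSeries_disable_reloc_on_exc() follows the
same pattern with the matching disable wrapper, but without the give-up cap.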