/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/utsname.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
#include "gdbstub.h"
#include "host-utils.h"
#include "hw/pc.h"
#include "hw/apic.h"
#include "ioport.h"

#ifdef CONFIG_KVM_PARA
#include <linux/kvm_para.h>
#endif

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
static bool has_msr_async_pf_en;
#endif
static int lm_capable_kernel;
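
/* Fetch KVM's supported-CPUID table with room for 'max' entries. Returns
 * NULL when the buffer was too small, so the caller can retry with a
 * larger one. */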
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}
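
/* Look up one CPUID register of a function/index pair in KVM's
 * supported-CPUID table, growing the buffer geometrically until the whole
 * table fits, and patch up feature bits that older kernels misreport. */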
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                switch (function) {
                case 1:
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                    break;
                case 0x80000001:
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);
                    ret |= cpuid_1_edx & 0x183f7ff;
                    break;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

    return ret;
}

#ifdef CONFIG_KVM_PARA
struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
#ifdef KVM_CAP_ASYNC_PF
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
#endif
    { -1, -1 }
};

static int get_para_features(CPUState *env)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(env->kvm_state, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }
#ifdef KVM_CAP_ASYNC_PF
    has_msr_async_pf_en = features & (1 << KVM_FEATURE_ASYNC_PF);
#endif
    return features;
}
#endif /* CONFIG_KVM_PARA */
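
/*
 * HWPoison handling: when the host kernel poisons a page backing guest
 * memory, accesses raise SIGBUS and we forward the error to the guest as
 * an MCE. The poisoned host page stays unusable, so we record it here and
 * remap it with a fresh page on guest reset; otherwise the guest would hit
 * the same SIGBUS again after rebooting and could never recover.
 */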
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        qemu_free(page);
    }
}

#ifdef KVM_CAP_MCE
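/* Record a poisoned page so kvm_unpoison_all() can remap it on reset;
 * pages already on the list are skipped. */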
static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = qemu_malloc(sizeof(HWPoisonPage));
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}
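
/* Compose MCi_STATUS/MCG_STATUS for a SIGBUS-reported memory error and
 * inject it as an MCE: action-required (BUS_MCEERR_AR) errors mark the
 * saved IP as the faulting one (EIPV), action-optional ones as
 * restartable (RIPV). */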
static void kvm_mce_inject(CPUState *env, target_phys_addr_t paddr, int code)
{
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }
    cpu_x86_inject_mce(NULL, env, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc,
                       cpu_x86_support_mca_broadcast(env) ?
                       MCE_INJECT_BROADCAST : 0);
}
#endif /* KVM_CAP_MCE */

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
#ifdef KVM_CAP_MCE
    ram_addr_t ram_addr;
    target_phys_addr_t paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
        if (qemu_ram_addr_from_host(addr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr,
                                               &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(env, paddr, code);
    } else
#endif /* KVM_CAP_MCE */
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
#ifdef KVM_CAP_MCE
    if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        ram_addr_t ram_addr;
        target_phys_addr_t paddr;

        /* Hope we are lucky for AO MCE */
        if (qemu_ram_addr_from_host(addr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr,
                                               &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(first_cpu, paddr, code);
    } else
#endif /* KVM_CAP_MCE */
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}
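
/* On kernels without KVM_CAP_VCPU_EVENTS, a pending machine-check
 * exception cannot go through the generic event injection path and has to
 * be delivered via the older KVM_X86_SET_MCE interface. */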
static int kvm_inject_mce_oldstyle(CPUState *env)
{
#ifdef KVM_CAP_MCE
    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        env->exception_injected = -1;

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, &mce);
    }
#endif /* KVM_CAP_MCE */
    return 0;
}
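
/* VM state change handler: invalidate the cached TSC value whenever the
 * VM resumes, so it is re-read from the kernel on the next register sync. */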
static void cpu_update_state(void *opaque, int running, int reason)
{
    CPUState *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}
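
/* Per-VCPU setup: mask the CPU model's feature words down to what KVM
 * supports, build the complete CPUID table (including the KVM
 * paravirtualization leaves), enable MCE emulation if available, and hand
 * everything to the kernel via KVM_SET_CPUID2. */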
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
#ifdef CONFIG_KVM_PARA
    uint32_t signature[3];
#endif

    env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_ECX);
    env->cpuid_ext_features |= i;

    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_EDX);
    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_ECX);
    env->cpuid_svm_features  &= kvm_arch_get_supported_cpuid(env, 0x8000000A,
                                                             0, R_EDX);

    cpuid_i = 0;

#ifdef CONFIG_KVM_PARA
    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    c->eax = 0;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features & get_para_features(env);
#endif

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    break;
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    cpuid_data.cpuid.nent = cpuid_i;

#ifdef KVM_CAP_MCE
    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
        && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks > MCE_BANKS_DEF) {
            banks = MCE_BANKS_DEF;
        }
        mcg_cap &= MCE_CAP_DEF;
        mcg_cap |= banks;
        ret = kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, &mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }

        env->mcg_cap = mcg_cap;
    }
#endif

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}

void kvm_arch_reset_vcpu(CPUState *env)
{
    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}
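
/* Probe the kernel's MSR index list once and remember whether MSR_STAR
 * and MSR_VM_HSAVE_PA are supported, so they are only accessed on kernels
 * that actually provide them. */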
static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
                                              msr_list.nmsrs *
                                              sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
            }
        }

        free(kvm_msr_list);
    }

    return ret;
}

int kvm_arch_init(KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    int ret;
    struct utsname utsname;

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }
#endif
    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    return 0;
}
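
/* Helpers translating between QEMU's SegmentCache and the kernel's
 * struct kvm_segment; set_v8086_seg() forces the fixed attributes that
 * vm86 mode requires. */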
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 (rhs->present * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}
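
/* Offsets into the kvm_xsave data area, counted in 32-bit words, matching
 * the hardware XSAVE layout. */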
#ifdef KVM_CAP_XSAVE
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#endif

static int kvm_put_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    int i, r;
    struct kvm_xsave* xsave;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave()) {
        return kvm_put_fpu(env);
    }

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    memset(xsave, 0, sizeof(struct kvm_xsave));
    cwd = swd = twd = fop = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->region[0] = (uint32_t)(swd << 16) + cwd;
    xsave->region[1] = (uint32_t)(fop << 16) + twd;
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
            sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
            sizeof env->ymmh_regs);
    r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
    qemu_free(xsave);
    return r;
#else
    return kvm_put_fpu(env);
#endif
}

static int kvm_put_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
#else
    return 0;
#endif
}

static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
    sregs.apic_base = cpu_get_apic_base(env->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}
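
/* Write the guest MSRs into the kernel. 'level' limits the set: the TSC
 * and the KVM paravirtual MSRs have side effects or are costly to write,
 * so they are only transferred on reset or full state updates. */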
static int kvm_put_msrs(CPUState *env, int level)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif
    if (level == KVM_PUT_FULL_STATE) {
        /*
         * KVM is yet unable to synchronize TSC values of multiple VCPUs on
         * writeback. Until this is fixed, we only write the offset to SMP
         * guests after migration, desynchronizing the VCPUs, but avoiding
         * huge jump-backs that would occur without any writeback at all.
         */
        if (smp_cpus == 1 || env->tsc != 0) {
            kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        }
    }
    /*
     * The following paravirtual MSRs have side effects on the guest or are
     * too heavy for normal writeback. Limit them to reset or full state
     * updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
        if (has_msr_async_pf_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
                              env->async_pf_en_msr);
        }
#endif
    }
#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        int i;

        kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }
#endif

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    struct kvm_xsave* xsave;
    int ret, i;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave()) {
        return kvm_get_fpu(env);
    }

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        qemu_free(xsave);
        return ret;
    }

    cwd = (uint16_t)xsave->region[0];
    swd = (uint16_t)(xsave->region[0] >> 16);
    twd = (uint16_t)xsave->region[1];
    fop = (uint16_t)(xsave->region[1] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
            sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
            sizeof env->ymmh_regs);
    qemu_free(xsave);
    return 0;
#else
    return kvm_get_fpu(env);
#endif
}

static int kvm_get_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[0].xcr == 0) {
            env->xcr0 = xcrs.xcrs[0].value;
            break;
        }
    }
    return 0;
#else
    return 0;
#endif
}

static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env->apic_state, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env->apic_state, sregs.cr8);

#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
                (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    msrs[n++].index = MSR_PAT;
    if (has_msr_star) {
        msrs[n++].index = MSR_STAR;
    }
    if (has_msr_hsave_pa) {
        msrs[n++].index = MSR_VM_HSAVE_PA;
    }

    if (!env->tsc_valid) {
        msrs[n++].index = MSR_IA32_TSC;
        env->tsc_valid = !vm_running;
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
    if (has_msr_async_pf_en) {
        msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
    }
#endif

#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        msrs[n++].index = MSR_MCG_STATUS;
        msrs[n++].index = MSR_MCG_CTL;
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            msrs[n++].index = MSR_MC0_CTL + i;
        }
    }
#endif

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
#ifdef KVM_CAP_MCE
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
#endif
        default:
#ifdef KVM_CAP_MCE
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
#endif
            break;
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
#endif
        }
    }

    return 0;
}

static int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        env->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}
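
/* Transfer pending exception, interrupt, and NMI state to the kernel.
 * The NMI-pending and SIPI-vector fields are only marked valid on reset
 * or full state writes. */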
static int kvm_put_vcpu_events(CPUState *env, int level)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    events.flags = 0;
    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
#else
    return 0;
#endif
}
|
|
|
|
|
|
|
|
static int kvm_get_vcpu_events(CPUState *env)
|
|
|
|
{
|
|
|
|
#ifdef KVM_CAP_VCPU_EVENTS
|
|
|
|
struct kvm_vcpu_events events;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!kvm_has_vcpu_events()) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
2009-12-14 12:26:17 +01:00
|
|
|
env->exception_injected =
|
2009-11-25 00:33:03 +01:00
|
|
|
events.exception.injected ? events.exception.nr : -1;
|
|
|
|
env->has_error_code = events.exception.has_error_code;
|
|
|
|
env->error_code = events.exception.error_code;
|
|
|
|
|
|
|
|
env->interrupt_injected =
|
|
|
|
events.interrupt.injected ? events.interrupt.nr : -1;
|
|
|
|
env->soft_interrupt = events.interrupt.soft;
|
|
|
|
|
|
|
|
env->nmi_injected = events.nmi.injected;
|
|
|
|
env->nmi_pending = events.nmi.pending;
|
|
|
|
if (events.nmi.masked) {
|
|
|
|
env->hflags2 |= HF2_NMI_MASK;
|
|
|
|
} else {
|
|
|
|
env->hflags2 &= ~HF2_NMI_MASK;
|
|
|
|
}
|
|
|
|
|
|
|
|
env->sipi_vector = events.sipi_vector;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
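
/* Work around guest-debug deficiencies of older kernels by re-issuing
 * KVM_SET_GUEST_DEBUG where needed; see the comment inside for the two
 * cases. */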
static int kvm_guest_debug_workarounds(CPUState *env)
{
    int ret = 0;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have
     * to reinject it via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
        ret = kvm_update_guest_debug(env, reinject_trap);
    }
#endif /* KVM_CAP_SET_GUEST_DEBUG */
    return ret;
}
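
/* Transfer the guest's debug registers DR0-DR3, DR6 and DR7, provided
 * the kernel exposes KVM_CAP_DEBUGREGS; on older kernels both
 * directions are silent no-ops. */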
static int kvm_put_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
#else
    return 0;
#endif
}

static int kvm_get_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;
#endif

    return 0;
}
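
/* Push the full CPU state into the kernel. Ordering constraints are
 * noted inline: old-style MCE injection must run before kvm_put_msrs so
 * that the MSR write-back picks up its result, and the guest-debug
 * workarounds must run last. 'level' controls how much state is
 * written (runtime, reset, or full state). */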
int kvm_arch_put_registers(CPUState *env, int level)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));

    ret = kvm_getput_regs(env, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(env, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(env);
        if (ret < 0) {
            return ret;
        }
    }
    ret = kvm_put_vcpu_events(env, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
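
/* Mirror of kvm_arch_put_registers: pull the complete CPU state out of
 * the kernel. Both directions may only run while the vcpu is stopped
 * or from the vcpu's own thread, which the assert enforces. */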
int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));

    ret = kvm_getput_regs(env, 0);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_sregs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_msrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_mp_state(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_vcpu_events(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
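
/* Runs just before the vcpu reenters the kernel. Always injects a
 * pending NMI; with a userspace irqchip it also delivers pending
 * external interrupts when the guest can take them, requests an
 * interrupt-window exit when it cannot, and mirrors the TPR into the
 * shared kvm_run area. */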
void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    int ret;

    /* Inject NMI */
    if (env->interrupt_request & CPU_INTERRUPT_NMI) {
        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
        DPRINTF("injected NMI\n");
        ret = kvm_vcpu_ioctl(env, KVM_NMI);
        if (ret < 0) {
            fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                    strerror(-ret));
        }
    }

    if (!kvm_irqchip_in_kernel()) {
        /* Force the VCPU out of its inner loop to process the INIT request */
        if (env->interrupt_request & CPU_INTERRUPT_INIT) {
            env->exit_request = 1;
        }

        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (env->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit. This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((env->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(env->apic_state);
    }
}
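
/* Runs right after the vcpu returns from the kernel: fold the IF flag
 * and the APIC state (TPR, base) reported in kvm_run back into the CPU
 * state. */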
void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }
    cpu_set_apic_tpr(env->apic_state, run->cr8);
    cpu_set_apic_base(env->apic_state, run->apic_base);
}
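
/* Service interrupt_request bits that are handled outside the raw vcpu
 * execution loop: machine checks, INIT, SIPI, and unhalting on pending
 * interrupts or NMIs. With an in-kernel irqchip, everything but the MCE
 * path is the kernel's job. Returns non-zero if the vcpu should remain
 * halted. */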
int kvm_arch_process_async_events(CPUState *env)
{
    if (env->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        env->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(env);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request();
            env->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        env->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 0;
    }
    if (env->interrupt_request & CPU_INTERRUPT_INIT) {
        kvm_cpu_synchronize_state(env);
        do_cpu_init(env);
    }
    if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(env);
        do_cpu_sipi(env);
    }

    return env->halted;
}
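
/* On a HLT exit, put the vcpu to sleep unless an unmasked interrupt or
 * an NMI is already pending. */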
static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}
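
/* gdbstub integration: a software breakpoint patches an int3 (0xcc)
 * over the first opcode byte; removal verifies the int3 is still in
 * place before restoring the saved byte. */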
#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}
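
/* Shadow copy of the up to four hardware breakpoints programmed into
 * the x86 debug registers, so that debug exits can be mapped back to
 * the GDB breakpoint or watchpoint that triggered them. */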
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;
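
/* Decode a debug exit. For #DB (exception 1), DR6 bit 14 (BS) flags a
 * single-step trap and DR6 bits 0-3 identify which hardware breakpoint
 * fired; the matching R/W field in DR7 distinguishes execution
 * breakpoints from write/access watchpoints. For #BP, the address is
 * matched against our software breakpoints. Debug exceptions we did
 * not set up ourselves are passed back to the guest. */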
static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
{
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cpu_single_env);
        assert(cpu_single_env->exception_injected == -1);

        /* pass to guest */
        cpu_single_env->exception_injected = arch_info->exception;
        cpu_single_env->has_error_code = 0;
    }

    return ret;
}
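
/* Program the kvm_guest_debug control block from our breakpoint state.
 * For reference, the DR7 fields written below are:
 *   2 << (n * 2)       - global-enable bit G<n> for debug register n
 *   bits 16+4n, 17+4n  - type: 00 exec, 01 write, 11 read/write
 *   bits 18+4n, 19+4n  - length: 00=1, 01=2, 11=4, 10=8 bytes
 * The 0x0600 preset covers the exact-breakpoint-enable bit GE (bit 9)
 * plus the reserved always-one bit 10. */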
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */
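
/* Top-level exit dispatcher. A KVM_EXIT_FAIL_ENTRY with
 * VMX_INVALID_GUEST_STATE on a VMX-capable host usually means the
 * guest entered a mode (e.g. big real mode) that the CPU cannot
 * virtualize without unrestricted-mode support, hence the extended
 * hint printed for that case. */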
static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        ret = kvm_handle_debug(&run->debug.arch);
        break;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
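
/* Only stop on unhandled emulation failures while the guest runs in
 * real mode or at CPL != 3; failures in guest user space do not stop
 * the VM, presumably because the guest OS can contain the damage. */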
bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}