Merge remote branch 'qemu-kvm/uq/master' into staging

Commit 31b7c261a2 by Anthony Liguori, 2011-03-21 17:42:20 -05:00
24 changed files with 640 additions and 588 deletions


@ -863,10 +863,14 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
extern int phys_ram_fd;
extern ram_addr_t ram_size;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC_MASK (1 << 0)
typedef struct RAMBlock {
uint8_t *host;
ram_addr_t offset;
ram_addr_t length;
uint32_t flags;
char idstr[256];
QLIST_ENTRY(RAMBlock) next;
#if defined(__linux__) && !defined(TARGET_S390X)
@ -971,8 +975,4 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
uint8_t *buf, int len, int is_write);
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
uint64_t mcg_status, uint64_t addr, uint64_t misc,
int broadcast);
#endif /* CPU_ALL_H */


@ -50,6 +50,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
ram_addr_t size, void *host);
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should only be used for ram local to a device. */
void *qemu_get_ram_ptr(ram_addr_t addr);
/* Same but slower, to use for migration, where the order of


@ -203,6 +203,7 @@ typedef struct CPUWatchpoint {
int nr_cores; /* number of cores within this CPU package */ \
int nr_threads;/* number of threads within this CPU */ \
int running; /* Nonzero if cpu is currently running(usermode). */ \
int thread_id; \
/* user data */ \
void *opaque; \
\


@ -196,6 +196,30 @@ static inline TranslationBlock *tb_find_fast(void)
return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;
CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
CPUDebugExcpHandler *old_handler = debug_excp_handler;
debug_excp_handler = handler;
return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
CPUWatchpoint *wp;
if (!env->watchpoint_hit) {
QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
if (debug_excp_handler) {
debug_excp_handler(env);
}
}
/* main execution loop */
volatile sig_atomic_t exit_request;
@ -269,6 +293,9 @@ int cpu_exec(CPUState *env1)
if (env->exception_index >= EXCP_INTERRUPT) {
/* exit request from the cpu execution loop */
ret = env->exception_index;
if (ret == EXCP_DEBUG) {
cpu_handle_debug_exception(env);
}
break;
} else {
#if defined(CONFIG_USER_ONLY)

cpus.c

@ -148,7 +148,8 @@ static bool cpu_thread_is_idle(CPUState *env)
if (env->stopped || !vm_running) {
return true;
}
if (!env->halted || qemu_cpu_has_work(env)) {
if (!env->halted || qemu_cpu_has_work(env) ||
(kvm_enabled() && kvm_irqchip_in_kernel())) {
return false;
}
return true;
@ -166,29 +167,8 @@ static bool all_cpu_threads_idle(void)
return true;
}
static CPUDebugExcpHandler *debug_excp_handler;
CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
static void cpu_handle_guest_debug(CPUState *env)
{
CPUDebugExcpHandler *old_handler = debug_excp_handler;
debug_excp_handler = handler;
return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
CPUWatchpoint *wp;
if (!env->watchpoint_hit) {
QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
if (debug_excp_handler) {
debug_excp_handler(env);
}
gdb_set_stop_cpu(env);
qemu_system_debug_request();
#ifdef CONFIG_IOTHREAD
@ -245,11 +225,58 @@ static void qemu_init_sigbus(void)
prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
static void qemu_kvm_eat_signals(CPUState *env)
{
struct timespec ts = { 0, 0 };
siginfo_t siginfo;
sigset_t waitset;
sigset_t chkset;
int r;
sigemptyset(&waitset);
sigaddset(&waitset, SIG_IPI);
sigaddset(&waitset, SIGBUS);
do {
r = sigtimedwait(&waitset, &siginfo, &ts);
if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
perror("sigtimedwait");
exit(1);
}
switch (r) {
case SIGBUS:
if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
sigbus_reraise();
}
break;
default:
break;
}
r = sigpending(&chkset);
if (r == -1) {
perror("sigpending");
exit(1);
}
} while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
#ifndef CONFIG_IOTHREAD
if (sigismember(&chkset, SIGIO) || sigismember(&chkset, SIGALRM)) {
qemu_notify_event();
}
#endif
}
#else /* !CONFIG_LINUX */
static void qemu_init_sigbus(void)
{
}
static void qemu_kvm_eat_signals(CPUState *env)
{
}
#endif /* !CONFIG_LINUX */
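
The eat-signals loop above works because sigtimedwait() with a zero timeout dequeues an already-pending blocked signal or fails immediately with EAGAIN, so pending SIG_IPI/SIGBUS can be drained without ever sleeping. A minimal standalone sketch of that polling idiom, with SIGUSR1 standing in for SIG_IPI:

    #define _POSIX_C_SOURCE 200809L
    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        struct timespec ts = { 0, 0 };      /* zero timeout: poll, never block */
        siginfo_t siginfo;
        sigset_t set;
        int r;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL); /* blocked signals stay pending */
        raise(SIGUSR1);                     /* now pending, not delivered */

        r = sigtimedwait(&set, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }
        printf("drained signal %d\n", r);   /* prints SIGUSR1's number */
        return 0;
    }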
#ifndef _WIN32
@ -455,49 +482,6 @@ static void qemu_tcg_init_cpu_signals(void)
#endif
}
static void qemu_kvm_eat_signals(CPUState *env)
{
struct timespec ts = { 0, 0 };
siginfo_t siginfo;
sigset_t waitset;
sigset_t chkset;
int r;
sigemptyset(&waitset);
sigaddset(&waitset, SIG_IPI);
sigaddset(&waitset, SIGBUS);
do {
r = sigtimedwait(&waitset, &siginfo, &ts);
if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
perror("sigtimedwait");
exit(1);
}
switch (r) {
case SIGBUS:
if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
sigbus_reraise();
}
break;
default:
break;
}
r = sigpending(&chkset);
if (r == -1) {
perror("sigpending");
exit(1);
}
} while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
#ifndef CONFIG_IOTHREAD
if (sigismember(&chkset, SIGIO) || sigismember(&chkset, SIGALRM)) {
qemu_notify_event();
}
#endif
}
#else /* _WIN32 */
HANDLE qemu_event_handle;
@ -526,10 +510,6 @@ static void qemu_event_increment(void)
}
}
static void qemu_kvm_eat_signals(CPUState *env)
{
}
static int qemu_signal_init(void)
{
return 0;
@ -796,6 +776,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
qemu_mutex_lock(&qemu_global_mutex);
qemu_thread_get_self(env->thread);
env->thread_id = qemu_get_thread_id();
r = kvm_init_vcpu(env);
if (r < 0) {
@ -818,7 +799,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
if (cpu_can_run(env)) {
r = kvm_cpu_exec(env);
if (r == EXCP_DEBUG) {
cpu_handle_debug_exception(env);
cpu_handle_guest_debug(env);
}
}
qemu_kvm_wait_io_event(env);
@ -837,6 +818,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
/* signal CPU creation */
qemu_mutex_lock(&qemu_global_mutex);
for (env = first_cpu; env != NULL; env = env->next_cpu) {
env->thread_id = qemu_get_thread_id();
env->created = 1;
}
qemu_cond_signal(&qemu_cpu_cond);
@ -1110,7 +1092,7 @@ bool cpu_exec_all(void)
r = tcg_cpu_exec(env);
}
if (r == EXCP_DEBUG) {
cpu_handle_debug_exception(env);
cpu_handle_guest_debug(env);
break;
}
} else if (env->stop || env->stopped) {

exec.c

@ -638,6 +638,9 @@ void cpu_exec_init(CPUState *env)
env->numa_node = 0;
QTAILQ_INIT(&env->breakpoints);
QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
env->thread_id = qemu_get_thread_id();
#endif
*penv = env;
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
@ -2867,6 +2870,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
if (host) {
new_block->host = host;
new_block->flags |= RAM_PREALLOC_MASK;
} else {
if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
@ -2920,7 +2924,9 @@ void qemu_ram_free(ram_addr_t addr)
QLIST_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
QLIST_REMOVE(block, next);
if (mem_path) {
if (block->flags & RAM_PREALLOC_MASK) {
;
} else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
if (block->fd) {
munmap(block->host, block->length);
@ -2928,6 +2934,8 @@ void qemu_ram_free(ram_addr_t addr)
} else {
qemu_vfree(block->host);
}
#else
abort();
#endif
} else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
@ -2943,6 +2951,66 @@ void qemu_ram_free(ram_addr_t addr)
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
RAMBlock *block;
ram_addr_t offset;
int flags;
void *area, *vaddr;
QLIST_FOREACH(block, &ram_list.blocks, next) {
offset = addr - block->offset;
if (offset < block->length) {
vaddr = block->host + offset;
if (block->flags & RAM_PREALLOC_MASK) {
;
} else {
flags = MAP_FIXED;
munmap(vaddr, length);
if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
if (block->fd) {
#ifdef MAP_POPULATE
flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
MAP_PRIVATE;
#else
flags |= MAP_PRIVATE;
#endif
area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
flags, block->fd, offset);
} else {
flags |= MAP_PRIVATE | MAP_ANONYMOUS;
area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
flags, -1, 0);
}
#else
abort();
#endif
} else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
flags |= MAP_SHARED | MAP_ANONYMOUS;
area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
flags, -1, 0);
#else
flags |= MAP_PRIVATE | MAP_ANONYMOUS;
area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
flags, -1, 0);
#endif
}
if (area != vaddr) {
fprintf(stderr, "Could not remap addr: %lx@%lx\n",
length, addr);
exit(1);
}
qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
}
return;
}
}
}
#endif /* !_WIN32 */
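
qemu_ram_remap() above recovers a hwpoisoned guest page by unmapping the affected range and mapping fresh memory at the same host virtual address with MAP_FIXED, so existing pointers into the RAM block stay valid. A minimal sketch of that trick for the anonymous-private case:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4096;
        char *block = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (block == MAP_FAILED) {
            perror("mmap");
            exit(1);
        }
        memset(block, 0xff, len);               /* "poisoned" contents */

        /* discard the old page, then pin a fresh one at the same address */
        munmap(block, len);
        void *area = mmap(block, len, PROT_READ | PROT_WRITE,
                          MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (area != block) {
            perror("mmap");
            exit(1);
        }
        printf("first byte after remap: %#x\n", block[0]);   /* 0 */
        return 0;
    }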
/* Return a host pointer to ram allocated with qemu_ram_alloc.
With the exception of the softmmu code in this file, this should
only be used for local memory (e.g. video ram) that the device owns,


@ -211,6 +211,7 @@ int kvm_init_vcpu(CPUState *env)
env->kvm_fd = ret;
env->kvm_state = s;
env->kvm_vcpu_dirty = 1;
mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0) {
@ -830,7 +831,7 @@ static int kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
fprintf(stderr, "emulation failure\n");
if (!kvm_arch_stop_on_emulation_error(env)) {
cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
return 0;
return EXCP_INTERRUPT;
}
}
/* FIXME: Should trigger a qmp message to let management know
@ -889,11 +890,11 @@ void kvm_cpu_synchronize_post_init(CPUState *env)
int kvm_cpu_exec(CPUState *env)
{
struct kvm_run *run = env->kvm_run;
int ret;
int ret, run_ret;
DPRINTF("kvm_cpu_exec()\n");
if (kvm_arch_process_irqchip_events(env)) {
if (kvm_arch_process_async_events(env)) {
env->exit_request = 0;
return EXCP_HLT;
}
@ -919,7 +920,7 @@ int kvm_cpu_exec(CPUState *env)
cpu_single_env = NULL;
qemu_mutex_unlock_iothread();
ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
run_ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
qemu_mutex_lock_iothread();
cpu_single_env = env;
@ -927,18 +928,16 @@ int kvm_cpu_exec(CPUState *env)
kvm_flush_coalesced_mmio_buffer();
if (ret == -EINTR || ret == -EAGAIN) {
DPRINTF("io window exit\n");
ret = 0;
break;
}
if (ret < 0) {
DPRINTF("kvm run failed %s\n", strerror(-ret));
if (run_ret < 0) {
if (run_ret == -EINTR || run_ret == -EAGAIN) {
DPRINTF("io window exit\n");
ret = EXCP_INTERRUPT;
break;
}
DPRINTF("kvm run failed %s\n", strerror(-run_ret));
abort();
}
ret = 0; /* exit loop */
switch (run->exit_reason) {
case KVM_EXIT_IO:
DPRINTF("handle_io\n");
@ -947,7 +946,7 @@ int kvm_cpu_exec(CPUState *env)
run->io.direction,
run->io.size,
run->io.count);
ret = 1;
ret = 0;
break;
case KVM_EXIT_MMIO:
DPRINTF("handle_mmio\n");
@ -955,14 +954,16 @@ int kvm_cpu_exec(CPUState *env)
run->mmio.data,
run->mmio.len,
run->mmio.is_write);
ret = 1;
ret = 0;
break;
case KVM_EXIT_IRQ_WINDOW_OPEN:
DPRINTF("irq_window_open\n");
ret = EXCP_INTERRUPT;
break;
case KVM_EXIT_SHUTDOWN:
DPRINTF("shutdown\n");
qemu_system_reset_request();
ret = EXCP_INTERRUPT;
break;
case KVM_EXIT_UNKNOWN:
fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
@ -974,31 +975,18 @@ int kvm_cpu_exec(CPUState *env)
ret = kvm_handle_internal_error(env, run);
break;
#endif
case KVM_EXIT_DEBUG:
DPRINTF("kvm_exit_debug\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
if (kvm_arch_debug(&run->debug.arch)) {
ret = EXCP_DEBUG;
goto out;
}
/* re-enter, this exception was guest-internal */
ret = 1;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
break;
default:
DPRINTF("kvm_arch_handle_exit\n");
ret = kvm_arch_handle_exit(env, run);
break;
}
} while (ret > 0);
} while (ret == 0);
if (ret < 0) {
cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
vm_stop(VMSTOP_PANIC);
}
ret = EXCP_INTERRUPT;
out:
env->exit_request = 0;
cpu_single_env = NULL;
return ret;
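
The reworked loop above settles on one convention: a handler result of 0 means the exit was fully handled and KVM_RUN is re-entered, positive values are EXCP_* codes returned to the caller, and negative values are fatal. A schematic sketch of that dispatch style; the exit reasons and EXCP_* values here are illustrative placeholders, not QEMU's definitions:

    #include <stdio.h>

    /* illustrative placeholders, not QEMU's actual constants */
    enum { EXCP_INTERRUPT = 0x10001, EXCP_HLT = 0x10002 };
    enum { EXIT_IO, EXIT_IRQ_WINDOW_OPEN, EXIT_HLT };

    static int handle_exit(int reason)
    {
        switch (reason) {
        case EXIT_IO:               /* handled internally: resume the guest */
            return 0;
        case EXIT_IRQ_WINDOW_OPEN:  /* hand control back to the main loop */
            return EXCP_INTERRUPT;
        case EXIT_HLT:
            return EXCP_HLT;
        default:
            return -1;              /* fatal */
        }
    }

    int main(void)
    {
        int reasons[] = { EXIT_IO, EXIT_IO, EXIT_IRQ_WINDOW_OPEN };
        int i = 0, ret;

        do {
            ret = handle_exit(reasons[i++]);
        } while (ret == 0);         /* 0 == handled, re-enter the guest */

        printf("left run loop with ret=%#x\n", ret);
        return 0;
    }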

kvm.h

@ -102,7 +102,7 @@ void kvm_arch_post_run(CPUState *env, struct kvm_run *run);
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run);
int kvm_arch_process_irqchip_events(CPUState *env);
int kvm_arch_process_async_events(CPUState *env);
int kvm_arch_get_registers(CPUState *env);
@ -136,8 +136,6 @@ struct kvm_sw_breakpoint {
QTAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint);
int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info);
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
target_ulong pc);


@ -897,6 +897,9 @@ static void print_cpu_iter(QObject *obj, void *opaque)
monitor_printf(mon, " (halted)");
}
monitor_printf(mon, " thread_id=%" PRId64 " ",
qdict_get_int(cpu, "thread_id"));
monitor_printf(mon, "\n");
}
@ -941,6 +944,7 @@ static void do_info_cpus(Monitor *mon, QObject **ret_data)
#elif defined(TARGET_MIPS)
qdict_put(cpu, "PC", qint_from_int(env->active_tc.PC));
#endif
qdict_put(cpu, "thread_id", qint_from_int(env->thread_id));
qlist_append(cpu_list, cpu);
}
@ -2709,12 +2713,15 @@ static void do_inject_mce(Monitor *mon, const QDict *qdict)
uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
uint64_t addr = qdict_get_int(qdict, "addr");
uint64_t misc = qdict_get_int(qdict, "misc");
int broadcast = qdict_get_try_bool(qdict, "broadcast", 0);
int flags = MCE_INJECT_UNCOND_AO;
if (qdict_get_try_bool(qdict, "broadcast", 0)) {
flags |= MCE_INJECT_BROADCAST;
}
for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
if (cenv->cpu_index == cpu_index && cenv->mcg_cap) {
cpu_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc,
broadcast);
if (cenv->cpu_index == cpu_index) {
cpu_x86_inject_mce(mon, cenv, bank, status, mcg_status, addr, misc,
flags);
break;
}
}


@ -41,6 +41,7 @@
#ifdef CONFIG_LINUX
#include <sys/prctl.h>
#include <sys/syscall.h>
#endif
#ifdef CONFIG_EVENTFD
@ -382,3 +383,12 @@ int qemu_create_pidfile(const char *filename)
return 0;
}
int qemu_get_thread_id(void)
{
#if defined (__linux__)
return syscall(SYS_gettid);
#else
return getpid();
#endif
}
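
glibc offered no gettid() wrapper at the time, hence the raw syscall. A small sketch of why the TID, not the PID, identifies a vCPU thread: each thread gets its own TID while sharing the PID, and that TID is what 'info cpus' will report and what tools like taskset can target:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static void *worker(void *arg)
    {
        (void)arg;
        /* distinct TID per thread; the PID is shared */
        printf("worker: pid=%ld tid=%ld\n",
               (long)getpid(), (long)syscall(SYS_gettid));
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        printf("main:   pid=%ld tid=%ld\n",
               (long)getpid(), (long)syscall(SYS_gettid));
        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        return 0;
    }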


@ -266,3 +266,8 @@ int qemu_create_pidfile(const char *filename)
}
return 0;
}
int qemu_get_thread_id(void)
{
return GetCurrentThreadId();
}


@ -128,6 +128,7 @@ void qemu_vfree(void *ptr);
int qemu_madvise(void *addr, size_t len, int advice);
int qemu_create_pidfile(const char *filename);
int qemu_get_thread_id(void);
#ifdef _WIN32
static inline void qemu_timersub(const struct timeval *val1,


@ -18,6 +18,9 @@ typedef struct QEMUFile QEMUFile;
typedef struct QEMUBH QEMUBH;
typedef struct DeviceState DeviceState;
struct Monitor;
typedef struct Monitor Monitor;
/* we put basic includes here to avoid repeating them in device drivers */
#include <stdlib.h>
#include <stdio.h>
@ -327,9 +330,6 @@ void qemu_iovec_memset(QEMUIOVector *qiov, int c, size_t count);
void qemu_iovec_memset_skip(QEMUIOVector *qiov, int c, size_t count,
size_t skip);
struct Monitor;
typedef struct Monitor Monitor;
/* Convert a byte between binary and BCD. */
static inline uint8_t to_bcd(uint8_t val)
{


@ -1194,6 +1194,7 @@ Return a json-array. Each CPU is represented by a json-object, which contains:
"nip": PPC (json-int)
"pc" and "npc": sparc (json-int)
"PC": mips (json-int)
- "thread_id": ID of the underlying host thread (json-int)
Example:
@ -1205,12 +1206,14 @@ Example:
"current":true,
"halted":false,
"pc":3227107138
"thread_id":3134
},
{
"CPU":1,
"current":false,
"halted":true,
"pc":7108165
"thread_id":3135
}
]
}


@ -685,7 +685,7 @@ typedef struct CPUX86State {
uint64_t tsc;
uint64_t pat;
uint64_t mcg_status;
/* exception/interrupt handling */
int error_code;
@ -705,6 +705,8 @@ typedef struct CPUX86State {
CPU_COMMON
uint64_t pat;
/* processor features (e.g. for CPUID insn) */
uint32_t cpuid_level;
uint32_t cpuid_vendor1;
@ -741,7 +743,6 @@ typedef struct CPUX86State {
struct DeviceState *apic_state;
uint64_t mcg_cap;
uint64_t mcg_status;
uint64_t mcg_ctl;
uint64_t mce_banks[MCE_BANKS_DEF*4];
@ -985,4 +986,12 @@ static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
void do_cpu_init(CPUState *env);
void do_cpu_sipi(CPUState *env);
#define MCE_INJECT_BROADCAST 1
#define MCE_INJECT_UNCOND_AO 2
void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
uint64_t status, uint64_t mcg_status, uint64_t addr,
uint64_t misc, int flags);
#endif /* CPU_I386_H */


@ -847,7 +847,6 @@ int cpu_x86_register (CPUX86State *env, const char *cpu_model)
env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
env->cpuid_version |= def->stepping;
env->cpuid_features = def->features;
env->pat = 0x0007040600070406ULL;
env->cpuid_ext_features = def->ext_features;
env->cpuid_ext2_features = def->ext2_features;
env->cpuid_ext3_features = def->ext3_features;


@ -293,15 +293,12 @@ static inline void load_eflags(int eflags, int update_mask)
static inline int cpu_has_work(CPUState *env)
{
int work;
work = (env->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK);
work |= env->interrupt_request & CPU_INTERRUPT_NMI;
work |= env->interrupt_request & CPU_INTERRUPT_INIT;
work |= env->interrupt_request & CPU_INTERRUPT_SIPI;
return work;
return ((env->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
(env->interrupt_request & (CPU_INTERRUPT_NMI |
CPU_INTERRUPT_INIT |
CPU_INTERRUPT_SIPI |
CPU_INTERRUPT_MCE));
}
/* load efer and update the corresponding hflags. XXX: do consistency


@ -27,7 +27,10 @@
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"
#include "kvm_x86.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu.h"
#include "monitor.h"
#endif
//#define DEBUG_MMU
@ -96,13 +99,13 @@ void cpu_reset(CPUX86State *env)
env->mxcsr = 0x1f80;
env->pat = 0x0007040600070406ULL;
memset(env->dr, 0, sizeof(env->dr));
env->dr[6] = DR6_FIXED_1;
env->dr[7] = DR7_FIXED_1;
cpu_breakpoint_remove_all(env, BP_CPU);
cpu_watchpoint_remove_all(env, BP_CPU);
env->mcg_status = 0;
}
void cpu_x86_close(CPUX86State *env)
@ -1065,91 +1068,138 @@ static void breakpoint_handler(CPUState *env)
prev_debug_excp_handler(env);
}
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);
typedef struct MCEInjectionParams {
Monitor *mon;
CPUState *env;
int bank;
uint64_t status;
uint64_t mcg_status;
uint64_t addr;
uint64_t misc;
int flags;
} MCEInjectionParams;
static void qemu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
uint64_t mcg_status, uint64_t addr, uint64_t misc)
static void do_inject_x86_mce(void *data)
{
uint64_t mcg_cap = cenv->mcg_cap;
uint64_t *banks = cenv->mce_banks;
MCEInjectionParams *params = data;
CPUState *cenv = params->env;
uint64_t *banks = cenv->mce_banks + 4 * params->bank;
cpu_synchronize_state(cenv);
/*
* if MSR_MCG_CTL is not all 1s, the uncorrected error
* reporting is disabled
* If there is an MCE exception being processed, ignore this SRAO MCE
* unless unconditional injection was requested.
*/
if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
cenv->mcg_ctl != ~(uint64_t)0)
if (!(params->flags & MCE_INJECT_UNCOND_AO)
&& !(params->status & MCI_STATUS_AR)
&& (cenv->mcg_status & MCG_STATUS_MCIP)) {
return;
banks += 4 * bank;
/*
* if MSR_MCi_CTL is not all 1s, the uncorrected error
* reporting is disabled for the bank
*/
if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
return;
if (status & MCI_STATUS_UC) {
}
if (params->status & MCI_STATUS_UC) {
/*
* if MSR_MCG_CTL is not all 1s, the uncorrected error
* reporting is disabled
*/
if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
monitor_printf(params->mon,
"CPU %d: Uncorrected error reporting disabled\n",
cenv->cpu_index);
return;
}
/*
* if MSR_MCi_CTL is not all 1s, the uncorrected error
* reporting is disabled for the bank
*/
if (banks[0] != ~(uint64_t)0) {
monitor_printf(params->mon,
"CPU %d: Uncorrected error reporting disabled for"
" bank %d\n",
cenv->cpu_index, params->bank);
return;
}
if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
!(cenv->cr[4] & CR4_MCE_MASK)) {
fprintf(stderr, "injects mce exception while previous "
"one is in progress!\n");
monitor_printf(params->mon,
"CPU %d: Previous MCE still in progress, raising"
" triple fault\n",
cenv->cpu_index);
qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
qemu_system_reset_request();
return;
}
if (banks[1] & MCI_STATUS_VAL)
status |= MCI_STATUS_OVER;
banks[2] = addr;
banks[3] = misc;
cenv->mcg_status = mcg_status;
banks[1] = status;
if (banks[1] & MCI_STATUS_VAL) {
params->status |= MCI_STATUS_OVER;
}
banks[2] = params->addr;
banks[3] = params->misc;
cenv->mcg_status = params->mcg_status;
banks[1] = params->status;
cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
} else if (!(banks[1] & MCI_STATUS_VAL)
|| !(banks[1] & MCI_STATUS_UC)) {
if (banks[1] & MCI_STATUS_VAL)
status |= MCI_STATUS_OVER;
banks[2] = addr;
banks[3] = misc;
banks[1] = status;
} else
if (banks[1] & MCI_STATUS_VAL) {
params->status |= MCI_STATUS_OVER;
}
banks[2] = params->addr;
banks[3] = params->misc;
banks[1] = params->status;
} else {
banks[1] |= MCI_STATUS_OVER;
}
}
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
uint64_t mcg_status, uint64_t addr, uint64_t misc,
int broadcast)
void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
uint64_t status, uint64_t mcg_status, uint64_t addr,
uint64_t misc, int flags)
{
MCEInjectionParams params = {
.mon = mon,
.env = cenv,
.bank = bank,
.status = status,
.mcg_status = mcg_status,
.addr = addr,
.misc = misc,
.flags = flags,
};
unsigned bank_num = cenv->mcg_cap & 0xff;
CPUState *env;
int flag = 0;
if (bank >= bank_num || !(status & MCI_STATUS_VAL)) {
if (!cenv->mcg_cap) {
monitor_printf(mon, "MCE injection not supported\n");
return;
}
if (bank >= bank_num) {
monitor_printf(mon, "Invalid MCE bank number\n");
return;
}
if (!(status & MCI_STATUS_VAL)) {
monitor_printf(mon, "Invalid MCE status code\n");
return;
}
if ((flags & MCE_INJECT_BROADCAST)
&& !cpu_x86_support_mca_broadcast(cenv)) {
monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
return;
}
if (broadcast) {
if (!cpu_x86_support_mca_broadcast(cenv)) {
fprintf(stderr, "Current CPU does not support broadcast\n");
return;
}
}
if (kvm_enabled()) {
if (broadcast) {
flag |= MCE_BROADCAST;
}
kvm_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc, flag);
} else {
qemu_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc);
if (broadcast) {
for (env = first_cpu; env != NULL; env = env->next_cpu) {
if (cenv == env) {
continue;
}
qemu_inject_x86_mce(env, 1, MCI_STATUS_VAL | MCI_STATUS_UC,
MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0);
run_on_cpu(cenv, do_inject_x86_mce, &params);
if (flags & MCE_INJECT_BROADCAST) {
params.bank = 1;
params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
params.addr = 0;
params.misc = 0;
for (env = first_cpu; env != NULL; env = env->next_cpu) {
if (cenv == env) {
continue;
}
params.env = env;
run_on_cpu(cenv, do_inject_x86_mce, &params);
}
}
}
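
cpu_x86_inject_mce() now funnels the actual bank/MSR updates through run_on_cpu(), which queues a function plus a parameter block for a vCPU and blocks until the work has executed in that vCPU's thread, so the updates do not race with a vCPU inside KVM_RUN. A self-contained sketch of that hand-off pattern; run_on_vcpu() and WorkItem are simplified stand-ins, not QEMU's implementation:

    #include <pthread.h>
    #include <stdio.h>

    typedef struct WorkItem {
        void (*func)(void *);
        void *data;
        int done;
    } WorkItem;

    static WorkItem *pending;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

    static void *vcpu_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!pending) {
            pthread_cond_wait(&cond, &lock);
        }
        pending->func(pending->data);   /* runs in the vCPU thread */
        pending->done = 1;
        pending = NULL;
        pthread_cond_broadcast(&cond);  /* wake the requester */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    /* queue work for the vCPU thread and block until it has run there */
    static void run_on_vcpu(void (*func)(void *), void *data)
    {
        WorkItem wi = { func, data, 0 };
        pthread_mutex_lock(&lock);
        pending = &wi;
        pthread_cond_broadcast(&cond);
        while (!wi.done) {
            pthread_cond_wait(&cond, &lock);
        }
        pthread_mutex_unlock(&lock);
    }

    static void inject(void *data)
    {
        printf("injected: %s\n", (const char *)data);
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, vcpu_thread, NULL);
        run_on_vcpu(inject, "work ran on the vCPU thread");
        pthread_join(t, NULL);
        return 0;
    }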
@ -1157,15 +1207,16 @@ void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
static void mce_init(CPUX86State *cenv)
{
unsigned int bank, bank_num;
unsigned int bank;
if (((cenv->cpuid_version >> 8)&0xf) >= 6
&& (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
if (((cenv->cpuid_version >> 8) & 0xf) >= 6
&& (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
(CPUID_MCE | CPUID_MCA)) {
cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
cenv->mcg_ctl = ~(uint64_t)0;
bank_num = MCE_BANKS_DEF;
for (bank = 0; bank < bank_num; bank++)
cenv->mce_banks[bank*4] = ~(uint64_t)0;
for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
cenv->mce_banks[bank * 4] = ~(uint64_t)0;
}
}
}
@ -1231,8 +1282,11 @@ CPUX86State *cpu_x86_init(const char *cpu_model)
void do_cpu_init(CPUState *env)
{
int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
uint64_t pat = env->pat;
cpu_reset(env);
env->interrupt_request = sipi;
env->pat = pat;
apic_init_reset(env->apic_state);
env->halted = !cpu_is_bsp(env);
}


@ -28,7 +28,6 @@
#include "hw/pc.h"
#include "hw/apic.h"
#include "ioport.h"
#include "kvm_x86.h"
#ifdef CONFIG_KVM_PARA
#include <linux/kvm_para.h>
@ -172,9 +171,42 @@ static int get_para_features(CPUState *env)
#endif
return features;
}
#endif
#endif /* CONFIG_KVM_PARA */
typedef struct HWPoisonPage {
ram_addr_t ram_addr;
QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;
static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
QLIST_HEAD_INITIALIZER(hwpoison_page_list);
static void kvm_unpoison_all(void *param)
{
HWPoisonPage *page, *next_page;
QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
QLIST_REMOVE(page, list);
qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
qemu_free(page);
}
}
#ifdef KVM_CAP_MCE
static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
HWPoisonPage *page;
QLIST_FOREACH(page, &hwpoison_page_list, list) {
if (page->ram_addr == ram_addr) {
return;
}
}
page = qemu_malloc(sizeof(HWPoisonPage));
page->ram_addr = ram_addr;
QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
int *max_banks)
{
@ -188,117 +220,129 @@ static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
return -ENOSYS;
}
static int kvm_setup_mce(CPUState *env, uint64_t *mcg_cap)
static void kvm_mce_inject(CPUState *env, target_phys_addr_t paddr, int code)
{
return kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, mcg_cap);
}
uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
uint64_t mcg_status = MCG_STATUS_MCIP;
static int kvm_set_mce(CPUState *env, struct kvm_x86_mce *m)
{
return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, m);
}
static int kvm_get_msr(CPUState *env, struct kvm_msr_entry *msrs, int n)
{
struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
int r;
kmsrs->nmsrs = n;
memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
r = kvm_vcpu_ioctl(env, KVM_GET_MSRS, kmsrs);
memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
free(kmsrs);
return r;
}
/* FIXME: kill this and kvm_get_msr, use env->mcg_status instead */
static int kvm_mce_in_progress(CPUState *env)
{
struct kvm_msr_entry msr_mcg_status = {
.index = MSR_MCG_STATUS,
};
int r;
r = kvm_get_msr(env, &msr_mcg_status, 1);
if (r == -1 || r == 0) {
fprintf(stderr, "Failed to get MCE status\n");
return 0;
if (code == BUS_MCEERR_AR) {
status |= MCI_STATUS_AR | 0x134;
mcg_status |= MCG_STATUS_EIPV;
} else {
status |= 0xc0;
mcg_status |= MCG_STATUS_RIPV;
}
return !!(msr_mcg_status.data & MCG_STATUS_MCIP);
cpu_x86_inject_mce(NULL, env, 9, status, mcg_status, paddr,
(MCM_ADDR_PHYS << 6) | 0xc,
cpu_x86_support_mca_broadcast(env) ?
MCE_INJECT_BROADCAST : 0);
}
#endif /* KVM_CAP_MCE */
static void hardware_memory_error(void)
{
fprintf(stderr, "Hardware memory error!\n");
exit(1);
}
struct kvm_x86_mce_data
{
CPUState *env;
struct kvm_x86_mce *mce;
int abort_on_error;
};
static void kvm_do_inject_x86_mce(void *_data)
{
struct kvm_x86_mce_data *data = _data;
int r;
/* If there is an MCE exception being processed, ignore this SRAO MCE */
if ((data->env->mcg_cap & MCG_SER_P) &&
!(data->mce->status & MCI_STATUS_AR)) {
if (kvm_mce_in_progress(data->env)) {
return;
}
}
r = kvm_set_mce(data->env, data->mce);
if (r < 0) {
perror("kvm_set_mce FAILED");
if (data->abort_on_error) {
abort();
}
}
}
static void kvm_inject_x86_mce_on(CPUState *env, struct kvm_x86_mce *mce,
int flag)
{
struct kvm_x86_mce_data data = {
.env = env,
.mce = mce,
.abort_on_error = (flag & ABORT_ON_ERROR),
};
if (!env->mcg_cap) {
fprintf(stderr, "MCE support is not enabled!\n");
return;
}
run_on_cpu(env, kvm_do_inject_x86_mce, &data);
}
static void kvm_mce_broadcast_rest(CPUState *env);
#endif
void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
uint64_t mcg_status, uint64_t addr, uint64_t misc,
int flag)
int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
#ifdef KVM_CAP_MCE
struct kvm_x86_mce mce = {
.bank = bank,
.status = status,
.mcg_status = mcg_status,
.addr = addr,
.misc = misc,
};
ram_addr_t ram_addr;
target_phys_addr_t paddr;
if (flag & MCE_BROADCAST) {
kvm_mce_broadcast_rest(cenv);
if ((env->mcg_cap & MCG_SER_P) && addr
&& (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
if (qemu_ram_addr_from_host(addr, &ram_addr) ||
!kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr,
&paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!\n");
/* Hope we are lucky for AO MCE */
if (code == BUS_MCEERR_AO) {
return 0;
} else {
hardware_memory_error();
}
}
kvm_hwpoison_page_add(ram_addr);
kvm_mce_inject(env, paddr, code);
} else
#endif /* KVM_CAP_MCE */
{
if (code == BUS_MCEERR_AO) {
return 0;
} else if (code == BUS_MCEERR_AR) {
hardware_memory_error();
} else {
return 1;
}
}
return 0;
}
kvm_inject_x86_mce_on(cenv, &mce, flag);
#else
if (flag & ABORT_ON_ERROR) {
abort();
int kvm_arch_on_sigbus(int code, void *addr)
{
#ifdef KVM_CAP_MCE
if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
ram_addr_t ram_addr;
target_phys_addr_t paddr;
/* Hope we are lucky for AO MCE */
if (qemu_ram_addr_from_host(addr, &ram_addr) ||
!kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr,
&paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!: %p\n", addr);
return 0;
}
kvm_hwpoison_page_add(ram_addr);
kvm_mce_inject(first_cpu, paddr, code);
} else
#endif /* KVM_CAP_MCE */
{
if (code == BUS_MCEERR_AO) {
return 0;
} else if (code == BUS_MCEERR_AR) {
hardware_memory_error();
} else {
return 1;
}
}
#endif
return 0;
}
static int kvm_inject_mce_oldstyle(CPUState *env)
{
#ifdef KVM_CAP_MCE
if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
unsigned int bank, bank_num = env->mcg_cap & 0xff;
struct kvm_x86_mce mce;
env->exception_injected = -1;
/*
* There must be at least one bank in use if an MCE is pending.
* Find it and use its values for the event injection.
*/
for (bank = 0; bank < bank_num; bank++) {
if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
break;
}
}
assert(bank < bank_num);
mce.bank = bank;
mce.status = env->mce_banks[bank * 4 + 1];
mce.mcg_status = env->mcg_status;
mce.addr = env->mce_banks[bank * 4 + 2];
mce.misc = env->mce_banks[bank * 4 + 3];
return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, &mce);
}
#endif /* KVM_CAP_MCE */
return 0;
}
static void cpu_update_state(void *opaque, int running, int reason)
@ -426,20 +470,26 @@ int kvm_arch_init_vcpu(CPUState *env)
&& kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
uint64_t mcg_cap;
int banks;
int ret;
if (kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks)) {
perror("kvm_get_mce_cap_supported FAILED");
} else {
if (banks > MCE_BANKS_DEF)
banks = MCE_BANKS_DEF;
mcg_cap &= MCE_CAP_DEF;
mcg_cap |= banks;
if (kvm_setup_mce(env, &mcg_cap)) {
perror("kvm_setup_mce FAILED");
} else {
env->mcg_cap = mcg_cap;
}
ret = kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks);
if (ret < 0) {
fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
return ret;
}
if (banks > MCE_BANKS_DEF) {
banks = MCE_BANKS_DEF;
}
mcg_cap &= MCE_CAP_DEF;
mcg_cap |= banks;
ret = kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, &mcg_cap);
if (ret < 0) {
fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
return ret;
}
env->mcg_cap = mcg_cap;
}
#endif
@ -556,6 +606,7 @@ int kvm_arch_init(KVMState *s)
fprintf(stderr, "e820_add_entry() table is full\n");
return ret;
}
qemu_register_reset(kvm_unpoison_all, NULL);
return 0;
}
@ -810,6 +861,7 @@ static int kvm_put_msrs(CPUState *env, int level)
kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
if (has_msr_star) {
kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
}
@ -855,14 +907,10 @@ static int kvm_put_msrs(CPUState *env, int level)
if (env->mcg_cap) {
int i;
if (level == KVM_PUT_RESET_STATE) {
kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
} else if (level == KVM_PUT_FULL_STATE) {
kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
}
kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
}
}
#endif
@ -1066,6 +1114,7 @@ static int kvm_get_msrs(CPUState *env)
msrs[n++].index = MSR_IA32_SYSENTER_CS;
msrs[n++].index = MSR_IA32_SYSENTER_ESP;
msrs[n++].index = MSR_IA32_SYSENTER_EIP;
msrs[n++].index = MSR_PAT;
if (has_msr_star) {
msrs[n++].index = MSR_STAR;
}
@ -1121,6 +1170,9 @@ static int kvm_get_msrs(CPUState *env)
case MSR_IA32_SYSENTER_EIP:
env->sysenter_eip = msrs[i].data;
break;
case MSR_PAT:
env->pat = msrs[i].data;
break;
case MSR_STAR:
env->star = msrs[i].data;
break;
@ -1373,6 +1425,11 @@ int kvm_arch_put_registers(CPUState *env, int level)
if (ret < 0) {
return ret;
}
/* must be before kvm_put_msrs */
ret = kvm_inject_mce_oldstyle(env);
if (ret < 0) {
return ret;
}
ret = kvm_put_msrs(env, level);
if (ret < 0) {
return ret;
@ -1509,13 +1566,38 @@ void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
cpu_set_apic_base(env->apic_state, run->apic_base);
}
int kvm_arch_process_irqchip_events(CPUState *env)
int kvm_arch_process_async_events(CPUState *env)
{
if (env->interrupt_request & CPU_INTERRUPT_MCE) {
/* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
assert(env->mcg_cap);
env->interrupt_request &= ~CPU_INTERRUPT_MCE;
kvm_cpu_synchronize_state(env);
if (env->exception_injected == EXCP08_DBLE) {
/* this means triple fault */
qemu_system_reset_request();
env->exit_request = 1;
return 0;
}
env->exception_injected = EXCP12_MCHK;
env->has_error_code = 0;
env->halted = 0;
if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
env->mp_state = KVM_MP_STATE_RUNNABLE;
}
}
if (kvm_irqchip_in_kernel()) {
return 0;
}
if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI)) {
if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
(env->interrupt_request & CPU_INTERRUPT_NMI)) {
env->halted = 0;
}
if (env->interrupt_request & CPU_INTERRUPT_INIT) {
@ -1536,64 +1618,10 @@ static int kvm_handle_halt(CPUState *env)
(env->eflags & IF_MASK)) &&
!(env->interrupt_request & CPU_INTERRUPT_NMI)) {
env->halted = 1;
return 0;
return EXCP_HLT;
}
return 1;
}
static bool host_supports_vmx(void)
{
uint32_t ecx, unused;
host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
return ecx & CPUID_EXT_VMX;
}
#define VMX_INVALID_GUEST_STATE 0x80000021
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
uint64_t code;
int ret = 0;
switch (run->exit_reason) {
case KVM_EXIT_HLT:
DPRINTF("handle_hlt\n");
ret = kvm_handle_halt(env);
break;
case KVM_EXIT_SET_TPR:
ret = 1;
break;
case KVM_EXIT_FAIL_ENTRY:
code = run->fail_entry.hardware_entry_failure_reason;
fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
code);
if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
fprintf(stderr,
"\nIf you're runnning a guest on an Intel machine without "
"unrestricted mode\n"
"support, the failure can be most likely due to the guest "
"entering an invalid\n"
"state for Intel VT. For example, the guest maybe running "
"in big real mode\n"
"which is not supported on less recent Intel processors."
"\n\n");
}
ret = -1;
break;
case KVM_EXIT_EXCEPTION:
fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
run->ex.exception, run->ex.error_code);
ret = -1;
break;
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
ret = -1;
break;
}
return ret;
return 0;
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
@ -1703,31 +1731,31 @@ void kvm_arch_remove_all_hw_breakpoints(void)
static CPUWatchpoint hw_watchpoint;
int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
{
int handle = 0;
int ret = 0;
int n;
if (arch_info->exception == 1) {
if (arch_info->dr6 & (1 << 14)) {
if (cpu_single_env->singlestep_enabled) {
handle = 1;
ret = EXCP_DEBUG;
}
} else {
for (n = 0; n < 4; n++) {
if (arch_info->dr6 & (1 << n)) {
switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
case 0x0:
handle = 1;
ret = EXCP_DEBUG;
break;
case 0x1:
handle = 1;
ret = EXCP_DEBUG;
cpu_single_env->watchpoint_hit = &hw_watchpoint;
hw_watchpoint.vaddr = hw_breakpoint[n].addr;
hw_watchpoint.flags = BP_MEM_WRITE;
break;
case 0x3:
handle = 1;
ret = EXCP_DEBUG;
cpu_single_env->watchpoint_hit = &hw_watchpoint;
hw_watchpoint.vaddr = hw_breakpoint[n].addr;
hw_watchpoint.flags = BP_MEM_ACCESS;
@ -1737,17 +1765,18 @@ int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
}
}
} else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) {
handle = 1;
ret = EXCP_DEBUG;
}
if (!handle) {
if (ret == 0) {
cpu_synchronize_state(cpu_single_env);
assert(cpu_single_env->exception_injected == -1);
/* pass to guest */
cpu_single_env->exception_injected = arch_info->exception;
cpu_single_env->has_error_code = 0;
}
return handle;
return ret;
}
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
@ -1778,178 +1807,68 @@ void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */
static bool host_supports_vmx(void)
{
uint32_t ecx, unused;
host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
return ecx & CPUID_EXT_VMX;
}
#define VMX_INVALID_GUEST_STATE 0x80000021
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
uint64_t code;
int ret;
switch (run->exit_reason) {
case KVM_EXIT_HLT:
DPRINTF("handle_hlt\n");
ret = kvm_handle_halt(env);
break;
case KVM_EXIT_SET_TPR:
ret = 0;
break;
case KVM_EXIT_FAIL_ENTRY:
code = run->fail_entry.hardware_entry_failure_reason;
fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
code);
if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
fprintf(stderr,
"\nIf you're runnning a guest on an Intel machine without "
"unrestricted mode\n"
"support, the failure can be most likely due to the guest "
"entering an invalid\n"
"state for Intel VT. For example, the guest maybe running "
"in big real mode\n"
"which is not supported on less recent Intel processors."
"\n\n");
}
ret = -1;
break;
case KVM_EXIT_EXCEPTION:
fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
run->ex.exception, run->ex.error_code);
ret = -1;
break;
#ifdef KVM_CAP_SET_GUEST_DEBUG
case KVM_EXIT_DEBUG:
DPRINTF("kvm_exit_debug\n");
ret = kvm_handle_debug(&run->debug.arch);
break;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
ret = -1;
break;
}
return ret;
}
bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
return !(env->cr[0] & CR0_PE_MASK) ||
((env->segs[R_CS].selector & 3) != 3);
}
static void hardware_memory_error(void)
{
fprintf(stderr, "Hardware memory error!\n");
exit(1);
}
#ifdef KVM_CAP_MCE
static void kvm_mce_broadcast_rest(CPUState *env)
{
struct kvm_x86_mce mce = {
.bank = 1,
.status = MCI_STATUS_VAL | MCI_STATUS_UC,
.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
.addr = 0,
.misc = 0,
};
CPUState *cenv;
/* Broadcast MCA signal for processor version 06H_EH and above */
if (cpu_x86_support_mca_broadcast(env)) {
for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
if (cenv == env) {
continue;
}
kvm_inject_x86_mce_on(cenv, &mce, ABORT_ON_ERROR);
}
}
}
static void kvm_mce_inj_srar_dataload(CPUState *env, target_phys_addr_t paddr)
{
struct kvm_x86_mce mce = {
.bank = 9,
.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
| MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
| MCI_STATUS_AR | 0x134,
.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV,
.addr = paddr,
.misc = (MCM_ADDR_PHYS << 6) | 0xc,
};
int r;
r = kvm_set_mce(env, &mce);
if (r < 0) {
fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
abort();
}
kvm_mce_broadcast_rest(env);
}
static void kvm_mce_inj_srao_memscrub(CPUState *env, target_phys_addr_t paddr)
{
struct kvm_x86_mce mce = {
.bank = 9,
.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
| MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
| 0xc0,
.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
.addr = paddr,
.misc = (MCM_ADDR_PHYS << 6) | 0xc,
};
int r;
r = kvm_set_mce(env, &mce);
if (r < 0) {
fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
abort();
}
kvm_mce_broadcast_rest(env);
}
static void kvm_mce_inj_srao_memscrub2(CPUState *env, target_phys_addr_t paddr)
{
struct kvm_x86_mce mce = {
.bank = 9,
.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
| MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
| 0xc0,
.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
.addr = paddr,
.misc = (MCM_ADDR_PHYS << 6) | 0xc,
};
kvm_inject_x86_mce_on(env, &mce, ABORT_ON_ERROR);
kvm_mce_broadcast_rest(env);
}
#endif
int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
#if defined(KVM_CAP_MCE)
void *vaddr;
ram_addr_t ram_addr;
target_phys_addr_t paddr;
if ((env->mcg_cap & MCG_SER_P) && addr
&& (code == BUS_MCEERR_AR
|| code == BUS_MCEERR_AO)) {
vaddr = (void *)addr;
if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
!kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr, &paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!\n");
/* Hope we are lucky for AO MCE */
if (code == BUS_MCEERR_AO) {
return 0;
} else {
hardware_memory_error();
}
}
if (code == BUS_MCEERR_AR) {
/* Fake an Intel architectural Data Load SRAR UCR */
kvm_mce_inj_srar_dataload(env, paddr);
} else {
/*
* If there is an MCE exception being processed, ignore
* this SRAO MCE
*/
if (!kvm_mce_in_progress(env)) {
/* Fake an Intel architectural Memory scrubbing UCR */
kvm_mce_inj_srao_memscrub(env, paddr);
}
}
} else
#endif
{
if (code == BUS_MCEERR_AO) {
return 0;
} else if (code == BUS_MCEERR_AR) {
hardware_memory_error();
} else {
return 1;
}
}
return 0;
}
int kvm_arch_on_sigbus(int code, void *addr)
{
#if defined(KVM_CAP_MCE)
if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
void *vaddr;
ram_addr_t ram_addr;
target_phys_addr_t paddr;
/* Hope we are lucky for AO MCE */
vaddr = addr;
if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
!kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr, &paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!: %p\n", addr);
return 0;
}
kvm_mce_inj_srao_memscrub2(first_cpu, paddr);
} else
#endif
{
if (code == BUS_MCEERR_AO) {
return 0;
} else if (code == BUS_MCEERR_AR) {
hardware_memory_error();
} else {
return 1;
}
}
return 0;
}


@ -1,25 +0,0 @@
/*
* QEMU KVM support
*
* Copyright (C) 2009 Red Hat Inc.
* Copyright IBM, Corp. 2008
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef __KVM_X86_H__
#define __KVM_X86_H__
#define ABORT_ON_ERROR 0x01
#define MCE_BROADCAST 0x02
void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
uint64_t mcg_status, uint64_t addr, uint64_t misc,
int flag);
#endif


@ -491,6 +491,8 @@ static const VMStateDescription vmstate_cpu = {
VMSTATE_UINT64_V(xcr0, CPUState, 12),
VMSTATE_UINT64_V(xstate_bv, CPUState, 12),
VMSTATE_YMMH_REGS_VARS(ymmh_regs, CPUState, CPU_NB_REGS, 12),
VMSTATE_UINT64_V(pat, CPUState, 13),
VMSTATE_END_OF_LIST()
/* The above list is not sorted /wrt version numbers, watch out! */
},


@ -222,7 +222,7 @@ int kvmppc_set_interrupt(CPUState *env, int irq, int level)
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif
int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
int r;
unsigned irq;
@ -253,15 +253,15 @@ int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
/* We don't know if there are more interrupts pending after this. However,
* the guest will return to userspace in the course of handling this one
* anyways, so we will get a chance to deliver the rest. */
return 0;
}
void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
}
int kvm_arch_process_irqchip_events(CPUState *env)
int kvm_arch_process_async_events(CPUState *env)
{
return 0;
}
static int kvmppc_handle_halt(CPUState *env)
@ -271,7 +271,7 @@ static int kvmppc_handle_halt(CPUState *env)
env->exception_index = EXCP_HLT;
}
return 1;
return 0;
}
/* map dcr access to existing qemu dcr emulation */
@ -280,7 +280,7 @@ static int kvmppc_handle_dcr_read(CPUState *env, uint32_t dcrn, uint32_t *data)
if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
return 1;
return 0;
}
static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data)
@ -288,12 +288,12 @@ static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data)
if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
return 1;
return 0;
}
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
int ret = 0;
int ret;
switch (run->exit_reason) {
case KVM_EXIT_DCR:


@ -177,7 +177,7 @@ void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
}
int kvm_arch_process_irqchip_events(CPUState *env)
int kvm_arch_process_async_events(CPUState *env)
{
return 0;
}
@ -497,6 +497,11 @@ int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
break;
}
if (ret == 0) {
ret = EXCP_INTERRUPT;
} else if (ret > 0) {
ret = 0;
}
return ret;
}

vl.c

@ -1450,6 +1450,7 @@ static void main_loop(void)
}
if (qemu_reset_requested()) {
pause_all_vcpus();
cpu_synchronize_all_states();
qemu_system_reset();
resume_all_vcpus();
}