kvm: x86: Consolidate TCG and KVM MCE injection code

This switches KVM's MCE injection path to cpu_x86_inject_mce, both for
SIGBUS-triggered and monitor-initiated events. This means the MCA MSRs
are now prepared in the VCPUState for KVM as well.

For this purpose we have to drop the MSR writeback restrictions, which
is now safe because this patch removes every uncoordinated MSR
injection.
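
For illustration only (not part of this patch): a minimal, self-contained C
sketch of the data flow the consolidation relies on. The injection step merely
fills the MCA registers kept in the per-vCPU state, and a later MSR writeback
pass hands the whole block to the kernel, mirroring how cpu_x86_inject_mce()
and do_inject_x86_mce() fill CPUState and kvm_put_msrs() writes the MSRs. The
names vcpu_mca_state, inject_mce and writeback_mca_msrs below are simplified
stand-ins, not QEMU's actual types or functions.

    /*
     * Simplified model (not QEMU code): the injector only updates the
     * per-vCPU MCA register block; a separate writeback step hands the
     * whole block to the hypervisor in one pass.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define MCE_BANKS        10
    #define MCI_STATUS_VAL   (1ULL << 63)
    #define MCI_STATUS_UC    (1ULL << 61)
    #define MCG_STATUS_MCIP  (1ULL << 2)

    struct vcpu_mca_state {             /* stand-in for the MCA part of CPUState */
        uint64_t mcg_status;
        uint64_t banks[MCE_BANKS * 4];  /* CTL, STATUS, ADDR, MISC per bank */
    };

    /* Fill the in-memory registers only; no hypervisor call happens here. */
    static void inject_mce(struct vcpu_mca_state *s, int bank,
                           uint64_t status, uint64_t addr)
    {
        s->mcg_status |= MCG_STATUS_MCIP;
        s->banks[bank * 4 + 1] = status;    /* MCi_STATUS */
        s->banks[bank * 4 + 2] = addr;      /* MCi_ADDR */
    }

    /* Later, the whole block is written back in one pass (here: just printed). */
    static void writeback_mca_msrs(const struct vcpu_mca_state *s)
    {
        printf("MCG_STATUS = %#llx\n", (unsigned long long)s->mcg_status);
        for (int i = 0; i < MCE_BANKS * 4; i++) {
            if (s->banks[i]) {
                printf("MC bank MSR %d = %#llx\n", i,
                       (unsigned long long)s->banks[i]);
            }
        }
    }

    int main(void)
    {
        struct vcpu_mca_state vcpu = {0};

        inject_mce(&vcpu, 9, MCI_STATUS_VAL | MCI_STATUS_UC, 0x1234000);
        writeback_mca_msrs(&vcpu);
        return 0;
    }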

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
CC: Huang Ying <ying.huang@intel.com>
CC: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
CC: Jin Dongming <jin.dongming@np.css.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Commit c34d440a72 (parent d5bfda334a)
Authored by Jan Kiszka, 2011-03-02 08:56:16 +01:00; committed by Marcelo Tosatti
3 changed files with 37 additions and 260 deletions

target-i386/helper.c

@@ -27,7 +27,6 @@
 #include "exec-all.h"
 #include "qemu-common.h"
 #include "kvm.h"
-#include "kvm_x86.h"
 #ifndef CONFIG_USER_ONLY
 #include "sysemu.h"
 #include "monitor.h"
@@ -1167,7 +1166,6 @@ void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
     };
     unsigned bank_num = cenv->mcg_cap & 0xff;
     CPUState *env;
-    int flag = 0;
 
     if (!cenv->mcg_cap) {
         monitor_printf(mon, "MCE injection not supported\n");
@@ -1187,27 +1185,19 @@
         return;
     }
 
-    if (kvm_enabled()) {
-        if (flags & MCE_INJECT_BROADCAST) {
-            flag |= MCE_BROADCAST;
-        }
-
-        kvm_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc, flag);
-    } else {
-        run_on_cpu(cenv, do_inject_x86_mce, &params);
-        if (flags & MCE_INJECT_BROADCAST) {
-            params.bank = 1;
-            params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
-            params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
-            params.addr = 0;
-            params.misc = 0;
-            for (env = first_cpu; env != NULL; env = env->next_cpu) {
-                if (cenv == env) {
-                    continue;
-                }
-                params.env = env;
-                run_on_cpu(cenv, do_inject_x86_mce, &params);
-            }
-        }
-    }
+    run_on_cpu(cenv, do_inject_x86_mce, &params);
+    if (flags & MCE_INJECT_BROADCAST) {
+        params.bank = 1;
+        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
+        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
+        params.addr = 0;
+        params.misc = 0;
+        for (env = first_cpu; env != NULL; env = env->next_cpu) {
+            if (cenv == env) {
+                continue;
+            }
+            params.env = env;
+            run_on_cpu(cenv, do_inject_x86_mce, &params);
+        }
+    }
 }

target-i386/kvm.c

@@ -28,7 +28,6 @@
 #include "hw/pc.h"
 #include "hw/apic.h"
 #include "ioport.h"
-#include "kvm_x86.h"
 
 #ifdef CONFIG_KVM_PARA
 #include <linux/kvm_para.h>
@@ -193,164 +192,23 @@ static int kvm_setup_mce(CPUState *env, uint64_t *mcg_cap)
     return kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, mcg_cap);
 }
 
-static int kvm_set_mce(CPUState *env, struct kvm_x86_mce *m)
+static void kvm_mce_inject(CPUState *env, target_phys_addr_t paddr, int code)
 {
-    return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, m);
-}
-
-static int kvm_get_msr(CPUState *env, struct kvm_msr_entry *msrs, int n)
-{
-    struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
-    int r;
-
-    kmsrs->nmsrs = n;
-    memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
-    r = kvm_vcpu_ioctl(env, KVM_GET_MSRS, kmsrs);
-    memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
-    free(kmsrs);
-    return r;
-}
-
-/* FIXME: kill this and kvm_get_msr, use env->mcg_status instead */
-static int kvm_mce_in_progress(CPUState *env)
-{
-    struct kvm_msr_entry msr_mcg_status = {
-        .index = MSR_MCG_STATUS,
-    };
-    int r;
-
-    r = kvm_get_msr(env, &msr_mcg_status, 1);
-    if (r == -1 || r == 0) {
-        fprintf(stderr, "Failed to get MCE status\n");
-        return 0;
-    }
-    return !!(msr_mcg_status.data & MCG_STATUS_MCIP);
-}
-
-struct kvm_x86_mce_data
-{
-    CPUState *env;
-    struct kvm_x86_mce *mce;
-    int abort_on_error;
-};
-
-static void kvm_do_inject_x86_mce(void *_data)
-{
-    struct kvm_x86_mce_data *data = _data;
-    int r;
-
-    /* If there is an MCE exception being processed, ignore this SRAO MCE */
-    if ((data->env->mcg_cap & MCG_SER_P) &&
-        !(data->mce->status & MCI_STATUS_AR)) {
-        if (kvm_mce_in_progress(data->env)) {
-            return;
-        }
-    }
-
-    r = kvm_set_mce(data->env, data->mce);
-    if (r < 0) {
-        perror("kvm_set_mce FAILED");
-        if (data->abort_on_error) {
-            abort();
-        }
-    }
-}
-
-static void kvm_inject_x86_mce_on(CPUState *env, struct kvm_x86_mce *mce,
-                                  int flag)
-{
-    struct kvm_x86_mce_data data = {
-        .env = env,
-        .mce = mce,
-        .abort_on_error = (flag & ABORT_ON_ERROR),
-    };
-
-    if (!env->mcg_cap) {
-        fprintf(stderr, "MCE support is not enabled!\n");
-        return;
-    }
-
-    run_on_cpu(env, kvm_do_inject_x86_mce, &data);
-}
-
-static void kvm_mce_broadcast_rest(CPUState *env)
-{
-    struct kvm_x86_mce mce = {
-        .bank = 1,
-        .status = MCI_STATUS_VAL | MCI_STATUS_UC,
-        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
-        .addr = 0,
-        .misc = 0,
-    };
-    CPUState *cenv;
-
-    /* Broadcast MCA signal for processor version 06H_EH and above */
-    if (cpu_x86_support_mca_broadcast(env)) {
-        for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
-            if (cenv == env) {
-                continue;
-            }
-            kvm_inject_x86_mce_on(cenv, &mce, ABORT_ON_ERROR);
-        }
-    }
-}
-
-static void kvm_mce_inj_srar_dataload(CPUState *env, target_phys_addr_t paddr)
-{
-    struct kvm_x86_mce mce = {
-        .bank = 9,
-        .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
-                  | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
-                  | MCI_STATUS_AR | 0x134,
-        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV,
-        .addr = paddr,
-        .misc = (MCM_ADDR_PHYS << 6) | 0xc,
-    };
-    int r;
-
-    r = kvm_set_mce(env, &mce);
-    if (r < 0) {
-        fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
-        abort();
-    }
-    kvm_mce_broadcast_rest(env);
-}
-
-static void kvm_mce_inj_srao_memscrub(CPUState *env, target_phys_addr_t paddr)
-{
-    struct kvm_x86_mce mce = {
-        .bank = 9,
-        .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
-                  | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
-                  | 0xc0,
-        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
-        .addr = paddr,
-        .misc = (MCM_ADDR_PHYS << 6) | 0xc,
-    };
-    int r;
-
-    r = kvm_set_mce(env, &mce);
-    if (r < 0) {
-        fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
-        abort();
-    }
-    kvm_mce_broadcast_rest(env);
-}
-
-static void kvm_mce_inj_srao_memscrub2(CPUState *env, target_phys_addr_t paddr)
-{
-    struct kvm_x86_mce mce = {
-        .bank = 9,
-        .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
-                  | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
-                  | 0xc0,
-        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
-        .addr = paddr,
-        .misc = (MCM_ADDR_PHYS << 6) | 0xc,
-    };
-
-    kvm_inject_x86_mce_on(env, &mce, ABORT_ON_ERROR);
-    kvm_mce_broadcast_rest(env);
+    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
+                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
+    uint64_t mcg_status = MCG_STATUS_MCIP;
+
+    if (code == BUS_MCEERR_AR) {
+        status |= MCI_STATUS_AR | 0x134;
+        mcg_status |= MCG_STATUS_EIPV;
+    } else {
+        status |= 0xc0;
+        mcg_status |= MCG_STATUS_RIPV;
+    }
+    cpu_x86_inject_mce(NULL, env, 9, status, mcg_status, paddr,
+                       (MCM_ADDR_PHYS << 6) | 0xc,
+                       cpu_x86_support_mca_broadcast(env) ?
+                       MCE_INJECT_BROADCAST : 0);
 }
 
 #endif /* KVM_CAP_MCE */
@@ -363,16 +221,14 @@ static void hardware_memory_error(void)
 int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
 {
 #ifdef KVM_CAP_MCE
-    void *vaddr;
     ram_addr_t ram_addr;
     target_phys_addr_t paddr;
 
     if ((env->mcg_cap & MCG_SER_P) && addr
-        && (code == BUS_MCEERR_AR
-            || code == BUS_MCEERR_AO)) {
-        vaddr = (void *)addr;
-        if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
-            !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr, &paddr)) {
+        && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
+        if (qemu_ram_addr_from_host(addr, &ram_addr) ||
+            !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr,
+                                               &paddr)) {
             fprintf(stderr, "Hardware memory error for memory used by "
                     "QEMU itself instead of guest system!\n");
             /* Hope we are lucky for AO MCE */
@@ -382,20 +238,7 @@ int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
                 hardware_memory_error();
             }
         }
-
-        if (code == BUS_MCEERR_AR) {
-            /* Fake an Intel architectural Data Load SRAR UCR */
-            kvm_mce_inj_srar_dataload(env, paddr);
-        } else {
-            /*
-             * If there is an MCE excpetion being processed, ignore
-             * this SRAO MCE
-             */
-            if (!kvm_mce_in_progress(env)) {
-                /* Fake an Intel architectural Memory scrubbing UCR */
-                kvm_mce_inj_srao_memscrub(env, paddr);
-            }
-        }
+        kvm_mce_inject(env, paddr, code);
     } else
 #endif /* KVM_CAP_MCE */
     {
@@ -414,20 +257,18 @@ int kvm_arch_on_sigbus(int code, void *addr)
 {
 #ifdef KVM_CAP_MCE
     if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
-        void *vaddr;
         ram_addr_t ram_addr;
         target_phys_addr_t paddr;
 
         /* Hope we are lucky for AO MCE */
-        vaddr = addr;
-        if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
+        if (qemu_ram_addr_from_host(addr, &ram_addr) ||
             !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr,
                                                &paddr)) {
             fprintf(stderr, "Hardware memory error for memory used by "
                     "QEMU itself instead of guest system!: %p\n", addr);
             return 0;
         }
-        kvm_mce_inj_srao_memscrub2(first_cpu, paddr);
+        kvm_mce_inject(first_cpu, paddr, code);
     } else
 #endif /* KVM_CAP_MCE */
     {
@@ -442,31 +283,6 @@ int kvm_arch_on_sigbus(int code, void *addr)
     return 0;
 }
 
-void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
-                        uint64_t mcg_status, uint64_t addr, uint64_t misc,
-                        int flag)
-{
-#ifdef KVM_CAP_MCE
-    struct kvm_x86_mce mce = {
-        .bank = bank,
-        .status = status,
-        .mcg_status = mcg_status,
-        .addr = addr,
-        .misc = misc,
-    };
-
-    if (flag & MCE_BROADCAST) {
-        kvm_mce_broadcast_rest(cenv);
-    }
-
-    kvm_inject_x86_mce_on(cenv, &mce, flag);
-#else /* !KVM_CAP_MCE*/
-    if (flag & ABORT_ON_ERROR) {
-        abort();
-    }
-#endif /* !KVM_CAP_MCE*/
-}
-
 static int kvm_inject_mce_oldstyle(CPUState *env)
 {
 #ifdef KVM_CAP_MCE
@@ -1053,14 +869,10 @@ static int kvm_put_msrs(CPUState *env, int level)
     if (env->mcg_cap) {
         int i;
 
-        if (level == KVM_PUT_RESET_STATE) {
-            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
-        } else if (level == KVM_PUT_FULL_STATE) {
-            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
-            kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
-            for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
-                kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
-            }
+        kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
+        kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
+        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
+            kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
         }
     }
 #endif

target-i386/kvm_x86.h (file deleted)

@@ -1,25 +0,0 @@
-/*
- * QEMU KVM support
- *
- * Copyright (C) 2009 Red Hat Inc.
- * Copyright IBM, Corp. 2008
- *
- * Authors:
- *  Anthony Liguori   <aliguori@us.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef __KVM_X86_H__
-#define __KVM_X86_H__
-
-#define ABORT_ON_ERROR  0x01
-#define MCE_BROADCAST   0x02
-
-void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
-                        uint64_t mcg_status, uint64_t addr, uint64_t misc,
-                        int flag);
-
-#endif