QOM CPUState refactorings / X86CPU
* TLB invalidation optimizations
* X86CPU initialization cleanups
* Preparations for X86CPU hot-unplug

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQIcBAABAgAGBQJSuYNoAAoJEPou0S0+fgE/N4EQALP2tSLE7fhfgsQOSjgAf8zp
Yl85T6kWPxW1ey8jdwnGlUBeBlIJj7ZNW5rQM97rqi92tnJUWtpXsQgzSIxpGjJa
zHwKSNs9ViSz1YsM7qz4NzuMfPHIf8HzNsDL3GyLcygcnLQHbvnOB4OnrNGEofZM
mUU06RIbq+AjCgonHHMpsQpuN3HZtJKDs7hHHtzXSxYO9Fg1owIo8nZtSKd26E5m
q7Uo9koBNch4CzxBsjgk83hh8nmw1jFVkJLAsKdYeaQWFF8DJ+V8479wXuZLtIKY
KPVXzFf6O/P1kCZQCD53oQFbiVW8QKe1kb6Hfzen+6f9lSngsFkWJE92hpaSW0hv
wcOW7QXIJKOdLIpyeTxr3vhU8bR5Znm36z2UBnlbC/xQxd4VYFB6w/r23NTsZfwU
MbrSQBOdTT8R1aJdIVpMEOa2qZ+B5aavN22ZkChQ1iXg6en6Qgvuj+Pg1b1fMejp
qfI8QDpSC8agiSgws75XAAkU1lV4x2fuV5lK/D2AqiYfzVnm4A8uKj1J46+Lcfp1
Zv26Rv5YSH7ZrboqG1wEhynNm7LIvShApyNgPlHDcBczCGb7jspjLiX94x3KLqfC
IlwNA2Sksc5emv4omc+jN/x35pujgol5Ep/rNK45W7ATRKa/jJ94OvToZI31hT9a
/ezfiYdupHJ4u2RnBvEY
=FJv4
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'afaerber/tags/qom-cpu-for-anthony' into staging

QOM CPUState refactorings / X86CPU

* TLB invalidation optimizations
* X86CPU initialization cleanups
* Preparations for X86CPU hot-unplug

# gpg: Signature made Tue 24 Dec 2013 04:51:52 AM PST using RSA key ID 3E7E013F
# gpg: Good signature from "Andreas Färber <afaerber@suse.de>"
# gpg:                 aka "Andreas Färber <afaerber@suse.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 174F 0347 1BCC 221A 6175 6F96 FA2E D12D 3E7E 013F

* afaerber/tags/qom-cpu-for-anthony:
  target-i386: Cleanup 'foo=val' feature handling
  target-i386: Cleanup 'foo' feature handling
  target-i386: Convert 'check' and 'enforce' to static properties
  target-i386: Convert 'hv_spinlocks' to static property
  target-i386: Convert 'hv_vapic' to static property
  target-i386: Convert 'hv_relaxed' to static property
  cpu-exec: Optimize X86CPU usage in cpu_exec()
  target-i386: Move apic_state field from CPUX86State to X86CPU
  cputlb: Tidy memset() of arrays
  cputlb: Use memset() when flushing entries
commit 4cddc7f44f
cpu-exec.c | 14 changed lines
@@ -223,6 +223,9 @@ int cpu_exec(CPUArchState *env)
 #if !(defined(CONFIG_USER_ONLY) && \
 (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
 CPUClass *cc = CPU_GET_CLASS(cpu);
 #endif
+#ifdef TARGET_I386
+X86CPU *x86_cpu = X86_CPU(cpu);
+#endif
 int ret, interrupt_request;
 TranslationBlock *tb;
@@ -338,24 +341,24 @@ int cpu_exec(CPUArchState *env)
 #if !defined(CONFIG_USER_ONLY)
 if (interrupt_request & CPU_INTERRUPT_POLL) {
 cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
-apic_poll_irq(env->apic_state);
+apic_poll_irq(x86_cpu->apic_state);
 }
 #endif
 if (interrupt_request & CPU_INTERRUPT_INIT) {
 cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
 0);
-do_cpu_init(x86_env_get_cpu(env));
+do_cpu_init(x86_cpu);
 env->exception_index = EXCP_HALTED;
 cpu_loop_exit(env);
 } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
-do_cpu_sipi(x86_env_get_cpu(env));
+do_cpu_sipi(x86_cpu);
 } else if (env->hflags2 & HF2_GIF_MASK) {
 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
 !(env->hflags & HF_SMM_MASK)) {
 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
 0);
 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
-do_smm_enter(x86_env_get_cpu(env));
+do_smm_enter(x86_cpu);
 next_tb = 0;
 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
 !(env->hflags2 & HF2_NMI_MASK)) {
@@ -686,6 +689,9 @@ int cpu_exec(CPUArchState *env)
 #if !(defined(CONFIG_USER_ONLY) && \
 (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
 cc = CPU_GET_CLASS(cpu);
 #endif
+#ifdef TARGET_I386
+x86_cpu = X86_CPU(cpu);
+#endif
 }
 } /* for(;;) */
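The hunks above hoist the X86_CPU() downcast out of the interrupt-handling path, so it is performed once when cpu_exec() is entered instead of via x86_env_get_cpu() on every use. Below is a minimal standalone sketch of the same idea; it is not QEMU code, and checked_downcast(), BaseCPU and DerivedCPU are invented stand-ins for the QOM cast macro and CPU types.

/* Standalone sketch (not QEMU code): cache a checked downcast outside a hot
 * loop instead of re-deriving it on every iteration. All names are made up. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

typedef struct BaseCPU { const char *type; } BaseCPU;
typedef struct DerivedCPU { BaseCPU parent; int apic_pending; } DerivedCPU;

/* Stand-in for a dynamic cast macro such as X86_CPU(): it performs a
 * (cheap but not free) runtime type check on every call. */
static DerivedCPU *checked_downcast(BaseCPU *cpu)
{
    assert(strcmp(cpu->type, "derived-cpu") == 0);
    return (DerivedCPU *)cpu;
}

static void run_loop(BaseCPU *cpu, int iterations)
{
    DerivedCPU *dcpu = checked_downcast(cpu);   /* hoisted: done once */

    for (int i = 0; i < iterations; i++) {
        /* before the change this would be checked_downcast(cpu)->... */
        dcpu->apic_pending += 1;
    }
}

int main(void)
{
    DerivedCPU cpu = { .parent = { .type = "derived-cpu" } };
    run_loop(&cpu.parent, 1000);
    printf("apic_pending = %d\n", cpu.apic_pending);
    return 0;
}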
cpus.c | 5 changed lines
@@ -1458,12 +1458,11 @@ void qmp_inject_nmi(Error **errp)

 CPU_FOREACH(cs) {
 X86CPU *cpu = X86_CPU(cs);
-CPUX86State *env = &cpu->env;

-if (!env->apic_state) {
+if (!cpu->apic_state) {
 cpu_interrupt(cs, CPU_INTERRUPT_NMI);
 } else {
-apic_deliver_nmi(env->apic_state);
+apic_deliver_nmi(cpu->apic_state);
 }
 }
 #elif defined(TARGET_S390X)
cputlb.c | 21 changed lines
@@ -33,13 +33,6 @@
 /* statistics */
 int tlb_flush_count;

-static const CPUTLBEntry s_cputlb_empty_entry = {
-.addr_read = -1,
-.addr_write = -1,
-.addr_code = -1,
-.addend = -1,
-};
-
 /* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
@@ -55,7 +48,6 @@ static const CPUTLBEntry s_cputlb_empty_entry = {
 void tlb_flush(CPUArchState *env, int flush_global)
 {
 CPUState *cpu = ENV_GET_CPU(env);
-int i;

 #if defined(DEBUG_TLB)
 printf("tlb_flush:\n");
@@ -64,15 +56,8 @@ void tlb_flush(CPUArchState *env, int flush_global)
 links while we are modifying them */
 cpu->current_tb = NULL;

-for (i = 0; i < CPU_TLB_SIZE; i++) {
-int mmu_idx;
-
-for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
-env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
-}
-}
-
-memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
+memset(env->tlb_table, -1, sizeof(env->tlb_table));
+memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));

 env->tlb_flush_addr = -1;
 env->tlb_flush_mask = 0;
@@ -87,7 +72,7 @@ static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
 addr == (tlb_entry->addr_code &
 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-*tlb_entry = s_cputlb_empty_entry;
+memset(tlb_entry, -1, sizeof(*tlb_entry));
 }
 }
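The memset(-1) flush above is valid because a CPUTLBEntry whose fields are all -1 is exactly the old s_cputlb_empty_entry, so filling the whole table with 0xFF bytes matches assigning the empty entry in a loop. The standalone sketch below illustrates that equivalence; FakeTLBEntry only mimics the shape of CPUTLBEntry and the table dimensions are arbitrary.

/* Standalone sketch (not QEMU code): filling a struct of integer fields with
 * 0xFF bytes yields the same bit pattern as assigning -1 to each field. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct FakeTLBEntry {
    uintptr_t addr_read;
    uintptr_t addr_write;
    uintptr_t addr_code;
    uintptr_t addend;
} FakeTLBEntry;

int main(void)
{
    FakeTLBEntry by_fields = {
        .addr_read = (uintptr_t)-1,
        .addr_write = (uintptr_t)-1,
        .addr_code = (uintptr_t)-1,
        .addend = (uintptr_t)-1,
    };
    FakeTLBEntry table[8][4];   /* small stand-in for tlb_table[mmu_idx][i] */

    /* One memset over the whole 2D array replaces the per-entry loop. */
    memset(table, -1, sizeof(table));

    for (size_t i = 0; i < 8; i++) {
        for (size_t j = 0; j < 4; j++) {
            assert(memcmp(&table[i][j], &by_fields, sizeof(by_fields)) == 0);
        }
    }
    printf("memset(-1) matches the field-by-field empty entry\n");
    return 0;
}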
hw/i386/kvmvapic.c

@@ -366,7 +366,7 @@ static int vapic_enable(VAPICROMState *s, X86CPU *cpu)
 (((hwaddr)cpu_number) << VAPIC_CPU_SHIFT);
 cpu_physical_memory_rw(vapic_paddr + offsetof(VAPICState, enabled),
 (void *)&enabled, sizeof(enabled), 1);
-apic_enable_vapic(cpu->env.apic_state, vapic_paddr);
+apic_enable_vapic(cpu->apic_state, vapic_paddr);

 s->state = VAPIC_ACTIVE;

@@ -496,12 +496,10 @@ static void vapic_enable_tpr_reporting(bool enable)
 };
 CPUState *cs;
 X86CPU *cpu;
-CPUX86State *env;

 CPU_FOREACH(cs) {
 cpu = X86_CPU(cs);
-env = &cpu->env;
-info.apic = env->apic_state;
+info.apic = cpu->apic_state;
 run_on_cpu(cs, vapic_do_enable_tpr_reporting, &info);
 }
 }
@@ -700,7 +698,7 @@ static void vapic_write(void *opaque, hwaddr addr, uint64_t data,
 default:
 case 4:
 if (!kvm_irqchip_in_kernel()) {
-apic_poll_irq(env->apic_state);
+apic_poll_irq(cpu->apic_state);
 }
 break;
 }
hw/i386/pc.c | 17 changed lines
@@ -171,14 +171,15 @@ void cpu_smm_update(CPUX86State *env)
 /* IRQ handling */
 int cpu_get_pic_interrupt(CPUX86State *env)
 {
+X86CPU *cpu = x86_env_get_cpu(env);
 int intno;

-intno = apic_get_interrupt(env->apic_state);
+intno = apic_get_interrupt(cpu->apic_state);
 if (intno >= 0) {
 return intno;
 }
 /* read the irq from the PIC */
-if (!apic_accept_pic_intr(env->apic_state)) {
+if (!apic_accept_pic_intr(cpu->apic_state)) {
 return -1;
 }

@@ -190,15 +191,13 @@ static void pic_irq_request(void *opaque, int irq, int level)
 {
 CPUState *cs = first_cpu;
 X86CPU *cpu = X86_CPU(cs);
-CPUX86State *env = &cpu->env;

 DPRINTF("pic_irqs: %s irq %d\n", level? "raise" : "lower", irq);
-if (env->apic_state) {
+if (cpu->apic_state) {
 CPU_FOREACH(cs) {
 cpu = X86_CPU(cs);
-env = &cpu->env;
-if (apic_accept_pic_intr(env->apic_state)) {
-apic_deliver_pic_intr(env->apic_state, level);
+if (apic_accept_pic_intr(cpu->apic_state)) {
+apic_deliver_pic_intr(cpu->apic_state, level);
 }
 }
 } else {
@@ -913,7 +912,7 @@ DeviceState *cpu_get_current_apic(void)
 {
 if (current_cpu) {
 X86CPU *cpu = X86_CPU(current_cpu);
-return cpu->env.apic_state;
+return cpu->apic_state;
 } else {
 return NULL;
 }
@@ -1007,7 +1006,7 @@ void pc_cpus_init(const char *cpu_model, DeviceState *icc_bridge)
 }

 /* map APIC MMIO area if CPU has APIC */
-if (cpu && cpu->env.apic_state) {
+if (cpu && cpu->apic_state) {
 /* XXX: what if the base changes? */
 sysbus_mmio_map_overlap(SYS_BUS_DEVICE(icc_bridge), 0,
 APIC_DEFAULT_ADDRESS, 0x1000);
target-i386/cpu-qom.h

@@ -69,6 +69,8 @@ typedef struct X86CPU {
 bool hyperv_vapic;
 bool hyperv_relaxed_timing;
 int hyperv_spinlock_attempts;
+bool check_cpuid;
+bool enforce_cpuid;

 /* if true the CPUID code directly forward host cache leaves to the guest */
 bool cache_info_passthrough;
@@ -82,6 +84,10 @@ typedef struct X86CPU {
 * capabilities) directly to the guest.
 */
 bool enable_pmu;
+
+/* in order to simplify APIC support, we leave this pointer to the
+   user */
+struct DeviceState *apic_state;
 } X86CPU;

 static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
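With the two hunks above, the APIC device pointer lives on the X86CPU QOM object rather than on the CPUX86State embedded inside it; code that only has an env pointer reaches it through x86_env_get_cpu(), which recovers the containing object from the member address. The standalone sketch below mirrors that layout with invented FakeX86CPU/FakeCPUState types; it is not QEMU code.

/* Standalone sketch (not QEMU code) of moving a device pointer from the inner
 * state struct to the outer object and recovering the outer pointer. */
#include <stddef.h>
#include <stdio.h>

typedef struct FakeAPIC { int id; } FakeAPIC;
typedef struct FakeCPUState { int eip; } FakeCPUState;     /* the "env" */

typedef struct FakeX86CPU {
    FakeCPUState env;
    FakeAPIC *apic_state;      /* moved here from FakeCPUState */
} FakeX86CPU;

/* Analogous to x86_env_get_cpu(): env is embedded in the CPU object,
 * so the outer pointer can be recovered from the member address. */
static FakeX86CPU *fake_env_get_cpu(FakeCPUState *env)
{
    return (FakeX86CPU *)((char *)env - offsetof(FakeX86CPU, env));
}

static void poll_apic(FakeCPUState *env)
{
    FakeX86CPU *cpu = fake_env_get_cpu(env);
    printf("polling APIC %d\n", cpu->apic_state->id);   /* was env->apic_state */
}

int main(void)
{
    FakeAPIC apic = { .id = 0 };
    FakeX86CPU cpu = { .env = { .eip = 0 }, .apic_state = &apic };
    poll_apic(&cpu.env);
    return 0;
}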
target-i386/cpu.c

@@ -354,9 +354,6 @@ typedef struct model_features_t {
 FeatureWord feat_word;
 } model_features_t;

-int check_cpuid = 0;
-int enforce_cpuid = 0;
-
 static uint32_t kvm_default_features = (1 << KVM_FEATURE_CLOCKSOURCE) |
 (1 << KVM_FEATURE_NOP_IO_DELAY) |
 (1 << KVM_FEATURE_CLOCKSOURCE2) |
@@ -1596,6 +1593,46 @@ static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
 error_propagate(errp, err);
 }

+static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
+const char *name, Error **errp)
+{
+X86CPU *cpu = X86_CPU(obj);
+int64_t value = cpu->hyperv_spinlock_attempts;
+
+visit_type_int(v, &value, name, errp);
+}
+
+static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
+const char *name, Error **errp)
+{
+const int64_t min = 0xFFF;
+const int64_t max = UINT_MAX;
+X86CPU *cpu = X86_CPU(obj);
+Error *err = NULL;
+int64_t value;
+
+visit_type_int(v, &value, name, &err);
+if (err) {
+error_propagate(errp, err);
+return;
+}
+
+if (value < min || value > max) {
+error_setg(errp, "Property %s.%s doesn't take value %" PRId64
+" (minimum: %" PRId64 ", maximum: %" PRId64 ")",
+object_get_typename(obj), name ? name : "null",
+value, min, max);
+return;
+}
+cpu->hyperv_spinlock_attempts = value;
+}
+
+static PropertyInfo qdev_prop_spinlocks = {
+.name = "int",
+.get = x86_get_hv_spinlocks,
+.set = x86_set_hv_spinlocks,
+};
+
 static int cpu_x86_find_by_name(X86CPU *cpu, x86_def_t *x86_cpu_def,
 const char *name)
 {
@@ -1669,15 +1706,7 @@ static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp)
 } else if ((val = strchr(featurestr, '='))) {
 *val = 0; val++;
 feat2prop(featurestr);
-if (!strcmp(featurestr, "family")) {
-object_property_parse(OBJECT(cpu), val, featurestr, errp);
-} else if (!strcmp(featurestr, "model")) {
-object_property_parse(OBJECT(cpu), val, featurestr, errp);
-} else if (!strcmp(featurestr, "stepping")) {
-object_property_parse(OBJECT(cpu), val, featurestr, errp);
-} else if (!strcmp(featurestr, "level")) {
-object_property_parse(OBJECT(cpu), val, featurestr, errp);
-} else if (!strcmp(featurestr, "xlevel")) {
+if (!strcmp(featurestr, "xlevel")) {
 char *err;
 char num[32];

@@ -1693,10 +1722,6 @@ static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp)
 }
 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
 object_property_parse(OBJECT(cpu), num, featurestr, errp);
-} else if (!strcmp(featurestr, "vendor")) {
-object_property_parse(OBJECT(cpu), val, featurestr, errp);
-} else if (!strcmp(featurestr, "model-id")) {
-object_property_parse(OBJECT(cpu), val, featurestr, errp);
 } else if (!strcmp(featurestr, "tsc-freq")) {
 int64_t tsc_freq;
 char *err;
@@ -1713,6 +1738,7 @@ static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp)
 } else if (!strcmp(featurestr, "hv-spinlocks")) {
 char *err;
 const int min = 0xFFF;
+char num[32];
 numvalue = strtoul(val, &err, 0);
 if (!*val || *err) {
 error_setg(errp, "bad numerical value %s", val);
@@ -1724,23 +1750,14 @@ static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp)
 min);
 numvalue = min;
 }
-cpu->hyperv_spinlock_attempts = numvalue;
+snprintf(num, sizeof(num), "%" PRId32, numvalue);
+object_property_parse(OBJECT(cpu), num, featurestr, errp);
 } else {
-error_setg(errp, "unrecognized feature %s", featurestr);
-goto out;
+object_property_parse(OBJECT(cpu), val, featurestr, errp);
 }
-} else if (!strcmp(featurestr, "check")) {
-check_cpuid = 1;
-} else if (!strcmp(featurestr, "enforce")) {
-check_cpuid = enforce_cpuid = 1;
-} else if (!strcmp(featurestr, "hv_relaxed")) {
-cpu->hyperv_relaxed_timing = true;
-} else if (!strcmp(featurestr, "hv_vapic")) {
-cpu->hyperv_vapic = true;
 } else {
-error_setg(errp, "feature string `%s' not in format (+feature|"
-"-feature|feature=xyz)", featurestr);
-goto out;
+feat2prop(featurestr);
+object_property_parse(OBJECT(cpu), "on", featurestr, errp);
 }
 if (error_is_set(errp)) {
 goto out;
@@ -2449,7 +2466,7 @@ static void x86_cpu_reset(CPUState *s)
 #if !defined(CONFIG_USER_ONLY)
 /* We hard-wire the BSP to the first CPU. */
 if (s->cpu_index == 0) {
-apic_designate_bsp(env->apic_state);
+apic_designate_bsp(cpu->apic_state);
 }

 s->halted = !cpu_is_bsp(cpu);
@@ -2459,7 +2476,7 @@ static void x86_cpu_reset(CPUState *s)
 #ifndef CONFIG_USER_ONLY
 bool cpu_is_bsp(X86CPU *cpu)
 {
-return cpu_get_apic_base(cpu->env.apic_state) & MSR_IA32_APICBASE_BSP;
+return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
 }

 /* TODO: remove me, when reset over QOM tree is implemented */
@@ -2500,31 +2517,29 @@ static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
 apic_type = "xen-apic";
 }

-env->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
-if (env->apic_state == NULL) {
+cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
+if (cpu->apic_state == NULL) {
 error_setg(errp, "APIC device '%s' could not be created", apic_type);
 return;
 }

 object_property_add_child(OBJECT(cpu), "apic",
-OBJECT(env->apic_state), NULL);
-qdev_prop_set_uint8(env->apic_state, "id", env->cpuid_apic_id);
+OBJECT(cpu->apic_state), NULL);
+qdev_prop_set_uint8(cpu->apic_state, "id", env->cpuid_apic_id);
 /* TODO: convert to link<> */
-apic = APIC_COMMON(env->apic_state);
+apic = APIC_COMMON(cpu->apic_state);
 apic->cpu = cpu;
 }

 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
 {
-CPUX86State *env = &cpu->env;
-
-if (env->apic_state == NULL) {
+if (cpu->apic_state == NULL) {
 return;
 }

-if (qdev_init(env->apic_state)) {
+if (qdev_init(cpu->apic_state)) {
 error_setg(errp, "APIC device '%s' could not be initialized",
-object_get_typename(OBJECT(env->apic_state)));
+object_get_typename(OBJECT(cpu->apic_state)));
 return;
 }
 }
@@ -2568,8 +2583,8 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
 env->features[FEAT_8000_0001_ECX] &= TCG_EXT3_FEATURES;
 env->features[FEAT_SVM] &= TCG_SVM_FEATURES;
 } else {
-if (check_cpuid && kvm_check_features_against_host(cpu)
-&& enforce_cpuid) {
+if ((cpu->check_cpuid || cpu->enforce_cpuid)
+&& kvm_check_features_against_host(cpu) && cpu->enforce_cpuid) {
 error_setg(&local_err,
 "Host's CPU doesn't support requested features");
 goto out;
@@ -2728,6 +2743,11 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)

 static Property x86_cpu_properties[] = {
 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
+{ .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
+DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
+DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
+DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
+DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
 DEFINE_PROP_END_OF_LIST()
 };
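The net effect of the cpu.c changes above is that 'check', 'enforce', 'hv-spinlocks', 'hv-relaxed' and 'hv-vapic' become per-object properties declared in x86_cpu_properties, and the feature-string parser funnels everything through object_property_parse() instead of a hand-written if/else chain and file-scope globals. The sketch below imitates only the shape of that dispatch with invented names; it is not the qdev/QOM implementation.

/* Standalone sketch (not QEMU code): feature flags become per-object fields
 * set through a generic property table rather than globals. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct FakeCPU {
    bool check_cpuid;            /* was: int check_cpuid global */
    bool enforce_cpuid;          /* was: int enforce_cpuid global */
    int hyperv_spinlock_attempts;
} FakeCPU;

typedef struct FakeProperty {
    const char *name;
    void (*set)(FakeCPU *cpu, const char *value);
} FakeProperty;

static void set_check(FakeCPU *cpu, const char *v)   { cpu->check_cpuid = !strcmp(v, "on"); }
static void set_enforce(FakeCPU *cpu, const char *v) { cpu->enforce_cpuid = !strcmp(v, "on"); }
static void set_spinlocks(FakeCPU *cpu, const char *v)
{
    cpu->hyperv_spinlock_attempts = (int)strtol(v, NULL, 0);
}

static const FakeProperty props[] = {
    { "check", set_check },
    { "enforce", set_enforce },
    { "hv-spinlocks", set_spinlocks },
};

/* Generic parser: every "name" or "name=val" item goes through the table. */
static void parse_feature(FakeCPU *cpu, const char *name, const char *value)
{
    for (size_t i = 0; i < sizeof(props) / sizeof(props[0]); i++) {
        if (!strcmp(props[i].name, name)) {
            props[i].set(cpu, value);
            return;
        }
    }
    fprintf(stderr, "unknown property %s\n", name);
}

int main(void)
{
    FakeCPU cpu = {0};
    parse_feature(&cpu, "check", "on");              /* like "-cpu ...,check" */
    parse_feature(&cpu, "hv-spinlocks", "0x1fff");   /* like "-cpu ...,hv-spinlocks=0x1fff" */
    printf("check=%d spinlocks=0x%x\n", cpu.check_cpuid,
           (unsigned)cpu.hyperv_spinlock_attempts);
    return 0;
}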
target-i386/cpu.h

@@ -895,10 +895,6 @@ typedef struct CPUX86State {
 int tsc_khz;
 void *kvm_xsave_buf;

-/* in order to simplify APIC support, we leave this pointer to the
-   user */
-struct DeviceState *apic_state;
-
 uint64_t mcg_cap;
 uint64_t mcg_ctl;
 uint64_t mce_banks[MCE_BANKS_DEF*4];
target-i386/helper.c

@@ -1247,14 +1247,16 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,

 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
 {
+X86CPU *cpu = x86_env_get_cpu(env);
+
 if (kvm_enabled()) {
 env->tpr_access_type = access;

-cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_TPR);
+cpu_interrupt(CPU(cpu), CPU_INTERRUPT_TPR);
 } else {
 cpu_restore_state(env, env->mem_io_pc);

-apic_handle_tpr_access_report(env->apic_state, env->eip, access);
+apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
 }
 }
 #endif /* !CONFIG_USER_ONLY */
@@ -1301,14 +1303,12 @@ void do_cpu_init(X86CPU *cpu)
 cpu_reset(cs);
 cs->interrupt_request = sipi;
 env->pat = pat;
-apic_init_reset(env->apic_state);
+apic_init_reset(cpu->apic_state);
 }

 void do_cpu_sipi(X86CPU *cpu)
 {
-CPUX86State *env = &cpu->env;
-
-apic_sipi(env->apic_state);
+apic_sipi(cpu->apic_state);
 }
 #else
 void do_cpu_init(X86CPU *cpu)
target-i386/kvm.c

@@ -1069,8 +1069,8 @@ static int kvm_put_sregs(X86CPU *cpu)
 sregs.cr3 = env->cr[3];
 sregs.cr4 = env->cr[4];

-sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
-sregs.apic_base = cpu_get_apic_base(env->apic_state);
+sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
+sregs.apic_base = cpu_get_apic_base(cpu->apic_state);

 sregs.efer = env->efer;

@@ -1619,8 +1619,7 @@ static int kvm_get_mp_state(X86CPU *cpu)

 static int kvm_get_apic(X86CPU *cpu)
 {
-CPUX86State *env = &cpu->env;
-DeviceState *apic = env->apic_state;
+DeviceState *apic = cpu->apic_state;
 struct kvm_lapic_state kapic;
 int ret;

@@ -1637,8 +1636,7 @@ static int kvm_get_apic(X86CPU *cpu)

 static int kvm_put_apic(X86CPU *cpu)
 {
-CPUX86State *env = &cpu->env;
-DeviceState *apic = env->apic_state;
+DeviceState *apic = cpu->apic_state;
 struct kvm_lapic_state kapic;

 if (apic && kvm_irqchip_in_kernel()) {
@@ -1962,7 +1960,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
 }

 DPRINTF("setting tpr\n");
-run->cr8 = cpu_get_apic_tpr(env->apic_state);
+run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
 }
 }

@@ -1976,8 +1974,8 @@ void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
 } else {
 env->eflags &= ~IF_MASK;
 }
-cpu_set_apic_tpr(env->apic_state, run->cr8);
-cpu_set_apic_base(env->apic_state, run->apic_base);
+cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
+cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
 }

 int kvm_arch_process_async_events(CPUState *cs)
@@ -2014,7 +2012,7 @@ int kvm_arch_process_async_events(CPUState *cs)

 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
-apic_poll_irq(env->apic_state);
+apic_poll_irq(cpu->apic_state);
 }
 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
 (env->eflags & IF_MASK)) ||
@@ -2032,7 +2030,7 @@ int kvm_arch_process_async_events(CPUState *cs)
 if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
 cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
 kvm_cpu_synchronize_state(cs);
-apic_handle_tpr_access_report(env->apic_state, env->eip,
+apic_handle_tpr_access_report(cpu->apic_state, env->eip,
 env->tpr_access_type);
 }

@@ -2056,11 +2054,10 @@ static int kvm_handle_halt(X86CPU *cpu)

 static int kvm_handle_tpr_access(X86CPU *cpu)
 {
-CPUX86State *env = &cpu->env;
 CPUState *cs = CPU(cpu);
 struct kvm_run *run = cs->kvm_run;

-apic_handle_tpr_access_report(env->apic_state, run->tpr_access.rip,
+apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
 run->tpr_access.is_write ? TPR_ACCESS_WRITE
 : TPR_ACCESS_READ);
 return 1;
target-i386/misc_helper.c

@@ -155,7 +155,7 @@ target_ulong helper_read_crN(CPUX86State *env, int reg)
 break;
 case 8:
 if (!(env->hflags2 & HF2_VINTR_MASK)) {
-val = cpu_get_apic_tpr(env->apic_state);
+val = cpu_get_apic_tpr(x86_env_get_cpu(env)->apic_state);
 } else {
 val = env->v_tpr;
 }
@@ -179,7 +179,7 @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
 break;
 case 8:
 if (!(env->hflags2 & HF2_VINTR_MASK)) {
-cpu_set_apic_tpr(env->apic_state, t0);
+cpu_set_apic_tpr(x86_env_get_cpu(env)->apic_state, t0);
 }
 env->v_tpr = t0 & 0x0f;
 break;
@@ -286,7 +286,7 @@ void helper_wrmsr(CPUX86State *env)
 env->sysenter_eip = val;
 break;
 case MSR_IA32_APICBASE:
-cpu_set_apic_base(env->apic_state, val);
+cpu_set_apic_base(x86_env_get_cpu(env)->apic_state, val);
 break;
 case MSR_EFER:
 {
@@ -437,7 +437,7 @@ void helper_rdmsr(CPUX86State *env)
 val = env->sysenter_eip;
 break;
 case MSR_IA32_APICBASE:
-val = cpu_get_apic_base(env->apic_state);
+val = cpu_get_apic_base(x86_env_get_cpu(env)->apic_state);
 break;
 case MSR_EFER:
 val = env->efer;
translate-all.c

@@ -703,11 +703,10 @@ void tb_flush(CPUArchState *env1)
 CPU_FOREACH(cpu) {
 CPUArchState *env = cpu->env_ptr;

-memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
+memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));
 }

-memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
-CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
+memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
 page_flush_tb();

 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
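The sizeof() form used above equals the old explicit count * element-size expression only because tb_jmp_cache and tb_phys_hash are real arrays, not pointers; sizeof on a pointer member would return the pointer size instead. The tiny standalone check below illustrates that; CACHE_SIZE and FakeEnv are invented names, not QEMU code.

/* Standalone sketch (not QEMU code): sizeof(array member) already covers the
 * whole array, so the hand-written size expression can be dropped. */
#include <assert.h>
#include <stdio.h>

#define CACHE_SIZE 64            /* stand-in for TB_JMP_CACHE_SIZE */

struct FakeEnv {
    void *tb_jmp_cache[CACHE_SIZE];   /* true array: sizeof covers all of it */
};

int main(void)
{
    struct FakeEnv env;
    assert(sizeof(env.tb_jmp_cache) == CACHE_SIZE * sizeof(void *));
    printf("sizeof(env.tb_jmp_cache) = %zu bytes\n", sizeof(env.tb_jmp_cache));
    return 0;
}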