x86 and machine queue, 2016-09-27

-----BEGIN PGP SIGNATURE-----
 
 iQIcBAABCAAGBQJX6tIeAAoJECgHk2+YTcWm1HMQAJQfxCT4nHpiRC6Shl/cfobU
 1WMeHNzwfIq6PGIZZW+6QntLD5qyvk4hP8i3k71qAN9YciFdtjw4YD1v6tKd/pTz
 YDL+NVIwflKU4yxvmUY2X/ZqD40zeBd1RhOBdlt1clKZDkRNvBi7TL8oX3daEi02
 0cLsUzVIL3FtrxZCnF5QVEydH0mA5lCBA/qNQOeJmZf1sPBgwzPe/2AxJ8UBYtdT
 djBE07C0iLWFNc21mIph1ejONTqswgZT78Mjk7y4YCQ0wXYTdz1vzLVPur9e1hUE
 OOJLOKMqs1tn5qbUTJGpmNgOlyF2VTScEciyiBdARwWKww3W3cP/u38obo74NH79
 FuY5V8hK673Y7zyt6CxMfQT/2txddOcDakMwSNBPb14BssbyPbknzhw6ff2iGIab
 N+h7+jxZHlR9ZQGSuftEy9HALRkMw30jJvJQDXrzA0ASpyil+cJ6fltCznYaLhTy
 m5qU5f63T8XS3j2mNwQmf2OEfAnKA4nthQhMZSzXRiNeQwaDJ9j63dljlIjQcdUb
 I1VgjpKcBK/Cd+UnRZov96Ovak6m59Tcvu2Y3HYXmKJ/CyAnADC58gxOQBJeliOD
 PMFTpeCe5/0tWCTlqYv989F5uoUMbY5LSL+1WM0nDZ63YXtSIM82lIMZA9HBhJDN
 hQ35XM4z7WD5q/8+BCzg
 =upBR
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/ehabkost/tags/x86-pull-request' into staging

x86 and machine queue, 2016-09-27

# gpg: Signature made Tue 27 Sep 2016 21:10:06 BST
# gpg:                using RSA key 0x2807936F984DC5A6
# gpg: Good signature from "Eduardo Habkost <ehabkost@redhat.com>"
# Primary key fingerprint: 5A32 2FD5 ABC4 D3DB ACCF  D1AA 2807 936F 984D C5A6

* remotes/ehabkost/tags/x86-pull-request:
  sysbus: Remove ignored return value of FindSysbusDeviceFunc
  target-i386: Remove has_msr_* global vars for KVM features
  target-i386: Clear KVM CPUID features if KVM is disabled
  target-i386: Remove has_msr_hv_tsc global variable
  target-i386: Remove has_msr_hv_apic global variable
  target-i386: Remove has_msr_mtrr global variable
  target-i386: Move xsave component mask to features array
  target-i386: xsave: Calculate set of xsave components on realize
  target-i386: xsave: Helper function to calculate xsave area size
  target-i386: xsave: Simplify CPUID[0xD,0].{EAX,EDX} calculation
  target-i386: xsave: Calculate enabled components only once
  target-i386: Don't try to enable PT State xsave component
  target-i386: Move feature name arrays inside FeatureWordInfo
  linux-user: remove #define smp_{cores, threads}
  target-i386: Enable CPUID[0x8000000A] if SVM is enabled
  target-i386: Automatically set level/xlevel/xlevel2 when needed
  tests: Test CPUID level handling for old machines
  tests: Add test code for CPUID level/xlevel handling
  target-i386: Add a marker to end of the region zeroed on reset
  target-i386: Remove unused X86CPUDefinition::xlevel2 field

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell <peter.maydell@linaro.org>, 2016-09-27 23:10:12 +01:00
commit 25930ed60a
15 changed files with 540 additions and 302 deletions


@@ -436,7 +436,7 @@ static const NodeCreationPair add_fdt_node_functions[] = {
* are dynamically instantiable and if so call the node creation
* function.
*/
static int add_fdt_node(SysBusDevice *sbdev, void *opaque)
static void add_fdt_node(SysBusDevice *sbdev, void *opaque)
{
int i, ret;
@@ -445,7 +445,7 @@ static int add_fdt_node(SysBusDevice *sbdev, void *opaque)
add_fdt_node_functions[i].typename)) {
ret = add_fdt_node_functions[i].add_fdt_node_fn(sbdev, opaque);
assert(!ret);
return 0;
return;
}
}
error_report("Device %s can not be dynamically instantiated",


@@ -332,7 +332,7 @@ static bool machine_get_enforce_config_section(Object *obj, Error **errp)
return ms->enforce_config_section;
}
static int error_on_sysbus_device(SysBusDevice *sbdev, void *opaque)
static void error_on_sysbus_device(SysBusDevice *sbdev, void *opaque)
{
error_report("Option '-device %s' cannot be handled by this machine",
object_class_get_name(object_get_class(OBJECT(sbdev))));


@@ -74,7 +74,7 @@ hwaddr platform_bus_get_mmio_addr(PlatformBusDevice *pbus, SysBusDevice *sbdev,
return object_property_get_int(OBJECT(sbdev_mr), "addr", NULL);
}
static int platform_bus_count_irqs(SysBusDevice *sbdev, void *opaque)
static void platform_bus_count_irqs(SysBusDevice *sbdev, void *opaque)
{
PlatformBusDevice *pbus = opaque;
qemu_irq sbirq;
@@ -93,8 +93,6 @@ static int platform_bus_count_irqs(SysBusDevice *sbdev, void *opaque)
}
}
}
return 0;
}
/*
@@ -168,7 +166,7 @@ static void platform_bus_map_mmio(PlatformBusDevice *pbus, SysBusDevice *sbdev,
* For each sysbus device, look for unassigned IRQ lines as well as
* unassociated MMIO regions. Connect them to the platform bus if available.
*/
static int link_sysbus_device(SysBusDevice *sbdev, void *opaque)
static void link_sysbus_device(SysBusDevice *sbdev, void *opaque)
{
PlatformBusDevice *pbus = opaque;
int i;
@@ -180,8 +178,6 @@ static int link_sysbus_device(SysBusDevice *sbdev, void *opaque)
for (i = 0; sysbus_has_mmio(sbdev, i); i++) {
platform_bus_map_mmio(pbus, sbdev, i);
}
return 0;
}
static void platform_bus_init_notify(Notifier *notifier, void *data)


@@ -196,7 +196,7 @@ static int create_devtree_etsec(SysBusDevice *sbdev, PlatformDevtreeData *data)
return 0;
}
static int sysbus_device_create_devtree(SysBusDevice *sbdev, void *opaque)
static void sysbus_device_create_devtree(SysBusDevice *sbdev, void *opaque)
{
PlatformDevtreeData *data = opaque;
bool matched = false;
@@ -211,8 +211,6 @@ static int sysbus_device_create_devtree(SysBusDevice *sbdev, void *opaque)
qdev_fw_name(DEVICE(sbdev)));
exit(1);
}
return 0;
}
static void platform_bus_create_devtree(PPCE500Params *params, void *fdt,


@@ -1110,7 +1110,7 @@ static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
}
}
static int find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)
static void find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)
{
bool matched = false;
@@ -1123,8 +1123,6 @@ static int find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)
qdev_fw_name(DEVICE(sbdev)));
exit(1);
}
return 0;
}
static void ppc_spapr_reset(void)


@@ -374,6 +374,11 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
.driver = TYPE_X86_CPU,\
.property = "l3-cache",\
.value = "off",\
},\
{\
.driver = TYPE_X86_CPU,\
.property = "full-cpuid-auto-level",\
.value = "off",\
},
#define PC_COMPAT_2_6 \


@@ -75,7 +75,7 @@ struct SysBusDevice {
uint32_t pio[QDEV_MAX_PIO];
};
typedef int FindSysbusDeviceFunc(SysBusDevice *sbdev, void *opaque);
typedef void FindSysbusDeviceFunc(SysBusDevice *sbdev, void *opaque);
void sysbus_init_mmio(SysBusDevice *dev, MemoryRegion *memory);
MemoryRegion *sysbus_mmio_get_region(SysBusDevice *dev, int n);


@@ -29,12 +29,9 @@ void qtest_clock_warp(int64_t dest);
#ifndef CONFIG_USER_ONLY
/* vl.c */
/* *-user doesn't have configurable SMP topology */
extern int smp_cores;
extern int smp_threads;
#else
/* *-user doesn't have configurable SMP topology */
#define smp_cores 1
#define smp_threads 1
#endif
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg);


@@ -181,185 +181,6 @@ static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
dst[CPUID_VENDOR_SZ] = '\0';
}
/* feature flags taken from "Intel Processor Identification and the CPUID
* Instruction" and AMD's "CPUID Specification". In cases of disagreement
* between feature naming conventions, aliases may be added.
*/
static const char *feature_name[] = {
"fpu", "vme", "de", "pse",
"tsc", "msr", "pae", "mce",
"cx8", "apic", NULL, "sep",
"mtrr", "pge", "mca", "cmov",
"pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
NULL, "ds" /* Intel dts */, "acpi", "mmx",
"fxsr", "sse", "sse2", "ss",
"ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
"pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
"ds_cpl", "vmx", "smx", "est",
"tm2", "ssse3", "cid", NULL,
"fma", "cx16", "xtpr", "pdcm",
NULL, "pcid", "dca", "sse4.1|sse4_1",
"sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
"tsc-deadline", "aes", "xsave", "osxsave",
"avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
* CPUID[8000_0001].EDX on AMD CPUs don't have their names on
* ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
* if and only if CPU vendor is AMD.
*/
static const char *ext2_feature_name[] = {
NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
"nx|xd", NULL, "mmxext", NULL /* mmx */,
NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
NULL, "lm|i64", "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
"lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
"cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
"3dnowprefetch", "osvw", "ibs", "xop",
"skinit", "wdt", NULL, "lwp",
"fma4", "tce", NULL, "nodeid_msr",
NULL, "tbm", "topoext", "perfctr_core",
"perfctr_nb", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};
static const char *ext4_feature_name[] = {
NULL, NULL, "xstore", "xstore-en",
NULL, NULL, "xcrypt", "xcrypt-en",
"ace2", "ace2-en", "phe", "phe-en",
"pmm", "pmm-en", NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};
static const char *kvm_feature_name[] = {
"kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
"kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
"kvmclock-stable-bit", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};
static const char *hyperv_priv_feature_name[] = {
NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};
static const char *hyperv_ident_feature_name[] = {
NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
NULL /* hv_post_messages */, NULL /* hv_signal_events */,
NULL /* hv_create_port */, NULL /* hv_connect_port */,
NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};
static const char *hyperv_misc_feature_name[] = {
NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
NULL, NULL,
NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};
static const char *svm_feature_name[] = {
"npt", "lbrv", "svm_lock", "nrip_save",
"tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
NULL, NULL, "pause_filter", NULL,
"pfthreshold", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};
static const char *cpuid_7_0_ebx_feature_name[] = {
"fsgsbase", "tsc_adjust", NULL, "bmi1",
"hle", "avx2", NULL, "smep",
"bmi2", "erms", "invpcid", "rtm",
NULL, NULL, "mpx", NULL,
"avx512f", "avx512dq", "rdseed", "adx",
"smap", "avx512ifma", "pcommit", "clflushopt",
"clwb", NULL, "avx512pf", "avx512er",
"avx512cd", NULL, "avx512bw", "avx512vl",
};
static const char *cpuid_7_0_ecx_feature_name[] = {
NULL, "avx512vbmi", "umip", "pku",
"ospke", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, "rdpid", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};
static const char *cpuid_apm_edx_feature_name[] = {
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
"invtsc", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};
static const char *cpuid_xsave_feature_name[] = {
"xsaveopt", "xsavec", "xgetbv1", "xsaves",
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};
static const char *cpuid_6_feature_name[] = {
NULL, NULL, "arat", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
@@ -425,7 +246,12 @@ static const char *cpuid_6_feature_name[] = {
CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
typedef struct FeatureWordInfo {
const char **feat_names;
/* feature flags names are taken from "Intel Processor Identification and
* the CPUID Instruction" and AMD's "CPUID Specification".
* In cases of disagreement between feature naming conventions,
* aliases may be added.
*/
const char *feat_names[32];
uint32_t cpuid_eax; /* Input EAX for CPUID */
bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
uint32_t cpuid_ecx; /* Input ECX value for CPUID */
@@ -436,85 +262,245 @@ typedef struct FeatureWordInfo {
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_1_EDX] = {
.feat_names = feature_name,
.feat_names = {
"fpu", "vme", "de", "pse",
"tsc", "msr", "pae", "mce",
"cx8", "apic", NULL, "sep",
"mtrr", "pge", "mca", "cmov",
"pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
NULL, "ds" /* Intel dts */, "acpi", "mmx",
"fxsr", "sse", "sse2", "ss",
"ht" /* Intel htt */, "tm", "ia64", "pbe",
},
.cpuid_eax = 1, .cpuid_reg = R_EDX,
.tcg_features = TCG_FEATURES,
},
[FEAT_1_ECX] = {
.feat_names = ext_feature_name,
.feat_names = {
"pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
"ds_cpl", "vmx", "smx", "est",
"tm2", "ssse3", "cid", NULL,
"fma", "cx16", "xtpr", "pdcm",
NULL, "pcid", "dca", "sse4.1|sse4_1",
"sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
"tsc-deadline", "aes", "xsave", "osxsave",
"avx", "f16c", "rdrand", "hypervisor",
},
.cpuid_eax = 1, .cpuid_reg = R_ECX,
.tcg_features = TCG_EXT_FEATURES,
},
/* Feature names that are already defined on feature_name[] but
* are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
* names on feat_names below. They are copied automatically
* to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
*/
[FEAT_8000_0001_EDX] = {
.feat_names = ext2_feature_name,
.feat_names = {
NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
"nx|xd", NULL, "mmxext", NULL /* mmx */,
NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb", "rdtscp",
NULL, "lm|i64", "3dnowext", "3dnow",
},
.cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
.tcg_features = TCG_EXT2_FEATURES,
},
[FEAT_8000_0001_ECX] = {
.feat_names = ext3_feature_name,
.feat_names = {
"lahf_lm", "cmp_legacy", "svm", "extapic",
"cr8legacy", "abm", "sse4a", "misalignsse",
"3dnowprefetch", "osvw", "ibs", "xop",
"skinit", "wdt", NULL, "lwp",
"fma4", "tce", NULL, "nodeid_msr",
NULL, "tbm", "topoext", "perfctr_core",
"perfctr_nb", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
.cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
.tcg_features = TCG_EXT3_FEATURES,
},
[FEAT_C000_0001_EDX] = {
.feat_names = ext4_feature_name,
.feat_names = {
NULL, NULL, "xstore", "xstore-en",
NULL, NULL, "xcrypt", "xcrypt-en",
"ace2", "ace2-en", "phe", "phe-en",
"pmm", "pmm-en", NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
.cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
.tcg_features = TCG_EXT4_FEATURES,
},
[FEAT_KVM] = {
.feat_names = kvm_feature_name,
.feat_names = {
"kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
"kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
"kvmclock-stable-bit", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
.cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
.tcg_features = TCG_KVM_FEATURES,
},
[FEAT_HYPERV_EAX] = {
.feat_names = hyperv_priv_feature_name,
.feat_names = {
NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
.cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
},
[FEAT_HYPERV_EBX] = {
.feat_names = hyperv_ident_feature_name,
.feat_names = {
NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
NULL /* hv_post_messages */, NULL /* hv_signal_events */,
NULL /* hv_create_port */, NULL /* hv_connect_port */,
NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
.cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
},
[FEAT_HYPERV_EDX] = {
.feat_names = hyperv_misc_feature_name,
.feat_names = {
NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
NULL, NULL,
NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
.cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
},
[FEAT_SVM] = {
.feat_names = svm_feature_name,
.feat_names = {
"npt", "lbrv", "svm_lock", "nrip_save",
"tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
NULL, NULL, "pause_filter", NULL,
"pfthreshold", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
.cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
.tcg_features = TCG_SVM_FEATURES,
},
[FEAT_7_0_EBX] = {
.feat_names = cpuid_7_0_ebx_feature_name,
.feat_names = {
"fsgsbase", "tsc_adjust", NULL, "bmi1",
"hle", "avx2", NULL, "smep",
"bmi2", "erms", "invpcid", "rtm",
NULL, NULL, "mpx", NULL,
"avx512f", "avx512dq", "rdseed", "adx",
"smap", "avx512ifma", "pcommit", "clflushopt",
"clwb", NULL, "avx512pf", "avx512er",
"avx512cd", NULL, "avx512bw", "avx512vl",
},
.cpuid_eax = 7,
.cpuid_needs_ecx = true, .cpuid_ecx = 0,
.cpuid_reg = R_EBX,
.tcg_features = TCG_7_0_EBX_FEATURES,
},
[FEAT_7_0_ECX] = {
.feat_names = cpuid_7_0_ecx_feature_name,
.feat_names = {
NULL, "avx512vbmi", "umip", "pku",
"ospke", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, "rdpid", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
.cpuid_eax = 7,
.cpuid_needs_ecx = true, .cpuid_ecx = 0,
.cpuid_reg = R_ECX,
.tcg_features = TCG_7_0_ECX_FEATURES,
},
[FEAT_8000_0007_EDX] = {
.feat_names = cpuid_apm_edx_feature_name,
.feat_names = {
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
"invtsc", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
.cpuid_eax = 0x80000007,
.cpuid_reg = R_EDX,
.tcg_features = TCG_APM_FEATURES,
.unmigratable_flags = CPUID_APM_INVTSC,
},
[FEAT_XSAVE] = {
.feat_names = cpuid_xsave_feature_name,
.feat_names = {
"xsaveopt", "xsavec", "xgetbv1", "xsaves",
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
.cpuid_eax = 0xd,
.cpuid_needs_ecx = true, .cpuid_ecx = 1,
.cpuid_reg = R_EAX,
.tcg_features = TCG_XSAVE_FEATURES,
},
[FEAT_6_EAX] = {
.feat_names = cpuid_6_feature_name,
.feat_names = {
NULL, NULL, "arat", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
.cpuid_eax = 6, .cpuid_reg = R_EAX,
.tcg_features = TCG_6_EAX_FEATURES,
},
[FEAT_XSAVE_COMP_LO] = {
.cpuid_eax = 0xD,
.cpuid_needs_ecx = true, .cpuid_ecx = 0,
.cpuid_reg = R_EAX,
.tcg_features = ~0U,
},
[FEAT_XSAVE_COMP_HI] = {
.cpuid_eax = 0xD,
.cpuid_needs_ecx = true, .cpuid_ecx = 0,
.cpuid_reg = R_EDX,
.tcg_features = ~0U,
},
};
typedef struct X86RegisterInfo32 {
@@ -574,6 +560,26 @@ static const ExtSaveArea x86_ext_save_areas[] = {
.size = sizeof(XSavePKRU) },
};
static uint32_t xsave_area_size(uint64_t mask)
{
int i;
uint64_t ret = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader);
for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
const ExtSaveArea *esa = &x86_ext_save_areas[i];
if ((mask >> i) & 1) {
ret = MAX(ret, esa->offset + esa->size);
}
}
return ret;
}
static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
cpu->env.features[FEAT_XSAVE_COMP_LO];
}
const char *get_register_name_32(unsigned int reg)
{
if (reg >= CPU_NB_REGS32) {
@@ -711,8 +717,7 @@ static void add_flagname_to_bitmaps(const char *flagname,
FeatureWord w;
for (w = 0; w < FEATURE_WORDS; w++) {
FeatureWordInfo *wi = &feature_word_info[w];
if (wi->feat_names &&
lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
if (lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
break;
}
}
@@ -761,7 +766,6 @@ struct X86CPUDefinition {
const char *name;
uint32_t level;
uint32_t xlevel;
uint32_t xlevel2;
/* vendor is zero-terminated, 12 character ASCII string */
char vendor[CPUID_VENDOR_SZ + 1];
int family;
@@ -1644,9 +1648,12 @@ static void host_x86_cpu_initfn(Object *obj)
/* If KVM is disabled, x86_cpu_realizefn() will report an error later */
if (kvm_enabled()) {
env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
env->cpuid_min_level =
kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
env->cpuid_min_xlevel =
kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
env->cpuid_min_xlevel2 =
kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
if (lmce_supported()) {
object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
@@ -2209,12 +2216,13 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
char host_vendor[CPUID_VENDOR_SZ + 1];
FeatureWord w;
object_property_set_int(OBJECT(cpu), def->level, "level", errp);
/* CPU models only set _minimum_ values for level/xlevel: */
object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
object_property_set_int(OBJECT(cpu), def->family, "family", errp);
object_property_set_int(OBJECT(cpu), def->model, "model", errp);
object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
for (w = 0; w < FEATURE_WORDS; w++) {
env->features[w] = def->features[w];
@@ -2495,13 +2503,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
switch (count) {
case 0:
*eax = apicid_core_offset(smp_cores, smp_threads);
*ebx = smp_threads;
*eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
*ebx = cs->nr_threads;
*ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
break;
case 1:
*eax = apicid_pkg_offset(smp_cores, smp_threads);
*ebx = smp_cores * smp_threads;
*eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
*ebx = cs->nr_cores * cs->nr_threads;
*ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
break;
default:
@@ -2514,10 +2522,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ebx &= 0xffff; /* The count doesn't need to be reliable. */
break;
case 0xD: {
KVMState *s = cs->kvm_state;
uint64_t ena_mask;
int i;
/* Processor Extended State */
*eax = 0;
*ebx = 0;
@@ -2526,36 +2530,17 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
break;
}
if (kvm_enabled()) {
ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
ena_mask <<= 32;
ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
} else {
ena_mask = -1;
}
if (count == 0) {
*ecx = 0x240;
for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
const ExtSaveArea *esa = &x86_ext_save_areas[i];
if ((env->features[esa->feature] & esa->bits) == esa->bits
&& ((ena_mask >> i) & 1) != 0) {
if (i < 32) {
*eax |= 1u << i;
} else {
*edx |= 1u << (i - 32);
}
*ecx = MAX(*ecx, esa->offset + esa->size);
}
}
*eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
*ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
*eax = env->features[FEAT_XSAVE_COMP_LO];
*edx = env->features[FEAT_XSAVE_COMP_HI];
*ebx = *ecx;
} else if (count == 1) {
*eax = env->features[FEAT_XSAVE];
} else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
const ExtSaveArea *esa = &x86_ext_save_areas[count];
if ((env->features[esa->feature] & esa->bits) == esa->bits
&& ((ena_mask >> count) & 1) != 0) {
if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
const ExtSaveArea *esa = &x86_ext_save_areas[count];
*eax = esa->size;
*ebx = esa->offset;
}
@@ -2716,7 +2701,7 @@ static void x86_cpu_reset(CPUState *s)
xcc->parent_reset(s);
memset(env, 0, offsetof(CPUX86State, cpuid_level));
memset(env, 0, offsetof(CPUX86State, end_reset_fields));
tlb_flush(s, 1);
@@ -2790,7 +2775,7 @@ static void x86_cpu_reset(CPUState *s)
}
for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
const ExtSaveArea *esa = &x86_ext_save_areas[i];
if ((env->features[esa->feature] & esa->bits) == esa->bits) {
if (env->features[esa->feature] & esa->bits) {
xcr0 |= 1ull << i;
}
}
@@ -2953,6 +2938,61 @@ static uint32_t x86_host_phys_bits(void)
return host_phys_bits;
}
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
if (*min < value) {
*min = value;
}
}
/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
CPUX86State *env = &cpu->env;
FeatureWordInfo *fi = &feature_word_info[w];
uint32_t eax = fi->cpuid_eax;
uint32_t region = eax & 0xF0000000;
if (!env->features[w]) {
return;
}
switch (region) {
case 0x00000000:
x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
break;
case 0x80000000:
x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
break;
case 0xC0000000:
x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
break;
}
}
/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
int i;
uint64_t mask;
if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
return;
}
mask = (XSTATE_FP_MASK | XSTATE_SSE_MASK);
for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
const ExtSaveArea *esa = &x86_ext_save_areas[i];
if (env->features[esa->feature] & esa->bits) {
mask |= (1ULL << i);
}
}
env->features[FEAT_XSAVE_COMP_LO] = mask;
env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
(env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
(env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
@@ -2998,8 +3038,40 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
cpu->env.features[w] &= ~minus_features[w];
}
if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
env->cpuid_level = 7;
if (!kvm_enabled() || !cpu->expose_kvm) {
env->features[FEAT_KVM] = 0;
}
x86_cpu_enable_xsave_components(cpu);
/* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
if (cpu->full_cpuid_auto_level) {
x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
/* SVM requires CPUID[0x8000000A] */
if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
}
}
/* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
if (env->cpuid_level == UINT32_MAX) {
env->cpuid_level = env->cpuid_min_level;
}
if (env->cpuid_xlevel == UINT32_MAX) {
env->cpuid_xlevel = env->cpuid_min_xlevel;
}
if (env->cpuid_xlevel2 == UINT32_MAX) {
env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
}
if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
@@ -3262,9 +3334,6 @@ static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
char **names;
FeatureWordInfo *fi = &feature_word_info[w];
if (!fi->feat_names) {
return;
}
if (!fi->feat_names[bitnr]) {
return;
}
@@ -3405,9 +3474,13 @@ static Property x86_cpu_properties[] = {
DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),


@@ -453,6 +453,8 @@ typedef enum FeatureWord {
FEAT_SVM, /* CPUID[8000_000A].EDX */
FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
FEAT_6_EAX, /* CPUID[6].EAX */
FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
FEATURE_WORDS,
} FeatureWord;
@@ -1108,11 +1110,15 @@ typedef struct CPUX86State {
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
struct {} end_reset_fields;
/* processor features (e.g. for CPUID insn) */
uint32_t cpuid_level;
uint32_t cpuid_xlevel;
uint32_t cpuid_xlevel2;
/* Minimum level/xlevel/xlevel2, based on CPU model + features */
uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
/* Maximum level/xlevel/xlevel2 value for auto-assignment: */
uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
/* Actual level/xlevel/xlevel2 value: */
uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
uint32_t cpuid_vendor1;
uint32_t cpuid_vendor2;
uint32_t cpuid_vendor3;
@@ -1217,6 +1223,9 @@ struct X86CPU {
/* Compatibility bits for old machine types: */
bool enable_cpuid_0xb;
/* Enable auto level-increase for all CPUID leaves */
bool full_cpuid_auto_level;
/* if true fill the top bits of the MTRR_PHYSMASKn variable range */
bool fill_mtrr_mask;


@@ -83,23 +83,17 @@ static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_async_pf_en;
static bool has_msr_pv_eoi_en;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static bool has_msr_kvm_steal_time;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_vapic;
static bool has_msr_hv_tsc;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_mtrr;
static bool has_msr_xss;
static bool has_msr_architectural_pmu;
@@ -604,20 +598,22 @@ static int hyperv_handle_properties(CPUState *cs)
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
if (cpu->hyperv_time &&
kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) <= 0) {
cpu->hyperv_time = false;
}
if (cpu->hyperv_relaxed_timing) {
env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
}
if (cpu->hyperv_vapic) {
env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
has_msr_hv_vapic = true;
}
if (cpu->hyperv_time &&
kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
if (cpu->hyperv_time) {
env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
env->features[FEAT_HYPERV_EAX] |= 0x200;
has_msr_hv_tsc = true;
}
if (cpu->hyperv_crash && has_msr_hv_crash) {
env->features[FEAT_HYPERV_EDX] |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
@@ -729,7 +725,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (cpu->hyperv_relaxed_timing) {
c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
}
if (has_msr_hv_vapic) {
if (cpu->hyperv_vapic) {
c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
}
c->ebx = cpu->hyperv_spinlock_attempts;
@@ -755,12 +751,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
c = &cpuid_data.entries[cpuid_i++];
c->function = KVM_CPUID_FEATURES | kvm_base;
c->eax = env->features[FEAT_KVM];
has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);
has_msr_kvm_steal_time = c->eax & (1 << KVM_FEATURE_STEAL_TIME);
}
cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
@@ -975,9 +965,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
}
cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
has_msr_mtrr = true;
}
if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
has_msr_tsc_aux = false;
}
@@ -1643,13 +1630,13 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
if (has_msr_async_pf_en) {
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
}
if (has_msr_pv_eoi_en) {
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
}
if (has_msr_kvm_steal_time) {
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
}
if (has_msr_architectural_pmu) {
@@ -1685,11 +1672,11 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
env->msr_hv_hypercall);
}
if (has_msr_hv_vapic) {
if (cpu->hyperv_vapic) {
kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
env->msr_hv_vapic);
}
if (has_msr_hv_tsc) {
if (cpu->hyperv_time) {
kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
}
if (has_msr_hv_crash) {
@@ -1735,7 +1722,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
env->msr_hv_stimer_count[j]);
}
}
if (has_msr_mtrr) {
if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
@@ -2052,13 +2039,13 @@ static int kvm_get_msrs(X86CPU *cpu)
#endif
kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
if (has_msr_async_pf_en) {
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
}
if (has_msr_pv_eoi_en) {
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
}
if (has_msr_kvm_steal_time) {
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
}
if (has_msr_architectural_pmu) {
@@ -2090,10 +2077,10 @@ static int kvm_get_msrs(X86CPU *cpu)
kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
}
if (has_msr_hv_vapic) {
if (cpu->hyperv_vapic) {
kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
}
if (has_msr_hv_tsc) {
if (cpu->hyperv_time) {
kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
}
if (has_msr_hv_crash) {
@@ -2125,7 +2112,7 @@ static int kvm_get_msrs(X86CPU *cpu)
kvm_msr_entry_add(cpu, msr, 0);
}
}
if (has_msr_mtrr) {
if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);


@@ -9943,7 +9943,8 @@ static void ppc_cpu_unrealizefn(DeviceState *dev, Error **errp)
int ppc_get_compat_smt_threads(PowerPCCPU *cpu)
{
int ret = MIN(smp_threads, kvmppc_smt_threads());
CPUState *cs = CPU(cpu);
int ret = MIN(cs->nr_threads, kvmppc_smt_threads());
switch (cpu->cpu_version) {
case CPU_POWERPC_LOGICAL_2_05:

tests/.gitignore

@@ -75,6 +75,7 @@ test-visitor-serialization
test-vmstate
test-write-threshold
test-x86-cpuid
test-x86-cpuid-compat
test-xbzrle
test-netfilter
test-filter-mirror


@@ -243,6 +243,7 @@ check-qtest-i386-y += tests/test-netfilter$(EXESUF)
check-qtest-i386-y += tests/test-filter-mirror$(EXESUF)
check-qtest-i386-y += tests/test-filter-redirector$(EXESUF)
check-qtest-i386-y += tests/postcopy-test$(EXESUF)
check-qtest-i386-y += tests/test-x86-cpuid-compat$(EXESUF)
check-qtest-x86_64-y += $(check-qtest-i386-y)
gcov-files-i386-y += i386-softmmu/hw/timer/mc146818rtc.c
gcov-files-x86_64-y = $(subst i386-softmmu/,x86_64-softmmu/,$(gcov-files-i386-y))
@@ -663,6 +664,7 @@ tests/test-write-threshold$(EXESUF): tests/test-write-threshold.o $(test-block-o
tests/test-netfilter$(EXESUF): tests/test-netfilter.o $(qtest-obj-y)
tests/test-filter-mirror$(EXESUF): tests/test-filter-mirror.o $(qtest-obj-y)
tests/test-filter-redirector$(EXESUF): tests/test-filter-redirector.o $(qtest-obj-y)
tests/test-x86-cpuid-compat$(EXESUF): tests/test-x86-cpuid-compat.o $(qtest-obj-y)
tests/ivshmem-test$(EXESUF): tests/ivshmem-test.o contrib/ivshmem-server/ivshmem-server.o $(libqos-pc-obj-y)
tests/vhost-user-bridge$(EXESUF): tests/vhost-user-bridge.o
tests/ptimer-test$(EXESUF): tests/ptimer-test.o tests/ptimer-test-stubs.o hw/core/ptimer.o


@@ -0,0 +1,171 @@
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qint.h"
#include "libqtest.h"
static char *get_cpu0_qom_path(void)
{
QDict *resp;
QList *ret;
QDict *cpu0;
char *path;
resp = qmp("{'execute': 'query-cpus', 'arguments': {}}");
g_assert(qdict_haskey(resp, "return"));
ret = qdict_get_qlist(resp, "return");
cpu0 = qobject_to_qdict(qlist_peek(ret));
path = g_strdup(qdict_get_str(cpu0, "qom_path"));
QDECREF(resp);
return path;
}
static QObject *qom_get(const char *path, const char *prop)
{
QDict *resp = qmp("{ 'execute': 'qom-get',"
" 'arguments': { 'path': %s,"
" 'property': %s } }",
path, prop);
QObject *ret = qdict_get(resp, "return");
qobject_incref(ret);
QDECREF(resp);
return ret;
}
typedef struct CpuidTestArgs {
const char *cmdline;
const char *property;
int64_t expected_value;
} CpuidTestArgs;
static void test_cpuid_prop(const void *data)
{
const CpuidTestArgs *args = data;
char *path;
QInt *value;
qtest_start(args->cmdline);
path = get_cpu0_qom_path();
value = qobject_to_qint(qom_get(path, args->property));
g_assert_cmpint(qint_get_int(value), ==, args->expected_value);
qtest_end();
QDECREF(value);
g_free(path);
}
static void add_cpuid_test(const char *name, const char *cmdline,
const char *property, int64_t expected_value)
{
CpuidTestArgs *args = g_new0(CpuidTestArgs, 1);
args->cmdline = cmdline;
args->property = property;
args->expected_value = expected_value;
qtest_add_data_func(name, args, test_cpuid_prop);
}
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
/* Original level values for CPU models: */
add_cpuid_test("x86/cpuid/phenom/level",
"-cpu phenom", "level", 5);
add_cpuid_test("x86/cpuid/Conroe/level",
"-cpu Conroe", "level", 10);
add_cpuid_test("x86/cpuid/SandyBridge/level",
"-cpu SandyBridge", "level", 0xd);
add_cpuid_test("x86/cpuid/486/xlevel",
"-cpu 486", "xlevel", 0);
add_cpuid_test("x86/cpuid/core2duo/xlevel",
"-cpu core2duo", "xlevel", 0x80000008);
add_cpuid_test("x86/cpuid/phenom/xlevel",
"-cpu phenom", "xlevel", 0x8000001A);
add_cpuid_test("x86/cpuid/athlon/xlevel",
"-cpu athlon", "xlevel", 0x80000008);
/* If level is not large enough, it should increase automatically: */
/* CPUID[6].EAX: */
add_cpuid_test("x86/cpuid/auto-level/phenom/arat",
"-cpu 486,+arat", "level", 6);
/* CPUID[EAX=7,ECX=0].EBX: */
add_cpuid_test("x86/cpuid/auto-level/phenom/fsgsbase",
"-cpu phenom,+fsgsbase", "level", 7);
/* CPUID[EAX=7,ECX=0].ECX: */
add_cpuid_test("x86/cpuid/auto-level/phenom/avx512vbmi",
"-cpu phenom,+avx512vbmi", "level", 7);
/* CPUID[EAX=0xd,ECX=1].EAX: */
add_cpuid_test("x86/cpuid/auto-level/phenom/xsaveopt",
"-cpu phenom,+xsaveopt", "level", 0xd);
/* CPUID[8000_0001].EDX: */
add_cpuid_test("x86/cpuid/auto-xlevel/486/3dnow",
"-cpu 486,+3dnow", "xlevel", 0x80000001);
/* CPUID[8000_0001].ECX: */
add_cpuid_test("x86/cpuid/auto-xlevel/486/sse4a",
"-cpu 486,+sse4a", "xlevel", 0x80000001);
/* CPUID[8000_0007].EDX: */
add_cpuid_test("x86/cpuid/auto-xlevel/486/invtsc",
"-cpu 486,+invtsc", "xlevel", 0x80000007);
/* CPUID[8000_000A].EDX: */
add_cpuid_test("x86/cpuid/auto-xlevel/486/npt",
"-cpu 486,+npt", "xlevel", 0x8000000A);
/* CPUID[C000_0001].EDX: */
add_cpuid_test("x86/cpuid/auto-xlevel2/phenom/xstore",
"-cpu phenom,+xstore", "xlevel2", 0xC0000001);
/* SVM needs CPUID[0x8000000A] */
add_cpuid_test("x86/cpuid/auto-xlevel/athlon/svm",
"-cpu athlon,+svm", "xlevel", 0x8000000A);
/* If level is already large enough, it shouldn't change: */
add_cpuid_test("x86/cpuid/auto-level/SandyBridge/multiple",
"-cpu SandyBridge,+arat,+fsgsbase,+avx512vbmi",
"level", 0xd);
/* If level is explicitly set, it shouldn't change: */
add_cpuid_test("x86/cpuid/auto-level/486/fixed/0xF",
"-cpu 486,level=0xF,+arat,+fsgsbase,+avx512vbmi,+xsaveopt",
"level", 0xF);
add_cpuid_test("x86/cpuid/auto-level/486/fixed/2",
"-cpu 486,level=2,+arat,+fsgsbase,+avx512vbmi,+xsaveopt",
"level", 2);
add_cpuid_test("x86/cpuid/auto-level/486/fixed/0",
"-cpu 486,level=0,+arat,+fsgsbase,+avx512vbmi,+xsaveopt",
"level", 0);
/* if xlevel is already large enough, it shouldn't change: */
add_cpuid_test("x86/cpuid/auto-xlevel/phenom/3dnow",
"-cpu phenom,+3dnow,+sse4a,+invtsc,+npt,+svm",
"xlevel", 0x8000001A);
/* If xlevel is explicitly set, it shouldn't change: */
add_cpuid_test("x86/cpuid/auto-xlevel/486/fixed/80000002",
"-cpu 486,xlevel=0x80000002,+3dnow,+sse4a,+invtsc,+npt,+svm",
"xlevel", 0x80000002);
add_cpuid_test("x86/cpuid/auto-xlevel/486/fixed/8000001A",
"-cpu 486,xlevel=0x8000001A,+3dnow,+sse4a,+invtsc,+npt,+svm",
"xlevel", 0x8000001A);
add_cpuid_test("x86/cpuid/auto-xlevel/phenom/fixed/0",
"-cpu 486,xlevel=0,+3dnow,+sse4a,+invtsc,+npt,+svm",
"xlevel", 0);
/* if xlevel2 is already large enough, it shouldn't change: */
add_cpuid_test("x86/cpuid/auto-xlevel2/486/fixed",
"-cpu 486,xlevel2=0xC0000002,+xstore",
"xlevel2", 0xC0000002);
/* Check compatibility of old machine-types that didn't
* auto-increase level/xlevel/xlevel2: */
add_cpuid_test("x86/cpuid/auto-level/pc-2.7",
"-machine pc-i440fx-2.7 -cpu 486,+arat,+avx512vbmi,+xsaveopt",
"level", 1);
add_cpuid_test("x86/cpuid/auto-xlevel/pc-2.7",
"-machine pc-i440fx-2.7 -cpu 486,+3dnow,+sse4a,+invtsc,+npt,+svm",
"xlevel", 0);
add_cpuid_test("x86/cpuid/auto-xlevel2/pc-2.7",
"-machine pc-i440fx-2.7 -cpu 486,+xstore",
"xlevel2", 0);
return g_test_run();
}