target/i386: Populate x86_ext_save_areas offsets using cpuid where possible

Rather than relying on the X86XSaveArea structure definition,
determine the offset of XSAVE state areas using CPUID leaf 0xd where
possible (KVM and HVF).

Signed-off-by: David Edmondson <david.edmondson@oracle.com>
Message-Id: <20210705104632.2902400-8-david.edmondson@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Authored by David Edmondson, 2021-07-05 11:46:31 +01:00; committed by Paolo Bonzini
parent 3568987f78
commit fea4500841
7 changed files with 94 additions and 13 deletions
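
The whole series hinges on CPUID leaf 0xd: sub-leaf 0 returns in ECX the size of an XSAVE area large enough for every state component the CPU supports (the value the new asserts below compare against the buffer length), and each sub-leaf i from 2 upward returns the size (EAX) and offset (EBX) of component i. Here is a minimal stand-alone sketch of that query, not taken from the patch; the cpuid_count helper and the GCC-style inline asm are illustrative assumptions:

/* Illustrative only: query CPUID leaf 0xd the way the accelerator
 * hooks below do, but from plain user space. */
#include <stdio.h>
#include <stdint.h>

static void cpuid_count(uint32_t leaf, uint32_t subleaf,
                        uint32_t *eax, uint32_t *ebx,
                        uint32_t *ecx, uint32_t *edx)
{
    __asm__ volatile("cpuid"
                     : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
                     : "a"(leaf), "c"(subleaf));
}

int main(void)
{
    uint32_t eax, ebx, ecx, edx;

    /* Sub-leaf 0, ECX: maximum XSAVE area size over all components the
     * CPU supports; the new asserts check the buffers against this. */
    cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
    printf("XSAVE area size: %u bytes\n", ecx);

    /* Sub-leaves 2..9 cover AVX through PKRU: EAX is the component's
     * size, EBX its offset -- the values used below to populate
     * x86_ext_save_areas. */
    for (uint32_t i = 2; i < 10; i++) {
        cpuid_count(0xd, i, &eax, &ebx, &ecx, &edx);
        if (eax != 0) {
            printf("component %u: size %u at offset %u\n", i, eax, ebx);
        }
    }
    return 0;
}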

target/i386/cpu.c

@@ -1304,48 +1304,37 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
 };
 #undef REGISTER
 
-const ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
+ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
     [XSTATE_FP_BIT] = {
         /* x87 FP state component is always enabled if XSAVE is supported */
         .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
-        /* x87 state is in the legacy region of the XSAVE area */
-        .offset = 0,
         .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
     },
     [XSTATE_SSE_BIT] = {
         /* SSE state component is always enabled if XSAVE is supported */
         .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
-        /* SSE state is in the legacy region of the XSAVE area */
-        .offset = 0,
         .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
     },
     [XSTATE_YMM_BIT] =
           { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
-            .offset = offsetof(X86XSaveArea, avx_state),
             .size = sizeof(XSaveAVX) },
     [XSTATE_BNDREGS_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
-            .offset = offsetof(X86XSaveArea, bndreg_state),
             .size = sizeof(XSaveBNDREG) },
     [XSTATE_BNDCSR_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
-            .offset = offsetof(X86XSaveArea, bndcsr_state),
             .size = sizeof(XSaveBNDCSR) },
     [XSTATE_OPMASK_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
-            .offset = offsetof(X86XSaveArea, opmask_state),
             .size = sizeof(XSaveOpmask) },
     [XSTATE_ZMM_Hi256_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
-            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
             .size = sizeof(XSaveZMM_Hi256) },
     [XSTATE_Hi16_ZMM_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
-            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
             .size = sizeof(XSaveHi16_ZMM) },
     [XSTATE_PKRU_BIT] =
           { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
-            .offset = offsetof(X86XSaveArea, pkru_state),
             .size = sizeof(XSavePKRU) },
 };
 

target/i386/cpu.h

@@ -1377,7 +1377,7 @@ typedef struct ExtSaveArea {
 
 #define XSAVE_STATE_AREA_COUNT (XSTATE_PKRU_BIT + 1)
 
-extern const ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];
+extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];
 
 typedef enum TPRAccess {
     TPR_ACCESS_READ,

target/i386/hvf/hvf-cpu.c

@@ -30,6 +30,33 @@ static void hvf_cpu_max_instance_init(X86CPU *cpu)
         hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
 }
 
+static void hvf_cpu_xsave_init(void)
+{
+    static bool first = true;
+    int i;
+
+    if (!first) {
+        return;
+    }
+    first = false;
+
+    /* x87 and SSE states are in the legacy region of the XSAVE area. */
+    x86_ext_save_areas[XSTATE_FP_BIT].offset = 0;
+    x86_ext_save_areas[XSTATE_SSE_BIT].offset = 0;
+
+    for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) {
+        ExtSaveArea *esa = &x86_ext_save_areas[i];
+
+        if (esa->size) {
+            int sz = hvf_get_supported_cpuid(0xd, i, R_EAX);
+            if (sz != 0) {
+                assert(esa->size == sz);
+                esa->offset = hvf_get_supported_cpuid(0xd, i, R_EBX);
+            }
+        }
+    }
+}
+
 static void hvf_cpu_instance_init(CPUState *cs)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -42,6 +69,8 @@ static void hvf_cpu_instance_init(CPUState *cs)
     if (cpu->max_features) {
         hvf_cpu_max_instance_init(cpu);
     }
+
+    hvf_cpu_xsave_init();
 }
 
 static void hvf_cpu_accel_class_init(ObjectClass *oc, void *data)

target/i386/hvf/hvf.c

@@ -270,6 +270,12 @@ int hvf_arch_init_vcpu(CPUState *cpu)
     x86cpu->env.xsave_buf_len = 4096;
     x86cpu->env.xsave_buf = qemu_memalign(4096, x86cpu->env.xsave_buf_len);
 
+    /*
+     * The allocated storage must be large enough for all of the
+     * possible XSAVE state components.
+     */
+    assert(hvf_get_supported_cpuid(0xd, 0, R_ECX) <= x86cpu->env.xsave_buf_len);
+
     hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_STAR, 1);
     hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_LSTAR, 1);
     hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_CSTAR, 1);

target/i386/kvm/kvm-cpu.c

@@ -122,6 +122,34 @@ static void kvm_cpu_max_instance_init(X86CPU *cpu)
         kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
 }
 
+static void kvm_cpu_xsave_init(void)
+{
+    static bool first = true;
+    KVMState *s = kvm_state;
+    int i;
+
+    if (!first) {
+        return;
+    }
+    first = false;
+
+    /* x87 and SSE states are in the legacy region of the XSAVE area. */
+    x86_ext_save_areas[XSTATE_FP_BIT].offset = 0;
+    x86_ext_save_areas[XSTATE_SSE_BIT].offset = 0;
+
+    for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) {
+        ExtSaveArea *esa = &x86_ext_save_areas[i];
+
+        if (esa->size) {
+            int sz = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EAX);
+            if (sz != 0) {
+                assert(esa->size == sz);
+                esa->offset = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EBX);
+            }
+        }
+    }
+}
+
 static void kvm_cpu_instance_init(CPUState *cs)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -141,6 +169,8 @@ static void kvm_cpu_instance_init(CPUState *cs)
     if (cpu->max_features) {
         kvm_cpu_max_instance_init(cpu);
     }
+
+    kvm_cpu_xsave_init();
 }
 
 static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)

target/i386/kvm/kvm.c

@@ -1891,6 +1891,13 @@ int kvm_arch_init_vcpu(CPUState *cs)
         env->xsave_buf_len = sizeof(struct kvm_xsave);
         env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
         memset(env->xsave_buf, 0, env->xsave_buf_len);
+
+        /*
+         * The allocated storage must be large enough for all of the
+         * possible XSAVE state components.
+         */
+        assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX)
+               <= env->xsave_buf_len);
     }
 
     max_nested_state_len = kvm_max_nested_state_length();

target/i386/tcg/tcg-cpu.c

@@ -80,6 +80,24 @@ static void tcg_cpu_class_init(CPUClass *cc)
     cc->init_accel_cpu = tcg_cpu_init_ops;
 }
 
+static void tcg_cpu_xsave_init(void)
+{
+#define XO(bit, field) \
+    x86_ext_save_areas[bit].offset = offsetof(X86XSaveArea, field);
+
+    XO(XSTATE_FP_BIT, legacy);
+    XO(XSTATE_SSE_BIT, legacy);
+    XO(XSTATE_YMM_BIT, avx_state);
+    XO(XSTATE_BNDREGS_BIT, bndreg_state);
+    XO(XSTATE_BNDCSR_BIT, bndcsr_state);
+    XO(XSTATE_OPMASK_BIT, opmask_state);
+    XO(XSTATE_ZMM_Hi256_BIT, zmm_hi256_state);
+    XO(XSTATE_Hi16_ZMM_BIT, hi16_zmm_state);
+    XO(XSTATE_PKRU_BIT, pkru_state);
+
+#undef XO
+}
+
 /*
  * TCG-specific defaults that override all CPU models when using TCG
  */
@@ -93,6 +111,8 @@ static void tcg_cpu_instance_init(CPUState *cs)
     X86CPU *cpu = X86_CPU(cs);
     /* Special cases not set in the X86CPUDefinition structs: */
     x86_cpu_apply_props(cpu, tcg_default_props);
+
+    tcg_cpu_xsave_init();
 }
 
 static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
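
Worth noting: on processors that use the standard (non-compacted) XSAVE format, the CPUID-derived offsets and the struct-derived offsets agree by construction, since the 512-byte legacy region plus the 64-byte XSAVE header place the first extended component (AVX, component 2) at byte 576, exactly where X86XSaveArea puts avx_state. A hypothetical stand-alone check, not part of this commit:

/* Hypothetical check: on hardware using the standard XSAVE layout,
 * CPUID reports AVX state right after the 512-byte legacy region and
 * the 64-byte header, matching offsetof(X86XSaveArea, avx_state). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t eax, ebx, ecx, edx;

    /* Leaf 0xd, sub-leaf 2 (AVX): EBX is the component's offset. */
    __asm__ volatile("cpuid"
                     : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                     : "a"(0xd), "c"(2));
    (void)eax; (void)ecx; (void)edx;

    printf("AVX offset: %u (512 + 64 = 576 in the standard format)\n", ebx);
    return 0;
}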