/*
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "cpu.h"
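
/*
 * Serialize the FPU/vector state of @cpu into @buf, an XSAVE-format
 * buffer of @buflen bytes whose component layout is described by
 * x86_ext_save_areas[].
 */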
void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
{
    CPUX86State *env = &cpu->env;
    const ExtSaveArea *e, *f;
    int i;

    X86LegacyXSaveArea *legacy;
    X86XSaveHeader *header;
    uint16_t cwd, swd, twd;

    memset(buf, 0, buflen);

    e = &x86_ext_save_areas[XSTATE_FP_BIT];

    legacy = buf + e->offset;
    header = buf + e->offset + sizeof(*legacy);
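
    /*
     * Rebuild the FXSAVE-style words: FSW bits 11..13 hold the x87
     * top-of-stack pointer, and the abridged tag word keeps one bit per
     * register (1 = valid, 0 = empty), the inverse of env->fptags[].
     */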
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    legacy->fcw = cwd;
    legacy->fsw = swd;
    legacy->ftw = twd;
    legacy->fpop = env->fpop;
    legacy->fpip = env->fpip;
    legacy->fpdp = env->fpdp;
    memcpy(&legacy->fpregs, env->fpregs,
           sizeof(env->fpregs));
    legacy->mxcsr = env->mxcsr;
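
    /* The low 128 bits of each vector register live in the legacy area. */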
    for (i = 0; i < CPU_NB_REGS; i++) {
        uint8_t *xmm = legacy->xmm_regs[i];

        stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
        stq_p(xmm + 8, env->xmm_regs[i].ZMM_Q(1));
    }
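
    /* The XSAVE header's XSTATE_BV records which components hold valid state. */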
    header->xstate_bv = env->xstate_bv;
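
    /* AVX: the YMM_Hi128 component holds bits 255:128 of YMM0..15. */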
    e = &x86_ext_save_areas[XSTATE_YMM_BIT];
    if (e->size && e->offset) {
        XSaveAVX *avx;

        avx = buf + e->offset;

        for (i = 0; i < CPU_NB_REGS; i++) {
            uint8_t *ymmh = avx->ymmh[i];

            stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
            stq_p(ymmh + 8, env->xmm_regs[i].ZMM_Q(3));
        }
    }
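
    /* MPX: bound registers BND0..3 plus the BNDCSR configuration/status. */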
    e = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
    if (e->size && e->offset) {
        XSaveBNDREG *bndreg;
        XSaveBNDCSR *bndcsr;

        f = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
        assert(f->size);
        assert(f->offset);

        bndreg = buf + e->offset;
        bndcsr = buf + f->offset;

        memcpy(&bndreg->bnd_regs, env->bnd_regs,
               sizeof(env->bnd_regs));
        bndcsr->bndcsr = env->bndcs_regs;
    }
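
    /*
     * AVX-512: opmask registers k0..7, the upper 256 bits of ZMM0..15
     * and, on 64-bit targets, the full ZMM16..31 registers.
     */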
    e = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
    if (e->size && e->offset) {
        XSaveOpmask *opmask;
        XSaveZMM_Hi256 *zmm_hi256;
#ifdef TARGET_X86_64
        XSaveHi16_ZMM *hi16_zmm;
#endif

        f = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
        assert(f->size);
        assert(f->offset);

        opmask = buf + e->offset;
        zmm_hi256 = buf + f->offset;

        memcpy(&opmask->opmask_regs, env->opmask_regs,
               sizeof(env->opmask_regs));

        for (i = 0; i < CPU_NB_REGS; i++) {
            uint8_t *zmmh = zmm_hi256->zmm_hi256[i];

            stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
            stq_p(zmmh + 8, env->xmm_regs[i].ZMM_Q(5));
            stq_p(zmmh + 16, env->xmm_regs[i].ZMM_Q(6));
            stq_p(zmmh + 24, env->xmm_regs[i].ZMM_Q(7));
        }

#ifdef TARGET_X86_64
        f = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
        assert(f->size);
        assert(f->offset);

        hi16_zmm = buf + f->offset;

        memcpy(&hi16_zmm->hi16_zmm, &env->xmm_regs[16],
               16 * sizeof(env->xmm_regs[16]));
#endif
    }
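
    /* PKRU and the AMX tile state are only present on 64-bit targets. */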
#ifdef TARGET_X86_64
    e = &x86_ext_save_areas[XSTATE_PKRU_BIT];
    if (e->size && e->offset) {
        XSavePKRU *pkru = buf + e->offset;

        memcpy(pkru, &env->pkru, sizeof(env->pkru));
    }

    e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT];
    if (e->size && e->offset) {
        XSaveXTILECFG *tilecfg = buf + e->offset;

        memcpy(tilecfg, &env->xtilecfg, sizeof(env->xtilecfg));
    }
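
    /*
     * XTILEDATA is large and optional; copy it only when the buffer is
     * big enough to actually contain that component.
     */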
    e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT];
    if (e->size && e->offset && buflen >= e->size + e->offset) {
        XSaveXTILEDATA *tiledata = buf + e->offset;

        memcpy(tiledata, &env->xtiledata, sizeof(env->xtiledata));
    }
#endif
}
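
/*
 * Load the FPU/vector state of @cpu from @buf, an XSAVE-format buffer of
 * @buflen bytes; the inverse of x86_cpu_xsave_all_areas().
 */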
void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
{
    CPUX86State *env = &cpu->env;
    const ExtSaveArea *e, *f, *g;
    int i;

    const X86LegacyXSaveArea *legacy;
    const X86XSaveHeader *header;
    uint16_t cwd, swd, twd;

    e = &x86_ext_save_areas[XSTATE_FP_BIT];

    legacy = buf + e->offset;
    header = buf + e->offset + sizeof(*legacy);

    cwd = legacy->fcw;
    swd = legacy->fsw;
    twd = legacy->ftw;
    env->fpop = legacy->fpop;
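    /* Unpack TOP from FSW and expand the abridged tag word into fptags[]. */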
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    env->fpip = legacy->fpip;
    env->fpdp = legacy->fpdp;
    env->mxcsr = legacy->mxcsr;
    memcpy(env->fpregs, &legacy->fpregs,
           sizeof(env->fpregs));

    for (i = 0; i < CPU_NB_REGS; i++) {
        const uint8_t *xmm = legacy->xmm_regs[i];

        env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm + 8);
    }

    env->xstate_bv = header->xstate_bv;

    e = &x86_ext_save_areas[XSTATE_YMM_BIT];
    if (e->size && e->offset) {
        const XSaveAVX *avx;

        avx = buf + e->offset;
        for (i = 0; i < CPU_NB_REGS; i++) {
            const uint8_t *ymmh = avx->ymmh[i];

            env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
            env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh + 8);
        }
    }

    e = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
    if (e->size && e->offset) {
        const XSaveBNDREG *bndreg;
        const XSaveBNDCSR *bndcsr;

        f = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
        assert(f->size);
        assert(f->offset);

        bndreg = buf + e->offset;
        bndcsr = buf + f->offset;

        memcpy(env->bnd_regs, &bndreg->bnd_regs,
               sizeof(env->bnd_regs));
        env->bndcs_regs = bndcsr->bndcsr;
    }

    e = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
    if (e->size && e->offset) {
        const XSaveOpmask *opmask;
        const XSaveZMM_Hi256 *zmm_hi256;
#ifdef TARGET_X86_64
        const XSaveHi16_ZMM *hi16_zmm;
#endif

        f = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
        assert(f->size);
        assert(f->offset);

        g = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
        assert(g->size);
        assert(g->offset);

        opmask = buf + e->offset;
        zmm_hi256 = buf + f->offset;
#ifdef TARGET_X86_64
        hi16_zmm = buf + g->offset;
#endif

        memcpy(env->opmask_regs, &opmask->opmask_regs,
               sizeof(env->opmask_regs));

        for (i = 0; i < CPU_NB_REGS; i++) {
            const uint8_t *zmmh = zmm_hi256->zmm_hi256[i];

            env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
            env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh + 8);
            env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh + 16);
            env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh + 24);
        }

#ifdef TARGET_X86_64
        memcpy(&env->xmm_regs[16], &hi16_zmm->hi16_zmm,
               16 * sizeof(env->xmm_regs[16]));
#endif
    }

#ifdef TARGET_X86_64
    e = &x86_ext_save_areas[XSTATE_PKRU_BIT];
    if (e->size && e->offset) {
        const XSavePKRU *pkru;

        pkru = buf + e->offset;
        memcpy(&env->pkru, pkru, sizeof(env->pkru));
    }

    e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT];
    if (e->size && e->offset) {
        const XSaveXTILECFG *tilecfg = buf + e->offset;

        memcpy(&env->xtilecfg, tilecfg, sizeof(env->xtilecfg));
    }
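
    /* As on the save side, only restore XTILEDATA when the buffer holds it. */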
    e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT];
    if (e->size && e->offset && buflen >= e->size + e->offset) {
        const XSaveXTILEDATA *tiledata = buf + e->offset;

        memcpy(&env->xtiledata, tiledata, sizeof(env->xtiledata));
    }
#endif
}