2012-04-29 20:20:34 +02:00
|
|
|
/*
|
|
|
|
* x86 misc helpers
|
|
|
|
*
|
|
|
|
* Copyright (c) 2003 Fabrice Bellard
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
2020-10-23 14:28:01 +02:00
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
2012-04-29 20:20:34 +02:00
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
2016-01-26 19:17:03 +01:00
|
|
|
#include "qemu/osdep.h"
|
2017-03-30 17:04:09 +02:00
|
|
|
#include "qemu/main-loop.h"
|
2012-04-29 20:20:34 +02:00
|
|
|
#include "cpu.h"
|
2014-04-08 07:31:41 +02:00
|
|
|
#include "exec/helper-proto.h"
|
2016-03-15 13:18:37 +01:00
|
|
|
#include "exec/exec-all.h"
|
2014-03-28 19:42:10 +01:00
|
|
|
#include "exec/cpu_ldst.h"
|
2015-04-08 14:45:53 +02:00
|
|
|
#include "exec/address-spaces.h"
|
2020-12-12 16:55:14 +01:00
|
|
|
#include "helper-tcg.h"
|
2012-04-29 22:35:48 +02:00
|
|
|
|
2020-12-12 16:55:15 +01:00
|
|
|
/*
|
|
|
|
* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
|
|
|
|
* after generating a call to a helper that uses this.
|
|
|
|
*/
|
|
|
|
void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask)
|
|
|
|
{
|
|
|
|
CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
|
|
|
|
CC_OP = CC_OP_EFLAGS;
|
|
|
|
env->df = 1 - (2 * ((eflags >> 10) & 1));
|
|
|
|
env->eflags = (env->eflags & ~update_mask) |
|
|
|
|
(eflags & update_mask) | 0x2;
|
|
|
|
}
|
|
|
|
|
2015-04-08 14:45:53 +02:00
|
|
|
/* OUT imm8/DX, AL: write one byte to an I/O port. */
void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
#ifdef CONFIG_USER_ONLY
    /* There is no I/O port emulation in user mode; just log the access. */
    fprintf(stderr, "outb: port=0x%04x, data=%02x\n", port, data);
#else
    address_space_stb(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
#endif
}
|
|
|
|
|
2015-04-08 14:45:53 +02:00
|
|
|
/* IN AL, imm8/DX: read one byte from an I/O port. */
target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
#ifdef CONFIG_USER_ONLY
    /* No I/O port emulation in user mode; log and read back zero. */
    fprintf(stderr, "inb: port=0x%04x\n", port);
    return 0;
#else
    return address_space_ldub(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
#endif
}
|
|
|
|
|
2015-04-08 14:45:53 +02:00
|
|
|
/* OUT imm8/DX, AX: write a 16-bit word to an I/O port. */
void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
#ifdef CONFIG_USER_ONLY
    /* There is no I/O port emulation in user mode; just log the access. */
    fprintf(stderr, "outw: port=0x%04x, data=%04x\n", port, data);
#else
    address_space_stw(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
#endif
}
|
|
|
|
|
2015-04-08 14:45:53 +02:00
|
|
|
/* IN AX, imm8/DX: read a 16-bit word from an I/O port. */
target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
#ifdef CONFIG_USER_ONLY
    /* No I/O port emulation in user mode; log and read back zero. */
    fprintf(stderr, "inw: port=0x%04x\n", port);
    return 0;
#else
    return address_space_lduw(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
#endif
}
|
|
|
|
|
2015-04-08 14:45:53 +02:00
|
|
|
/* OUT imm8/DX, EAX: write a 32-bit long to an I/O port. */
void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
#ifdef CONFIG_USER_ONLY
    /* There is no I/O port emulation in user mode; just log the access. */
    fprintf(stderr, "outl: port=0x%04x, data=%08x\n", port, data);
#else
    address_space_stl(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
#endif
}
|
|
|
|
|
2015-04-08 14:45:53 +02:00
|
|
|
/* IN EAX, imm8/DX: read a 32-bit long from an I/O port. */
target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
#ifdef CONFIG_USER_ONLY
    /* No I/O port emulation in user mode; log and read back zero. */
    fprintf(stderr, "inl: port=0x%04x\n", port);
    return 0;
#else
    return address_space_ldl(&address_space_io, port,
                             cpu_get_mem_attrs(env), NULL);
#endif
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/* INTO: raise #OF (vector 4) if the overflow flag is set. */
void helper_into(CPUX86State *env, int next_eip_addend)
{
    int eflags = cpu_cc_compute_all(env, CC_OP);

    if (eflags & CC_O) {
        raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/* CPUID: query leaf EAX / subleaf ECX and store the result in EAX..EDX. */
void helper_cpuid(CPUX86State *env)
{
    uint32_t eax, ebx, ecx, edx;

    cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0, GETPC());

    cpu_x86_cpuid(env, (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
                  &eax, &ebx, &ecx, &edx);

    env->regs[R_EAX] = eax;
    env->regs[R_EBX] = ebx;
    env->regs[R_ECX] = ecx;
    env->regs[R_EDX] = edx;
}
|
|
|
|
|
|
|
|
#if defined(CONFIG_USER_ONLY)
|
2012-04-29 20:42:47 +02:00
|
|
|
/* User mode: control registers are not emulated, reads return zero. */
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    return 0;
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/* User mode: control registers are not emulated, writes are ignored. */
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
}
|
|
|
|
#else
|
2012-04-29 20:42:47 +02:00
|
|
|
/* MOV reg, CRn: read a control register (system emulation). */
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0, GETPC());

    switch (reg) {
    case 8:
        /*
         * CR8 reads come from the virtual TPR while the SVM V_INTR
         * shadow is active, otherwise from the APIC TPR.
         */
        if (env->hflags2 & HF2_VINTR_MASK) {
            return env->v_tpr;
        }
        return cpu_get_apic_tpr(env_archcpu(env)->apic_state);
    default:
        return env->cr[reg];
    }
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/* MOV CRn, reg: write a control register (system emulation). */
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0, GETPC());

    switch (reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        /* Toggling CR4.LA57 while in 64-bit code raises #GP. */
        if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
            (env->hflags & HF_CS64_MASK)) {
            raise_exception_ra(env, EXCP0D_GPF, GETPC());
        }
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        /* Forward CR8 writes to the APIC TPR unless V_INTR is active. */
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            qemu_mutex_lock_iothread();
            cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
            qemu_mutex_unlock_iothread();
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
|
|
|
|
#endif
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/*
 * LMSW: load the machine status word (low 16 bits of CR0).
 * Only the 4 lower bits of CR0 are modified, and PE cannot be
 * cleared once it has been set.
 */
void helper_lmsw(CPUX86State *env, target_ulong t0)
{
    target_ulong new_cr0 = (env->cr[0] & ~0xe) | (t0 & 0xf);

    helper_write_crN(env, 0, new_cr0);
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/* INVLPG: invalidate the TLB entry covering the given linear address. */
void helper_invlpg(CPUX86State *env, target_ulong addr)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0, GETPC());
    tlb_flush_page(env_cpu(env), addr);
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/* RDTSC: read the time-stamp counter into EDX:EAX. */
void helper_rdtsc(CPUX86State *env)
{
    uint64_t val;

    /* CR4.TSD restricts RDTSC to CPL 0. */
    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0, GETPC());

    val = cpu_get_tsc(env) + env->tsc_offset;
    env->regs[R_EAX] = (uint32_t)val;
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/* RDTSCP: RDTSC plus the TSC_AUX MSR in ECX. */
void helper_rdtscp(CPUX86State *env)
{
    helper_rdtsc(env);
    env->regs[R_ECX] = (uint32_t)env->tsc_aux;
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/* RDPMC: read performance-monitoring counter (unimplemented, raises #UD). */
void helper_rdpmc(CPUX86State *env)
{
    /* Without CR4.PCE, RDPMC is restricted to CPL 0. */
    if (((env->cr[4] & CR4_PCE_MASK) == 0 ) &&
        ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0, GETPC());

    /* currently unimplemented */
    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
    raise_exception_err(env, EXCP06_ILLOP, 0);
}
|
|
|
|
|
|
|
|
#if defined(CONFIG_USER_ONLY)
|
2012-04-29 20:42:47 +02:00
|
|
|
/* User mode: MSRs are not emulated, WRMSR is a no-op. */
void helper_wrmsr(CPUX86State *env)
{
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/* User mode: MSRs are not emulated, RDMSR is a no-op. */
void helper_rdmsr(CPUX86State *env)
{
}
|
|
|
|
#else
|
2012-04-29 20:42:47 +02:00
|
|
|
/*
 * WRMSR: write EDX:EAX to the MSR selected by ECX.
 *
 * Fixes relative to the previous revision:
 *  - the MCE bank bound used "4 * env->mcg_cap & 0xff", which parses as
 *    "(4 * mcg_cap) & 0xff"; the bank count lives in the low byte of
 *    MCG_CAP, so the intended bound is "4 * (mcg_cap & 0xff)";
 *  - the CPUID_EXT2_FFXSR test in the EFER update mask was duplicated.
 */
void helper_wrmsr(CPUX86State *env)
{
    uint64_t val;
    CPUState *cs = env_cpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());

    val = ((uint32_t)env->regs[R_EAX]) |
        ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env_archcpu(env)->apic_state, val);
        break;
    case MSR_EFER:
        {
            /* Only bits backed by an advertised CPUID feature are writable. */
            uint64_t update_mask = 0;

            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_IA32_PKRS:
        /* The upper 32 bits of PKRS are reserved; writing them is #GP. */
        if (val & 0xFFFFFFFF00000000ull) {
            goto error;
        }
        env->pkrs = val;
        tlb_flush(cs);
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        /* Variable-range MTRRs come in base/mask pairs, hence the "/ 2". */
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        /* MCG_CTL is present only with MCG_CTL_P, and accepts all-0/all-1. */
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    case MSR_IA32_BNDCFGS:
        /* FIXME: #GP if reserved bits are set. */
        /* FIXME: Extend highest implemented bit of linear address. */
        env->msr_bndcfgs = val;
        cpu_sync_bndcs_hflags(env);
        break;
    default:
        /*
         * MCE bank MSRs: 4 MSRs per bank starting at MSR_MC0_CTL; the
         * bank count is the low byte of MCG_CAP.
         */
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            4 * (env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
    return;
error:
    raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/*
 * RDMSR: read the MSR selected by ECX into EDX:EAX.
 *
 * Fix relative to the previous revision: the MCE bank bound used
 * "4 * env->mcg_cap & 0xff", which parses as "(4 * mcg_cap) & 0xff";
 * the bank count lives in the low byte of MCG_CAP, so the intended
 * bound is "4 * (mcg_cap & 0xff)".
 */
void helper_rdmsr(CPUX86State *env)
{
    X86CPU *x86_cpu = env_archcpu(env);
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env_archcpu(env)->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_IA32_PKRS:
        val = env->pkrs;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_SMI_COUNT:
        val = env->msr_smi_count;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        /* Variable-range MTRRs come in base/mask pairs, hence the "/ 2". */
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_IA32_BNDCFGS:
        val = env->msr_bndcfgs;
        break;
    case MSR_IA32_UCODE_REV:
        val = x86_cpu->ucode_rev;
        break;
    default:
        /*
         * MCE bank MSRs: 4 MSRs per bank starting at MSR_MC0_CTL; the
         * bank count is the low byte of MCG_CAP.
         */
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            4 * (env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    env->regs[R_EAX] = (uint32_t)val;
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}
|
|
|
|
#endif
|
|
|
|
|
target-i386: yield to another VCPU on PAUSE
After commit b1bbfe7 (aio / timers: On timer modification, qemu_notify
or aio_notify, 2013-08-21) FreeBSD guests report a huge slowdown.
The problem shows up as soon as FreeBSD turns out its periodic (~1 ms)
tick, but the timers are only the trigger for a pre-existing problem.
Before the offending patch, setting a timer did a timer_settime system call.
After, setting the timer exits the event loop (which uses poll) and
reenters it with a new deadline. This does not cause any slowdown; the
difference is between one system call (timer_settime and a signal
delivery (SIGALRM) before the patch, and two system calls afterwards
(write to a pipe or eventfd + calling poll again when re-entering the
event loop).
Unfortunately, the exit/enter causes the main loop to grab the iothread
lock, which in turns kicks the VCPU thread out of execution. This
causes TCG to execute the next VCPU in its round-robin scheduling of
VCPUS. When the second VCPU is mostly unused, FreeBSD runs a "pause"
instruction in its idle loop which only burns cycles without any
progress. As soon as the timer tick expires, the first VCPU runs
the interrupt handler but very soon it sets it again---and QEMU
then goes back doing nothing in the second VCPU.
The fix is to make the pause instruction do "cpu_loop_exit".
Cc: Richard Henderson <rth@twiddle.net>
Reported-by: Luigi Rizzo <rizzo@iet.unipi.it>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Message-id: 1384948442-24217-1-git-send-email-pbonzini@redhat.com
Signed-off-by: Anthony Liguori <aliguori@amazon.com>
2013-11-20 12:54:02 +01:00
|
|
|
/* PAUSE: leave the execution loop so another VCPU gets a chance to run. */
static void do_pause(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* Just let another CPU run. */
    cs->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit(cs);
}
|
|
|
|
|
2013-01-17 18:51:17 +01:00
|
|
|
/* Halt the VCPU until the next interrupt. */
static void do_hlt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/* HLT: advance EIP past the instruction, then halt the VCPU. */
void helper_hlt(CPUX86State *env, int next_eip_addend)
{
    X86CPU *cpu = env_archcpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC());
    env->eip += next_eip_addend;

    do_hlt(cpu);
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/* MONITOR: #GP unless ECX (extensions) is zero; the address is not kept. */
void helper_monitor(CPUX86State *env, target_ulong ptr)
{
    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/* MWAIT: approximated as HLT on the last runnable VCPU, PAUSE otherwise. */
void helper_mwait(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);

    /* #GP unless ECX (extensions) is zero. */
    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
    env->eip += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
        do_pause(cpu);
    } else {
        do_hlt(cpu);
    }
}
|
|
|
|
|
target-i386: yield to another VCPU on PAUSE
After commit b1bbfe7 (aio / timers: On timer modification, qemu_notify
or aio_notify, 2013-08-21) FreeBSD guests report a huge slowdown.
The problem shows up as soon as FreeBSD turns out its periodic (~1 ms)
tick, but the timers are only the trigger for a pre-existing problem.
Before the offending patch, setting a timer did a timer_settime system call.
After, setting the timer exits the event loop (which uses poll) and
reenters it with a new deadline. This does not cause any slowdown; the
difference is between one system call (timer_settime and a signal
delivery (SIGALRM) before the patch, and two system calls afterwards
(write to a pipe or eventfd + calling poll again when re-entering the
event loop).
Unfortunately, the exit/enter causes the main loop to grab the iothread
lock, which in turns kicks the VCPU thread out of execution. This
causes TCG to execute the next VCPU in its round-robin scheduling of
VCPUS. When the second VCPU is mostly unused, FreeBSD runs a "pause"
instruction in its idle loop which only burns cycles without any
progress. As soon as the timer tick expires, the first VCPU runs
the interrupt handler but very soon it sets it again---and QEMU
then goes back doing nothing in the second VCPU.
The fix is to make the pause instruction do "cpu_loop_exit".
Cc: Richard Henderson <rth@twiddle.net>
Reported-by: Luigi Rizzo <rizzo@iet.unipi.it>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Message-id: 1384948442-24217-1-git-send-email-pbonzini@redhat.com
Signed-off-by: Anthony Liguori <aliguori@amazon.com>
2013-11-20 12:54:02 +01:00
|
|
|
/*
 * PAUSE instruction helper.
 *
 * Checks the SVM PAUSE intercept first, then advances EIP past the
 * PAUSE instruction before yielding, so that when do_pause() exits the
 * CPU loop execution resumes at the following instruction.
 */
void helper_pause(CPUX86State *env, int next_eip_addend)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0, GETPC());

    /* Point EIP at the next instruction before leaving the CPU loop. */
    env->eip += next_eip_addend;
    do_pause(env_archcpu(env));
}
|
|
|
|
|
2012-04-29 20:42:47 +02:00
|
|
|
/*
 * Raise a debug exception: record EXCP_DEBUG on the CPU state and
 * longjmp back out of the generated code via cpu_loop_exit().
 */
void helper_debug(CPUX86State *env)
{
    CPUState *cpu = env_cpu(env);

    cpu->exception_index = EXCP_DEBUG;
    cpu_loop_exit(cpu);
}
|
2016-02-09 14:14:28 +01:00
|
|
|
|
|
|
|
/*
 * RDPKRU: read the protection-key rights register.
 *
 * Raises #UD when CR4.PKE is clear (check performed first, matching the
 * architectural ordering), and #GP(0) when ECX is non-zero.  Otherwise
 * returns the current PKRU value.
 */
uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx)
{
    /* RDPKRU is only valid when protection keys are enabled.  */
    if (!(env->cr[4] & CR4_PKE_MASK)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    /* ECX must be zero.  */
    if (ecx != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    return env->pkru;
}
|
|
|
|
|
|
|
|
/*
 * WRPKRU: write the protection-key rights register.
 *
 * Raises #UD when CR4.PKE is clear, and #GP(0) when ECX is non-zero or
 * any of the upper 32 bits of the EDX:EAX value are set.  On success the
 * new PKRU value is installed and the TLB is flushed, since PKRU affects
 * page-permission checks cached in the TLB.
 */
void helper_wrpkru(CPUX86State *env, uint32_t ecx, uint64_t val)
{
    /* WRPKRU is only valid when protection keys are enabled.  */
    if (!(env->cr[4] & CR4_PKE_MASK)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    /* ECX must be zero and EDX (upper half of val) must be zero.  */
    if (ecx != 0 || (val >> 32) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    env->pkru = val;
    /* Cached page-permission info depends on PKRU; drop it.  */
    tlb_flush(env_cpu(env));
}
|