Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging

* scsi-disk: support setting CD-ROM block size via device options
* target/i386: Implement MSR_CORE_THREAD_COUNT MSR
* target/i386: notify VM exit support
* target/i386: PC-relative translation block support
* target/i386: support for XSAVE state in signal frames (linux-user)

# -----BEGIN PGP SIGNATURE-----
#
# iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmNFKP4UHHBib256aW5p
# QHJlZGhhdC5jb20ACgkQv/vSX3jHroNJnwgAgCcOOxmY4Qem0Gd1L+SJKpEtGMOd
# 4LY7443vT36pMpvqFNSfp5GBjDT1MgTD8BIY28miLMq959LT89LyM9g/H7IKOT82
# uyCsW3jW+6F19EZVkNvzTt+3USn/kaHn50zA4Ss9kvdNZr31b2LYqtglVCznfZwH
# oI1rDhvsXubq8oWvwkqH7IwduK8mw+EB5Yz7AjYQ6eiYjenTrQBObpwQNbb4rlUf
# oRm8dk/YJ2gfI2HQkoznGEbgpngy2tIU1vHNEpIk5NpwXxrulOyui3+sWaG4pH8f
# oAOrSDC23M5A6jBJJAzDJ1q6M677U/kwJypyGQ7IyvyhECXE3tR+lHX1eA==
# =tqeJ
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 11 Oct 2022 04:27:42 EDT
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* tag 'for-upstream' of https://gitlab.com/bonzini/qemu: (37 commits)
  linux-user: i386/signal: support XSAVE/XRSTOR for signal frame fpstate
  linux-user: i386/signal: support FXSAVE fpstate on 32-bit emulation
  linux-user: i386/signal: move fpstate at the end of the 32-bit frames
  KVM: x86: Implement MSR_CORE_THREAD_COUNT MSR
  i386: kvm: Add support for MSR filtering
  x86: Implement MSR_CORE_THREAD_COUNT MSR
  target/i386: Enable TARGET_TB_PCREL
  target/i386: Inline gen_jmp_im
  target/i386: Add cpu_eip
  target/i386: Create eip_cur_tl
  target/i386: Merge gen_jmp_tb and gen_goto_tb into gen_jmp_rel
  target/i386: Remove MemOp argument to gen_op_j*_ecx
  target/i386: Use gen_jmp_rel for DISAS_TOO_MANY
  target/i386: Use gen_jmp_rel for gen_jcc
  target/i386: Use gen_jmp_rel for loop, repz, jecxz insns
  target/i386: Create gen_jmp_rel
  target/i386: Use DISAS_TOO_MANY to exit after gen_io_start
  target/i386: Create eip_next_*
  target/i386: Truncate values for lcall_real to i32
  target/i386: Introduce DISAS_JUMP
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Stefan Hajnoczi 2022-10-13 13:55:03 -04:00
commit bb76f8e275
24 changed files with 1102 additions and 563 deletions

View File

@@ -77,86 +77,12 @@
do { } while (0)
#endif
#define KVM_MSI_HASHTAB_SIZE 256
struct KVMParkedVcpu {
unsigned long vcpu_id;
int kvm_fd;
QLIST_ENTRY(KVMParkedVcpu) node;
};
enum KVMDirtyRingReaperState {
KVM_DIRTY_RING_REAPER_NONE = 0,
/* The reaper is sleeping */
KVM_DIRTY_RING_REAPER_WAIT,
/* The reaper is reaping for dirty pages */
KVM_DIRTY_RING_REAPER_REAPING,
};
/*
* KVM reaper instance, responsible for collecting the KVM dirty bits
* via the dirty ring.
*/
struct KVMDirtyRingReaper {
/* The reaper thread */
QemuThread reaper_thr;
volatile uint64_t reaper_iteration; /* iteration number of reaper thr */
volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */
};
struct KVMState
{
AccelState parent_obj;
int nr_slots;
int fd;
int vmfd;
int coalesced_mmio;
int coalesced_pio;
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
bool coalesced_flush_in_progress;
int vcpu_events;
int robust_singlestep;
int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
#endif
int max_nested_state_len;
int many_ioeventfds;
int intx_set_mask;
int kvm_shadow_mem;
bool kernel_irqchip_allowed;
bool kernel_irqchip_required;
OnOffAuto kernel_irqchip_split;
bool sync_mmu;
uint64_t manual_dirty_log_protect;
/* The man page (and posix) say ioctl numbers are signed int, but
* they're not. Linux, glibc and *BSD all treat ioctl numbers as
* unsigned, and treating them as signed here can break things */
unsigned irq_set_ioctl;
unsigned int sigmask_len;
GHashTable *gsimap;
#ifdef KVM_CAP_IRQ_ROUTING
struct kvm_irq_routing *irq_routes;
int nr_allocated_irq_routes;
unsigned long *used_gsi_bitmap;
unsigned int gsi_count;
QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
#endif
KVMMemoryListener memory_listener;
QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
/* For "info mtree -f" to tell if an MR is registered in KVM */
int nr_as;
struct KVMAs {
KVMMemoryListener *ml;
AddressSpace *as;
} *as;
uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */
uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */
struct KVMDirtyRingReaper reaper;
};
KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
@@ -3692,6 +3618,8 @@ static void kvm_accel_instance_init(Object *obj)
s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
/* KVM dirty ring is by default off */
s->kvm_dirty_ring_size = 0;
s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
s->notify_window = 0;
}
/**
@@ -3731,6 +3659,8 @@ static void kvm_accel_class_init(ObjectClass *oc, void *data)
NULL, NULL);
object_class_property_set_description(oc, "dirty-ring-size",
"Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
kvm_arch_accel_class_init(oc);
}
static const TypeInfo kvm_accel_type = {

View File

@@ -2544,6 +2544,7 @@ static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
AioContext *ctx;
int ret;
uint32_t blocksize = 2048;
if (!dev->conf.blk) {
/* Anonymous BlockBackend for an empty drive. As we put it into
@@ -2553,9 +2554,13 @@ static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
assert(ret == 0);
}
if (dev->conf.physical_block_size != 0) {
blocksize = dev->conf.physical_block_size;
}
ctx = blk_get_aio_context(dev->conf.blk);
aio_context_acquire(ctx);
s->qdev.blocksize = 2048;
s->qdev.blocksize = blocksize;
s->qdev.type = TYPE_ROM;
s->features |= 1 << SCSI_DISK_F_REMOVABLE;
if (!s->product) {
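The hunk above makes scsi_cd_realize() honor the device's physical_block_size option instead of hard-coding 2048. A hedged usage sketch (standard -drive/-device syntax assumed; disc.iso and cd0 are placeholder names):

    qemu-system-x86_64 -drive if=none,id=cd0,media=cdrom,file=disc.iso \
                       -device scsi-cd,drive=cd0,physical_block_size=512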

View File

@@ -349,6 +349,8 @@ bool kvm_device_supported(int vmfd, uint64_t type);
extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
void kvm_arch_accel_class_init(ObjectClass *oc);
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);

View File

@@ -10,6 +10,7 @@
#define QEMU_KVM_INT_H
#include "exec/memory.h"
#include "qapi/qapi-types-common.h"
#include "qemu/accel.h"
#include "sysemu/kvm.h"
@@ -36,6 +37,81 @@ typedef struct KVMMemoryListener {
int as_id;
} KVMMemoryListener;
#define KVM_MSI_HASHTAB_SIZE 256
enum KVMDirtyRingReaperState {
KVM_DIRTY_RING_REAPER_NONE = 0,
/* The reaper is sleeping */
KVM_DIRTY_RING_REAPER_WAIT,
/* The reaper is reaping for dirty pages */
KVM_DIRTY_RING_REAPER_REAPING,
};
/*
* KVM reaper instance, responsible for collecting the KVM dirty bits
* via the dirty ring.
*/
struct KVMDirtyRingReaper {
/* The reaper thread */
QemuThread reaper_thr;
volatile uint64_t reaper_iteration; /* iteration number of reaper thr */
volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */
};
struct KVMState
{
AccelState parent_obj;
int nr_slots;
int fd;
int vmfd;
int coalesced_mmio;
int coalesced_pio;
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
bool coalesced_flush_in_progress;
int vcpu_events;
int robust_singlestep;
int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
#endif
int max_nested_state_len;
int many_ioeventfds;
int intx_set_mask;
int kvm_shadow_mem;
bool kernel_irqchip_allowed;
bool kernel_irqchip_required;
OnOffAuto kernel_irqchip_split;
bool sync_mmu;
uint64_t manual_dirty_log_protect;
/* The man page (and posix) say ioctl numbers are signed int, but
* they're not. Linux, glibc and *BSD all treat ioctl numbers as
* unsigned, and treating them as signed here can break things */
unsigned irq_set_ioctl;
unsigned int sigmask_len;
GHashTable *gsimap;
#ifdef KVM_CAP_IRQ_ROUTING
struct kvm_irq_routing *irq_routes;
int nr_allocated_irq_routes;
unsigned long *used_gsi_bitmap;
unsigned int gsi_count;
QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
#endif
KVMMemoryListener memory_listener;
QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
/* For "info mtree -f" to tell if an MR is registered in KVM */
int nr_as;
struct KVMAs {
KVMMemoryListener *ml;
AddressSpace *as;
} *as;
uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */
uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */
struct KVMDirtyRingReaper reaper;
NotifyVmexitOption notify_vmexit;
uint32_t notify_window;
};
void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
AddressSpace *as, int as_id, const char *name);

View File

@@ -24,6 +24,10 @@
/* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */
#define TARGET_FP_XSTATE_MAGIC1 0x46505853U /* FPXS */
#define TARGET_FP_XSTATE_MAGIC2 0x46505845U /* FPXE */
#define TARGET_FP_XSTATE_MAGIC2_SIZE 4
struct target_fpreg {
uint16_t significand[4];
uint16_t exponent;
@@ -39,6 +43,35 @@ struct target_xmmreg {
uint32_t element[4];
};
struct target_fpx_sw_bytes {
uint32_t magic1;
uint32_t extended_size;
uint64_t xfeatures;
uint32_t xstate_size;
uint32_t reserved[7];
};
QEMU_BUILD_BUG_ON(sizeof(struct target_fpx_sw_bytes) != 12*4);
struct target_fpstate_fxsave {
/* FXSAVE format */
uint16_t cw;
uint16_t sw;
uint16_t twd;
uint16_t fop;
uint64_t rip;
uint64_t rdp;
uint32_t mxcsr;
uint32_t mxcsr_mask;
uint32_t st_space[32];
uint32_t xmm_space[64];
uint32_t hw_reserved[12];
struct target_fpx_sw_bytes sw_reserved;
uint8_t xfeatures[];
};
#define TARGET_FXSAVE_SIZE sizeof(struct target_fpstate_fxsave)
QEMU_BUILD_BUG_ON(TARGET_FXSAVE_SIZE != 512);
QEMU_BUILD_BUG_ON(offsetof(struct target_fpstate_fxsave, sw_reserved) != 464);
struct target_fpstate_32 {
/* Regular FPU environment */
uint32_t cw;
@@ -51,35 +84,21 @@ struct target_fpstate_32 {
struct target_fpreg st[8];
uint16_t status;
uint16_t magic; /* 0xffff = regular FPU data only */
/* FXSR FPU environment */
uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */
uint32_t mxcsr;
uint32_t reserved;
struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
struct target_xmmreg xmm[8];
uint32_t padding[56];
struct target_fpstate_fxsave fxsave;
};
struct target_fpstate_64 {
/* FXSAVE format */
uint16_t cw;
uint16_t sw;
uint16_t twd;
uint16_t fop;
uint64_t rip;
uint64_t rdp;
uint32_t mxcsr;
uint32_t mxcsr_mask;
uint32_t st_space[32];
uint32_t xmm_space[64];
uint32_t reserved[24];
};
/*
* For simplicity, setup_frame aligns struct target_fpstate_32 to
* 16 bytes, so ensure that the FXSAVE area is also aligned.
*/
QEMU_BUILD_BUG_ON(offsetof(struct target_fpstate_32, fxsave) & 15);
#ifndef TARGET_X86_64
# define target_fpstate target_fpstate_32
# define TARGET_FPSTATE_FXSAVE_OFFSET offsetof(struct target_fpstate_32, fxsave)
#else
# define target_fpstate target_fpstate_64
# define target_fpstate target_fpstate_fxsave
# define TARGET_FPSTATE_FXSAVE_OFFSET 0
#endif
struct target_sigcontext_32 {
@@ -163,10 +182,25 @@ struct sigframe {
abi_ulong pretcode;
int sig;
struct target_sigcontext sc;
struct target_fpstate fpstate;
/*
* The actual fpstate is placed after retcode[] below, to make
* room for the variable-sized xsave data. The older unused fpstate
* has to be kept to avoid changing the offset of extramask[], which
* is part of the ABI.
*/
struct target_fpstate fpstate_unused;
abi_ulong extramask[TARGET_NSIG_WORDS-1];
char retcode[8];
/*
* This field will be 16-byte aligned in memory. Applying QEMU_ALIGNED
* to it ensures that the base of the frame has an appropriate alignment
* too.
*/
struct target_fpstate fpstate QEMU_ALIGNED(8);
};
#define TARGET_SIGFRAME_FXSAVE_OFFSET ( \
offsetof(struct sigframe, fpstate) + TARGET_FPSTATE_FXSAVE_OFFSET)
struct rt_sigframe {
abi_ulong pretcode;
@@ -175,26 +209,62 @@ struct rt_sigframe {
abi_ulong puc;
struct target_siginfo info;
struct target_ucontext uc;
struct target_fpstate fpstate;
char retcode[8];
struct target_fpstate fpstate QEMU_ALIGNED(8);
};
#define TARGET_RT_SIGFRAME_FXSAVE_OFFSET ( \
offsetof(struct rt_sigframe, fpstate) + TARGET_FPSTATE_FXSAVE_OFFSET)
#else
struct rt_sigframe {
abi_ulong pretcode;
struct target_ucontext uc;
struct target_siginfo info;
struct target_fpstate fpstate;
struct target_fpstate fpstate QEMU_ALIGNED(16);
};
#define TARGET_RT_SIGFRAME_FXSAVE_OFFSET ( \
offsetof(struct rt_sigframe, fpstate) + TARGET_FPSTATE_FXSAVE_OFFSET)
#endif
/*
* Set up a signal frame.
*/
/* XXX: save x87 state */
static void xsave_sigcontext(CPUX86State *env, struct target_fpstate_fxsave *fxsave,
abi_ulong fxsave_addr)
{
if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
/* fxsave_addr must be 16 byte aligned for fxsave */
assert(!(fxsave_addr & 0xf));
cpu_x86_fxsave(env, fxsave_addr);
__put_user(0, &fxsave->sw_reserved.magic1);
} else {
uint32_t xstate_size = xsave_area_size(env->xcr0, false);
uint32_t xfeatures_size = xstate_size - TARGET_FXSAVE_SIZE;
/*
* extended_size is the offset from fpstate_addr to right after the end
* of the extended save states. On 32-bit that includes the legacy
* FSAVE area.
*/
uint32_t extended_size = TARGET_FPSTATE_FXSAVE_OFFSET
+ xstate_size + TARGET_FP_XSTATE_MAGIC2_SIZE;
/* fxsave_addr must be 64 byte aligned for xsave */
assert(!(fxsave_addr & 0x3f));
/* Zero the header, XSAVE *adds* features to an existing save state. */
memset(fxsave->xfeatures, 0, 64);
cpu_x86_xsave(env, fxsave_addr);
__put_user(TARGET_FP_XSTATE_MAGIC1, &fxsave->sw_reserved.magic1);
__put_user(extended_size, &fxsave->sw_reserved.extended_size);
__put_user(env->xcr0, &fxsave->sw_reserved.xfeatures);
__put_user(xstate_size, &fxsave->sw_reserved.xstate_size);
__put_user(TARGET_FP_XSTATE_MAGIC2, (uint32_t *) &fxsave->xfeatures[xfeatures_size]);
}
}
static void setup_sigcontext(struct target_sigcontext *sc,
struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
abi_ulong fpstate_addr)
@@ -226,13 +296,14 @@ static void setup_sigcontext(struct target_sigcontext *sc,
cpu_x86_fsave(env, fpstate_addr, 1);
fpstate->status = fpstate->sw;
magic = 0xffff;
if (!(env->features[FEAT_1_EDX] & CPUID_FXSR)) {
magic = 0xffff;
} else {
xsave_sigcontext(env, &fpstate->fxsave,
fpstate_addr + TARGET_FPSTATE_FXSAVE_OFFSET);
magic = 0;
}
__put_user(magic, &fpstate->magic);
__put_user(fpstate_addr, &sc->fpstate);
/* non-iBCS2 extensions.. */
__put_user(mask, &sc->oldmask);
__put_user(env->cr[2], &sc->cr2);
#else
__put_user(env->regs[R_EDI], &sc->rdi);
__put_user(env->regs[R_ESI], &sc->rsi);
@@ -262,15 +333,14 @@ static void setup_sigcontext(struct target_sigcontext *sc,
__put_user((uint16_t)0, &sc->fs);
__put_user(env->segs[R_SS].selector, &sc->ss);
xsave_sigcontext(env, fpstate, fpstate_addr);
#endif
__put_user(fpstate_addr, &sc->fpstate);
/* non-iBCS2 extensions.. */
__put_user(mask, &sc->oldmask);
__put_user(env->cr[2], &sc->cr2);
/* fpstate_addr must be 16 byte aligned for fxsave */
assert(!(fpstate_addr & 0xf));
cpu_x86_fxsave(env, fpstate_addr);
__put_user(fpstate_addr, &sc->fpstate);
#endif
}
/*
@@ -278,7 +348,7 @@ static void setup_sigcontext(struct target_sigcontext *sc,
*/
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t fxsave_offset)
{
unsigned long esp;
@@ -302,11 +372,15 @@ get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
#endif
}
#ifndef TARGET_X86_64
return (esp - frame_size) & -8ul;
#else
return ((esp - frame_size) & (~15ul)) - 8;
#endif
if (!(env->features[FEAT_1_EDX] & CPUID_FXSR)) {
return (esp - (fxsave_offset + TARGET_FXSAVE_SIZE)) & -8ul;
} else if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
return ((esp - TARGET_FXSAVE_SIZE) & -16ul) - fxsave_offset;
} else {
size_t xstate_size =
xsave_area_size(env->xcr0, false) + TARGET_FP_XSTATE_MAGIC2_SIZE;
return ((esp - xstate_size) & -64ul) - fxsave_offset;
}
}
#ifndef TARGET_X86_64
@@ -334,7 +408,7 @@ void setup_frame(int sig, struct target_sigaction *ka,
struct sigframe *frame;
int i;
frame_addr = get_sigframe(ka, env, sizeof(*frame));
frame_addr = get_sigframe(ka, env, TARGET_SIGFRAME_FXSAVE_OFFSET);
trace_user_setup_frame(env, frame_addr);
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
@@ -390,7 +464,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
struct rt_sigframe *frame;
int i;
frame_addr = get_sigframe(ka, env, sizeof(*frame));
frame_addr = get_sigframe(ka, env, TARGET_RT_SIGFRAME_FXSAVE_OFFSET);
trace_user_setup_rt_frame(env, frame_addr);
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
@@ -409,7 +483,11 @@
}
/* Create the ucontext. */
__put_user(0, &frame->uc.tuc_flags);
if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
__put_user(1, &frame->uc.tuc_flags);
} else {
__put_user(0, &frame->uc.tuc_flags);
}
__put_user(0, &frame->uc.tuc_link);
target_save_altstack(&frame->uc.tuc_stack, env);
setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
@@ -463,10 +541,37 @@ give_sigsegv:
force_sigsegv(sig);
}
static int xrstor_sigcontext(CPUX86State *env, struct target_fpstate_fxsave *fxsave,
abi_ulong fxsave_addr)
{
if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
uint32_t extended_size = tswapl(fxsave->sw_reserved.extended_size);
uint32_t xstate_size = tswapl(fxsave->sw_reserved.xstate_size);
uint32_t xfeatures_size = xstate_size - TARGET_FXSAVE_SIZE;
/* Linux checks MAGIC2 using xstate_size, not extended_size. */
if (tswapl(fxsave->sw_reserved.magic1) == TARGET_FP_XSTATE_MAGIC1 &&
extended_size >= TARGET_FPSTATE_FXSAVE_OFFSET + xstate_size + TARGET_FP_XSTATE_MAGIC2_SIZE) {
if (!access_ok(env_cpu(env), VERIFY_READ, fxsave_addr,
extended_size - TARGET_FPSTATE_FXSAVE_OFFSET)) {
return 1;
}
if (tswapl(*(uint32_t *) &fxsave->xfeatures[xfeatures_size]) == TARGET_FP_XSTATE_MAGIC2) {
cpu_x86_xrstor(env, fxsave_addr);
return 0;
}
}
/* fall through to fxrstor */
}
cpu_x86_fxrstor(env, fxsave_addr);
return 0;
}
static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
unsigned int err = 0;
int err = 1;
abi_ulong fpstate_addr;
unsigned int tmpflags;
@@ -517,20 +622,28 @@ restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
fpstate_addr = tswapl(sc->fpstate);
if (fpstate_addr != 0) {
if (!access_ok(env_cpu(env), VERIFY_READ, fpstate_addr,
sizeof(struct target_fpstate))) {
goto badframe;
struct target_fpstate *fpstate;
if (!lock_user_struct(VERIFY_READ, fpstate, fpstate_addr,
sizeof(struct target_fpstate))) {
return err;
}
#ifndef TARGET_X86_64
cpu_x86_frstor(env, fpstate_addr, 1);
if (!(env->features[FEAT_1_EDX] & CPUID_FXSR)) {
cpu_x86_frstor(env, fpstate_addr, 1);
err = 0;
} else {
err = xrstor_sigcontext(env, &fpstate->fxsave,
fpstate_addr + TARGET_FPSTATE_FXSAVE_OFFSET);
}
#else
cpu_x86_fxrstor(env, fpstate_addr);
err = xrstor_sigcontext(env, fpstate, fpstate_addr);
#endif
unlock_user_struct(fpstate, fpstate_addr, 0);
} else {
err = 0;
}
return err;
badframe:
return 1;
}
/* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
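For concreteness, here is a hedged worked example of the layout that xsave_sigcontext() and xrstor_sigcontext() above compute, as a tiny standalone C program. It assumes a 64-bit guest (so TARGET_FPSTATE_FXSAVE_OFFSET is 0) whose xcr0 enables x87, SSE and AVX; the 512-byte legacy area, 64-byte XSAVE header and 256-byte YMM_Hi128 component are architectural assumptions, not values taken from this diff:

    #include <stdio.h>
    #include <stdint.h>

    #define TARGET_FXSAVE_SIZE           512
    #define TARGET_FP_XSTATE_MAGIC2_SIZE 4
    #define TARGET_FPSTATE_FXSAVE_OFFSET 0    /* 64-bit frame */

    int main(void)
    {
        /* xsave_area_size(x87|SSE|AVX, false): 512 + 64 + 256 */
        uint32_t xstate_size = 832;
        /* index in fxsave->xfeatures[] where MAGIC2 is written */
        uint32_t xfeatures_size = xstate_size - TARGET_FXSAVE_SIZE;
        /* offset from fpstate_addr to just past the MAGIC2 marker */
        uint32_t extended_size = TARGET_FPSTATE_FXSAVE_OFFSET
                                 + xstate_size + TARGET_FP_XSTATE_MAGIC2_SIZE;

        printf("MAGIC2 at xfeatures[%u], extended_size = %u\n",
               xfeatures_size, extended_size);    /* prints 320, 836 */
        return 0;
    }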

View File

@@ -643,3 +643,20 @@
{ 'struct': 'MemoryFailureFlags',
'data': { 'action-required': 'bool',
'recursive': 'bool'} }
##
# @NotifyVmexitOption:
#
# An enumeration of the options specified when enabling notify VM exit
#
# @run: enable the feature, do nothing and continue if the notify VM exit happens.
#
# @internal-error: enable the feature, raise an internal error if the notify
# VM exit happens.
#
# @disable: disable the feature.
#
# Since: 7.2
##
{ 'enum': 'NotifyVmexitOption',
'data': [ 'run', 'internal-error', 'disable' ] }

View File

@@ -191,6 +191,7 @@ DEF("accel", HAS_ARG, QEMU_OPTION_accel,
" split-wx=on|off (enable TCG split w^x mapping)\n"
" tb-size=n (TCG translation block cache size)\n"
" dirty-ring-size=n (KVM dirty ring GFN count, default 0)\n"
" notify-vmexit=run|internal-error|disable,notify-window=n (enable notify VM exit and set notify window, x86 only)\n"
" thread=single|multi (enable multi-threaded TCG)\n", QEMU_ARCH_ALL)
SRST
``-accel name[,prop=value[,...]]``
@@ -242,6 +243,16 @@ SRST
is disabled (dirty-ring-size=0). When enabled, KVM will instead
record dirty pages in a bitmap.
``notify-vmexit=run|internal-error|disable,notify-window=n``
Enables or disables notify VM exit support on x86 hosts and specifies
the notify window after which the VM exit is triggered, when enabled.
The ``run`` option enables the feature and simply continues execution
when the exit happens; ``internal-error`` enables the feature and
raises an internal error instead; ``disable`` leaves the feature off.
This feature can mitigate CPU-stuck issues that arise when the guest's
event window does not open up for a given amount of time (the
notify-window).
Default: notify-vmexit=run,notify-window=0.
ERST
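The option documented above can be exercised directly from the command line. A hedged example (the window value is arbitrary and is counted in clock cycles, per the property description later in this diff):

    qemu-system-x86_64 -accel kvm,notify-vmexit=internal-error,notify-window=100000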
DEF("smp", HAS_ARG, QEMU_OPTION_smp,

View File

@@ -1058,3 +1058,7 @@ bool kvm_arch_cpu_check_are_resettable(void)
{
return true;
}
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}

View File

@@ -25,4 +25,8 @@
#define TARGET_PAGE_BITS 12
#define NB_MMU_MODES 3
#ifndef CONFIG_USER_ONLY
# define TARGET_TB_PCREL 1
#endif
#endif

View File

@@ -1467,7 +1467,7 @@ ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
},
};
static uint32_t xsave_area_size(uint64_t mask, bool compacted)
uint32_t xsave_area_size(uint64_t mask, bool compacted)
{
uint64_t ret = x86_ext_save_areas[0].size;
const ExtSaveArea *esa;
@@ -6017,6 +6017,7 @@ static void x86_cpu_reset(DeviceState *dev)
env->exception_has_payload = false;
env->exception_payload = 0;
env->nmi_injected = false;
env->triple_fault_pending = false;
#if !defined(CONFIG_USER_ONLY)
/* We hard-wire the BSP to the first CPU. */
apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

View File

@@ -1739,6 +1739,7 @@ typedef struct CPUArchState {
uint8_t has_error_code;
uint8_t exception_has_payload;
uint64_t exception_payload;
uint8_t triple_fault_pending;
uint32_t ins_len;
uint32_t sipi_vector;
bool tsc_valid;
@@ -2070,6 +2071,8 @@ void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr);
void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr);
void cpu_x86_xsave(CPUX86State *s, target_ulong ptr);
void cpu_x86_xrstor(CPUX86State *s, target_ulong ptr);
/* cpu.c */
void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
@@ -2326,6 +2329,7 @@ bool cpu_is_bsp(X86CPU *cpu);
void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen);
void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen);
uint32_t xsave_area_size(uint64_t mask, bool compacted);
void x86_update_hflags(CPUX86State* env);
static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)

View File

@@ -37,7 +37,7 @@ DEF_HELPER_2(lldt, void, env, int)
DEF_HELPER_2(ltr, void, env, int)
DEF_HELPER_3(load_seg, void, env, int, int)
DEF_HELPER_4(ljmp_protected, void, env, int, tl, tl)
DEF_HELPER_5(lcall_real, void, env, int, tl, int, int)
DEF_HELPER_5(lcall_real, void, env, i32, i32, int, i32)
DEF_HELPER_5(lcall_protected, void, env, int, tl, int, tl)
DEF_HELPER_2(iret_real, void, env, int)
DEF_HELPER_3(iret_protected, void, env, int, int)

View File

@@ -15,6 +15,7 @@
#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/syscall.h>
@@ -132,6 +133,7 @@ static int has_xcrs;
static int has_pit_state2;
static int has_sregs2;
static int has_exception_payload;
static int has_triple_fault_event;
static bool has_msr_mcg_ext_ctl;
@@ -139,6 +141,8 @@ static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_cpuid2 *hv_cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;
static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];
#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);
@@ -2397,6 +2401,17 @@ static int kvm_get_supported_msrs(KVMState *s)
return ret;
}
static bool kvm_rdmsr_core_thread_count(X86CPU *cpu, uint32_t msr,
uint64_t *val)
{
CPUState *cs = CPU(cpu);
*val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */
*val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */
return true;
}
static Notifier smram_machine_done;
static KVMMemoryListener smram_listener;
static AddressSpace smram_address_space;
@@ -2479,6 +2494,16 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
}
}
has_triple_fault_event = kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT);
if (has_triple_fault_event) {
ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true);
if (ret < 0) {
error_report("kvm: Failed to enable triple fault event cap: %s",
strerror(-ret));
return ret;
}
}
ret = kvm_get_supported_msrs(s);
if (ret < 0) {
return ret;
@@ -2584,6 +2609,40 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
}
}
if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE &&
kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) {
uint64_t notify_window_flags =
((uint64_t)s->notify_window << 32) |
KVM_X86_NOTIFY_VMEXIT_ENABLED |
KVM_X86_NOTIFY_VMEXIT_USER;
ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0,
notify_window_flags);
if (ret < 0) {
error_report("kvm: Failed to enable notify vmexit cap: %s",
strerror(-ret));
return ret;
}
}
if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
bool r;
ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0,
KVM_MSR_EXIT_REASON_FILTER);
if (ret) {
error_report("Could not enable user space MSRs: %s",
strerror(-ret));
exit(1);
}
r = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT,
kvm_rdmsr_core_thread_count, NULL);
if (!r) {
error_report("Could not install MSR_CORE_THREAD_COUNT handler: %s",
strerror(-ret));
exit(1);
}
}
return 0;
}
@@ -4295,6 +4354,11 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
}
}
if (has_triple_fault_event) {
events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
events.triple_fault.pending = env->triple_fault_pending;
}
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}
@@ -4364,6 +4428,10 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
}
}
if (events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
env->triple_fault_pending = events.triple_fault.pending;
}
env->sipi_vector = events.sipi_vector;
return 0;
@@ -5073,6 +5141,108 @@ void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
}
}
static bool kvm_install_msr_filters(KVMState *s)
{
uint64_t zero = 0;
struct kvm_msr_filter filter = {
.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
};
int r, i, j = 0;
for (i = 0; i < KVM_MSR_FILTER_MAX_RANGES; i++) {
KVMMSRHandlers *handler = &msr_handlers[i];
if (handler->msr) {
struct kvm_msr_filter_range *range = &filter.ranges[j++];
*range = (struct kvm_msr_filter_range) {
.flags = 0,
.nmsrs = 1,
.base = handler->msr,
.bitmap = (__u8 *)&zero,
};
if (handler->rdmsr) {
range->flags |= KVM_MSR_FILTER_READ;
}
if (handler->wrmsr) {
range->flags |= KVM_MSR_FILTER_WRITE;
}
}
}
r = kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter);
if (r) {
return false;
}
return true;
}
bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
QEMUWRMSRHandler *wrmsr)
{
int i;
for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
if (!msr_handlers[i].msr) {
msr_handlers[i] = (KVMMSRHandlers) {
.msr = msr,
.rdmsr = rdmsr,
.wrmsr = wrmsr,
};
if (!kvm_install_msr_filters(s)) {
msr_handlers[i] = (KVMMSRHandlers) { };
return false;
}
return true;
}
}
return false;
}
static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run)
{
int i;
bool r;
for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
KVMMSRHandlers *handler = &msr_handlers[i];
if (run->msr.index == handler->msr) {
if (handler->rdmsr) {
r = handler->rdmsr(cpu, handler->msr,
(uint64_t *)&run->msr.data);
run->msr.error = r ? 0 : 1;
return 0;
}
}
}
assert(false);
}
static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run)
{
int i;
bool r;
for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
KVMMSRHandlers *handler = &msr_handlers[i];
if (run->msr.index == handler->msr) {
if (handler->wrmsr) {
r = handler->wrmsr(cpu, handler->msr, run->msr.data);
run->msr.error = r ? 0 : 1;
return 0;
}
}
}
assert(false);
}
static bool has_sgx_provisioning;
static bool __kvm_enable_sgx_provisioning(KVMState *s)
@@ -5117,6 +5287,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
X86CPU *cpu = X86_CPU(cs);
uint64_t code;
int ret;
bool ctx_invalid;
char str[256];
KVMState *state;
switch (run->exit_reason) {
case KVM_EXIT_HLT:
@@ -5172,6 +5345,31 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
/* already handled in kvm_arch_post_run */
ret = 0;
break;
case KVM_EXIT_NOTIFY:
ctx_invalid = !!(run->notify.flags & KVM_NOTIFY_CONTEXT_INVALID);
state = KVM_STATE(current_accel());
sprintf(str, "Encountered a notify exit with %svalid context in"
" guest. The guest may be misbehaving."
" Please have a look.", ctx_invalid ? "in" : "");
if (ctx_invalid ||
state->notify_vmexit == NOTIFY_VMEXIT_OPTION_INTERNAL_ERROR) {
warn_report("KVM internal error: %s", str);
ret = -1;
} else {
warn_report_once("KVM: %s", str);
ret = 0;
}
break;
case KVM_EXIT_X86_RDMSR:
/* We only enable MSR filtering, any other exit is bogus */
assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
ret = kvm_handle_rdmsr(cpu, run);
break;
case KVM_EXIT_X86_WRMSR:
/* We only enable MSR filtering, any other exit is bogus */
assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
ret = kvm_handle_wrmsr(cpu, run);
break;
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
ret = -1;
@@ -5448,3 +5646,71 @@ void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
mask &= ~BIT_ULL(bit);
}
}
static int kvm_arch_get_notify_vmexit(Object *obj, Error **errp)
{
KVMState *s = KVM_STATE(obj);
return s->notify_vmexit;
}
static void kvm_arch_set_notify_vmexit(Object *obj, int value, Error **errp)
{
KVMState *s = KVM_STATE(obj);
if (s->fd != -1) {
error_setg(errp, "Cannot set properties after the accelerator has been initialized");
return;
}
s->notify_vmexit = value;
}
static void kvm_arch_get_notify_window(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
KVMState *s = KVM_STATE(obj);
uint32_t value = s->notify_window;
visit_type_uint32(v, name, &value, errp);
}
static void kvm_arch_set_notify_window(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
KVMState *s = KVM_STATE(obj);
Error *error = NULL;
uint32_t value;
if (s->fd != -1) {
error_setg(errp, "Cannot set properties after the accelerator has been initialized");
return;
}
visit_type_uint32(v, name, &value, &error);
if (error) {
error_propagate(errp, error);
return;
}
s->notify_window = value;
}
void kvm_arch_accel_class_init(ObjectClass *oc)
{
object_class_property_add_enum(oc, "notify-vmexit", "NotifyVmexitOption",
&NotifyVmexitOption_lookup,
kvm_arch_get_notify_vmexit,
kvm_arch_set_notify_vmexit);
object_class_property_set_description(oc, "notify-vmexit",
"Enable notify VM exit");
object_class_property_add(oc, "notify-window", "uint32",
kvm_arch_get_notify_window,
kvm_arch_set_notify_window,
NULL, NULL);
object_class_property_set_description(oc, "notify-window",
"Clock cycles without an event window "
"after which a notification VM exit occurs");
}

View File

@@ -54,4 +54,15 @@ uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address);
bool kvm_enable_sgx_provisioning(KVMState *s);
void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask);
typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val);
typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val);
typedef struct kvm_msr_handlers {
uint32_t msr;
QEMURDMSRHandler *rdmsr;
QEMUWRMSRHandler *wrmsr;
} KVMMSRHandlers;
bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
QEMUWRMSRHandler *wrmsr);
#endif
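A hedged sketch of how a caller could use the filtering API declared above: trap guest reads of a hypothetical MSR (0x123 is made up for illustration) and serve them from userspace. This is a fragment meant to sit alongside the kvm.c code earlier in this diff; kvm_rdmsr_core_thread_count() follows the same pattern for MSR_CORE_THREAD_COUNT:

    /* Value the guest's RDMSR of 0x123 will observe. */
    static bool demo_rdmsr(X86CPU *cpu, uint32_t msr, uint64_t *val)
    {
        *val = 0;
        return true;    /* success: run->msr.error is cleared */
    }

    static void demo_install(KVMState *s)
    {
        /* NULL wrmsr: guest writes to this MSR are not trapped */
        if (!kvm_filter_msr(s, 0x123, demo_rdmsr, NULL)) {
            /* either no free msr_handlers[] slot or KVM rejected the filter */
        }
    }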

View File

@@ -1562,6 +1562,25 @@ static const VMStateDescription vmstate_arch_lbr = {
}
};
static bool triple_fault_needed(void *opaque)
{
X86CPU *cpu = opaque;
CPUX86State *env = &cpu->env;
return env->triple_fault_pending;
}
static const VMStateDescription vmstate_triple_fault = {
.name = "cpu/triple_fault",
.version_id = 1,
.minimum_version_id = 1,
.needed = triple_fault_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(env.triple_fault_pending, X86CPU),
VMSTATE_END_OF_LIST()
}
};
const VMStateDescription vmstate_x86_cpu = {
.name = "cpu",
.version_id = 12,
@@ -1706,6 +1725,7 @@ const VMStateDescription vmstate_x86_cpu = {
&vmstate_amx_xtile,
#endif
&vmstate_arch_lbr,
&vmstate_triple_fault,
NULL
}
};

View File

@@ -2502,18 +2502,6 @@ void helper_frstor(CPUX86State *env, target_ulong ptr, int data32)
do_frstor(env, ptr, data32, GETPC());
}
#if defined(CONFIG_USER_ONLY)
void cpu_x86_fsave(CPUX86State *env, target_ulong ptr, int data32)
{
do_fsave(env, ptr, data32, 0);
}
void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32)
{
do_frstor(env, ptr, data32, 0);
}
#endif
#define XO(X) offsetof(X86XSaveArea, X)
static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
@@ -2787,21 +2775,8 @@ void helper_fxrstor(CPUX86State *env, target_ulong ptr)
do_fxrstor(env, ptr, GETPC());
}
#if defined(CONFIG_USER_ONLY)
void cpu_x86_fxsave(CPUX86State *env, target_ulong ptr)
static void do_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm, uintptr_t ra)
{
do_fxsave(env, ptr, 0);
}
void cpu_x86_fxrstor(CPUX86State *env, target_ulong ptr)
{
do_fxrstor(env, ptr, 0);
}
#endif
void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
{
uintptr_t ra = GETPC();
uint64_t xstate_bv, xcomp_bv, reserve0;
rfbm &= env->xcr0;
@@ -2894,6 +2869,43 @@ void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
#undef XO
void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
{
do_xrstor(env, ptr, rfbm, GETPC());
}
#if defined(CONFIG_USER_ONLY)
void cpu_x86_fsave(CPUX86State *env, target_ulong ptr, int data32)
{
do_fsave(env, ptr, data32, 0);
}
void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32)
{
do_frstor(env, ptr, data32, 0);
}
void cpu_x86_fxsave(CPUX86State *env, target_ulong ptr)
{
do_fxsave(env, ptr, 0);
}
void cpu_x86_fxrstor(CPUX86State *env, target_ulong ptr)
{
do_fxrstor(env, ptr, 0);
}
void cpu_x86_xsave(CPUX86State *env, target_ulong ptr)
{
do_xsave(env, ptr, -1, get_xinuse(env), -1, 0);
}
void cpu_x86_xrstor(CPUX86State *env, target_ulong ptr)
{
do_xrstor(env, ptr, -1, 0);
}
#endif
uint64_t helper_xgetbv(CPUX86State *env, uint32_t ecx)
{
/* The OS must have enabled XSAVE. */

View File

@@ -1504,14 +1504,12 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
}
/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
int shift, int next_eip)
void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
int shift, uint32_t next_eip)
{
int new_eip;
uint32_t esp, esp_mask;
target_ulong ssp;
new_eip = new_eip1;
esp = env->regs[R_ESP];
esp_mask = get_sp_mask(env->segs[R_SS].flags);
ssp = env->segs[R_SS].base;

View File

@@ -450,6 +450,11 @@ void helper_rdmsr(CPUX86State *env)
case MSR_IA32_UCODE_REV:
val = x86_cpu->ucode_rev;
break;
case MSR_CORE_THREAD_COUNT: {
CPUState *cs = CPU(x86_cpu);
val = (cs->nr_threads * cs->nr_cores) | (cs->nr_cores << 16);
break;
}
default:
if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
&& (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
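Both the KVM handler (kvm_rdmsr_core_thread_count) and this TCG path encode the topology the same way: thread count in bits 15..0, core count in bits 31..16. A hedged standalone illustration for an assumed 4-core, 2-threads-per-core topology:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int main(void)
    {
        uint32_t nr_cores = 4, nr_threads = 2;            /* threads per core */
        uint64_t val = (uint64_t)(nr_cores * nr_threads)  /* 8 threads total */
                     | ((uint64_t)nr_cores << 16);        /* 4 cores */
        printf("MSR_CORE_THREAD_COUNT = 0x%" PRIx64 "\n", val);  /* 0x40008 */
        return 0;
    }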

View File

@@ -49,9 +49,11 @@ static void x86_cpu_exec_exit(CPUState *cs)
static void x86_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
X86CPU *cpu = X86_CPU(cs);
cpu->env.eip = tb_pc(tb) - tb->cs_base;
/* The instruction pointer is always up to date with TARGET_TB_PCREL. */
if (!TARGET_TB_PCREL) {
CPUX86State *env = cs->env_ptr;
env->eip = tb_pc(tb) - tb->cs_base;
}
}
#ifndef CONFIG_USER_ONLY

File diff suppressed because it is too large

View File

@@ -1294,3 +1294,7 @@ bool kvm_arch_cpu_check_are_resettable(void)
{
return true;
}
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}

View File

@@ -2966,3 +2966,7 @@ bool kvm_arch_cpu_check_are_resettable(void)
{
return true;
}
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}

View File

@@ -532,3 +532,7 @@ bool kvm_arch_cpu_check_are_resettable(void)
{
return true;
}
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}

View File

@@ -2581,3 +2581,7 @@ int kvm_s390_get_zpci_op(void)
{
return cap_zpci_op;
}
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}