* Miscellaneous fixes and feature enablement (many)

* SEV refactoring (David)
 * Hyper-V initial support (Jon)
 * i386 TCG fixes (x87 and SSE, Joseph)
 * vmport cleanup and improvements (Philippe, Liran)
 * Use-after-free with vCPU hot-unplug (Nengyuan)
 * run-coverity-scan improvements (myself)
 * Record/replay fixes (Pavel)
 * -machine kernel_irqchip=split improvements for INTx (Peter)
 * Code cleanups (Philippe)
 * Crash and security fixes (PJP)
 * HVF cleanups (Roman)
 -----BEGIN PGP SIGNATURE-----
 
 iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAl7jpdAUHHBib256aW5p
 QHJlZGhhdC5jb20ACgkQv/vSX3jHroMfjwf/X7+0euuE9dwKFKDDMmIi+4lRWnq7
 gSOyE1BYSfDIUXRIukf64konXe0VpiotNYlyEaYnnQjkMdGm5E9iXKF+LgEwXj/t
 NSGkfj5J3VeWRG4JJp642CSN/aZWO8uzkenld3myCnu6TicuN351tDJchiFwAk9f
 wsXtgLKd67zE8MLVt8AP0rNTbzMHttPXnPaOXDCuwjMHNvMEKnC93UeOeM0M4H5s
 3Dl2HvsNWZ2SzUG9mAbWp0bWWuoIb+Ep9//87HWANvb7Z8jratRws18i6tYt1sPx
 8zOnUS87sVnh1CQlXBDd9fEcqBUVgR9pAlqaaYavNhFp5eC31euvpDU8Iw==
 =F4sU
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* Miscellaneous fixes and feature enablement (many)
* SEV refactoring (David)
* Hyper-V initial support (Jon)
* i386 TCG fixes (x87 and SSE, Joseph)
* vmport cleanup and improvements (Philippe, Liran)
* Use-after-free with vCPU hot-unplug (Nengyuan)
* run-coverity-scan improvements (myself)
* Record/replay fixes (Pavel)
* -machine kernel_irqchip=split improvements for INTx (Peter)
* Code cleanups (Philippe)
* Crash and security fixes (PJP)
* HVF cleanups (Roman)

# gpg: Signature made Fri 12 Jun 2020 16:57:04 BST
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (116 commits)
  target/i386: Remove obsolete TODO file
  stubs: move Xen stubs to accel/
  replay: fix replay shutdown for console mode
  exec/cpu-common: Move MUSB specific typedefs to 'hw/usb/hcd-musb.h'
  hw/usb: Move device-specific declarations to new 'hcd-musb.h' header
  exec/memory: Remove unused MemoryRegionMmio type
  checkpatch: reversed logic with acpi test checks
  target/i386: sev: Unify SEVState and SevGuestState
  target/i386: sev: Remove redundant handle field
  target/i386: sev: Remove redundant policy field
  target/i386: sev: Remove redundant cbitpos and reduced_phys_bits fields
  target/i386: sev: Partial cleanup to sev_state global
  target/i386: sev: Embed SEVState in SevGuestState
  target/i386: sev: Rename QSevGuestInfo
  target/i386: sev: Move local structure definitions into .c file
  target/i386: sev: Remove unused QSevGuestInfoClass
  xen: fix build without pci passthrough
  i386: hvf: Drop HVFX86EmulatorState
  i386: hvf: Move mmio_buf into CPUX86State
  i386: hvf: Move lazy_flags into CPUX86State
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

# Conflicts:
#	hw/i386/acpi-build.c
This commit is contained in:
Peter Maydell 2020-06-12 23:06:22 +01:00
commit 7d3660e798
126 changed files with 6572 additions and 980 deletions

View File

@ -440,6 +440,7 @@ M: Paul Durrant <paul@xen.org>
L: xen-devel@lists.xenproject.org
S: Supported
F: */xen*
F: accel/xen/*
F: hw/9pfs/xen-9p*
F: hw/char/xen_console.c
F: hw/display/xenfb.c
@ -453,6 +454,7 @@ F: hw/i386/xen/
F: hw/pci-host/xen_igd_pt.c
F: include/hw/block/dataplane/xen*
F: include/hw/xen/
F: include/sysemu/xen.h
F: include/sysemu/xen-mapcache.h
Guest CPU Cores (HAXM)

View File

@ -336,9 +336,9 @@ $(call set-vpath, $(SRC_PATH))
LIBS+=-lz $(LIBS_TOOLS)
vhost-user-json-y =
HELPERS-y =
HELPERS-y = $(HELPERS)
HELPERS-$(call land,$(CONFIG_SOFTMMU),$(CONFIG_LINUX)) = qemu-bridge-helper$(EXESUF)
HELPERS-$(call land,$(CONFIG_SOFTMMU),$(CONFIG_LINUX)) += qemu-bridge-helper$(EXESUF)
ifeq ($(CONFIG_LINUX)$(CONFIG_VIRGL)$(CONFIG_GBM)$(CONFIG_TOOLS),yyyy)
HELPERS-y += vhost-user-gpu$(EXESUF)
@ -1258,6 +1258,11 @@ endif
$(call print-help-run,$(t)/fuzz,Build fuzzer for $(t)); \
))) \
echo '')
@$(if $(HELPERS-y), \
echo 'Helper targets:'; \
$(foreach t, $(HELPERS-y), \
$(call print-help-run,$(t),Build $(shell basename $(t)));) \
echo '')
@$(if $(TOOLS), \
echo 'Tools targets:'; \
$(foreach t, $(TOOLS), \

View File

@ -150,6 +150,7 @@ trace-events-subdirs += hw/block/dataplane
trace-events-subdirs += hw/char
trace-events-subdirs += hw/dma
trace-events-subdirs += hw/hppa
trace-events-subdirs += hw/hyperv
trace-events-subdirs += hw/i2c
trace-events-subdirs += hw/i386
trace-events-subdirs += hw/i386/xen

View File

@ -2,4 +2,5 @@ common-obj-$(CONFIG_SOFTMMU) += accel.o
obj-$(call land,$(CONFIG_SOFTMMU),$(CONFIG_POSIX)) += qtest.o
obj-$(CONFIG_KVM) += kvm/
obj-$(CONFIG_TCG) += tcg/
obj-$(CONFIG_XEN) += xen/
obj-y += stubs/

View File

@ -160,9 +160,59 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
static NotifierList kvm_irqchip_change_notifiers =
NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
struct KVMResampleFd {
int gsi;
EventNotifier *resample_event;
QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;
/*
* Only used with split irqchip where we need to do the resample fd
* kick for the kernel from userspace.
*/
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);
#define kvm_slots_lock(kml) qemu_mutex_lock(&(kml)->slots_lock)
#define kvm_slots_unlock(kml) qemu_mutex_unlock(&(kml)->slots_lock)
static inline void kvm_resample_fd_remove(int gsi)
{
KVMResampleFd *rfd;
QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
if (rfd->gsi == gsi) {
QLIST_REMOVE(rfd, node);
g_free(rfd);
break;
}
}
}
static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);
rfd->gsi = gsi;
rfd->resample_event = event;
QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}
void kvm_resample_fd_notify(int gsi)
{
KVMResampleFd *rfd;
QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
if (rfd->gsi == gsi) {
event_notifier_set(rfd->resample_event);
trace_kvm_resample_fd_notify(gsi);
return;
}
}
}
int kvm_get_max_memslots(void)
{
KVMState *s = KVM_STATE(current_accel());
@ -1662,9 +1712,13 @@ int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
return kvm_update_routing_entry(s, &kroute);
}
static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
EventNotifier *resample, int virq,
bool assign)
{
int fd = event_notifier_get_fd(event);
int rfd = resample ? event_notifier_get_fd(resample) : -1;
struct kvm_irqfd irqfd = {
.fd = fd,
.gsi = virq,
@ -1672,8 +1726,33 @@ static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
};
if (rfd != -1) {
irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
irqfd.resamplefd = rfd;
assert(assign);
if (kvm_irqchip_is_split()) {
/*
* When the slow irqchip (e.g. IOAPIC) is in the
* userspace, KVM kernel resamplefd will not work because
* the EOI of the interrupt will be delivered to userspace
* instead, so the KVM kernel resamplefd kick will be
* skipped. The userspace here mimics what the kernel
* provides with resamplefd, remember the resamplefd and
* kick it when we receive EOI of this IRQ.
*
* This is hackery because IOAPIC is mostly bypassed
* (except EOI broadcasts) when irqfd is used. However
* this can bring much performance back for split irqchip
* with INTx IRQs (for VFIO, this gives 93% perf of the
* full fast path, which is 46% perf boost comparing to
* the INTx slow path).
*/
kvm_resample_fd_insert(virq, resample);
} else {
irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
irqfd.resamplefd = rfd;
}
} else if (!assign) {
if (kvm_irqchip_is_split()) {
kvm_resample_fd_remove(virq);
}
}
if (!kvm_irqfds_enabled()) {
@ -1769,7 +1848,9 @@ int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
return -ENOSYS;
}
static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
EventNotifier *resample, int virq,
bool assign)
{
abort();
}
@ -1783,15 +1864,13 @@ int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
EventNotifier *rn, int virq)
{
return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
rn ? event_notifier_get_fd(rn) : -1, virq, true);
return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
}
int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
int virq)
{
return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
false);
return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
}
int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,

View File

@ -16,4 +16,5 @@ kvm_set_ioeventfd_mmio(int fd, uint64_t addr, uint32_t val, bool assign, uint32_
kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%x val=0x%x assign: %d size: %d match: %d"
kvm_set_user_memory(uint32_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, int ret) "Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " ret=%d"
kvm_clear_dirty_log(uint32_t slot, uint64_t start, uint32_t size) "slot#%"PRId32" start 0x%"PRIx64" size 0x%"PRIx32
kvm_resample_fd_notify(int gsi) "gsi %d"

View File

@ -3,3 +3,4 @@ obj-$(call lnot,$(CONFIG_HVF)) += hvf-stub.o
obj-$(call lnot,$(CONFIG_WHPX)) += whpx-stub.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
obj-$(call lnot,$(CONFIG_TCG)) += tcg-stub.o
obj-$(call lnot,$(CONFIG_XEN)) += xen-stub.o

View File

@ -1,18 +1,18 @@
/*
* Copyright (C) 2010 Citrix Ltd.
* Copyright (C) 2014 Citrix Systems UK Ltd.
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
* Contributions after 2012-01-13 are licensed under the terms of the
* GNU GPL, version 2 or (at your option) any later version.
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "hw/xen/xen.h"
#include "exec/memory.h"
#include "qapi/qapi-commands-misc.h"
void xenstore_store_pv_console_info(int i, Chardev *chr)
{
}
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
return -1;
@ -35,11 +35,6 @@ int xen_is_pirq_msi(uint32_t msi_data)
return 0;
}
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
Error **errp)
{
}
qemu_irq *xen_interrupt_controller_init(void)
{
return NULL;
@ -49,10 +44,6 @@ void xen_register_framebuffer(MemoryRegion *mr)
{
}
void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
{
}
void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
{
}

1
accel/xen/Makefile.objs Normal file
View File

@ -0,0 +1 @@
obj-y += xen-all.o

View File

@ -16,6 +16,7 @@
#include "hw/xen/xen_pt.h"
#include "chardev/char.h"
#include "sysemu/accel.h"
#include "sysemu/xen.h"
#include "sysemu/runstate.h"
#include "migration/misc.h"
#include "migration/global_state.h"
@ -31,6 +32,13 @@
do { } while (0)
#endif
static bool xen_allowed;
bool xen_enabled(void)
{
return xen_allowed;
}
xc_interface *xen_xc;
xenforeignmemory_handle *xen_fmem;
xendevicemodel_handle *xen_dmod;
@ -129,12 +137,12 @@ static void xen_change_state_handler(void *opaque, int running,
static bool xen_get_igd_gfx_passthru(Object *obj, Error **errp)
{
return has_igd_gfx_passthru;
return xen_igd_gfx_pt_enabled();
}
static void xen_set_igd_gfx_passthru(Object *obj, bool value, Error **errp)
{
has_igd_gfx_passthru = value;
xen_igd_gfx_pt_set(value, errp);
}
static void xen_setup_post(MachineState *ms, AccelState *accel)

View File

@ -551,7 +551,9 @@ static int tcp_chr_sync_read(Chardev *chr, const uint8_t *buf, int len)
qio_channel_set_blocking(s->ioc, true, NULL);
size = tcp_chr_recv(chr, (void *) buf, len);
qio_channel_set_blocking(s->ioc, false, NULL);
if (s->state != TCP_CHARDEV_STATE_DISCONNECTED) {
qio_channel_set_blocking(s->ioc, false, NULL);
}
if (size == 0) {
/* connection closed */
tcp_chr_disconnect(chr);

25
configure vendored
View File

@ -4587,7 +4587,13 @@ fi
if test "$tcmalloc" = "yes" ; then
cat > $TMPC << EOF
#include <stdlib.h>
int main(void) { malloc(1); return 0; }
int main(void) {
void *tmp = malloc(1);
if (tmp != NULL) {
return 0;
}
return 1;
}
EOF
if compile_prog "" "-ltcmalloc" ; then
@ -4603,7 +4609,13 @@ fi
if test "$jemalloc" = "yes" ; then
cat > $TMPC << EOF
#include <stdlib.h>
int main(void) { malloc(1); return 0; }
int main(void) {
void *tmp = malloc(1);
if (tmp != NULL) {
return 0;
}
return 1;
}
EOF
if compile_prog "" "-ljemalloc" ; then
@ -6164,7 +6176,9 @@ if test "$sanitizers" = "yes" ; then
#include <stdlib.h>
int main(void) {
void *tmp = malloc(10);
return *(int *)(tmp + 2);
if (tmp != NULL) {
return *(int *)(tmp + 2);
}
}
EOF
if compile_prog "$CPU_CFLAGS -Werror -fsanitize=undefined" ""; then
@ -6394,7 +6408,7 @@ if test "$softmmu" = yes ; then
if test "$linux" = yes; then
if test "$virtfs" != no && test "$cap_ng" = yes && test "$attr" = yes ; then
virtfs=yes
tools="$tools fsdev/virtfs-proxy-helper\$(EXESUF)"
helpers="$helpers fsdev/virtfs-proxy-helper\$(EXESUF)"
else
if test "$virtfs" = yes; then
error_exit "VirtFS requires libcap-ng devel and libattr devel"
@ -6409,7 +6423,7 @@ if test "$softmmu" = yes ; then
fi
mpath=no
fi
tools="$tools scsi/qemu-pr-helper\$(EXESUF)"
helpers="$helpers scsi/qemu-pr-helper\$(EXESUF)"
else
if test "$virtfs" = yes; then
error_exit "VirtFS is supported only on Linux"
@ -7654,6 +7668,7 @@ else
QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/\$(ARCH) $QEMU_INCLUDES"
fi
echo "HELPERS=$helpers" >> $config_host_mak
echo "TOOLS=$tools" >> $config_host_mak
echo "ROMS=$roms" >> $config_host_mak
echo "MAKE=$make" >> $config_host_mak

26
cpus.c
View File

@ -379,7 +379,8 @@ static void icount_adjust(void)
seqlock_write_lock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
cur_time = cpu_get_clock_locked();
cur_time = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
cpu_get_clock_locked());
cur_icount = cpu_get_icount_locked();
delta = cur_icount - cur_time;
@ -647,6 +648,11 @@ static bool adjust_timers_state_needed(void *opaque)
return s->icount_rt_timer != NULL;
}
static bool shift_state_needed(void *opaque)
{
return use_icount == 2;
}
/*
* Subsection for warp timer migration is optional, because may not be created
*/
@ -674,6 +680,17 @@ static const VMStateDescription icount_vmstate_adjust_timers = {
}
};
static const VMStateDescription icount_vmstate_shift = {
.name = "timer/icount/shift",
.version_id = 1,
.minimum_version_id = 1,
.needed = shift_state_needed,
.fields = (VMStateField[]) {
VMSTATE_INT16(icount_time_shift, TimersState),
VMSTATE_END_OF_LIST()
}
};
/*
* This is a subsection for icount migration.
*/
@ -690,6 +707,7 @@ static const VMStateDescription icount_vmstate_timers = {
.subsections = (const VMStateDescription*[]) {
&icount_vmstate_warp_timer,
&icount_vmstate_adjust_timers,
&icount_vmstate_shift,
NULL
}
};
@ -803,8 +821,10 @@ void configure_icount(QemuOpts *opts, Error **errp)
bool align = qemu_opt_get_bool(opts, "align", false);
long time_shift = -1;
if (!option && qemu_opt_get(opts, "align")) {
error_setg(errp, "Please specify shift option when using align");
if (!option) {
if (qemu_opt_get(opts, "align") != NULL) {
error_setg(errp, "Please specify shift option when using align");
}
return;
}

13
disas.c
View File

@ -39,9 +39,11 @@ target_read_memory (bfd_vma memaddr,
struct disassemble_info *info)
{
CPUDebug *s = container_of(info, CPUDebug, info);
int r;
cpu_memory_rw_debug(s->cpu, memaddr, myaddr, length, 0);
return 0;
r = cpu_memory_rw_debug(s->cpu, memaddr, myaddr, length, 0);
return r ? EIO : 0;
}
/* Print an error message. We can assume that this is in response to
@ -718,10 +720,11 @@ physical_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length,
struct disassemble_info *info)
{
CPUDebug *s = container_of(info, CPUDebug, info);
MemTxResult res;
address_space_read(s->cpu->as, memaddr, MEMTXATTRS_UNSPECIFIED,
myaddr, length);
return 0;
res = address_space_read(s->cpu->as, memaddr, MEMTXATTRS_UNSPECIFIED,
myaddr, length);
return res == MEMTX_OK ? 0 : EIO;
}
/* Disassembler for the monitor. */

29
exec.c
View File

@ -3546,6 +3546,7 @@ void *address_space_map(AddressSpace *as,
if (!memory_access_is_direct(mr, is_write)) {
if (atomic_xchg(&bounce.in_use, true)) {
*plen = 0;
return NULL;
}
/* Avoid unbounded allocations */
@ -3724,7 +3725,7 @@ static inline MemoryRegion *address_space_translate_cached(
/* Called from RCU critical section. address_space_read_cached uses this
* out of line function when the target is an MMIO or IOMMU region.
*/
void
MemTxResult
address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
void *buf, hwaddr len)
{
@ -3734,15 +3735,15 @@ address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
l = len;
mr = address_space_translate_cached(cache, addr, &addr1, &l, false,
MEMTXATTRS_UNSPECIFIED);
flatview_read_continue(cache->fv,
addr, MEMTXATTRS_UNSPECIFIED, buf, len,
addr1, l, mr);
return flatview_read_continue(cache->fv,
addr, MEMTXATTRS_UNSPECIFIED, buf, len,
addr1, l, mr);
}
/* Called from RCU critical section. address_space_write_cached uses this
* out of line function when the target is an MMIO or IOMMU region.
*/
void
MemTxResult
address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
const void *buf, hwaddr len)
{
@ -3752,9 +3753,9 @@ address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
l = len;
mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
MEMTXATTRS_UNSPECIFIED);
flatview_write_continue(cache->fv,
addr, MEMTXATTRS_UNSPECIFIED, buf, len,
addr1, l, mr);
return flatview_write_continue(cache->fv,
addr, MEMTXATTRS_UNSPECIFIED, buf, len,
addr1, l, mr);
}
#define ARG1_DECL MemoryRegionCache *cache
@ -3777,6 +3778,7 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
while (len > 0) {
int asidx;
MemTxAttrs attrs;
MemTxResult res;
page = addr & TARGET_PAGE_MASK;
phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
@ -3789,11 +3791,14 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
l = len;
phys_addr += (addr & ~TARGET_PAGE_MASK);
if (is_write) {
address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
attrs, buf, l);
res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
attrs, buf, l);
} else {
address_space_read(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf,
l);
res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
attrs, buf, l);
}
if (res != MEMTX_OK) {
return -1;
}
len -= l;
buf += l;

View File

@ -254,11 +254,12 @@ ERST
{
.name = "mtree",
.args_type = "flatview:-f,dispatch_tree:-d,owner:-o",
.params = "[-f][-d][-o]",
.args_type = "flatview:-f,dispatch_tree:-d,owner:-o,disabled:-D",
.params = "[-f][-d][-o][-D]",
.help = "show memory tree (-f: dump flat view for address spaces;"
"-d: dump dispatch tree, valid with -f only);"
"-o: dump region owners/parents",
"-o: dump region owners/parents;"
"-D: dump disabled regions",
.cmd = hmp_info_mtree,
},

View File

@ -35,7 +35,7 @@ devices-dirs-y += usb/
devices-dirs-$(CONFIG_VFIO) += vfio/
devices-dirs-y += virtio/
devices-dirs-y += watchdog/
devices-dirs-y += xen/
devices-dirs-$(CONFIG_XEN) += xen/
devices-dirs-$(CONFIG_MEM_DEVICE) += mem/
devices-dirs-$(CONFIG_NUBUS) += nubus/
devices-dirs-y += semihosting/

View File

@ -30,6 +30,7 @@
#include "hw/acpi/acpi.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/xen.h"
#include "qapi/error.h"
#include "qemu/range.h"
#include "exec/address-spaces.h"
@ -41,7 +42,6 @@
#include "hw/mem/nvdimm.h"
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/acpi_dev_interface.h"
#include "hw/xen/xen.h"
#include "migration/vmstate.h"
#include "hw/core/cpu.h"
#include "trace.h"

View File

@ -20,7 +20,6 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qom/object.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/vhost.h"

View File

@ -45,6 +45,10 @@ GlobalProperty hw_compat_4_2[] = {
{ "qxl", "revision", "4" },
{ "qxl-vga", "revision", "4" },
{ "fw_cfg", "acpi-mr-restore", "false" },
{ "vmport", "x-read-set-eax", "off" },
{ "vmport", "x-signal-unsupported-cmd", "off" },
{ "vmport", "x-report-vmx-type", "off" },
{ "vmport", "x-cmds-v2", "off" },
};
const size_t hw_compat_4_2_len = G_N_ELEMENTS(hw_compat_4_2);

View File

@ -757,6 +757,11 @@ void numa_complete_configuration(MachineState *ms)
}
if (!numa_uses_legacy_mem() && mc->default_ram_id) {
if (ms->ram_memdev_id) {
error_report("'-machine memory-backend' and '-numa memdev'"
" properties are mutually exclusive");
exit(1);
}
ms->ram = g_new(MemoryRegion, 1);
memory_region_init(ms->ram, OBJECT(ms), mc->default_ram_id,
ram_size);

View File

@ -6,3 +6,8 @@ config HYPERV_TESTDEV
bool
default y if TEST_DEVICES
depends on HYPERV
config VMBUS
bool
default y
depends on HYPERV

View File

@ -1,2 +1,3 @@
obj-y += hyperv.o
obj-$(CONFIG_HYPERV_TESTDEV) += hyperv_testdev.o
obj-$(CONFIG_VMBUS) += vmbus.o

View File

@ -38,6 +38,13 @@ typedef struct SynICState {
#define TYPE_SYNIC "hyperv-synic"
#define SYNIC(obj) OBJECT_CHECK(SynICState, (obj), TYPE_SYNIC)
static bool synic_enabled;
bool hyperv_is_synic_enabled(void)
{
return synic_enabled;
}
static SynICState *get_synic(CPUState *cs)
{
return SYNIC(object_resolve_path_component(OBJECT(cs), "synic"));
@ -134,6 +141,7 @@ void hyperv_synic_add(CPUState *cs)
object_property_add_child(OBJECT(cs), "synic", obj);
object_unref(obj);
object_property_set_bool(obj, true, "realized", &error_abort);
synic_enabled = true;
}
void hyperv_synic_reset(CPUState *cs)

18
hw/hyperv/trace-events Normal file
View File

@ -0,0 +1,18 @@
# vmbus
vmbus_recv_message(uint32_t type, uint32_t size) "type %d size %d"
vmbus_signal_event(void) ""
vmbus_channel_notify_guest(uint32_t chan_id) "channel #%d"
vmbus_post_msg(uint32_t type, uint32_t size) "type %d size %d"
vmbus_msg_cb(int status) "message status %d"
vmbus_process_incoming_message(uint32_t message_type) "type %d"
vmbus_initiate_contact(uint16_t major, uint16_t minor, uint32_t vcpu, uint64_t monitor_page1, uint64_t monitor_page2, uint64_t interrupt_page) "version %d.%d target vp %d mon pages 0x%"PRIx64",0x%"PRIx64" int page 0x%"PRIx64
vmbus_send_offer(uint32_t chan_id, void *dev) "channel #%d dev %p"
vmbus_terminate_offers(void) ""
vmbus_gpadl_header(uint32_t gpadl_id, uint16_t num_gfns) "gpadl #%d gfns %d"
vmbus_gpadl_body(uint32_t gpadl_id) "gpadl #%d"
vmbus_gpadl_created(uint32_t gpadl_id) "gpadl #%d"
vmbus_gpadl_teardown(uint32_t gpadl_id) "gpadl #%d"
vmbus_gpadl_torndown(uint32_t gpadl_id) "gpadl #%d"
vmbus_open_channel(uint32_t chan_id, uint32_t gpadl_id, uint32_t target_vp) "channel #%d gpadl #%d target vp %d"
vmbus_channel_open(uint32_t chan_id, uint32_t status) "channel #%d status %d"
vmbus_close_channel(uint32_t chan_id) "channel #%d"

2778
hw/hyperv/vmbus.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -51,6 +51,7 @@
#include "hw/mem/nvdimm.h"
#include "sysemu/numa.h"
#include "sysemu/reset.h"
#include "hw/hyperv/vmbus-bridge.h"
/* Supported chipsets: */
#include "hw/southbridge/piix.h"
@ -1052,9 +1053,47 @@ static Aml *build_mouse_device_aml(void)
return dev;
}
static Aml *build_vmbus_device_aml(VMBusBridge *vmbus_bridge)
{
Aml *dev;
Aml *method;
Aml *crs;
dev = aml_device("VMBS");
aml_append(dev, aml_name_decl("STA", aml_int(0xF)));
aml_append(dev, aml_name_decl("_HID", aml_string("VMBus")));
aml_append(dev, aml_name_decl("_UID", aml_int(0x0)));
aml_append(dev, aml_name_decl("_DDN", aml_string("VMBUS")));
method = aml_method("_DIS", 0, AML_NOTSERIALIZED);
aml_append(method, aml_store(aml_and(aml_name("STA"), aml_int(0xD), NULL),
aml_name("STA")));
aml_append(dev, method);
method = aml_method("_PS0", 0, AML_NOTSERIALIZED);
aml_append(method, aml_store(aml_or(aml_name("STA"), aml_int(0xF), NULL),
aml_name("STA")));
aml_append(dev, method);
method = aml_method("_STA", 0, AML_NOTSERIALIZED);
aml_append(method, aml_return(aml_name("STA")));
aml_append(dev, method);
aml_append(dev, aml_name_decl("_PS3", aml_int(0x0)));
crs = aml_resource_template();
aml_append(crs, aml_irq_no_flags(vmbus_bridge->irq0));
/* FIXME: newer HyperV gets by with only one IRQ */
aml_append(crs, aml_irq_no_flags(vmbus_bridge->irq1));
aml_append(dev, aml_name_decl("_CRS", crs));
return dev;
}
static void build_isa_devices_aml(Aml *table)
{
ISADevice *fdc = pc_find_fdc0();
VMBusBridge *vmbus_bridge = vmbus_bridge_find();
bool ambiguous;
Aml *scope = aml_scope("_SB.PCI0.ISA");
@ -1075,6 +1114,10 @@ static void build_isa_devices_aml(Aml *table)
isa_build_aml(ISA_BUS(obj), scope);
}
if (vmbus_bridge) {
aml_append(scope, build_vmbus_device_aml(vmbus_bridge));
}
aml_append(table, scope);
}

View File

@ -370,7 +370,7 @@ static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd)
hwaddr addr = cpu_to_le64(extract64(cmd[0], 3, 49)) << 3;
uint64_t data = cpu_to_le64(cmd[1]);
if (extract64(cmd[0], 51, 8)) {
if (extract64(cmd[0], 52, 8)) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
s->cmdbuf + s->cmdbuf_head);
}
@ -395,7 +395,7 @@ static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd)
uint16_t devid = cpu_to_le16((uint16_t)extract64(cmd[0], 0, 16));
/* This command should invalidate internal caches of which there isn't */
if (extract64(cmd[0], 15, 16) || cmd[1]) {
if (extract64(cmd[0], 16, 44) || cmd[1]) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
s->cmdbuf + s->cmdbuf_head);
}
@ -405,9 +405,9 @@ static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd)
static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd)
{
if (extract64(cmd[0], 15, 16) || extract64(cmd[0], 19, 8) ||
if (extract64(cmd[0], 16, 16) || extract64(cmd[0], 52, 8) ||
extract64(cmd[1], 0, 2) || extract64(cmd[1], 3, 29)
|| extract64(cmd[1], 47, 16)) {
|| extract64(cmd[1], 48, 16)) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
s->cmdbuf + s->cmdbuf_head);
}
@ -438,8 +438,8 @@ static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd)
{
uint16_t domid = cpu_to_le16((uint16_t)extract64(cmd[0], 32, 16));
if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 16, 12) ||
extract64(cmd[0], 3, 10)) {
if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 48, 12) ||
extract64(cmd[1], 3, 9)) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
s->cmdbuf + s->cmdbuf_head);
}
@ -451,7 +451,7 @@ static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd)
static void amdvi_prefetch_pages(AMDVIState *s, uint64_t *cmd)
{
if (extract64(cmd[0], 16, 8) || extract64(cmd[0], 20, 8) ||
if (extract64(cmd[0], 16, 8) || extract64(cmd[0], 52, 8) ||
extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
extract64(cmd[1], 5, 7)) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
@ -463,7 +463,7 @@ static void amdvi_prefetch_pages(AMDVIState *s, uint64_t *cmd)
static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd)
{
if (extract64(cmd[0], 16, 16) || cmd[1]) {
if (extract64(cmd[0], 16, 44) || cmd[1]) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
s->cmdbuf + s->cmdbuf_head);
return;
@ -479,7 +479,8 @@ static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd)
{
uint16_t devid = extract64(cmd[0], 0, 16);
if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 9)) {
if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
extract64(cmd[1], 6, 6)) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
s->cmdbuf + s->cmdbuf_head);
return;

View File

@ -31,6 +31,7 @@
#include "hw/i386/apic.h"
#include "hw/i386/topology.h"
#include "hw/i386/fw_cfg.h"
#include "hw/i386/vmport.h"
#include "sysemu/cpus.h"
#include "hw/block/fdc.h"
#include "hw/ide.h"
@ -56,6 +57,7 @@
#include "sysemu/tcg.h"
#include "sysemu/numa.h"
#include "sysemu/kvm.h"
#include "sysemu/xen.h"
#include "sysemu/qtest.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
@ -91,7 +93,6 @@
#include "qapi/qmp/qerror.h"
#include "config-devices.h"
#include "e820_memory_layout.h"
#include "vmport.h"
#include "fw_cfg.h"
#include "trace.h"

View File

@ -53,6 +53,7 @@
#include "cpu.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "sysemu/xen.h"
#ifdef CONFIG_XEN
#include <xen/hvm/hvm_info_table.h>
#include "hw/xen/xen_pt.h"
@ -60,6 +61,7 @@
#include "migration/global_state.h"
#include "migration/misc.h"
#include "sysemu/numa.h"
#include "hw/hyperv/vmbus-bridge.h"
#include "hw/mem/nvdimm.h"
#include "hw/i386/acpi-build.h"
@ -375,7 +377,7 @@ static void pc_init_isa(MachineState *machine)
#ifdef CONFIG_XEN
static void pc_xen_hvm_init_pci(MachineState *machine)
{
const char *pci_type = has_igd_gfx_passthru ?
const char *pci_type = xen_igd_gfx_pt_enabled() ?
TYPE_IGD_PASSTHROUGH_I440FX_PCI_DEVICE : TYPE_I440FX_PCI_DEVICE;
pc_init1(machine,
@ -419,6 +421,7 @@ static void pc_i440fx_machine_options(MachineClass *m)
m->default_machine_opts = "firmware=bios-256k.bin";
m->default_display = "std";
machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
}
static void pc_i440fx_5_1_machine_options(MachineClass *m)

View File

@ -36,6 +36,7 @@
#include "hw/rtc/mc146818rtc.h"
#include "hw/xen/xen.h"
#include "sysemu/kvm.h"
#include "sysemu/xen.h"
#include "hw/kvm/clock.h"
#include "hw/pci-host/q35.h"
#include "hw/qdev-properties.h"
@ -53,6 +54,7 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "sysemu/numa.h"
#include "hw/hyperv/vmbus-bridge.h"
#include "hw/mem/nvdimm.h"
#include "hw/i386/acpi-build.h"
@ -348,6 +350,7 @@ static void pc_q35_machine_options(MachineClass *m)
machine_class_allow_dynamic_sysbus_dev(m, TYPE_AMD_IOMMU_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_INTEL_IOMMU_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
m->max_cpus = 288;
}

View File

@ -25,21 +25,15 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "ui/console.h"
#include "hw/i386/vmport.h"
#include "hw/input/i8042.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "vmport.h"
#include "cpu.h"
/* debug only vmmouse */
//#define DEBUG_VMMOUSE
/* VMMouse Commands */
#define VMMOUSE_GETVERSION 10
#define VMMOUSE_DATA 39
#define VMMOUSE_STATUS 40
#define VMMOUSE_COMMAND 41
#define VMMOUSE_READ_ID 0x45414552
#define VMMOUSE_DISABLE 0x000000f5
#define VMMOUSE_REQUEST_RELATIVE 0x4c455252
@ -217,10 +211,10 @@ static uint32_t vmmouse_ioport_read(void *opaque, uint32_t addr)
command = data[2] & 0xFFFF;
switch (command) {
case VMMOUSE_STATUS:
case VMPORT_CMD_VMMOUSE_STATUS:
data[0] = vmmouse_get_status(s);
break;
case VMMOUSE_COMMAND:
case VMPORT_CMD_VMMOUSE_COMMAND:
switch (data[1]) {
case VMMOUSE_DISABLE:
vmmouse_disable(s);
@ -239,7 +233,7 @@ static uint32_t vmmouse_ioport_read(void *opaque, uint32_t addr)
break;
}
break;
case VMMOUSE_DATA:
case VMPORT_CMD_VMMOUSE_DATA:
vmmouse_data(s, data, data[1]);
break;
default:
@ -296,9 +290,9 @@ static void vmmouse_realizefn(DeviceState *dev, Error **errp)
return;
}
vmport_register(VMMOUSE_STATUS, vmmouse_ioport_read, s);
vmport_register(VMMOUSE_COMMAND, vmmouse_ioport_read, s);
vmport_register(VMMOUSE_DATA, vmmouse_ioport_read, s);
vmport_register(VMPORT_CMD_VMMOUSE_STATUS, vmmouse_ioport_read, s);
vmport_register(VMPORT_CMD_VMMOUSE_COMMAND, vmmouse_ioport_read, s);
vmport_register(VMPORT_CMD_VMMOUSE_DATA, vmmouse_ioport_read, s);
}
static Property vmmouse_properties[] = {

View File

@ -21,20 +21,47 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/*
* Guest code that interacts with this virtual device can be found
* in VMware open-vm-tools open-source project:
* https://github.com/vmware/open-vm-tools
*/
#include "qemu/osdep.h"
#include "hw/isa/isa.h"
#include "hw/i386/vmport.h"
#include "hw/qdev-properties.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/qtest.h"
#include "qemu/log.h"
#include "vmport.h"
#include "cpu.h"
#include "trace.h"
#define VMPORT_CMD_GETVERSION 0x0a
#define VMPORT_CMD_GETRAMSIZE 0x14
#define VMPORT_ENTRIES 0x2c
#define VMPORT_MAGIC 0x564D5868
/* Compatibility flags for migration */
#define VMPORT_COMPAT_READ_SET_EAX_BIT 0
#define VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD_BIT 1
#define VMPORT_COMPAT_REPORT_VMX_TYPE_BIT 2
#define VMPORT_COMPAT_CMDS_V2_BIT 3
#define VMPORT_COMPAT_READ_SET_EAX \
(1 << VMPORT_COMPAT_READ_SET_EAX_BIT)
#define VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD \
(1 << VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD_BIT)
#define VMPORT_COMPAT_REPORT_VMX_TYPE \
(1 << VMPORT_COMPAT_REPORT_VMX_TYPE_BIT)
#define VMPORT_COMPAT_CMDS_V2 \
(1 << VMPORT_COMPAT_CMDS_V2_BIT)
/* vCPU features reported by CMD_GET_VCPU_INFO */
#define VCPU_INFO_SLC64_BIT 0
#define VCPU_INFO_SYNC_VTSCS_BIT 1
#define VCPU_INFO_HV_REPLAY_OK_BIT 2
#define VCPU_INFO_LEGACY_X2APIC_BIT 3
#define VCPU_INFO_RESERVED_BIT 31
#define VMPORT(obj) OBJECT_CHECK(VMPortState, (obj), TYPE_VMPORT)
typedef struct VMPortState {
@ -43,15 +70,19 @@ typedef struct VMPortState {
MemoryRegion io;
VMPortReadFunc *func[VMPORT_ENTRIES];
void *opaque[VMPORT_ENTRIES];
uint32_t vmware_vmx_version;
uint8_t vmware_vmx_type;
uint32_t compat_flags;
} VMPortState;
static VMPortState *port_state;
void vmport_register(unsigned char command, VMPortReadFunc *func, void *opaque)
void vmport_register(VMPortCommand command, VMPortReadFunc *func, void *opaque)
{
if (command >= VMPORT_ENTRIES) {
return;
}
assert(command < VMPORT_ENTRIES);
assert(port_state);
trace_vmport_register(command, func, opaque);
port_state->func[command] = func;
@ -64,25 +95,51 @@ static uint64_t vmport_ioport_read(void *opaque, hwaddr addr,
VMPortState *s = opaque;
CPUState *cs = current_cpu;
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
CPUX86State *env;
unsigned char command;
uint32_t eax;
if (qtest_enabled()) {
return -1;
}
env = &cpu->env;
cpu_synchronize_state(cs);
eax = env->regs[R_EAX];
if (eax != VMPORT_MAGIC) {
return eax;
goto err;
}
command = env->regs[R_ECX];
trace_vmport_command(command);
if (command >= VMPORT_ENTRIES || !s->func[command]) {
qemu_log_mask(LOG_UNIMP, "vmport: unknown command %x\n", command);
return eax;
goto err;
}
return s->func[command](s->opaque[command], addr);
eax = s->func[command](s->opaque[command], addr);
goto out;
err:
if (s->compat_flags & VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD) {
eax = UINT32_MAX;
}
out:
/*
* The call above to cpu_synchronize_state() gets vCPU registers values
* to QEMU but also cause QEMU to write QEMU vCPU registers values to
* vCPU implementation (e.g. Accelerator such as KVM) just before
* resuming guest.
*
* Therefore, in order to make IOPort return value propagate to
* guest EAX, we need to explicitly update QEMU EAX register value.
*/
if (s->compat_flags & VMPORT_COMPAT_READ_SET_EAX) {
cpu->env.regs[R_EAX] = eax;
}
return eax;
}
static void vmport_ioport_write(void *opaque, hwaddr addr,
@ -90,6 +147,9 @@ static void vmport_ioport_write(void *opaque, hwaddr addr,
{
X86CPU *cpu = X86_CPU(current_cpu);
if (qtest_enabled()) {
return;
}
cpu->env.regs[R_EAX] = vmport_ioport_read(opaque, addr, 4);
}
@ -97,18 +157,69 @@ static uint32_t vmport_cmd_get_version(void *opaque, uint32_t addr)
{
X86CPU *cpu = X86_CPU(current_cpu);
if (qtest_enabled()) {
return -1;
}
cpu->env.regs[R_EBX] = VMPORT_MAGIC;
return 6;
if (port_state->compat_flags & VMPORT_COMPAT_REPORT_VMX_TYPE) {
cpu->env.regs[R_ECX] = port_state->vmware_vmx_type;
}
return port_state->vmware_vmx_version;
}
static uint32_t vmport_cmd_get_bios_uuid(void *opaque, uint32_t addr)
{
X86CPU *cpu = X86_CPU(current_cpu);
uint32_t *uuid_parts = (uint32_t *)(qemu_uuid.data);
cpu->env.regs[R_EAX] = le32_to_cpu(uuid_parts[0]);
cpu->env.regs[R_EBX] = le32_to_cpu(uuid_parts[1]);
cpu->env.regs[R_ECX] = le32_to_cpu(uuid_parts[2]);
cpu->env.regs[R_EDX] = le32_to_cpu(uuid_parts[3]);
return cpu->env.regs[R_EAX];
}
static uint32_t vmport_cmd_ram_size(void *opaque, uint32_t addr)
{
X86CPU *cpu = X86_CPU(current_cpu);
if (qtest_enabled()) {
return -1;
}
cpu->env.regs[R_EBX] = 0x1177;
return ram_size;
}
static uint32_t vmport_cmd_get_hz(void *opaque, uint32_t addr)
{
X86CPU *cpu = X86_CPU(current_cpu);
if (cpu->env.tsc_khz && cpu->env.apic_bus_freq) {
uint64_t tsc_freq = (uint64_t)cpu->env.tsc_khz * 1000;
cpu->env.regs[R_ECX] = cpu->env.apic_bus_freq;
cpu->env.regs[R_EBX] = (uint32_t)(tsc_freq >> 32);
cpu->env.regs[R_EAX] = (uint32_t)tsc_freq;
} else {
/* Signal cmd as not supported */
cpu->env.regs[R_EBX] = UINT32_MAX;
}
return cpu->env.regs[R_EAX];
}
static uint32_t vmport_cmd_get_vcpu_info(void *opaque, uint32_t addr)
{
X86CPU *cpu = X86_CPU(current_cpu);
uint32_t ret = 0;
if (cpu->env.features[FEAT_1_ECX] & CPUID_EXT_X2APIC) {
ret |= 1 << VCPU_INFO_LEGACY_X2APIC_BIT;
}
return ret;
}
static const MemoryRegionOps vmport_ops = {
.read = vmport_ioport_read,
.write = vmport_ioport_write,
@ -128,11 +239,54 @@ static void vmport_realizefn(DeviceState *dev, Error **errp)
isa_register_ioport(isadev, &s->io, 0x5658);
port_state = s;
/* Register some generic port commands */
vmport_register(VMPORT_CMD_GETVERSION, vmport_cmd_get_version, NULL);
vmport_register(VMPORT_CMD_GETRAMSIZE, vmport_cmd_ram_size, NULL);
if (s->compat_flags & VMPORT_COMPAT_CMDS_V2) {
vmport_register(VMPORT_CMD_GETBIOSUUID, vmport_cmd_get_bios_uuid, NULL);
vmport_register(VMPORT_CMD_GETHZ, vmport_cmd_get_hz, NULL);
vmport_register(VMPORT_CMD_GET_VCPU_INFO, vmport_cmd_get_vcpu_info,
NULL);
}
}
static Property vmport_properties[] = {
/* Used to enforce compatibility for migration */
DEFINE_PROP_BIT("x-read-set-eax", VMPortState, compat_flags,
VMPORT_COMPAT_READ_SET_EAX_BIT, true),
DEFINE_PROP_BIT("x-signal-unsupported-cmd", VMPortState, compat_flags,
VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD_BIT, true),
DEFINE_PROP_BIT("x-report-vmx-type", VMPortState, compat_flags,
VMPORT_COMPAT_REPORT_VMX_TYPE_BIT, true),
DEFINE_PROP_BIT("x-cmds-v2", VMPortState, compat_flags,
VMPORT_COMPAT_CMDS_V2_BIT, true),
/* Default value taken from open-vm-tools code VERSION_MAGIC definition */
DEFINE_PROP_UINT32("vmware-vmx-version", VMPortState,
vmware_vmx_version, 6),
/*
* Value determines which VMware product type host report itself to guest.
*
* Most guests are fine with exposing host as VMware ESX server.
* Some legacy/proprietary guests hard-code a given type.
*
* For a complete list of values, refer to enum VMXType at open-vm-tools
* project (Defined at lib/include/vm_vmx_type.h).
*
* Reasonable options:
* 0 - Unset
* 1 - VMware Express (deprecated)
* 2 - VMware ESX Server
* 3 - VMware Server (Deprecated)
* 4 - VMware Workstation
* 5 - ACE 1.x (Deprecated)
*/
DEFINE_PROP_UINT8("vmware-vmx-type", VMPortState, vmware_vmx_type, 2),
DEFINE_PROP_END_OF_LIST(),
};
static void vmport_class_initfn(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@ -140,6 +294,7 @@ static void vmport_class_initfn(ObjectClass *klass, void *data)
dc->realize = vmport_realizefn;
/* Reason: realize sets global port_state */
dc->user_creatable = false;
device_class_set_props(dc, vmport_properties);
}
static const TypeInfo vmport_info = {

View File

@ -1,34 +0,0 @@
/*
* QEMU VMPort emulation
*
* Copyright (C) 2007 Hervé Poussineau
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef HW_I386_VMPORT_H
#define HW_I386_VMPORT_H
#define TYPE_VMPORT "vmport"
typedef uint32_t (VMPortReadFunc)(void *opaque, uint32_t address);
void vmport_register(unsigned char command, VMPortReadFunc *func, void *opaque);
#endif

View File

@ -29,6 +29,7 @@
#include "qemu/range.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/xen.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"

View File

@ -33,6 +33,7 @@
#include "hw/xen/xen-legacy-backend.h"
#include "trace.h"
#include "exec/address-spaces.h"
#include "sysemu/xen.h"
#include "sysemu/block-backend.h"
#include "qemu/error-report.h"
#include "qemu/module.h"

View File

@ -241,6 +241,25 @@ void ioapic_eoi_broadcast(int vector)
continue;
}
#ifdef CONFIG_KVM
/*
* When IOAPIC is in the userspace while APIC is still in
* the kernel (i.e., split irqchip), we have a trick to
* kick the resamplefd logic for registered irqfds from
* userspace to deactivate the IRQ. When that happens, it
* means the irq bypassed userspace IOAPIC (so the irr and
* remote-irr of the table entry should be bypassed too
* even if interrupt come). Still kick the resamplefds if
* they're bound to the IRQ, to make sure to EOI the
* interrupt for the hardware correctly.
*
* Note: We still need to go through the irr & remote-irr
* operations below because we don't know whether there're
* emulated devices that are using/sharing the same IRQ.
*/
kvm_resample_fd_notify(n);
#endif
if (!(entry & IOAPIC_LVT_REMOTE_IRR)) {
continue;
}

View File

@ -28,6 +28,7 @@
#include "hw/irq.h"
#include "hw/isa/isa.h"
#include "hw/xen/xen.h"
#include "sysemu/xen.h"
#include "sysemu/sysemu.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"

View File

@ -28,7 +28,6 @@
*/
#include "qemu/osdep.h"
#include "qom/object.h"
#include "hw/pci-host/pam.h"
void init_pam(DeviceState *dev, MemoryRegion *ram_memory,

View File

@ -19,6 +19,7 @@
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/xen/xen.h"
#include "sysemu/xen.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "qemu/range.h"

View File

@ -86,34 +86,34 @@ typedef struct MegasasState {
MemoryRegion queue_io;
uint32_t frame_hi;
int fw_state;
uint32_t fw_state;
uint32_t fw_sge;
uint32_t fw_cmds;
uint32_t flags;
int fw_luns;
int intr_mask;
int doorbell;
int busy;
int diag;
int adp_reset;
uint32_t fw_luns;
uint32_t intr_mask;
uint32_t doorbell;
uint32_t busy;
uint32_t diag;
uint32_t adp_reset;
OnOffAuto msi;
OnOffAuto msix;
MegasasCmd *event_cmd;
int event_locale;
uint16_t event_locale;
int event_class;
int event_count;
int shutdown_event;
int boot_event;
uint32_t event_count;
uint32_t shutdown_event;
uint32_t boot_event;
uint64_t sas_addr;
char *hba_serial;
uint64_t reply_queue_pa;
void *reply_queue;
int reply_queue_len;
int reply_queue_head;
int reply_queue_tail;
uint16_t reply_queue_len;
uint16_t reply_queue_head;
uint16_t reply_queue_tail;
uint64_t consumer_pa;
uint64_t producer_pa;
@ -445,7 +445,7 @@ static MegasasCmd *megasas_lookup_frame(MegasasState *s,
index = s->reply_queue_head;
while (num < s->fw_cmds) {
while (num < s->fw_cmds && index < MEGASAS_MAX_FRAMES) {
if (s->frames[index].pa && s->frames[index].pa == frame) {
cmd = &s->frames[index];
break;
@ -504,7 +504,7 @@ static MegasasCmd *megasas_enqueue_frame(MegasasState *s,
cmd->pa = frame;
/* Map all possible frames */
cmd->frame = pci_dma_map(pcid, frame, &frame_size_p, 0);
if (frame_size_p != frame_size) {
if (!cmd->frame || frame_size_p != frame_size) {
trace_megasas_qf_map_failed(cmd->index, (unsigned long)frame);
if (cmd->frame) {
megasas_unmap_frame(s, cmd);
@ -2259,9 +2259,9 @@ static const VMStateDescription vmstate_megasas_gen1 = {
VMSTATE_PCI_DEVICE(parent_obj, MegasasState),
VMSTATE_MSIX(parent_obj, MegasasState),
VMSTATE_INT32(fw_state, MegasasState),
VMSTATE_INT32(intr_mask, MegasasState),
VMSTATE_INT32(doorbell, MegasasState),
VMSTATE_UINT32(fw_state, MegasasState),
VMSTATE_UINT32(intr_mask, MegasasState),
VMSTATE_UINT32(doorbell, MegasasState),
VMSTATE_UINT64(reply_queue_pa, MegasasState),
VMSTATE_UINT64(consumer_pa, MegasasState),
VMSTATE_UINT64(producer_pa, MegasasState),
@ -2278,9 +2278,9 @@ static const VMStateDescription vmstate_megasas_gen2 = {
VMSTATE_PCI_DEVICE(parent_obj, MegasasState),
VMSTATE_MSIX(parent_obj, MegasasState),
VMSTATE_INT32(fw_state, MegasasState),
VMSTATE_INT32(intr_mask, MegasasState),
VMSTATE_INT32(doorbell, MegasasState),
VMSTATE_UINT32(fw_state, MegasasState),
VMSTATE_UINT32(intr_mask, MegasasState),
VMSTATE_UINT32(doorbell, MegasasState),
VMSTATE_UINT64(reply_queue_pa, MegasasState),
VMSTATE_UINT64(consumer_pa, MegasasState),
VMSTATE_UINT64(producer_pa, MegasasState),

View File

@ -18,7 +18,6 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "hw/fw-path-provider.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"

View File

@ -23,6 +23,7 @@
#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "hw/usb.h"
#include "hw/usb/hcd-musb.h"
#include "hw/irq.h"
#include "hw/hw.h"
@ -1539,13 +1540,13 @@ static void musb_writew(void *opaque, hwaddr addr, uint32_t value)
};
}
CPUReadMemoryFunc * const musb_read[] = {
MUSBReadFunc * const musb_read[] = {
musb_readb,
musb_readh,
musb_readw,
};
CPUWriteMemoryFunc * const musb_write[] = {
MUSBWriteFunc * const musb_write[] = {
musb_writeb,
musb_writeh,
musb_writew,

View File

@ -23,6 +23,7 @@
#include "qemu/module.h"
#include "qemu/timer.h"
#include "hw/usb.h"
#include "hw/usb/hcd-musb.h"
#include "hw/arm/omap.h"
#include "hw/hw.h"
#include "hw/irq.h"

View File

@ -115,11 +115,7 @@ static void vfio_intx_eoi(VFIODevice *vbasedev)
static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
{
#ifdef CONFIG_KVM
struct kvm_irqfd irqfd = {
.fd = event_notifier_get_fd(&vdev->intx.interrupt),
.gsi = vdev->intx.route.irq,
.flags = KVM_IRQFD_FLAG_RESAMPLE,
};
int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt);
Error *err = NULL;
if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
@ -129,7 +125,7 @@ static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
}
/* Get to a known interrupt state */
qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
qemu_set_fd_handler(irq_fd, NULL, NULL, vdev);
vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
vdev->intx.pending = false;
pci_irq_deassert(&vdev->pdev);
@ -140,17 +136,18 @@ static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
goto fail;
}
/* KVM triggers it, VFIO listens for it */
irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);
if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
&vdev->intx.interrupt,
&vdev->intx.unmask,
vdev->intx.route.irq)) {
error_setg_errno(errp, errno, "failed to setup resample irqfd");
goto fail_irqfd;
}
if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
VFIO_IRQ_SET_ACTION_UNMASK,
irqfd.resamplefd, &err)) {
event_notifier_get_fd(&vdev->intx.unmask),
&err)) {
error_propagate(errp, err);
goto fail_vfio;
}
@ -165,12 +162,12 @@ static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
return;
fail_vfio:
irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
vdev->intx.route.irq);
fail_irqfd:
event_notifier_cleanup(&vdev->intx.unmask);
fail:
qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev);
vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}
@ -178,12 +175,6 @@ fail:
static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
struct kvm_irqfd irqfd = {
.fd = event_notifier_get_fd(&vdev->intx.interrupt),
.gsi = vdev->intx.route.irq,
.flags = KVM_IRQFD_FLAG_DEASSIGN,
};
if (!vdev->intx.kvm_accel) {
return;
}
@ -197,7 +188,8 @@ static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
pci_irq_deassert(&vdev->pdev);
/* Tell KVM to stop listening for an INTx irqfd */
if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
vdev->intx.route.irq)) {
error_report("vfio: Error: Failed to disable INTx irqfd: %m");
}
@ -205,7 +197,8 @@ static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
event_notifier_cleanup(&vdev->intx.unmask);
/* QEMU starts listening for interrupt events. */
qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
vfio_intx_interrupt, NULL, vdev);
vdev->intx.kvm_accel = false;

View File

@ -1,6 +1,7 @@
# xen backend driver support
common-obj-$(CONFIG_XEN) += xen-legacy-backend.o xen_devconfig.o xen_pvdev.o xen-common.o xen-bus.o xen-bus-helper.o xen-backend.o
common-obj-y += xen-legacy-backend.o xen_devconfig.o xen_pvdev.o xen-bus.o xen-bus-helper.o xen-backend.o
obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen-host-pci-device.o
obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen_pt.o xen_pt_config_init.o xen_pt_graphics.o xen_pt_msi.o
obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen_pt_load_rom.o
obj-$(call $(lnot, $(CONFIG_XEN_PCI_PASSTHROUGH))) += xen_pt_stub.o

View File

@ -65,7 +65,17 @@
#include "qemu/range.h"
#include "exec/address-spaces.h"
bool has_igd_gfx_passthru;
static bool has_igd_gfx_passthru;
bool xen_igd_gfx_pt_enabled(void)
{
return has_igd_gfx_passthru;
}
void xen_igd_gfx_pt_set(bool value, Error **errp)
{
has_igd_gfx_passthru = value;
}
#define XEN_PT_NR_IRQS (256)
static uint8_t xen_pt_mapped_machine_irq[XEN_PT_NR_IRQS] = {0};

View File

@ -5,6 +5,9 @@
#include "hw/pci/pci.h"
#include "xen-host-pci-device.h"
bool xen_igd_gfx_pt_enabled(void);
void xen_igd_gfx_pt_set(bool value, Error **errp);
void xen_pt_log(const PCIDevice *d, const char *f, ...) GCC_FMT_ATTR(2, 3);
#define XEN_PT_ERR(d, _f, _a...) xen_pt_log(d, "%s: Error: "_f, __func__, ##_a)
@ -322,10 +325,9 @@ extern void *pci_assign_dev_load_option_rom(PCIDevice *dev,
unsigned int domain,
unsigned int bus, unsigned int slot,
unsigned int function);
extern bool has_igd_gfx_passthru;
static inline bool is_igd_vga_passthrough(XenHostPCIDevice *dev)
{
return (has_igd_gfx_passthru
return (xen_igd_gfx_pt_enabled()
&& ((dev->class_code >> 0x8) == PCI_CLASS_DISPLAY_VGA));
}
int xen_pt_register_vga_regions(XenHostPCIDevice *dev);

22
hw/xen/xen_pt_stub.c Normal file
View File

@ -0,0 +1,22 @@
/*
* Copyright (C) 2020 Citrix Systems UK Ltd.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "hw/xen/xen_pt.h"
#include "qapi/error.h"
bool xen_igd_gfx_pt_enabled(void)
{
return false;
}
void xen_igd_gfx_pt_set(bool value, Error **errp)
{
if (value) {
error_setg(errp, "Xen PCI passthrough support not built in");
}
}

View File

@ -413,6 +413,7 @@ void dump_exec_info(void);
void dump_opcount_info(void);
#endif /* !CONFIG_USER_ONLY */
/* Returns: 0 on success, -1 on error */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
void *ptr, target_ulong len, bool is_write);

View File

@ -43,9 +43,6 @@ extern ram_addr_t ram_size;
/* memory API */
typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);

View File

@ -50,12 +50,6 @@
extern bool global_dirty_log;
typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;
struct MemoryRegionMmio {
CPUReadMemoryFunc *read[3];
CPUWriteMemoryFunc *write[3];
};
typedef struct IOMMUTLBEntry IOMMUTLBEntry;
@ -1984,7 +1978,7 @@ void memory_global_dirty_log_start(void);
*/
void memory_global_dirty_log_stop(void);
void mtree_info(bool flatview, bool dispatch_tree, bool owner);
void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
/**
* memory_region_dispatch_read: perform a read directly to the specified
@ -2314,7 +2308,8 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
/* address_space_map: map a physical memory region into a host virtual address
*
* May map a subset of the requested range, given by and returned in @plen.
* May return %NULL if resources needed to perform the mapping are exhausted.
* May return %NULL and set *@plen to zero(0), if resources needed to perform
* the mapping are exhausted.
* Use only for reads OR writes - not for read-modify-write operations.
* Use cpu_register_map_client() to know when retrying the map operation is
* likely to succeed.
@ -2354,10 +2349,11 @@ void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
/* Internal functions, part of the implementation of address_space_read_cached
* and address_space_write_cached. */
void address_space_read_cached_slow(MemoryRegionCache *cache,
hwaddr addr, void *buf, hwaddr len);
void address_space_write_cached_slow(MemoryRegionCache *cache,
hwaddr addr, const void *buf, hwaddr len);
MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
hwaddr addr, void *buf, hwaddr len);
MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
hwaddr addr, const void *buf,
hwaddr len);
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
@ -2422,15 +2418,16 @@ MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
* @buf: buffer with the data transferred
* @len: length of the data transferred
*/
static inline void
static inline MemTxResult
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
void *buf, hwaddr len)
{
assert(addr < cache->len && len <= cache->len - addr);
if (likely(cache->ptr)) {
memcpy(buf, cache->ptr + addr, len);
return MEMTX_OK;
} else {
address_space_read_cached_slow(cache, addr, buf, len);
return address_space_read_cached_slow(cache, addr, buf, len);
}
}
@ -2442,15 +2439,16 @@ address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
* @buf: buffer with the data transferred
* @len: length of the data transferred
*/
static inline void
static inline MemTxResult
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
const void *buf, hwaddr len)
{
assert(addr < cache->len && len <= cache->len - addr);
if (likely(cache->ptr)) {
memcpy(cache->ptr + addr, buf, len);
return MEMTX_OK;
} else {
address_space_write_cached_slow(cache, addr, buf, len);
return address_space_write_cached_slow(cache, addr, buf, len);
}
}

View File

@ -21,7 +21,7 @@
#ifndef CONFIG_USER_ONLY
#include "cpu.h"
#include "hw/xen/xen.h"
#include "sysemu/xen.h"
#include "sysemu/tcg.h"
#include "exec/ramlist.h"
#include "exec/ramblock.h"

View File

@ -1,9 +1,6 @@
#ifndef EDID_H
#define EDID_H
#include "qom/object.h"
#include "hw/qdev-properties.h"
typedef struct qemu_edid_info {
const char *vendor; /* http://www.uefi.org/pnp_id_list */
const char *name;

View File

@ -553,9 +553,14 @@ static int glue(load_elf, SZ)(const char *name, int fd,
rom_add_elf_program(label, mapped_file, data, file_size,
mem_size, addr, as);
} else {
address_space_write(as ? as : &address_space_memory,
addr, MEMTXATTRS_UNSPECIFIED,
data, file_size);
MemTxResult res;
res = address_space_write(as ? as : &address_space_memory,
addr, MEMTXATTRS_UNSPECIFIED,
data, file_size);
if (res != MEMTX_OK) {
goto fail;
}
}
}

View File

@ -79,5 +79,6 @@ void hyperv_synic_add(CPUState *cs);
void hyperv_synic_reset(CPUState *cs);
void hyperv_synic_update(CPUState *cs, bool enable,
hwaddr msg_page_addr, hwaddr event_page_addr);
bool hyperv_is_synic_enabled(void);
#endif

View File

@ -0,0 +1,35 @@
/*
* QEMU Hyper-V VMBus root bridge
*
* Copyright (c) 2017-2018 Virtuozzo International GmbH.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef HW_HYPERV_VMBUS_BRIDGE_H
#define HW_HYPERV_VMBUS_BRIDGE_H
#include "hw/sysbus.h"
#define TYPE_VMBUS_BRIDGE "vmbus-bridge"
typedef struct VMBus VMBus;
typedef struct VMBusBridge {
SysBusDevice parent_obj;
uint8_t irq0;
uint8_t irq1;
VMBus *bus;
} VMBusBridge;
#define VMBUS_BRIDGE(obj) OBJECT_CHECK(VMBusBridge, (obj), TYPE_VMBUS_BRIDGE)
static inline VMBusBridge *vmbus_bridge_find(void)
{
return VMBUS_BRIDGE(object_resolve_path_type("", TYPE_VMBUS_BRIDGE, NULL));
}
#endif

View File

@ -0,0 +1,222 @@
/*
* QEMU Hyper-V VMBus support
*
* Copyright (c) 2017-2018 Virtuozzo International GmbH.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef HW_HYPERV_VMBUS_PROTO_H
#define HW_HYPERV_VMBUS_PROTO_H
#define VMBUS_VERSION_WS2008 ((0 << 16) | (13))
#define VMBUS_VERSION_WIN7 ((1 << 16) | (1))
#define VMBUS_VERSION_WIN8 ((2 << 16) | (4))
#define VMBUS_VERSION_WIN8_1 ((3 << 16) | (0))
#define VMBUS_VERSION_WIN10 ((4 << 16) | (0))
#define VMBUS_VERSION_INVAL -1
#define VMBUS_VERSION_CURRENT VMBUS_VERSION_WIN10
#define VMBUS_MESSAGE_CONNECTION_ID 1
#define VMBUS_EVENT_CONNECTION_ID 2
#define VMBUS_MONITOR_CONNECTION_ID 3
#define VMBUS_SINT 2
#define VMBUS_MSG_INVALID 0
#define VMBUS_MSG_OFFERCHANNEL 1
#define VMBUS_MSG_RESCIND_CHANNELOFFER 2
#define VMBUS_MSG_REQUESTOFFERS 3
#define VMBUS_MSG_ALLOFFERS_DELIVERED 4
#define VMBUS_MSG_OPENCHANNEL 5
#define VMBUS_MSG_OPENCHANNEL_RESULT 6
#define VMBUS_MSG_CLOSECHANNEL 7
#define VMBUS_MSG_GPADL_HEADER 8
#define VMBUS_MSG_GPADL_BODY 9
#define VMBUS_MSG_GPADL_CREATED 10
#define VMBUS_MSG_GPADL_TEARDOWN 11
#define VMBUS_MSG_GPADL_TORNDOWN 12
#define VMBUS_MSG_RELID_RELEASED 13
#define VMBUS_MSG_INITIATE_CONTACT 14
#define VMBUS_MSG_VERSION_RESPONSE 15
#define VMBUS_MSG_UNLOAD 16
#define VMBUS_MSG_UNLOAD_RESPONSE 17
#define VMBUS_MSG_COUNT 18
#define VMBUS_MESSAGE_SIZE_ALIGN sizeof(uint64_t)
#define VMBUS_PACKET_INVALID 0x0
#define VMBUS_PACKET_SYNCH 0x1
#define VMBUS_PACKET_ADD_XFER_PAGESET 0x2
#define VMBUS_PACKET_RM_XFER_PAGESET 0x3
#define VMBUS_PACKET_ESTABLISH_GPADL 0x4
#define VMBUS_PACKET_TEARDOWN_GPADL 0x5
#define VMBUS_PACKET_DATA_INBAND 0x6
#define VMBUS_PACKET_DATA_USING_XFER_PAGES 0x7
#define VMBUS_PACKET_DATA_USING_GPADL 0x8
#define VMBUS_PACKET_DATA_USING_GPA_DIRECT 0x9
#define VMBUS_PACKET_CANCEL_REQUEST 0xa
#define VMBUS_PACKET_COMP 0xb
#define VMBUS_PACKET_DATA_USING_ADDITIONAL_PKT 0xc
#define VMBUS_PACKET_ADDITIONAL_DATA 0xd
#define VMBUS_CHANNEL_USER_DATA_SIZE 120
#define VMBUS_OFFER_MONITOR_ALLOCATED 0x1
#define VMBUS_OFFER_INTERRUPT_DEDICATED 0x1
#define VMBUS_RING_BUFFER_FEAT_PENDING_SZ (1ul << 0)
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 0x1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 0x2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 0x4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
#define VMBUS_CHANNEL_PARENT_OFFER 0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
#define VMBUS_PACKET_FLAG_REQUEST_COMPLETION 1
typedef struct vmbus_message_header {
uint32_t message_type;
uint32_t _padding;
} vmbus_message_header;
typedef struct vmbus_message_initiate_contact {
vmbus_message_header header;
uint32_t version_requested;
uint32_t target_vcpu;
uint64_t interrupt_page;
uint64_t monitor_page1;
uint64_t monitor_page2;
} vmbus_message_initiate_contact;
typedef struct vmbus_message_version_response {
vmbus_message_header header;
uint8_t version_supported;
uint8_t status;
} vmbus_message_version_response;
typedef struct vmbus_message_offer_channel {
vmbus_message_header header;
uint8_t type_uuid[16];
uint8_t instance_uuid[16];
uint64_t _reserved1;
uint64_t _reserved2;
uint16_t channel_flags;
uint16_t mmio_size_mb;
uint8_t user_data[VMBUS_CHANNEL_USER_DATA_SIZE];
uint16_t sub_channel_index;
uint16_t _reserved3;
uint32_t child_relid;
uint8_t monitor_id;
uint8_t monitor_flags;
uint16_t interrupt_flags;
uint32_t connection_id;
} vmbus_message_offer_channel;
typedef struct vmbus_message_rescind_channel_offer {
vmbus_message_header header;
uint32_t child_relid;
} vmbus_message_rescind_channel_offer;
typedef struct vmbus_gpa_range {
uint32_t byte_count;
uint32_t byte_offset;
uint64_t pfn_array[];
} vmbus_gpa_range;
typedef struct vmbus_message_gpadl_header {
vmbus_message_header header;
uint32_t child_relid;
uint32_t gpadl_id;
uint16_t range_buflen;
uint16_t rangecount;
vmbus_gpa_range range[];
} QEMU_PACKED vmbus_message_gpadl_header;
typedef struct vmbus_message_gpadl_body {
vmbus_message_header header;
uint32_t message_number;
uint32_t gpadl_id;
uint64_t pfn_array[];
} vmbus_message_gpadl_body;
typedef struct vmbus_message_gpadl_created {
vmbus_message_header header;
uint32_t child_relid;
uint32_t gpadl_id;
uint32_t status;
} vmbus_message_gpadl_created;
typedef struct vmbus_message_gpadl_teardown {
vmbus_message_header header;
uint32_t child_relid;
uint32_t gpadl_id;
} vmbus_message_gpadl_teardown;
typedef struct vmbus_message_gpadl_torndown {
vmbus_message_header header;
uint32_t gpadl_id;
} vmbus_message_gpadl_torndown;
typedef struct vmbus_message_open_channel {
vmbus_message_header header;
uint32_t child_relid;
uint32_t open_id;
uint32_t ring_buffer_gpadl_id;
uint32_t target_vp;
uint32_t ring_buffer_offset;
uint8_t user_data[VMBUS_CHANNEL_USER_DATA_SIZE];
} vmbus_message_open_channel;
typedef struct vmbus_message_open_result {
vmbus_message_header header;
uint32_t child_relid;
uint32_t open_id;
uint32_t status;
} vmbus_message_open_result;
typedef struct vmbus_message_close_channel {
vmbus_message_header header;
uint32_t child_relid;
} vmbus_message_close_channel;
typedef struct vmbus_ring_buffer {
uint32_t write_index;
uint32_t read_index;
uint32_t interrupt_mask;
uint32_t pending_send_sz;
uint32_t _reserved1[12];
uint32_t feature_bits;
} vmbus_ring_buffer;
typedef struct vmbus_packet_hdr {
uint16_t type;
uint16_t offset_qwords;
uint16_t len_qwords;
uint16_t flags;
uint64_t transaction_id;
} vmbus_packet_hdr;
typedef struct vmbus_pkt_gpa_direct {
uint32_t _reserved;
uint32_t rangecount;
vmbus_gpa_range range[];
} vmbus_pkt_gpa_direct;
typedef struct vmbus_xferpg_range {
uint32_t byte_count;
uint32_t byte_offset;
} vmbus_xferpg_range;
typedef struct vmbus_pkt_xferpg {
uint16_t buffer_id;
uint8_t sender_owns_set;
uint8_t _reserved;
uint32_t rangecount;
vmbus_xferpg_range range[];
} vmbus_pkt_xferpg;
#endif

230
include/hw/hyperv/vmbus.h Normal file
View File

@ -0,0 +1,230 @@
/*
* QEMU Hyper-V VMBus
*
* Copyright (c) 2017-2018 Virtuozzo International GmbH.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef HW_HYPERV_VMBUS_H
#define HW_HYPERV_VMBUS_H
#include "sysemu/sysemu.h"
#include "sysemu/dma.h"
#include "hw/qdev-core.h"
#include "migration/vmstate.h"
#include "hw/hyperv/vmbus-proto.h"
#include "qemu/uuid.h"
#define TYPE_VMBUS_DEVICE "vmbus-dev"
#define VMBUS_DEVICE(obj) \
OBJECT_CHECK(VMBusDevice, (obj), TYPE_VMBUS_DEVICE)
#define VMBUS_DEVICE_CLASS(klass) \
OBJECT_CLASS_CHECK(VMBusDeviceClass, (klass), TYPE_VMBUS_DEVICE)
#define VMBUS_DEVICE_GET_CLASS(obj) \
OBJECT_GET_CLASS(VMBusDeviceClass, (obj), TYPE_VMBUS_DEVICE)
/*
* Object wrapping a GPADL -- GPA Descriptor List -- an array of guest physical
* pages, to be used for various buffers shared between the host and the guest.
*/
typedef struct VMBusGpadl VMBusGpadl;
/*
* VMBus channel -- a pair of ring buffers for either direction, placed within
* one GPADL, and the associated notification means.
*/
typedef struct VMBusChannel VMBusChannel;
/*
* Base class for VMBus devices. Includes one or more channels. Identified by
* class GUID and instance GUID.
*/
typedef struct VMBusDevice VMBusDevice;
typedef void(*VMBusChannelNotifyCb)(struct VMBusChannel *chan);
/*
 * Class of a VMBus device: static identity (class/instance GUIDs) plus the
 * callbacks a concrete device implements on top of the standard qdev ones.
 */
typedef struct VMBusDeviceClass {
    DeviceClass parent;

    QemuUUID classid;               /* identifies the device type on the bus */
    QemuUUID instanceid;            /* Fixed UUID for singleton devices */
    uint16_t channel_flags;
    uint16_t mmio_size_mb;

    /* Extensions to standard device callbacks */
    void (*vmdev_realize)(VMBusDevice *vdev, Error **errp);
    void (*vmdev_unrealize)(VMBusDevice *vdev);
    void (*vmdev_reset)(VMBusDevice *vdev);

    /*
     * Calculate the number of channels based on the device properties. Called
     * at realize time.
     */
    uint16_t (*num_channels)(VMBusDevice *vdev);

    /*
     * Device-specific actions to complete the otherwise successful process of
     * opening a channel.
     * Return 0 on success, -errno on failure.
     */
    int (*open_channel)(VMBusChannel *chan);

    /*
     * Device-specific actions to perform before closing a channel.
     */
    void (*close_channel)(VMBusChannel *chan);

    /*
     * Main device worker; invoked in response to notifications from either
     * side, when there's work to do with the data in the channel ring buffers.
     */
    VMBusChannelNotifyCb chan_notify_cb;
} VMBusDeviceClass;
/*
 * Instance state of a VMBus device: its identity, its channels, and the
 * address space used for DMA to/from guest memory.
 */
struct VMBusDevice {
    DeviceState parent;
    QemuUUID instanceid;        /* instance GUID identifying this device */
    uint16_t num_channels;      /* length of the channels[] array */
    VMBusChannel *channels;
    AddressSpace *dma_as;       /* address space for guest-memory access */
};
extern const VMStateDescription vmstate_vmbus_dev;
/*
* A unit of work parsed out of a message in the receive (i.e. guest->host)
* ring buffer of a channel. It's supposed to be subclassed (through
* embedding) by the specific devices.
*/
typedef struct VMBusChanReq {
    VMBusChannel *chan;         /* channel the request arrived on */
    uint16_t pkt_type;          /* vmbus_packet_hdr type of the packet */
    uint32_t msglen;            /* length of msg in bytes */
    void *msg;                  /* message payload, follows this struct */
    uint64_t transaction_id;    /* echoed back when completing the request */
    bool need_comp;             /* whether the guest expects a completion */
    QEMUSGList sgl;             /* guest-memory ranges attached to the packet */
} VMBusChanReq;
VMBusDevice *vmbus_channel_device(VMBusChannel *chan);
VMBusChannel *vmbus_device_channel(VMBusDevice *dev, uint32_t chan_idx);
uint32_t vmbus_channel_idx(VMBusChannel *chan);
bool vmbus_channel_is_open(VMBusChannel *chan);
/*
* Notify (on guest's behalf) the host side of the channel that there's data in
* the ringbuffer to process.
*/
void vmbus_channel_notify_host(VMBusChannel *chan);
/*
* Reserve space for a packet in the send (i.e. host->guest) ringbuffer. If
* there isn't enough room, indicate that to the guest, to be notified when it
* becomes available.
* Return 0 on success, negative errno on failure.
The ringbuffer indices are NOT updated, but the requested space indicator may be.
*/
int vmbus_channel_reserve(VMBusChannel *chan,
uint32_t desclen, uint32_t msglen);
/*
* Send a packet to the guest. The space for the packet MUST be reserved
* first.
* Return total number of bytes placed in the send ringbuffer on success,
* negative errno on failure.
* The ringbuffer indices are updated on success, and the guest is signaled if
* needed.
*/
ssize_t vmbus_channel_send(VMBusChannel *chan, uint16_t pkt_type,
void *desc, uint32_t desclen,
void *msg, uint32_t msglen,
bool need_comp, uint64_t transaction_id);
/*
* Prepare to fetch a batch of packets from the receive ring buffer.
* Return 0 on success, negative errno on failure.
*/
int vmbus_channel_recv_start(VMBusChannel *chan);
/*
* Shortcut for a common case of sending a simple completion packet with no
* auxiliary descriptors.
*/
ssize_t vmbus_channel_send_completion(VMBusChanReq *req,
void *msg, uint32_t msglen);
/*
* Peek at the receive (i.e. guest->host) ring buffer and extract a unit of
* work (a device-specific subclass of VMBusChanReq) from a packet if there's
* one.
* Return an allocated buffer, containing the request of @size with filled
* VMBusChanReq at the beginning, followed by the message payload, or NULL on
* failure.
* The ringbuffer indices are NOT updated, nor is the private copy of the read
* index.
*/
void *vmbus_channel_recv_peek(VMBusChannel *chan, uint32_t size);
/*
* Update the private copy of the read index once the preceding peek is deemed
* successful.
* The ringbuffer indices are NOT updated.
*/
void vmbus_channel_recv_pop(VMBusChannel *chan);
/*
* Propagate the private copy of the read index into the receive ring buffer,
* and thus complete the reception of a series of packets. Notify guest if
* needed.
* Return the number of bytes popped off the receive ring buffer by the
* preceding recv_peek/recv_pop calls on success, negative errno on failure.
*/
ssize_t vmbus_channel_recv_done(VMBusChannel *chan);
/*
* Free the request allocated by vmbus_channel_recv_peek, together with its
* fields.
*/
void vmbus_free_req(void *req);
/*
* Find and reference a GPADL by @gpadl_id.
* If not found return NULL.
*/
VMBusGpadl *vmbus_get_gpadl(VMBusChannel *chan, uint32_t gpadl_id);
/*
* Unreference @gpadl. If the reference count drops to zero, free it.
* @gpadl may be NULL, in which case nothing is done.
*/
void vmbus_put_gpadl(VMBusGpadl *gpadl);
/*
* Calculate total length in bytes of @gpadl.
* @gpadl must be valid.
*/
uint32_t vmbus_gpadl_len(VMBusGpadl *gpadl);
/*
* Copy data from @iov to @gpadl at offset @off.
* Return the number of bytes copied, or a negative status on failure.
*/
ssize_t vmbus_iov_to_gpadl(VMBusChannel *chan, VMBusGpadl *gpadl, uint32_t off,
const struct iovec *iov, size_t iov_cnt);
/*
* Map SGList contained in the request @req, at offset @off and no more than
* @len bytes, for io in direction @dir, and populate @iov with the mapped
* iovecs.
* Return the number of iovecs mapped, or negative status on failure.
*/
int vmbus_map_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov,
unsigned iov_cnt, size_t len, size_t off);
/*
* Unmap *iov mapped with vmbus_map_sgl, marking the number of bytes @accessed.
*/
void vmbus_unmap_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov,
unsigned iov_cnt, size_t accessed);
void vmbus_save_req(QEMUFile *f, VMBusChanReq *req);
void *vmbus_load_req(QEMUFile *f, VMBusDevice *dev, uint32_t size);
#endif

28
include/hw/i386/vmport.h Normal file
View File

@ -0,0 +1,28 @@
#ifndef HW_VMPORT_H
#define HW_VMPORT_H
#include "hw/isa/isa.h"
#define TYPE_VMPORT "vmport"
typedef uint32_t (VMPortReadFunc)(void *opaque, uint32_t address);
/*
 * Command numbers of the VMware "backdoor" I/O port interface.  The
 * numeric values are fixed by the VMware protocol and must not change.
 */
typedef enum {
    VMPORT_CMD_GETVERSION = 10,
    VMPORT_CMD_GETBIOSUUID = 19,
    VMPORT_CMD_GETRAMSIZE = 20,
    VMPORT_CMD_VMMOUSE_DATA = 39,
    VMPORT_CMD_VMMOUSE_STATUS = 40,
    VMPORT_CMD_VMMOUSE_COMMAND = 41,
    VMPORT_CMD_GETHZ = 45,
    VMPORT_CMD_GET_VCPU_INFO = 68,
    VMPORT_ENTRIES  /* sentinel: one past the highest command handled */
} VMPortCommand;
/* Create the "vmport" device on @bus with default properties. */
static inline void vmport_init(ISABus *bus)
{
    isa_create_simple(bus, TYPE_VMPORT);
}
void vmport_register(VMPortCommand command, VMPortReadFunc *func, void *opaque);
#endif

View File

@ -474,36 +474,6 @@ bool usb_host_dev_is_scsi_storage(USBDevice *usbdev);
#define VM_USB_HUB_SIZE 8
/* hw/usb/hcd-musb.c */
enum musb_irq_source_e {
musb_irq_suspend = 0,
musb_irq_resume,
musb_irq_rst_babble,
musb_irq_sof,
musb_irq_connect,
musb_irq_disconnect,
musb_irq_vbus_request,
musb_irq_vbus_error,
musb_irq_rx,
musb_irq_tx,
musb_set_vbus,
musb_set_session,
/* Add new interrupts here */
musb_irq_max, /* total number of interrupts defined */
};
typedef struct MUSBState MUSBState;
extern CPUReadMemoryFunc * const musb_read[];
extern CPUWriteMemoryFunc * const musb_write[];
MUSBState *musb_init(DeviceState *parent_device, int gpio_base);
void musb_reset(MUSBState *s);
uint32_t musb_core_intr_get(MUSBState *s);
void musb_core_intr_clear(MUSBState *s, uint32_t mask);
void musb_set_size(MUSBState *s, int epnum, int size, int is_tx);
/* usb-bus.c */
#define TYPE_USB_BUS "usb-bus"

47
include/hw/usb/hcd-musb.h Normal file
View File

@ -0,0 +1,47 @@
/*
* "Inventra" High-speed Dual-Role Controller (MUSB-HDRC), Mentor Graphics,
* USB2.0 OTG compliant core used in various chips.
*
* Only host-mode and non-DMA accesses are currently supported.
*
* Copyright (C) 2008 Nokia Corporation
* Written by Andrzej Zaborowski <balrog@zabor.org>
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef HW_USB_MUSB_H
#define HW_USB_MUSB_H
/*
 * Interrupt sources of the MUSB-HDRC core.  The values double as indices
 * into the controller's irq array, so existing entries must keep their
 * positions; new ones go just before musb_irq_max.
 */
enum musb_irq_source_e {
    musb_irq_suspend = 0,
    musb_irq_resume,
    musb_irq_rst_babble,
    musb_irq_sof,
    musb_irq_connect,
    musb_irq_disconnect,
    musb_irq_vbus_request,
    musb_irq_vbus_error,
    musb_irq_rx,
    musb_irq_tx,
    musb_set_vbus,
    musb_set_session,
    /* Add new interrupts here */
    musb_irq_max /* total number of interrupts defined */
};
/* TODO convert hcd-musb to QOM/qdev and remove MUSBReadFunc/MUSBWriteFunc */
typedef void MUSBWriteFunc(void *opaque, hwaddr addr, uint32_t value);
typedef uint32_t MUSBReadFunc(void *opaque, hwaddr addr);
extern MUSBReadFunc * const musb_read[];
extern MUSBWriteFunc * const musb_write[];
typedef struct MUSBState MUSBState;
MUSBState *musb_init(DeviceState *parent_device, int gpio_base);
void musb_reset(MUSBState *s);
uint32_t musb_core_intr_get(MUSBState *s);
void musb_core_intr_clear(MUSBState *s, uint32_t mask);
void musb_set_size(MUSBState *s, int epnum, int size, int is_tx);
#endif

View File

@ -20,13 +20,6 @@ extern uint32_t xen_domid;
extern enum xen_mode xen_mode;
extern bool xen_domid_restrict;
extern bool xen_allowed;
static inline bool xen_enabled(void)
{
return xen_allowed;
}
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num);
void xen_piix3_set_irq(void *opaque, int irq_num, int level);
void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len);
@ -39,10 +32,6 @@ void xenstore_store_pv_console_info(int i, struct Chardev *chr);
void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory);
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
struct MemoryRegion *mr, Error **errp);
void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length);
void xen_register_framebuffer(struct MemoryRegion *mr);
#endif /* QEMU_HW_XEN_H */

View File

@ -21,8 +21,6 @@
#ifndef QIO_TASK_H
#define QIO_TASK_H
#include "qom/object.h"
typedef struct QIOTask QIOTask;
typedef void (*QIOTaskFunc)(QIOTask *task,

View File

@ -177,7 +177,7 @@ void qemu_thread_create(QemuThread *thread, const char *name,
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
void qemu_thread_exit(void *retval);
void qemu_thread_exit(void *retval) QEMU_NORETURN;
void qemu_thread_naming(bool enable);
struct Notifier;

View File

@ -51,7 +51,6 @@ typedef struct FWCfgIoState FWCfgIoState;
typedef struct FWCfgMemState FWCfgMemState;
typedef struct FWCfgState FWCfgState;
typedef struct HostMemoryBackend HostMemoryBackend;
typedef struct HVFX86EmulatorState HVFX86EmulatorState;
typedef struct I2CBus I2CBus;
typedef struct I2SCodec I2SCodec;
typedef struct IOMMUMemoryRegion IOMMUMemoryRegion;
@ -76,6 +75,7 @@ typedef struct NetFilterState NetFilterState;
typedef struct NICInfo NICInfo;
typedef struct NodeInfo NodeInfo;
typedef struct NumaNodeMem NumaNodeMem;
typedef struct Object Object;
typedef struct ObjectClass ObjectClass;
typedef struct PCIBridge PCIBridge;
typedef struct PCIBus PCIBus;

View File

@ -20,8 +20,6 @@
struct TypeImpl;
typedef struct TypeImpl *Type;
typedef struct Object Object;
typedef struct TypeInfo TypeInfo;
typedef struct InterfaceClass InterfaceClass;

View File

@ -13,8 +13,6 @@
#ifndef QEMU_QOM_QOBJECT_H
#define QEMU_QOM_QOBJECT_H
#include "qom/object.h"
/*
* object_property_get_qobject:
* @obj: the object

View File

@ -37,10 +37,12 @@ typedef struct AccelClass {
/*< public >*/
const char *name;
#ifndef CONFIG_USER_ONLY
int (*init_machine)(MachineState *ms);
void (*setup_post)(MachineState *ms, AccelState *accel);
bool (*has_memory)(MachineState *ms, AddressSpace *as,
hwaddr start_addr, hwaddr size);
#endif
bool *allowed;
/*
* Array of global properties that would be applied when specific

View File

@ -13,89 +13,23 @@
#ifndef HVF_H
#define HVF_H
#include "cpu.h"
#include "qemu/bitops.h"
#include "exec/memory.h"
#include "sysemu/accel.h"
extern bool hvf_allowed;
#ifdef CONFIG_HVF
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include <Hypervisor/hv_error.h>
#include "target/i386/cpu.h"
uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
int reg);
extern bool hvf_allowed;
#define hvf_enabled() (hvf_allowed)
#else
#else /* !CONFIG_HVF */
#define hvf_enabled() 0
#define hvf_get_supported_cpuid(func, idx, reg) 0
#endif
/* hvf_slot flags */
#define HVF_SLOT_LOG (1 << 0)
typedef struct hvf_slot {
uint64_t start;
uint64_t size;
uint8_t *mem;
int slot_id;
uint32_t flags;
MemoryRegion *region;
} hvf_slot;
typedef struct hvf_vcpu_caps {
uint64_t vmx_cap_pinbased;
uint64_t vmx_cap_procbased;
uint64_t vmx_cap_procbased2;
uint64_t vmx_cap_entry;
uint64_t vmx_cap_exit;
uint64_t vmx_cap_preemption_timer;
} hvf_vcpu_caps;
typedef struct HVFState {
AccelState parent;
hvf_slot slots[32];
int num_slots;
hvf_vcpu_caps *hvf_caps;
} HVFState;
extern HVFState *hvf_state;
void hvf_set_phys_mem(MemoryRegionSection *, bool);
void hvf_handle_io(CPUArchState *, uint16_t, void *,
int, int, int);
hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
/* Disable HVF if |disable| is 1, otherwise, enable it iff it is supported by
* the host CPU. Use hvf_enabled() after this to get the result. */
void hvf_disable(int disable);
/* Returns non-0 if the host CPU supports the VMX "unrestricted guest" feature
* which allows the virtual CPU to directly run in "real mode". If true, this
* allows QEMU to run several vCPU threads in parallel (see cpus.c). Otherwise,
only a single TCG thread can run, and it will call HVF to run the current
* instructions, except in case of "real mode" (paging disabled, typically at
* boot time), or MMIO operations. */
int hvf_sync_vcpus(void);
#endif /* !CONFIG_HVF */
int hvf_init_vcpu(CPUState *);
int hvf_vcpu_exec(CPUState *);
int hvf_smp_cpu_exec(CPUState *);
void hvf_cpu_synchronize_state(CPUState *);
void hvf_cpu_synchronize_post_reset(CPUState *);
void hvf_cpu_synchronize_post_init(CPUState *);
void _hvf_cpu_synchronize_post_init(CPUState *, run_on_cpu_data);
void hvf_vcpu_destroy(CPUState *);
void hvf_raise_event(CPUState *);
/* void hvf_reset_vcpu_state(void *opaque); */
void hvf_reset_vcpu(CPUState *);
void vmx_update_tpr(CPUState *);
void update_apic_tpr(CPUState *);
int hvf_put_registers(CPUState *);
void vmx_clear_int_window_exiting(CPUState *cpu);
#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf")

View File

@ -554,4 +554,8 @@ int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source);
int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target);
struct ppc_radix_page_info *kvm_get_radix_page_info(void);
int kvm_get_max_memslots(void);
/* Notify resamplefd for EOI of specific interrupts. */
void kvm_resample_fd_notify(int gsi);
#endif

View File

@ -5,7 +5,6 @@
#include "qemu/timer.h"
#include "qemu/notify.h"
#include "qemu/uuid.h"
#include "qom/object.h"
/* vl.c */

View File

@ -8,9 +8,9 @@
#ifndef SYSEMU_TCG_H
#define SYSEMU_TCG_H
extern bool tcg_allowed;
void tcg_exec_init(unsigned long tb_size);
#ifdef CONFIG_TCG
extern bool tcg_allowed;
#define tcg_enabled() (tcg_allowed)
#else
#define tcg_enabled() 0

38
include/sysemu/xen.h Normal file
View File

@ -0,0 +1,38 @@
/*
* QEMU Xen support
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef SYSEMU_XEN_H
#define SYSEMU_XEN_H
#ifdef CONFIG_XEN
bool xen_enabled(void);
#ifndef CONFIG_USER_ONLY
void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length);
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
struct MemoryRegion *mr, Error **errp);
#endif
#else /* !CONFIG_XEN */
#define xen_enabled() 0
#ifndef CONFIG_USER_ONLY
/* No-op stub: without CONFIG_XEN there is no dirty tracking to update. */
static inline void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
{
    /* nothing */
}
/*
 * Stub that must never run: with CONFIG_XEN compiled out, xen_enabled()
 * is constant 0, so reaching this is a logic error in the caller.
 */
static inline void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
                                 MemoryRegion *mr, Error **errp)
{
    g_assert_not_reached();
}
#endif
#endif /* CONFIG_XEN */
#endif

View File

@ -22,6 +22,7 @@
#include "io/task.h"
#include "qapi/error.h"
#include "qemu/thread.h"
#include "qom/object.h"
#include "trace.h"
struct QIOTaskThreadData {

View File

@ -2882,7 +2882,7 @@ static void mtree_print_mr_owner(const MemoryRegion *mr)
static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
hwaddr base,
MemoryRegionListHead *alias_print_queue,
bool owner)
bool owner, bool display_disabled)
{
MemoryRegionList *new_ml, *ml, *next_ml;
MemoryRegionListHead submr_print_queue;
@ -2894,10 +2894,6 @@ static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
return;
}
for (i = 0; i < level; i++) {
qemu_printf(MTREE_INDENT);
}
cur_start = base + mr->addr;
cur_end = cur_start + MR_SIZE(mr->size);
@ -2926,35 +2922,46 @@ static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
ml->mr = mr->alias;
QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
}
qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
" (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
"-" TARGET_FMT_plx "%s",
cur_start, cur_end,
mr->priority,
mr->nonvolatile ? "nv-" : "",
memory_region_type((MemoryRegion *)mr),
memory_region_name(mr),
memory_region_name(mr->alias),
mr->alias_offset,
mr->alias_offset + MR_SIZE(mr->size),
mr->enabled ? "" : " [disabled]");
if (owner) {
mtree_print_mr_owner(mr);
if (mr->enabled || display_disabled) {
for (i = 0; i < level; i++) {
qemu_printf(MTREE_INDENT);
}
qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
" (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
"-" TARGET_FMT_plx "%s",
cur_start, cur_end,
mr->priority,
mr->nonvolatile ? "nv-" : "",
memory_region_type((MemoryRegion *)mr),
memory_region_name(mr),
memory_region_name(mr->alias),
mr->alias_offset,
mr->alias_offset + MR_SIZE(mr->size),
mr->enabled ? "" : " [disabled]");
if (owner) {
mtree_print_mr_owner(mr);
}
qemu_printf("\n");
}
} else {
qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
" (prio %d, %s%s): %s%s",
cur_start, cur_end,
mr->priority,
mr->nonvolatile ? "nv-" : "",
memory_region_type((MemoryRegion *)mr),
memory_region_name(mr),
mr->enabled ? "" : " [disabled]");
if (owner) {
mtree_print_mr_owner(mr);
if (mr->enabled || display_disabled) {
for (i = 0; i < level; i++) {
qemu_printf(MTREE_INDENT);
}
qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
" (prio %d, %s%s): %s%s",
cur_start, cur_end,
mr->priority,
mr->nonvolatile ? "nv-" : "",
memory_region_type((MemoryRegion *)mr),
memory_region_name(mr),
mr->enabled ? "" : " [disabled]");
if (owner) {
mtree_print_mr_owner(mr);
}
qemu_printf("\n");
}
}
qemu_printf("\n");
QTAILQ_INIT(&submr_print_queue);
@ -2977,7 +2984,7 @@ static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
mtree_print_mr(ml->mr, level + 1, cur_start,
alias_print_queue, owner);
alias_print_queue, owner, display_disabled);
}
QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
@ -3088,7 +3095,7 @@ static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
return true;
}
void mtree_info(bool flatview, bool dispatch_tree, bool owner)
void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
{
MemoryRegionListHead ml_head;
MemoryRegionList *ml, *ml2;
@ -3136,14 +3143,14 @@ void mtree_info(bool flatview, bool dispatch_tree, bool owner)
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
qemu_printf("address-space: %s\n", as->name);
mtree_print_mr(as->root, 1, 0, &ml_head, owner);
mtree_print_mr(as->root, 1, 0, &ml_head, owner, disabled);
qemu_printf("\n");
}
/* print aliased regions */
QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
mtree_print_mr(ml->mr, 1, 0, &ml_head, owner);
mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
qemu_printf("\n");
}

View File

@ -28,7 +28,6 @@
#include "qemu/osdep.h"
#include "hw/boards.h"
#include "hw/xen/xen.h"
#include "net/net.h"
#include "migration.h"
#include "migration/snapshot.h"
@ -59,6 +58,7 @@
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/xen.h"
#include "qjson.h"
#include "migration/colo.h"
#include "qemu/bitmap.h"

View File

@ -957,8 +957,9 @@ static void hmp_info_mtree(Monitor *mon, const QDict *qdict)
bool flatview = qdict_get_try_bool(qdict, "flatview", false);
bool dispatch_tree = qdict_get_try_bool(qdict, "dispatch_tree", false);
bool owner = qdict_get_try_bool(qdict, "owner", false);
bool disabled = qdict_get_try_bool(qdict, "disabled", false);
mtree_info(flatview, dispatch_tree, owner);
mtree_info(flatview, dispatch_tree, owner, disabled);
}
#ifdef CONFIG_PROFILER

View File

@ -16,7 +16,6 @@
static const TypeInfo container_info = {
.name = "container",
.instance_size = sizeof(Object),
.parent = TYPE_OBJECT,
};

View File

@ -262,8 +262,7 @@ static void type_initialize_interface(TypeImpl *ti, TypeImpl *interface_type,
new_iface->concrete_class = ti->class;
new_iface->interface_type = interface_type;
ti->class->interfaces = g_slist_append(ti->class->interfaces,
iface_impl->class);
ti->class->interfaces = g_slist_append(ti->class->interfaces, new_iface);
}
static void object_property_free(gpointer data)
@ -316,8 +315,6 @@ static void type_initialize(TypeImpl *ti)
g_assert(parent->instance_size <= ti->instance_size);
memcpy(ti->class, parent->class, parent->class_size);
ti->class->interfaces = NULL;
ti->class->properties = g_hash_table_new_full(
g_str_hash, g_str_equal, NULL, object_property_free);
for (e = parent->class->interfaces; e; e = e->next) {
InterfaceClass *iface = e->data;
@ -347,11 +344,11 @@ static void type_initialize(TypeImpl *ti)
type_initialize_interface(ti, t, t);
}
} else {
ti->class->properties = g_hash_table_new_full(
g_str_hash, g_str_equal, NULL, object_property_free);
}
ti->class->properties = g_hash_table_new_full(g_str_hash, g_str_equal, NULL,
object_property_free);
ti->class->type = ti;
while (parent) {
@ -497,10 +494,8 @@ static void object_class_property_init_all(Object *obj)
}
}
static void object_initialize_with_type(void *data, size_t size, TypeImpl *type)
static void object_initialize_with_type(Object *obj, size_t size, TypeImpl *type)
{
Object *obj = data;
type_initialize(type);
g_assert(type->instance_size >= sizeof(Object));
@ -1051,7 +1046,10 @@ static int do_object_child_foreach(Object *obj,
break;
}
if (recurse) {
do_object_child_foreach(child, fn, opaque, true);
ret = do_object_child_foreach(child, fn, opaque, true);
if (ret != 0) {
break;
}
}
}
}
@ -1953,26 +1951,25 @@ Object *object_resolve_path_component(Object *parent, const char *part)
}
static Object *object_resolve_abs_path(Object *parent,
char **parts,
const char *typename,
int index)
char **parts,
const char *typename)
{
Object *child;
if (parts[index] == NULL) {
if (*parts == NULL) {
return object_dynamic_cast(parent, typename);
}
if (strcmp(parts[index], "") == 0) {
return object_resolve_abs_path(parent, parts, typename, index + 1);
if (strcmp(*parts, "") == 0) {
return object_resolve_abs_path(parent, parts + 1, typename);
}
child = object_resolve_path_component(parent, parts[index]);
child = object_resolve_path_component(parent, *parts);
if (!child) {
return NULL;
}
return object_resolve_abs_path(child, parts, typename, index + 1);
return object_resolve_abs_path(child, parts + 1, typename);
}
static Object *object_resolve_partial_path(Object *parent,
@ -1984,7 +1981,7 @@ static Object *object_resolve_partial_path(Object *parent,
GHashTableIter iter;
ObjectProperty *prop;
obj = object_resolve_abs_path(parent, parts, typename, 0);
obj = object_resolve_abs_path(parent, parts, typename);
g_hash_table_iter_init(&iter, parent->properties);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *)&prop)) {
@ -2029,7 +2026,7 @@ Object *object_resolve_path_type(const char *path, const char *typename,
*ambiguousp = ambiguous;
}
} else {
obj = object_resolve_abs_path(object_get_root(), parts, typename, 1);
obj = object_resolve_abs_path(object_get_root(), parts + 1, typename);
}
g_strfreev(parts);

View File

@ -22,6 +22,9 @@
It also protects replay events queue which stores events to be
written or read to the log. */
static QemuMutex lock;
/* Condition and queue for fair ordering of mutex lock requests. */
static QemuCond mutex_cond;
static unsigned long mutex_head, mutex_tail;
/* File for replay writing */
static bool write_error;
@ -197,9 +200,10 @@ static __thread bool replay_locked;
void replay_mutex_init(void)
{
qemu_mutex_init(&lock);
qemu_cond_init(&mutex_cond);
/* Hold the mutex while we start-up */
qemu_mutex_lock(&lock);
replay_locked = true;
++mutex_tail;
}
bool replay_mutex_locked(void)
@ -211,10 +215,16 @@ bool replay_mutex_locked(void)
void replay_mutex_lock(void)
{
if (replay_mode != REPLAY_MODE_NONE) {
unsigned long id;
g_assert(!qemu_mutex_iothread_locked());
g_assert(!replay_mutex_locked());
qemu_mutex_lock(&lock);
id = mutex_tail++;
while (id != mutex_head) {
qemu_cond_wait(&mutex_cond, &lock);
}
replay_locked = true;
qemu_mutex_unlock(&lock);
}
}
@ -222,7 +232,10 @@ void replay_mutex_unlock(void)
{
if (replay_mode != REPLAY_MODE_NONE) {
g_assert(replay_mutex_locked());
qemu_mutex_lock(&lock);
++mutex_head;
replay_locked = false;
qemu_cond_broadcast(&mutex_cond);
qemu_mutex_unlock(&lock);
}
}

View File

@ -366,6 +366,11 @@ void replay_finish(void)
/* finalize the file */
if (replay_file) {
if (replay_mode == REPLAY_MODE_RECORD) {
/*
* Can't do it in the signal handler, therefore
* add shutdown event here for the case of Ctrl-C.
*/
replay_shutdown_request(SHUTDOWN_CAUSE_HOST_SIGNAL);
/* write end event */
replay_put_event(EVENT_END);

View File

@ -1267,7 +1267,7 @@ sub checkfilename {
# files and when changing tests.
if ($name =~ m#^tests/data/acpi/# and not $name =~ m#^\.sh$#) {
$$acpi_testexpected = $name;
} elsif ($name =~ m#^tests/qtest/bios-tables-test-allowed-diff.h$#) {
} elsif ($name !~ m#^tests/qtest/bios-tables-test-allowed-diff.h$#) {
$$acpi_nontestexpected = $name;
}
if (defined $$acpi_testexpected and defined $$acpi_nontestexpected) {

View File

@ -125,5 +125,6 @@ RUN dnf install -y $PACKAGES
RUN rpm -q $PACKAGES | sort > /packages.txt
ENV PATH $PATH:/usr/libexec/python3-sphinx/
ENV COVERITY_TOOL_BASE=/coverity-tools
COPY coverity_tool.tgz coverity_tool.tgz
RUN mkdir -p /coverity-tools/coverity_tool && cd /coverity-tools/coverity_tool && tar xf /coverity_tool.tgz
COPY run-coverity-scan run-coverity-scan
RUN --mount=type=secret,id=coverity.token,required ./run-coverity-scan --update-tools-only --tokenfile /run/secrets/coverity.token

View File

@ -29,8 +29,11 @@
# Command line options:
# --dry-run : run the tools, but don't actually do the upload
# --docker : create and work inside a docker container
# --docker : create and work inside a container
# --docker-engine : specify the container engine to use (docker/podman/auto);
# implies --docker
# --update-tools-only : update the cached copy of the tools, but don't run them
# --no-update-tools : do not update the cached copy of the tools
# --tokenfile : file to read Coverity token from
# --version ver : specify version being analyzed (default: ask git)
# --description desc : specify description of this version (default: ask git)
@ -41,9 +44,10 @@
# is intended mainly for internal use by the Docker support
#
# User-specifiable environment variables:
# COVERITY_TOKEN -- Coverity token
# COVERITY_TOKEN -- Coverity token (default: looks at your
# coverity.token config)
# COVERITY_EMAIL -- the email address to use for uploads (default:
# looks at your git user.email config)
# looks at your git coverity.email or user.email config)
# COVERITY_BUILD_CMD -- make command (default: 'make -jN' where N is
# number of CPUs as determined by 'nproc')
# COVERITY_TOOL_BASE -- set to directory to put coverity tools
@ -58,11 +62,11 @@ check_upload_permissions() {
# with status 1 if the check failed (usually a bad token);
# will exit the script with status 0 if the check indicated that we
# can't upload yet (ie we are at quota)
# Assumes that PROJTOKEN, PROJNAME and DRYRUN have been initialized.
# Assumes that COVERITY_TOKEN, PROJNAME and DRYRUN have been initialized.
echo "Checking upload permissions..."
if ! up_perm="$(wget https://scan.coverity.com/api/upload_permitted --post-data "token=$PROJTOKEN&project=$PROJNAME" -q -O -)"; then
if ! up_perm="$(wget https://scan.coverity.com/api/upload_permitted --post-data "token=$COVERITY_TOKEN&project=$PROJNAME" -q -O -)"; then
echo "Coverity Scan API access denied: bad token?"
exit 1
fi
@ -91,43 +95,62 @@ check_upload_permissions() {
}
build_docker_image() {
# build docker container including the coverity-scan tools
echo "Building docker container..."
# TODO: This re-unpacks the tools every time, rather than caching
# and reusing the image produced by the COPY of the .tgz file.
# Not sure why.
tests/docker/docker.py --engine ${DOCKER_ENGINE} build \
-t coverity-scanner -f scripts/coverity-scan/coverity-scan.docker \
--extra-files scripts/coverity-scan/run-coverity-scan \
"$COVERITY_TOOL_BASE"/coverity_tool.tgz
}
update_coverity_tools () {
# Check for whether we need to download the Coverity tools
# (either because we don't have a copy, or because it's out of date)
# Assumes that COVERITY_TOOL_BASE, PROJTOKEN and PROJNAME are set.
# Assumes that COVERITY_TOOL_BASE, COVERITY_TOKEN and PROJNAME are set.
mkdir -p "$COVERITY_TOOL_BASE"
cd "$COVERITY_TOOL_BASE"
echo "Checking for new version of coverity build tools..."
wget https://scan.coverity.com/download/linux64 --post-data "token=$PROJTOKEN&project=$PROJNAME&md5=1" -O coverity_tool.md5.new
wget https://scan.coverity.com/download/linux64 --post-data "token=$COVERITY_TOKEN&project=$PROJNAME&md5=1" -O coverity_tool.md5.new
if ! cmp -s coverity_tool.md5 coverity_tool.md5.new; then
# out of date md5 or no md5: download new build tool
# blow away the old build tool
echo "Downloading coverity build tools..."
rm -rf coverity_tool coverity_tool.tgz
wget https://scan.coverity.com/download/linux64 --post-data "token=$PROJTOKEN&project=$PROJNAME" -O coverity_tool.tgz
wget https://scan.coverity.com/download/linux64 --post-data "token=$COVERITY_TOKEN&project=$PROJNAME" -O coverity_tool.tgz
if ! (cat coverity_tool.md5.new; echo " coverity_tool.tgz") | md5sum -c --status; then
echo "Downloaded tarball didn't match md5sum!"
exit 1
fi
# extract the new one, keeping it corralled in a 'coverity_tool' directory
echo "Unpacking coverity build tools..."
mkdir -p coverity_tool
cd coverity_tool
tar xf ../coverity_tool.tgz
cd ..
mv coverity_tool.md5.new coverity_tool.md5
fi
if [ "$DOCKER" != yes ]; then
# extract the new one, keeping it corralled in a 'coverity_tool' directory
echo "Unpacking coverity build tools..."
mkdir -p coverity_tool
cd coverity_tool
tar xf ../coverity_tool.tgz
cd ..
mv coverity_tool.md5.new coverity_tool.md5
fi
fi
rm -f coverity_tool.md5.new
cd "$SRCDIR"
if [ "$DOCKER" = yes ]; then
build_docker_image
fi
}
# Check user-provided environment variables and arguments
DRYRUN=no
UPDATE_ONLY=no
UPDATE=yes
DOCKER=no
while [ "$#" -ge 1 ]; do
@ -136,9 +159,13 @@ while [ "$#" -ge 1 ]; do
shift
DRYRUN=yes
;;
--no-update-tools)
shift
UPDATE=no
;;
--update-tools-only)
shift
UPDATE_ONLY=yes
UPDATE=only
;;
--version)
shift
@ -196,6 +223,17 @@ while [ "$#" -ge 1 ]; do
;;
--docker)
DOCKER=yes
DOCKER_ENGINE=auto
shift
;;
--docker-engine)
shift
if [ $# -eq 0 ]; then
echo "--docker-engine needs an argument"
exit 1
fi
DOCKER=yes
DOCKER_ENGINE="$1"
shift
;;
*)
@ -205,6 +243,9 @@ while [ "$#" -ge 1 ]; do
esac
done
if [ -z "$COVERITY_TOKEN" ]; then
COVERITY_TOKEN="$(git config coverity.token)"
fi
if [ -z "$COVERITY_TOKEN" ]; then
echo "COVERITY_TOKEN environment variable not set"
exit 1
@ -225,19 +266,19 @@ if [ -z "$SRCDIR" ]; then
SRCDIR="$PWD"
fi
PROJTOKEN="$COVERITY_TOKEN"
PROJNAME=QEMU
TARBALL=cov-int.tar.xz
if [ "$UPDATE_ONLY" = yes ] && [ "$DOCKER" = yes ]; then
echo "Combining --docker and --update-only is not supported"
exit 1
fi
if [ "$UPDATE_ONLY" = yes ]; then
if [ "$UPDATE" = only ]; then
# Just do the tools update; we don't need to check whether
# we are in a source tree or have upload rights for this,
# so do it before some of the command line and source tree checks.
if [ "$DOCKER" = yes ] && [ ! -z "$SRCTARBALL" ]; then
echo --update-tools-only --docker is incompatible with --src-tarball.
exit 1
fi
update_coverity_tools
exit 0
fi
@ -268,18 +309,27 @@ if [ -z "$DESCRIPTION" ]; then
DESCRIPTION="$(git rev-parse HEAD)"
fi
if [ -z "$COVERITY_EMAIL" ]; then
COVERITY_EMAIL="$(git config coverity.email)"
fi
if [ -z "$COVERITY_EMAIL" ]; then
COVERITY_EMAIL="$(git config user.email)"
fi
# Otherwise, continue with the full build and upload process.
check_upload_permissions
if [ "$UPDATE" != no ]; then
update_coverity_tools
fi
# Run ourselves inside docker if that's what the user wants
if [ "$DOCKER" = yes ]; then
# build docker container including the coverity-scan tools
# Put the Coverity token into a temporary file that only
# we have read access to, and then pass it to docker build
# using --secret. This requires at least Docker 18.09.
# Mostly what we are trying to do here is ensure we don't leak
# the token into the Docker image.
# using a volume. A volume is enough for the token not to
# leak into the Docker image.
umask 077
SECRETDIR=$(mktemp -d)
if [ -z "$SECRETDIR" ]; then
@ -290,38 +340,27 @@ if [ "$DOCKER" = yes ]; then
echo "Created temporary directory $SECRETDIR"
SECRET="$SECRETDIR/token"
echo "$COVERITY_TOKEN" > "$SECRET"
echo "Building docker container..."
# TODO: This re-downloads the tools every time, rather than
# caching and reusing the image produced with the downloaded tools.
# Not sure why.
# TODO: how do you get 'docker build' to print the output of the
# commands it is running to its stdout? This would be useful for debug.
DOCKER_BUILDKIT=1 docker build -t coverity-scanner \
--secret id=coverity.token,src="$SECRET" \
-f scripts/coverity-scan/coverity-scan.docker \
scripts/coverity-scan
echo "Archiving sources to be analyzed..."
./scripts/archive-source.sh "$SECRETDIR/qemu-sources.tgz"
ARGS="--no-update-tools"
if [ "$DRYRUN" = yes ]; then
DRYRUNARG=--dry-run
ARGS="$ARGS --dry-run"
fi
echo "Running scanner..."
# If we need to capture the output tarball, get the inner run to
# save it to the secrets directory so we can copy it out before the
# directory is cleaned up.
if [ ! -z "$RESULTSTARBALL" ]; then
RTARGS="--results-tarball /work/cov-int.tar.xz"
else
RTARGS=""
ARGS="$ARGS --results-tarball /work/cov-int.tar.xz"
fi
# Arrange for this docker run to get access to the sources with -v.
# We pass through all the configuration from the outer script to the inner.
export COVERITY_EMAIL COVERITY_BUILD_CMD
docker run -it --env COVERITY_EMAIL --env COVERITY_BUILD_CMD \
tests/docker/docker.py run -it --env COVERITY_EMAIL --env COVERITY_BUILD_CMD \
-v "$SECRETDIR:/work" coverity-scanner \
./run-coverity-scan --version "$VERSION" \
--description "$DESCRIPTION" $DRYRUNARG --tokenfile /work/token \
--srcdir /qemu --src-tarball /work/qemu-sources.tgz $RTARGS
--description "$DESCRIPTION" $ARGS --tokenfile /work/token \
--srcdir /qemu --src-tarball /work/qemu-sources.tgz
if [ ! -z "$RESULTSTARBALL" ]; then
echo "Copying results tarball to $RESULTSTARBALL..."
cp "$SECRETDIR/cov-int.tar.xz" "$RESULTSTARBALL"
@ -330,12 +369,6 @@ if [ "$DOCKER" = yes ]; then
exit 0
fi
# Otherwise, continue with the full build and upload process.
check_upload_permissions
update_coverity_tools
TOOLBIN="$(cd "$COVERITY_TOOL_BASE" && echo $PWD/coverity_tool/cov-analysis-*/bin)"
if ! test -x "$TOOLBIN/cov-build"; then
@ -393,7 +426,7 @@ if [ "$DRYRUN" = yes ]; then
exit 0
fi
curl --form token="$PROJTOKEN" --form email="$COVERITY_EMAIL" \
curl --form token="$COVERITY_TOKEN" --form email="$COVERITY_EMAIL" \
--form file=@"$TARBALL" --form version="$VERSION" \
--form description="$DESCRIPTION" \
https://scan.coverity.com/builds?project="$PROJNAME"

View File

@ -36,6 +36,7 @@
#include "sysemu/runstate.h"
#include "sysemu/seccomp.h"
#include "sysemu/tcg.h"
#include "sysemu/xen.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
@ -178,7 +179,6 @@ static NotifierList exit_notifiers =
static NotifierList machine_init_done_notifiers =
NOTIFIER_LIST_INITIALIZER(machine_init_done_notifiers);
bool xen_allowed;
uint32_t xen_domid;
enum xen_mode xen_mode = XEN_EMULATE;
bool xen_domid_restrict;
@ -4334,12 +4334,13 @@ void qemu_init(int argc, char **argv, char **envp)
parse_numa_opts(current_machine);
/* do monitor/qmp handling at preconfig state if requested */
qemu_main_loop();
if (machine_class->default_ram_id && current_machine->ram_size &&
numa_uses_legacy_mem() && !current_machine->ram_memdev_id) {
create_default_memdev(current_machine, mem_path);
}
/* do monitor/qmp handling at preconfig state if requested */
qemu_main_loop();
audio_init_audiodevs();

View File

@ -49,7 +49,5 @@ stub-obj-y += target-get-monitor-def.o
stub-obj-y += target-monitor-defs.o
stub-obj-y += uuid.o
stub-obj-y += vm-stop.o
stub-obj-y += xen-common.o
stub-obj-y += xen-hvm.o
endif # CONFIG_SOFTMMU || CONFIG_TOOLS

View File

@ -1,5 +1,4 @@
#include "qemu/osdep.h"
#include "qom/object.h"
#include "hw/mem/memory-device.h"
MemoryDeviceInfoList *qmp_memory_device_list(void)

View File

@ -1,13 +0,0 @@
/*
* Copyright (C) 2014 Citrix Systems UK Ltd.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "hw/xen/xen.h"
void xenstore_store_pv_console_info(int i, Chardev *chr)
{
}

View File

@ -1,31 +0,0 @@
Correctness issues:
- some eflags manipulation incorrectly reset the bit 0x2.
- SVM: test, cpu save/restore, SMM save/restore.
- x86_64: lcall/ljmp intel/amd differences ?
- better code fetch (different exception handling + CS.limit support)
- user/kernel PUSHL/POPL in helper.c
- add missing cpuid tests
- return UD exception if LOCK prefix incorrectly used
- test ldt limit < 7 ?
- fix some 16 bit sp push/pop overflow (pusha/popa, lcall lret)
- full support of segment limit/rights
- full x87 exception support
- improve x87 bit exactness (use bochs code ?)
- DRx register support
- CR0.AC emulation
- SSE alignment checks
Optimizations/Features:
- add SVM nested paging support
- add VMX support
- add AVX support
- add SSE5 support
- fxsave/fxrstor AMD extensions
- improve monitor/mwait support
- faster EFLAGS update: consider SZAP, C, O can be updated separately
with a bit field in CC_OP and more state variables.
- evaluate x87 stack pointer statically
- find a way to avoid translating several time the same TB if CR0.TS
is set or not.

View File

@ -29,6 +29,7 @@
#include "sysemu/reset.h"
#include "sysemu/hvf.h"
#include "sysemu/cpus.h"
#include "sysemu/xen.h"
#include "kvm_i386.h"
#include "sev_i386.h"
@ -54,7 +55,6 @@
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#include "hw/boards.h"
#endif
@ -985,7 +985,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.feat_names = {
NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
NULL, NULL, NULL, NULL,
NULL, NULL, "md-clear", NULL,
"avx512-vp2intersect", NULL, "md-clear", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL /* pconfig */, NULL,
NULL, NULL, NULL, NULL,
@ -1139,6 +1139,22 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.index = MSR_IA32_CORE_CAPABILITY,
},
},
[FEAT_PERF_CAPABILITIES] = {
.type = MSR_FEATURE_WORD,
.feat_names = {
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, "full-width-write", NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
.msr = {
.index = MSR_IA32_PERF_CAPABILITIES,
},
},
[FEAT_VMX_PROCBASED_CTLS] = {
.type = MSR_FEATURE_WORD,
@ -1316,6 +1332,10 @@ static FeatureDep feature_dependencies[] = {
.from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
.to = { FEAT_CORE_CAPABILITY, ~0ull },
},
{
.from = { FEAT_1_ECX, CPUID_EXT_PDCM },
.to = { FEAT_PERF_CAPABILITIES, ~0ull },
},
{
.from = { FEAT_1_ECX, CPUID_EXT_VMX },
.to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
@ -5488,6 +5508,9 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ebx |= (cs->nr_cores * cs->nr_threads) << 16;
*edx |= CPUID_HT;
}
if (!cpu->enable_pmu) {
*ecx &= ~CPUID_EXT_PDCM;
}
break;
case 2:
/* cache info: needed for Pentium Pro compatibility */
@ -5837,11 +5860,20 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*eax = cpu->phys_bits;
}
*ebx = env->features[FEAT_8000_0008_EBX];
*ecx = 0;
*edx = 0;
if (cs->nr_cores * cs->nr_threads > 1) {
*ecx |= (cs->nr_cores * cs->nr_threads) - 1;
/*
* Bits 15:12 is "The number of bits in the initial
* Core::X86::Apic::ApicId[ApicId] value that indicate
* thread ID within a package". This is already stored at
* CPUX86State::pkg_offset.
* Bits 7:0 is "The number of threads in the package is NC+1"
*/
*ecx = (env->pkg_offset << 12) |
((cs->nr_cores * cs->nr_threads) - 1);
} else {
*ecx = 0;
}
*edx = 0;
break;
case 0x8000000A:
if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {

View File

@ -356,6 +356,8 @@ typedef enum X86Seg {
#define MSR_IA32_ARCH_CAPABILITIES 0x10a
#define ARCH_CAP_TSX_CTRL_MSR (1<<7)
#define MSR_IA32_PERF_CAPABILITIES 0x345
#define MSR_IA32_TSX_CTRL 0x122
#define MSR_IA32_TSCDEADLINE 0x6e0
@ -529,6 +531,7 @@ typedef enum FeatureWord {
FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
FEAT_ARCH_CAPABILITIES,
FEAT_CORE_CAPABILITY,
FEAT_PERF_CAPABILITIES,
FEAT_VMX_PROCBASED_CTLS,
FEAT_VMX_SECONDARY_CTLS,
FEAT_VMX_PINBASED_CTLS,
@ -772,6 +775,8 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2)
/* AVX512 Multiply Accumulation Single Precision */
#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3)
/* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
#define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
/* Speculation Control */
#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26)
/* Single Thread Indirect Branch Predictors */
@ -1361,6 +1366,11 @@ typedef struct CPUCaches {
CPUCacheInfo *l3_cache;
} CPUCaches;
typedef struct HVFX86LazyFlags {
target_ulong result;
target_ulong auxbits;
} HVFX86LazyFlags;
typedef struct CPUX86State {
/* standard registers */
target_ulong regs[CPU_NB_REGS];
@ -1584,6 +1594,7 @@ typedef struct CPUX86State {
bool tsc_valid;
int64_t tsc_khz;
int64_t user_tsc_khz; /* for sanity check only */
uint64_t apic_bus_freq;
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
void *xsave_buf;
#endif
@ -1591,7 +1602,8 @@ typedef struct CPUX86State {
struct kvm_nested_state *nested_state;
#endif
#if defined(CONFIG_HVF)
HVFX86EmulatorState *hvf_emul;
HVFX86LazyFlags hvf_lflags;
void *hvf_mmio_buf;
#endif
uint64_t mcg_cap;
@ -1633,6 +1645,7 @@ struct X86CPU {
CPUNegativeOffsetState neg;
CPUX86State env;
VMChangeStateEntry *vmsentry;
uint64_t ucode_rev;

View File

@ -59,8 +59,13 @@
#define FPUC_EM 0x3f
#define floatx80_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL)
#define floatx80_lg2_d make_floatx80(0x3ffd, 0x9a209a84fbcff798LL)
#define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL)
#define floatx80_l2e_d make_floatx80(0x3fff, 0xb8aa3b295c17f0bbLL)
#define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL)
#define floatx80_l2t_u make_floatx80(0x4000, 0xd49a784bcd1b8affLL)
#define floatx80_ln2_d make_floatx80(0x3ffe, 0xb17217f7d1cf79abLL)
#define floatx80_pi_d make_floatx80(0x4000, 0xc90fdaa22168c234LL)
#if !defined(CONFIG_USER_ONLY)
static qemu_irq ferr_irq;
@ -156,12 +161,32 @@ static void fpu_set_exception(CPUX86State *env, int mask)
}
}
static inline uint8_t save_exception_flags(CPUX86State *env)
{
uint8_t old_flags = get_float_exception_flags(&env->fp_status);
set_float_exception_flags(0, &env->fp_status);
return old_flags;
}
static void merge_exception_flags(CPUX86State *env, uint8_t old_flags)
{
uint8_t new_flags = get_float_exception_flags(&env->fp_status);
float_raise(old_flags, &env->fp_status);
fpu_set_exception(env,
((new_flags & float_flag_invalid ? FPUS_IE : 0) |
(new_flags & float_flag_divbyzero ? FPUS_ZE : 0) |
(new_flags & float_flag_overflow ? FPUS_OE : 0) |
(new_flags & float_flag_underflow ? FPUS_UE : 0) |
(new_flags & float_flag_inexact ? FPUS_PE : 0) |
(new_flags & float_flag_input_denormal ? FPUS_DE : 0)));
}
static inline floatx80 helper_fdiv(CPUX86State *env, floatx80 a, floatx80 b)
{
if (floatx80_is_zero(b)) {
fpu_set_exception(env, FPUS_ZE);
}
return floatx80_div(a, b, &env->fp_status);
uint8_t old_flags = save_exception_flags(env);
floatx80 ret = floatx80_div(a, b, &env->fp_status);
merge_exception_flags(env, old_flags);
return ret;
}
static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr)
@ -178,6 +203,7 @@ static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr)
void helper_flds_FT0(CPUX86State *env, uint32_t val)
{
uint8_t old_flags = save_exception_flags(env);
union {
float32 f;
uint32_t i;
@ -185,10 +211,12 @@ void helper_flds_FT0(CPUX86State *env, uint32_t val)
u.i = val;
FT0 = float32_to_floatx80(u.f, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fldl_FT0(CPUX86State *env, uint64_t val)
{
uint8_t old_flags = save_exception_flags(env);
union {
float64 f;
uint64_t i;
@ -196,6 +224,7 @@ void helper_fldl_FT0(CPUX86State *env, uint64_t val)
u.i = val;
FT0 = float64_to_floatx80(u.f, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fildl_FT0(CPUX86State *env, int32_t val)
@ -205,6 +234,7 @@ void helper_fildl_FT0(CPUX86State *env, int32_t val)
void helper_flds_ST0(CPUX86State *env, uint32_t val)
{
uint8_t old_flags = save_exception_flags(env);
int new_fpstt;
union {
float32 f;
@ -216,10 +246,12 @@ void helper_flds_ST0(CPUX86State *env, uint32_t val)
env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
merge_exception_flags(env, old_flags);
}
void helper_fldl_ST0(CPUX86State *env, uint64_t val)
{
uint8_t old_flags = save_exception_flags(env);
int new_fpstt;
union {
float64 f;
@ -231,6 +263,7 @@ void helper_fldl_ST0(CPUX86State *env, uint64_t val)
env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
merge_exception_flags(env, old_flags);
}
void helper_fildl_ST0(CPUX86State *env, int32_t val)
@ -255,90 +288,108 @@ void helper_fildll_ST0(CPUX86State *env, int64_t val)
uint32_t helper_fsts_ST0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
union {
float32 f;
uint32_t i;
} u;
u.f = floatx80_to_float32(ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
return u.i;
}
uint64_t helper_fstl_ST0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
union {
float64 f;
uint64_t i;
} u;
u.f = floatx80_to_float64(ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
return u.i;
}
int32_t helper_fist_ST0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
int32_t val;
val = floatx80_to_int32(ST0, &env->fp_status);
if (val != (int16_t)val) {
set_float_exception_flags(float_flag_invalid, &env->fp_status);
val = -32768;
}
merge_exception_flags(env, old_flags);
return val;
}
int32_t helper_fistl_ST0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
int32_t val;
signed char old_exp_flags;
old_exp_flags = get_float_exception_flags(&env->fp_status);
set_float_exception_flags(0, &env->fp_status);
val = floatx80_to_int32(ST0, &env->fp_status);
if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
val = 0x80000000;
}
set_float_exception_flags(get_float_exception_flags(&env->fp_status)
| old_exp_flags, &env->fp_status);
merge_exception_flags(env, old_flags);
return val;
}
int64_t helper_fistll_ST0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
int64_t val;
signed char old_exp_flags;
old_exp_flags = get_float_exception_flags(&env->fp_status);
set_float_exception_flags(0, &env->fp_status);
val = floatx80_to_int64(ST0, &env->fp_status);
if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
val = 0x8000000000000000ULL;
}
set_float_exception_flags(get_float_exception_flags(&env->fp_status)
| old_exp_flags, &env->fp_status);
merge_exception_flags(env, old_flags);
return val;
}
int32_t helper_fistt_ST0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
int32_t val;
val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
if (val != (int16_t)val) {
set_float_exception_flags(float_flag_invalid, &env->fp_status);
val = -32768;
}
merge_exception_flags(env, old_flags);
return val;
}
int32_t helper_fisttl_ST0(CPUX86State *env)
{
return floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
uint8_t old_flags = save_exception_flags(env);
int32_t val;
val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
val = 0x80000000;
}
merge_exception_flags(env, old_flags);
return val;
}
int64_t helper_fisttll_ST0(CPUX86State *env)
{
return floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
uint8_t old_flags = save_exception_flags(env);
int64_t val;
val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
val = 0x8000000000000000ULL;
}
merge_exception_flags(env, old_flags);
return val;
}
void helper_fldt_ST0(CPUX86State *env, target_ulong ptr)
@ -420,24 +471,29 @@ static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
void helper_fcom_ST0_FT0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
FloatRelation ret;
ret = floatx80_compare(ST0, FT0, &env->fp_status);
env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
merge_exception_flags(env, old_flags);
}
void helper_fucom_ST0_FT0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
FloatRelation ret;
ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
merge_exception_flags(env, old_flags);
}
static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
void helper_fcomi_ST0_FT0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
int eflags;
FloatRelation ret;
@ -445,10 +501,12 @@ void helper_fcomi_ST0_FT0(CPUX86State *env)
eflags = cpu_cc_compute_all(env, CC_OP);
eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
CC_SRC = eflags;
merge_exception_flags(env, old_flags);
}
void helper_fucomi_ST0_FT0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
int eflags;
FloatRelation ret;
@ -456,26 +514,35 @@ void helper_fucomi_ST0_FT0(CPUX86State *env)
eflags = cpu_cc_compute_all(env, CC_OP);
eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
CC_SRC = eflags;
merge_exception_flags(env, old_flags);
}
void helper_fadd_ST0_FT0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
ST0 = floatx80_add(ST0, FT0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fmul_ST0_FT0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fsub_ST0_FT0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fsubr_ST0_FT0(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fdiv_ST0_FT0(CPUX86State *env)
@ -492,22 +559,30 @@ void helper_fdivr_ST0_FT0(CPUX86State *env)
void helper_fadd_STN_ST0(CPUX86State *env, int st_index)
{
uint8_t old_flags = save_exception_flags(env);
ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fmul_STN_ST0(CPUX86State *env, int st_index)
{
uint8_t old_flags = save_exception_flags(env);
ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fsub_STN_ST0(CPUX86State *env, int st_index)
{
uint8_t old_flags = save_exception_flags(env);
ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fsubr_STN_ST0(CPUX86State *env, int st_index)
{
uint8_t old_flags = save_exception_flags(env);
ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fdiv_STN_ST0(CPUX86State *env, int st_index)
@ -544,27 +619,66 @@ void helper_fld1_ST0(CPUX86State *env)
void helper_fldl2t_ST0(CPUX86State *env)
{
ST0 = floatx80_l2t;
switch (env->fpuc & FPU_RC_MASK) {
case FPU_RC_UP:
ST0 = floatx80_l2t_u;
break;
default:
ST0 = floatx80_l2t;
break;
}
}
void helper_fldl2e_ST0(CPUX86State *env)
{
ST0 = floatx80_l2e;
switch (env->fpuc & FPU_RC_MASK) {
case FPU_RC_DOWN:
case FPU_RC_CHOP:
ST0 = floatx80_l2e_d;
break;
default:
ST0 = floatx80_l2e;
break;
}
}
void helper_fldpi_ST0(CPUX86State *env)
{
ST0 = floatx80_pi;
switch (env->fpuc & FPU_RC_MASK) {
case FPU_RC_DOWN:
case FPU_RC_CHOP:
ST0 = floatx80_pi_d;
break;
default:
ST0 = floatx80_pi;
break;
}
}
void helper_fldlg2_ST0(CPUX86State *env)
{
ST0 = floatx80_lg2;
switch (env->fpuc & FPU_RC_MASK) {
case FPU_RC_DOWN:
case FPU_RC_CHOP:
ST0 = floatx80_lg2_d;
break;
default:
ST0 = floatx80_lg2;
break;
}
}
void helper_fldln2_ST0(CPUX86State *env)
{
ST0 = floatx80_ln2;
switch (env->fpuc & FPU_RC_MASK) {
case FPU_RC_DOWN:
case FPU_RC_CHOP:
ST0 = floatx80_ln2_d;
break;
default:
ST0 = floatx80_ln2;
break;
}
}
void helper_fldz_ST0(CPUX86State *env)
@ -679,14 +793,29 @@ void helper_fbld_ST0(CPUX86State *env, target_ulong ptr)
void helper_fbst_ST0(CPUX86State *env, target_ulong ptr)
{
uint8_t old_flags = save_exception_flags(env);
int v;
target_ulong mem_ref, mem_end;
int64_t val;
CPU_LDoubleU temp;
temp.d = ST0;
val = floatx80_to_int64(ST0, &env->fp_status);
mem_ref = ptr;
if (val >= 1000000000000000000LL || val <= -1000000000000000000LL) {
set_float_exception_flags(float_flag_invalid, &env->fp_status);
while (mem_ref < ptr + 7) {
cpu_stb_data_ra(env, mem_ref++, 0, GETPC());
}
cpu_stb_data_ra(env, mem_ref++, 0xc0, GETPC());
cpu_stb_data_ra(env, mem_ref++, 0xff, GETPC());
cpu_stb_data_ra(env, mem_ref++, 0xff, GETPC());
merge_exception_flags(env, old_flags);
return;
}
mem_end = mem_ref + 9;
if (val < 0) {
if (SIGND(temp)) {
cpu_stb_data_ra(env, mem_end, 0x80, GETPC());
val = -val;
} else {
@ -704,6 +833,7 @@ void helper_fbst_ST0(CPUX86State *env, target_ulong ptr)
while (mem_ref < mem_end) {
cpu_stb_data_ra(env, mem_ref++, 0, GETPC());
}
merge_exception_flags(env, old_flags);
}
void helper_f2xm1(CPUX86State *env)
@ -757,6 +887,7 @@ void helper_fpatan(CPUX86State *env)
void helper_fxtract(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
CPU_LDoubleU temp;
temp.d = ST0;
@ -767,16 +898,40 @@ void helper_fxtract(CPUX86State *env)
&env->fp_status);
fpush(env);
ST0 = temp.d;
} else if (floatx80_invalid_encoding(ST0)) {
float_raise(float_flag_invalid, &env->fp_status);
ST0 = floatx80_default_nan(&env->fp_status);
fpush(env);
ST0 = ST1;
} else if (floatx80_is_any_nan(ST0)) {
if (floatx80_is_signaling_nan(ST0, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
ST0 = floatx80_silence_nan(ST0, &env->fp_status);
}
fpush(env);
ST0 = ST1;
} else if (floatx80_is_infinity(ST0)) {
fpush(env);
ST0 = ST1;
ST1 = floatx80_infinity;
} else {
int expdif;
expdif = EXPD(temp) - EXPBIAS;
if (EXPD(temp) == 0) {
int shift = clz64(temp.l.lower);
temp.l.lower <<= shift;
expdif = 1 - EXPBIAS - shift;
float_raise(float_flag_input_denormal, &env->fp_status);
} else {
expdif = EXPD(temp) - EXPBIAS;
}
/* DP exponent bias */
ST0 = int32_to_floatx80(expdif, &env->fp_status);
fpush(env);
BIASEXPONENT(temp);
ST0 = temp.d;
}
merge_exception_flags(env, old_flags);
}
void helper_fprem1(CPUX86State *env)
@ -916,11 +1071,13 @@ void helper_fyl2xp1(CPUX86State *env)
void helper_fsqrt(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
if (floatx80_is_neg(ST0)) {
env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
env->fpus |= 0x400;
}
ST0 = floatx80_sqrt(ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fsincos(CPUX86State *env)
@ -940,17 +1097,60 @@ void helper_fsincos(CPUX86State *env)
void helper_frndint(CPUX86State *env)
{
uint8_t old_flags = save_exception_flags(env);
ST0 = floatx80_round_to_int(ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fscale(CPUX86State *env)
{
if (floatx80_is_any_nan(ST1)) {
uint8_t old_flags = save_exception_flags(env);
if (floatx80_invalid_encoding(ST1) || floatx80_invalid_encoding(ST0)) {
float_raise(float_flag_invalid, &env->fp_status);
ST0 = floatx80_default_nan(&env->fp_status);
} else if (floatx80_is_any_nan(ST1)) {
if (floatx80_is_signaling_nan(ST0, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
}
ST0 = ST1;
if (floatx80_is_signaling_nan(ST0, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
ST0 = floatx80_silence_nan(ST0, &env->fp_status);
}
} else if (floatx80_is_infinity(ST1) &&
!floatx80_invalid_encoding(ST0) &&
!floatx80_is_any_nan(ST0)) {
if (floatx80_is_neg(ST1)) {
if (floatx80_is_infinity(ST0)) {
float_raise(float_flag_invalid, &env->fp_status);
ST0 = floatx80_default_nan(&env->fp_status);
} else {
ST0 = (floatx80_is_neg(ST0) ?
floatx80_chs(floatx80_zero) :
floatx80_zero);
}
} else {
if (floatx80_is_zero(ST0)) {
float_raise(float_flag_invalid, &env->fp_status);
ST0 = floatx80_default_nan(&env->fp_status);
} else {
ST0 = (floatx80_is_neg(ST0) ?
floatx80_chs(floatx80_infinity) :
floatx80_infinity);
}
}
} else {
int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
int n;
signed char save = env->fp_status.floatx80_rounding_precision;
uint8_t save_flags = get_float_exception_flags(&env->fp_status);
set_float_exception_flags(0, &env->fp_status);
n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
set_float_exception_flags(save_flags, &env->fp_status);
env->fp_status.floatx80_rounding_precision = 80;
ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
env->fp_status.floatx80_rounding_precision = save;
}
merge_exception_flags(env, old_flags);
}
void helper_fsin(CPUX86State *env)
@ -1000,7 +1200,7 @@ void helper_fxam_ST0(CPUX86State *env)
if (expdif == MAXEXPD) {
if (MANTD(temp) == 0x8000000000000000ULL) {
env->fpus |= 0x500; /* Infinity */
} else {
} else if (MANTD(temp) & 0x8000000000000000ULL) {
env->fpus |= 0x100; /* NaN */
}
} else if (expdif == 0) {
@ -1009,7 +1209,7 @@ void helper_fxam_ST0(CPUX86State *env)
} else {
env->fpus |= 0x4400; /* Denormal */
}
} else {
} else if (MANTD(temp) & 0x8000000000000000ULL) {
env->fpus |= 0x400;
}
}

View File

@ -232,10 +232,10 @@ int hax_init_vcpu(CPUState *cpu)
return ret;
}
struct hax_vm *hax_vm_create(struct hax_state *hax)
struct hax_vm *hax_vm_create(struct hax_state *hax, int max_cpus)
{
struct hax_vm *vm;
int vm_id = 0, ret;
int vm_id = 0, ret, i;
if (hax_invalid_fd(hax->fd)) {
return NULL;
@ -245,6 +245,11 @@ struct hax_vm *hax_vm_create(struct hax_state *hax)
return hax->vm;
}
if (max_cpus > HAX_MAX_VCPU) {
fprintf(stderr, "Maximum VCPU number QEMU supported is %d\n", HAX_MAX_VCPU);
return NULL;
}
vm = g_new0(struct hax_vm, 1);
ret = hax_host_create_vm(hax, &vm_id);
@ -259,6 +264,12 @@ struct hax_vm *hax_vm_create(struct hax_state *hax)
goto error;
}
vm->numvcpus = max_cpus;
vm->vcpus = g_new0(struct hax_vcpu_state *, vm->numvcpus);
for (i = 0; i < vm->numvcpus; i++) {
vm->vcpus[i] = NULL;
}
hax->vm = vm;
return vm;
@ -272,12 +283,14 @@ int hax_vm_destroy(struct hax_vm *vm)
{
int i;
for (i = 0; i < HAX_MAX_VCPU; i++)
for (i = 0; i < vm->numvcpus; i++)
if (vm->vcpus[i]) {
fprintf(stderr, "VCPU should be cleaned before vm clean\n");
return -1;
}
hax_close_fd(vm->fd);
vm->numvcpus = 0;
g_free(vm->vcpus);
g_free(vm);
hax_global.vm = NULL;
return 0;
@ -292,7 +305,7 @@ static void hax_handle_interrupt(CPUState *cpu, int mask)
}
}
static int hax_init(ram_addr_t ram_size)
static int hax_init(ram_addr_t ram_size, int max_cpus)
{
struct hax_state *hax = NULL;
struct hax_qemu_version qversion;
@ -324,7 +337,7 @@ static int hax_init(ram_addr_t ram_size)
goto error;
}
hax->vm = hax_vm_create(hax);
hax->vm = hax_vm_create(hax, max_cpus);
if (!hax->vm) {
fprintf(stderr, "Failed to create HAX VM\n");
ret = -EINVAL;
@ -352,7 +365,7 @@ static int hax_init(ram_addr_t ram_size)
static int hax_accel_init(MachineState *ms)
{
int ret = hax_init(ms->ram_size);
int ret = hax_init(ms->ram_size, (int)ms->smp.max_cpus);
if (ret && (ret != -ENOSPC)) {
fprintf(stderr, "No accelerator found.\n");

View File

@ -41,13 +41,12 @@ struct hax_state {
};
#define HAX_MAX_VCPU 0x10
#define MAX_VM_ID 0x40
#define MAX_VCPU_ID 0x40
struct hax_vm {
hax_fd fd;
int id;
struct hax_vcpu_state *vcpus[HAX_MAX_VCPU];
int numvcpus;
struct hax_vcpu_state **vcpus;
};
#ifdef NEED_CPU_H
@ -58,7 +57,7 @@ int valid_hax_tunnel_size(uint16_t size);
/* Host specific functions */
int hax_mod_version(struct hax_state *hax, struct hax_module_version *version);
int hax_inject_interrupt(CPUArchState *env, int vector);
struct hax_vm *hax_vm_create(struct hax_state *hax);
struct hax_vm *hax_vm_create(struct hax_state *hax, int max_cpus);
int hax_vcpu_run(struct hax_vcpu_state *vcpu);
int hax_vcpu_create(int id);
int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state,

View File

@ -16,13 +16,12 @@
#ifndef HVF_I386_H
#define HVF_I386_H
#include "sysemu/accel.h"
#include "sysemu/hvf.h"
#include "cpu.h"
#include "x86.h"
#define HVF_MAX_VCPU 0x10
#define MAX_VM_ID 0x40
#define MAX_VCPU_ID 0x40
extern struct hvf_state hvf_global;
@ -37,6 +36,40 @@ struct hvf_state {
uint64_t mem_quota;
};
/* hvf_slot flags */
#define HVF_SLOT_LOG (1 << 0)
typedef struct hvf_slot {
uint64_t start;
uint64_t size;
uint8_t *mem;
int slot_id;
uint32_t flags;
MemoryRegion *region;
} hvf_slot;
typedef struct hvf_vcpu_caps {
uint64_t vmx_cap_pinbased;
uint64_t vmx_cap_procbased;
uint64_t vmx_cap_procbased2;
uint64_t vmx_cap_entry;
uint64_t vmx_cap_exit;
uint64_t vmx_cap_preemption_timer;
} hvf_vcpu_caps;
typedef struct HVFState {
AccelState parent;
hvf_slot slots[32];
int num_slots;
hvf_vcpu_caps *hvf_caps;
} HVFState;
extern HVFState *hvf_state;
void hvf_set_phys_mem(MemoryRegionSection *, bool);
void hvf_handle_io(CPUArchState *, uint16_t, void *, int, int, int);
hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
#ifdef NEED_CPU_H
/* Functions exported to host specific mode */

View File

@ -251,7 +251,7 @@ void vmx_update_tpr(CPUState *cpu)
}
}
void update_apic_tpr(CPUState *cpu)
static void update_apic_tpr(CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
int tpr = rreg(cpu->hvf_fd, HV_X86_TPR) >> 4;
@ -312,7 +312,8 @@ void hvf_cpu_synchronize_post_reset(CPUState *cpu_state)
run_on_cpu(cpu_state, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}
void _hvf_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
static void do_hvf_cpu_synchronize_post_init(CPUState *cpu,
run_on_cpu_data arg)
{
CPUState *cpu_state = cpu;
hvf_put_registers(cpu_state);
@ -321,7 +322,7 @@ void _hvf_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
void hvf_cpu_synchronize_post_init(CPUState *cpu_state)
{
run_on_cpu(cpu_state, _hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
run_on_cpu(cpu_state, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}
static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
@ -532,7 +533,11 @@ void hvf_reset_vcpu(CPUState *cpu) {
void hvf_vcpu_destroy(CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
hv_return_t ret = hv_vcpu_destroy((hv_vcpuid_t)cpu->hvf_fd);
g_free(env->hvf_mmio_buf);
assert_hvf_ok(ret);
}
@ -562,7 +567,7 @@ int hvf_init_vcpu(CPUState *cpu)
init_decoder();
hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
env->hvf_emul = g_new0(HVFX86EmulatorState, 1);
env->hvf_mmio_buf = g_new(char, 4096);
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
cpu->vcpu_dirty = 1;
@ -722,8 +727,7 @@ int hvf_vcpu_exec(CPUState *cpu)
hvf_store_events(cpu, ins_len, idtvec_info);
rip = rreg(cpu->hvf_fd, HV_X86_RIP);
RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
env->eflags = RFLAGS(env);
env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
qemu_mutex_lock_iothread();
@ -735,7 +739,7 @@ int hvf_vcpu_exec(CPUState *cpu)
case EXIT_REASON_HLT: {
macvm_set_rip(cpu, rip + ins_len);
if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
(EFLAGS(env) & IF_MASK))
(env->eflags & IF_MASK))
&& !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
!(idtvec_info & VMCS_IDT_VEC_VALID)) {
cpu->halted = 1;
@ -766,8 +770,6 @@ int hvf_vcpu_exec(CPUState *cpu)
struct x86_decode decode;
load_regs(cpu);
env->hvf_emul->fetch_rip = rip;
decode_instruction(env, &decode);
exec_instruction(env, &decode);
store_regs(cpu);
@ -796,7 +798,7 @@ int hvf_vcpu_exec(CPUState *cpu)
} else {
RAX(env) = (uint64_t)val;
}
RIP(env) += ins_len;
env->eip += ins_len;
store_regs(cpu);
break;
} else if (!string && !in) {
@ -808,8 +810,6 @@ int hvf_vcpu_exec(CPUState *cpu)
struct x86_decode decode;
load_regs(cpu);
env->hvf_emul->fetch_rip = rip;
decode_instruction(env, &decode);
assert(ins_len == decode.len);
exec_instruction(env, &decode);
@ -870,7 +870,7 @@ int hvf_vcpu_exec(CPUState *cpu)
} else {
simulate_wrmsr(cpu);
}
RIP(env) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
env->eip += ins_len;
store_regs(cpu);
break;
}
@ -906,7 +906,7 @@ int hvf_vcpu_exec(CPUState *cpu)
error_report("Unrecognized CR %d", cr);
abort();
}
RIP(env) += ins_len;
env->eip += ins_len;
store_regs(cpu);
break;
}
@ -914,8 +914,6 @@ int hvf_vcpu_exec(CPUState *cpu)
struct x86_decode decode;
load_regs(cpu);
env->hvf_emul->fetch_rip = rip;
decode_instruction(env, &decode);
exec_instruction(env, &decode);
store_regs(cpu);

View File

@ -131,7 +131,7 @@ bool x86_is_v8086(struct CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
return x86_is_protected(cpu) && (RFLAGS(env) & RFLAGS_VM);
return x86_is_protected(cpu) && (env->eflags & VM_MASK);
}
bool x86_is_long_mode(struct CPUState *cpu)

View File

@ -42,64 +42,6 @@ typedef struct x86_register {
};
} __attribute__ ((__packed__)) x86_register;
typedef enum x86_rflags {
RFLAGS_CF = (1L << 0),
RFLAGS_PF = (1L << 2),
RFLAGS_AF = (1L << 4),
RFLAGS_ZF = (1L << 6),
RFLAGS_SF = (1L << 7),
RFLAGS_TF = (1L << 8),
RFLAGS_IF = (1L << 9),
RFLAGS_DF = (1L << 10),
RFLAGS_OF = (1L << 11),
RFLAGS_IOPL = (3L << 12),
RFLAGS_NT = (1L << 14),
RFLAGS_RF = (1L << 16),
RFLAGS_VM = (1L << 17),
RFLAGS_AC = (1L << 18),
RFLAGS_VIF = (1L << 19),
RFLAGS_VIP = (1L << 20),
RFLAGS_ID = (1L << 21),
} x86_rflags;
/* rflags register */
typedef struct x86_reg_flags {
union {
struct {
uint64_t rflags;
};
struct {
uint32_t eflags;
uint32_t hi32_unused1;
};
struct {
uint32_t cf:1;
uint32_t unused1:1;
uint32_t pf:1;
uint32_t unused2:1;
uint32_t af:1;
uint32_t unused3:1;
uint32_t zf:1;
uint32_t sf:1;
uint32_t tf:1;
uint32_t ief:1;
uint32_t df:1;
uint32_t of:1;
uint32_t iopl:2;
uint32_t nt:1;
uint32_t unused4:1;
uint32_t rf:1;
uint32_t vm:1;
uint32_t ac:1;
uint32_t vif:1;
uint32_t vip:1;
uint32_t id:1;
uint32_t unused5:10;
uint32_t hi32_unused2;
};
};
} __attribute__ ((__packed__)) x86_reg_flags;
typedef enum x86_reg_cr0 {
CR0_PE = (1L << 0),
CR0_MP = (1L << 1),
@ -286,29 +228,10 @@ typedef struct x68_segment_selector {
};
} __attribute__ ((__packed__)) x68_segment_selector;
typedef struct lazy_flags {
target_ulong result;
target_ulong auxbits;
} lazy_flags;
/* Definition of hvf_x86_state is here */
struct HVFX86EmulatorState {
int interruptable;
uint64_t fetch_rip;
uint64_t rip;
struct x86_register regs[16];
struct x86_reg_flags rflags;
struct lazy_flags lflags;
uint8_t mmio_buf[4096];
};
/* useful register access macros */
#define RIP(cpu) (cpu->hvf_emul->rip)
#define EIP(cpu) ((uint32_t)cpu->hvf_emul->rip)
#define RFLAGS(cpu) (cpu->hvf_emul->rflags.rflags)
#define EFLAGS(cpu) (cpu->hvf_emul->rflags.eflags)
#define x86_reg(cpu, reg) ((x86_register *) &cpu->regs[reg])
#define RRX(cpu, reg) (cpu->hvf_emul->regs[reg].rrx)
#define RRX(cpu, reg) (x86_reg(cpu, reg)->rrx)
#define RAX(cpu) RRX(cpu, R_EAX)
#define RCX(cpu) RRX(cpu, R_ECX)
#define RDX(cpu) RRX(cpu, R_EDX)
@ -326,7 +249,7 @@ struct HVFX86EmulatorState {
#define R14(cpu) RRX(cpu, R_R14)
#define R15(cpu) RRX(cpu, R_R15)
#define ERX(cpu, reg) (cpu->hvf_emul->regs[reg].erx)
#define ERX(cpu, reg) (x86_reg(cpu, reg)->erx)
#define EAX(cpu) ERX(cpu, R_EAX)
#define ECX(cpu) ERX(cpu, R_ECX)
#define EDX(cpu) ERX(cpu, R_EDX)
@ -336,7 +259,7 @@ struct HVFX86EmulatorState {
#define ESI(cpu) ERX(cpu, R_ESI)
#define EDI(cpu) ERX(cpu, R_EDI)
#define RX(cpu, reg) (cpu->hvf_emul->regs[reg].rx)
#define RX(cpu, reg) (x86_reg(cpu, reg)->rx)
#define AX(cpu) RX(cpu, R_EAX)
#define CX(cpu) RX(cpu, R_ECX)
#define DX(cpu) RX(cpu, R_EDX)
@ -346,13 +269,13 @@ struct HVFX86EmulatorState {
#define SI(cpu) RX(cpu, R_ESI)
#define DI(cpu) RX(cpu, R_EDI)
#define RL(cpu, reg) (cpu->hvf_emul->regs[reg].lx)
#define RL(cpu, reg) (x86_reg(cpu, reg)->lx)
#define AL(cpu) RL(cpu, R_EAX)
#define CL(cpu) RL(cpu, R_ECX)
#define DL(cpu) RL(cpu, R_EDX)
#define BL(cpu) RL(cpu, R_EBX)
#define RH(cpu, reg) (cpu->hvf_emul->regs[reg].hx)
#define RH(cpu, reg) (x86_reg(cpu, reg)->hx)
#define AH(cpu) RH(cpu, R_EAX)
#define CH(cpu) RH(cpu, R_ECX)
#define DH(cpu) RH(cpu, R_EDX)

View File

@ -29,8 +29,7 @@
static void decode_invalid(CPUX86State *env, struct x86_decode *decode)
{
printf("%llx: failed to decode instruction ", env->hvf_emul->fetch_rip -
decode->len);
printf("%llx: failed to decode instruction ", env->eip);
for (int i = 0; i < decode->opcode_len; i++) {
printf("%x ", decode->opcode[i]);
}
@ -75,7 +74,7 @@ static inline uint64_t decode_bytes(CPUX86State *env, struct x86_decode *decode,
VM_PANIC_EX("%s invalid size %d\n", __func__, size);
break;
}
target_ulong va = linear_rip(env_cpu(env), RIP(env)) + decode->len;
target_ulong va = linear_rip(env_cpu(env), env->eip) + decode->len;
vmx_read_mem(env_cpu(env), &val, va, size);
decode->len += size;
@ -698,15 +697,13 @@ static void decode_db_4(CPUX86State *env, struct x86_decode *decode)
#define RFLAGS_MASK_NONE 0
#define RFLAGS_MASK_OSZAPC (RFLAGS_OF | RFLAGS_SF | RFLAGS_ZF | RFLAGS_AF | \
RFLAGS_PF | RFLAGS_CF)
#define RFLAGS_MASK_LAHF (RFLAGS_SF | RFLAGS_ZF | RFLAGS_AF | RFLAGS_PF | \
RFLAGS_CF)
#define RFLAGS_MASK_CF (RFLAGS_CF)
#define RFLAGS_MASK_IF (RFLAGS_IF)
#define RFLAGS_MASK_TF (RFLAGS_TF)
#define RFLAGS_MASK_DF (RFLAGS_DF)
#define RFLAGS_MASK_ZF (RFLAGS_ZF)
#define RFLAGS_MASK_OSZAPC (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C)
#define RFLAGS_MASK_LAHF (CC_S | CC_Z | CC_A | CC_P | CC_C)
#define RFLAGS_MASK_CF (CC_C)
#define RFLAGS_MASK_IF (IF_MASK)
#define RFLAGS_MASK_TF (TF_MASK)
#define RFLAGS_MASK_DF (DF_MASK)
#define RFLAGS_MASK_ZF (CC_Z)
struct decode_tbl _1op_inst[] = {
{0x0, X86_DECODE_CMD_ADD, 1, true, decode_modrm_rm, decode_modrm_reg, NULL,
@ -1771,7 +1768,7 @@ void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
ptr += get_sib_val(env, decode, &seg);
} else if (!decode->modrm.mod && 5 == decode->modrm.rm) {
if (x86_is_long_mode(env_cpu(env))) {
ptr += RIP(env) + decode->len;
ptr += env->eip + decode->len;
} else {
ptr = decode->displacement;
}
@ -1807,7 +1804,7 @@ void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
if (4 == rm) {
ptr = get_sib_val(env, decode, &seg) + offset;
} else if (0 == mod && 5 == rm) {
ptr = RIP(env) + decode->len + (int32_t) offset;
ptr = env->eip + decode->len + (int32_t) offset;
} else {
ptr = get_reg_val(env, src, decode->rex.rex, decode->rex.b, 8) +
(int64_t) offset;

Some files were not shown because too many files have changed in this diff Show More