Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* FreeBSD build fixes (atomics, qapi/error.h)
* x86 KVM fixes (SynIC, KVM_GET/SET_MSRS)
* Memory API doc fix
* checkpatch fix
* Chardev and socket fixes
* NBD fixes
* exec.c SEGV fix

# gpg: Signature made Tue 05 Apr 2016 10:47:49 BST using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"

* remotes/bonzini/tags/for-upstream:
  net: fix missing include of qapi/error.h in netmap.c
  nbd: Fix poor debug message
  include/qemu/atomic: add compile time asserts
  cpus: don't use atomic_read for vm_clock_warp_start
  nbd: don't request FUA on FLUSH
  doc/memory: update MMIO section
  char: ensure all clients are in non-blocking mode
  char: fix broken EAGAIN retry on OS-X due to errno clobbering
  util: retry getaddrinfo if getting EAI_BADFLAGS with AI_V4MAPPED
  checkpatch: add target_ulong to typelist
  target-i386: assert that KVM_GET/SET_MSRS can set all requested MSRs
  target-i386: do not pass MSR_TSC_AUX to KVM ioctls if CPUID bit is not set
  memory: fix segv on qemu_ram_free(block=0x0)
  target-i386/kvm: Hyper-V VMBus hypercalls blank handlers
  update Linux headers to 4.6

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2016-04-05 11:03:18 +01:00
commit cc621a9838
26 changed files with 275 additions and 72 deletions

block/nbd-client.c

@@ -319,10 +319,6 @@ int nbd_client_co_flush(BlockDriverState *bs)
return 0;
}
if (client->nbdflags & NBD_FLAG_SEND_FUA) {
request.type |= NBD_CMD_FLAG_FUA;
}
request.from = 0;
request.len = 0;

cpus.c

@@ -338,10 +338,18 @@ static int64_t qemu_icount_round(int64_t count)
static void icount_warp_rt(void)
{
unsigned seq;
int64_t warp_start;
/* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
* changes from -1 to another value, so the race here is okay.
*/
if (atomic_read(&vm_clock_warp_start) == -1) {
do {
seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
warp_start = vm_clock_warp_start;
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
if (warp_start == -1) {
return;
}

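A note on the change above: a plain atomic_read() of the 64-bit vm_clock_warp_start is not atomic on 32-bit hosts (the new compile-time asserts in include/qemu/atomic.h below now reject it outright), so the reader retries under a seqlock instead. For context, a sketch of the matching writer side, assuming the seqlock_write_lock()/seqlock_write_unlock() API of include/qemu/seqlock.h at this time:

    /* The writer bumps the sequence count around a plain store; a reader
     * that raced with the store sees the count change and retries. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    vm_clock_warp_start = new_warp_start;    /* new_warp_start is hypothetical */
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
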
docs/memory.txt

@@ -37,8 +37,8 @@ MemoryRegion):
- MMIO: a range of guest memory that is implemented by host callbacks;
each read or write causes a callback to be called on the host.
You initialize these with memory_region_io(), passing it a MemoryRegionOps
structure describing the callbacks.
You initialize these with memory_region_init_io(), passing it a
MemoryRegionOps structure describing the callbacks.
- ROM: a ROM memory region works like RAM for reads (directly accessing
a region of host memory), but like MMIO for writes (invoking a callback).

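The hunk above only corrects the function name in the docs; for readers new to the API, a minimal usage sketch may help. The device name, state struct and callbacks here are hypothetical, but memory_region_init_io() and MemoryRegionOps are the documented interface:

    static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
    {
        return 0;                    /* host callback for guest reads */
    }

    static void mydev_write(void *opaque, hwaddr addr, uint64_t val,
                            unsigned size)
    {
        /* host callback for guest writes */
    }

    static const MemoryRegionOps mydev_ops = {
        .read = mydev_read,
        .write = mydev_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    };

    /* inside the (hypothetical) device's init/realize function: */
    memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
                          "mydev-mmio", 0x100);
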
exec.c

@@ -1773,6 +1773,10 @@ static void reclaim_ramblock(RAMBlock *block)
void qemu_ram_free(RAMBlock *block)
{
if (!block) {
return;
}
qemu_mutex_lock_ramlist();
QLIST_REMOVE_RCU(block, next);
ram_list.mru_block = NULL;

include/qemu/atomic.h

@@ -42,30 +42,34 @@
* loads/stores past the atomic operation load/store. However there is
* no explicit memory barrier for the processor.
*/
#define atomic_read(ptr) \
({ \
typeof(*ptr) _val; \
__atomic_load(ptr, &_val, __ATOMIC_RELAXED); \
_val; \
#define atomic_read(ptr) \
({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val; \
__atomic_load(ptr, &_val, __ATOMIC_RELAXED); \
_val; \
})
#define atomic_set(ptr, i) do { \
typeof(*ptr) _val = (i); \
__atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
#define atomic_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val = (i); \
__atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
} while(0)
/* Atomic RCU operations imply weak memory barriers */
#define atomic_rcu_read(ptr) \
({ \
typeof(*ptr) _val; \
__atomic_load(ptr, &_val, __ATOMIC_CONSUME); \
_val; \
#define atomic_rcu_read(ptr) \
({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val; \
__atomic_load(ptr, &_val, __ATOMIC_CONSUME); \
_val; \
})
#define atomic_rcu_set(ptr, i) do { \
typeof(*ptr) _val = (i); \
__atomic_store(ptr, &_val, __ATOMIC_RELEASE); \
#define atomic_rcu_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val = (i); \
__atomic_store(ptr, &_val, __ATOMIC_RELEASE); \
} while(0)
/* atomic_mb_read/set semantics map Java volatile variables. They are
@@ -79,6 +83,7 @@
#if defined(_ARCH_PPC)
#define atomic_mb_read(ptr) \
({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val; \
__atomic_load(ptr, &_val, __ATOMIC_RELAXED); \
smp_rmb(); \
@@ -86,22 +91,25 @@
})
#define atomic_mb_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val = (i); \
smp_wmb(); \
__atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
smp_mb(); \
} while(0)
#else
#define atomic_mb_read(ptr) \
({ \
typeof(*ptr) _val; \
__atomic_load(ptr, &_val, __ATOMIC_SEQ_CST); \
_val; \
#define atomic_mb_read(ptr) \
({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val; \
__atomic_load(ptr, &_val, __ATOMIC_SEQ_CST); \
_val; \
})
#define atomic_mb_set(ptr, i) do { \
typeof(*ptr) _val = (i); \
__atomic_store(ptr, &_val, __ATOMIC_SEQ_CST); \
#define atomic_mb_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _val = (i); \
__atomic_store(ptr, &_val, __ATOMIC_SEQ_CST); \
} while(0)
#endif
@@ -109,6 +117,7 @@
/* All the remaining operations are fully sequentially consistent */
#define atomic_xchg(ptr, i) ({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _new = (i), _old; \
__atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
_old; \
@@ -117,6 +126,7 @@
/* Returns the eventual value, failed or not */
#define atomic_cmpxchg(ptr, old, new) \
({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
typeof(*ptr) _old = (old), _new = (new); \
__atomic_compare_exchange(ptr, &_old, &_new, false, \
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \

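The QEMU_BUILD_BUG_ON lines added above make any atomic access wider than a pointer fail at compile time, which is exactly the class of bug the cpus.c change fixes for 32-bit hosts. A self-contained sketch of the negative-array-size trick behind such asserts (QEMU's real definition lives in include/qemu/compiler.h and may differ in detail):

    /* Compiles only if cond is false at compile time. */
    #define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    void demo(void)
    {
        long long counter;   /* 8 bytes on 32- and 64-bit hosts alike */
        /* Fine on 64-bit hosts, a build error on 32-bit ones: */
        BUILD_BUG_ON_SKETCH(sizeof(counter) > sizeof(void *));
    }
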
linux-headers/asm-x86/hyperv.h

@@ -226,7 +226,9 @@
(~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
/* Declare the various hypercall operations. */
#define HV_X64_HV_NOTIFY_LONG_SPIN_WAIT 0x0008
#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
#define HVCALL_POST_MESSAGE 0x005c
#define HVCALL_SIGNAL_EVENT 0x005d
#define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001
#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12

include/standard-headers/linux/input.h

@@ -243,6 +243,7 @@ struct input_mask {
#define BUS_GSC 0x1A
#define BUS_ATARI 0x1B
#define BUS_SPI 0x1C
#define BUS_RMI 0x1D
/*
* MT_TOOL types

include/standard-headers/linux/types.h

@@ -1,2 +1,3 @@
#include <stdint.h>
#include "qemu/compiler.h"
/* For QEMU all types are already defined via osdep.h, so this
* header does not need to do anything.
*/

include/standard-headers/linux/virtio_balloon.h

@@ -51,7 +51,7 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_MINFLT 3 /* Number of minor faults */
#define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */
#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
#define VIRTIO_BALLOON_S_AVAIL 6 /* Amount of available memory in guest */
#define VIRTIO_BALLOON_S_AVAIL 6 /* Available memory as in /proc */
#define VIRTIO_BALLOON_S_NR 7
/*

include/standard-headers/linux/virtio_blk.h

@@ -43,10 +43,10 @@
#ifndef VIRTIO_BLK_NO_LEGACY
#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
#define VIRTIO_BLK_F_WCE 9 /* Writeback mode enabled after reset */
#define VIRTIO_BLK_F_FLUSH 9 /* Flush command supported */
#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */
/* Old (deprecated) name for VIRTIO_BLK_F_WCE. */
#define VIRTIO_BLK_F_FLUSH VIRTIO_BLK_F_WCE
/* Old (deprecated) name for VIRTIO_BLK_F_FLUSH. */
#define VIRTIO_BLK_F_WCE VIRTIO_BLK_F_FLUSH
#endif /* !VIRTIO_BLK_NO_LEGACY */
#define VIRTIO_BLK_ID_BYTES 20 /* ID string length */

linux-headers/asm-arm64/kvm.h

@@ -94,6 +94,7 @@ struct kvm_regs {
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */
#define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */
struct kvm_vcpu_init {
__u32 target;
@@ -204,6 +205,11 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
#define KVM_ARM_VCPU_PMU_V3_IRQ 0
#define KVM_ARM_VCPU_PMU_V3_INIT 1
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
#define KVM_ARM_IRQ_TYPE_MASK 0xff

linux-headers/asm-powerpc/epapr_hcalls.h

@@ -78,7 +78,7 @@
#define EV_SUCCESS 0
#define EV_EPERM 1 /* Operation not permitted */
#define EV_ENOENT 2 /* Entry Not Found */
#define EV_EIO 3 /* I/O error occured */
#define EV_EIO 3 /* I/O error occurred */
#define EV_EAGAIN 4 /* The operation had insufficient
* resources to complete and should be
* retried
@@ -89,7 +89,7 @@
#define EV_ENODEV 7 /* No such device */
#define EV_EINVAL 8 /* An argument supplied to the hcall
was out of range or invalid */
#define EV_INTERNAL 9 /* An internal error occured */
#define EV_INTERNAL 9 /* An internal error occurred */
#define EV_CONFIG 10 /* A configuration error was detected */
#define EV_INVALID_STATE 11 /* The object is in an invalid state */
#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */

linux-headers/asm-powerpc/kvm.h

@@ -333,6 +333,15 @@ struct kvm_create_spapr_tce {
__u32 window_size;
};
/* for KVM_CAP_SPAPR_TCE_64 */
struct kvm_create_spapr_tce_64 {
__u64 liobn;
__u32 page_shift;
__u32 flags;
__u64 offset; /* in pages */
__u64 size; /* in pages */
};
/* for KVM_ALLOCATE_RMA */
struct kvm_allocate_rma {
__u64 rma_size;

linux-headers/asm-x86/unistd_32.h

@@ -375,5 +375,7 @@
#define __NR_membarrier 375
#define __NR_mlock2 376
#define __NR_copy_file_range 377
#define __NR_preadv2 378
#define __NR_pwritev2 379
#endif /* _ASM_X86_UNISTD_32_H */

linux-headers/asm-x86/unistd_64.h

@@ -328,5 +328,7 @@
#define __NR_membarrier 324
#define __NR_mlock2 325
#define __NR_copy_file_range 326
#define __NR_preadv2 327
#define __NR_pwritev2 328
#endif /* _ASM_X86_UNISTD_64_H */

linux-headers/linux/kvm.h

@@ -157,6 +157,7 @@ struct kvm_s390_skeys {
struct kvm_hyperv_exit {
#define KVM_EXIT_HYPERV_SYNIC 1
#define KVM_EXIT_HYPERV_HCALL 2
__u32 type;
union {
struct {
@@ -165,6 +166,11 @@ struct kvm_hyperv_exit {
__u64 evt_page;
__u64 msg_page;
} synic;
struct {
__u64 input;
__u64 result;
__u64 params[2];
} hcall;
} u;
};
@@ -856,6 +862,9 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_IOEVENTFD_ANY_LENGTH 122
#define KVM_CAP_HYPERV_SYNIC 123
#define KVM_CAP_S390_RI 124
#define KVM_CAP_SPAPR_TCE_64 125
#define KVM_CAP_ARM_PMU_V3 126
#define KVM_CAP_VCPU_ATTRIBUTES 127
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1148,6 +1157,8 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_PPC_ALLOC_HTAB */
#define KVM_PPC_ALLOCATE_HTAB _IOWR(KVMIO, 0xa7, __u32)
#define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce)
#define KVM_CREATE_SPAPR_TCE_64 _IOW(KVMIO, 0xa8, \
struct kvm_create_spapr_tce_64)
/* Available with KVM_CAP_RMA */
#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma)
/* Available with KVM_CAP_PPC_HTAB_FD */

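Userspace probes the new KVM_CAP_* constants above with KVM_CHECK_EXTENSION; a hedged sketch using the raw ioctl interface (requires headers at least this new, error handling elided):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        /* KVM_CHECK_EXTENSION returns > 0 when the capability is supported. */
        int has = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PMU_V3);
        printf("KVM_CAP_ARM_PMU_V3: %s\n", has > 0 ? "yes" : "no");
        return 0;
    }
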
linux-headers/linux/userfaultfd.h

@@ -78,7 +78,7 @@ struct uffd_msg {
__u64 reserved3;
} reserved;
} arg;
} __packed;
} __attribute__((packed));
/*
* Start at 0x12 and not at 0 to be more strict against bugs.

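The __packed fix above matters because __packed is a kernel-internal macro that imported headers cannot assume exists; the spelled-out attribute is what gcc and clang accept with no extra support. A tiny illustration:

    struct example {
        unsigned char type;
        unsigned long long arg;     /* no padding inserted before this field */
    } __attribute__((packed));      /* portable to userspace, no macro needed */
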
linux-headers/linux/vfio.h

@@ -59,6 +59,33 @@
#define VFIO_TYPE (';')
#define VFIO_BASE 100
/*
* For extension of INFO ioctls, VFIO makes use of a capability chain
* designed after PCI/e capabilities. A flag bit indicates whether
* this capability chain is supported and a field defined in the fixed
* structure defines the offset of the first capability in the chain.
* This field is only valid when the corresponding bit in the flags
* bitmap is set. This offset field is relative to the start of the
* INFO buffer, as is the next field within each capability header.
* The id within the header is a shared address space per INFO ioctl,
* while the version field is specific to the capability id. The
* contents following the header are specific to the capability id.
*/
struct vfio_info_cap_header {
__u16 id; /* Identifies capability */
__u16 version; /* Version specific to the capability ID */
__u32 next; /* Offset of next capability */
};
/*
* Callers of INFO ioctls passing insufficiently sized buffers will see
* the capability chain flag bit set, a zero value for the first capability
* offset (if available within the provided argsz), and argsz will be
* updated to report the necessary buffer size. For compatibility, the
* INFO ioctl will not report error in this case, but the capability chain
* will not be available.
*/
/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */
/**
@@ -194,13 +221,73 @@ struct vfio_region_info {
#define VFIO_REGION_INFO_FLAG_READ (1 << 0) /* Region supports read */
#define VFIO_REGION_INFO_FLAG_WRITE (1 << 1) /* Region supports write */
#define VFIO_REGION_INFO_FLAG_MMAP (1 << 2) /* Region supports mmap */
#define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */
__u32 index; /* Region index */
__u32 resv; /* Reserved for alignment */
__u32 cap_offset; /* Offset within info struct of first cap */
__u64 size; /* Region size (bytes) */
__u64 offset; /* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8)
/*
* The sparse mmap capability allows finer granularity of specifying areas
* within a region with mmap support. When specified, the user should only
* mmap the offset ranges specified by the areas array. mmaps outside of the
* areas specified may fail (such as the range covering a PCI MSI-X table) or
* may result in improper device behavior.
*
* The structures below define version 1 of this capability.
*/
#define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1
struct vfio_region_sparse_mmap_area {
__u64 offset; /* Offset of mmap'able area within region */
__u64 size; /* Size of mmap'able area */
};
struct vfio_region_info_cap_sparse_mmap {
struct vfio_info_cap_header header;
__u32 nr_areas;
__u32 reserved;
struct vfio_region_sparse_mmap_area areas[];
};
/*
* The device specific type capability allows regions unique to a specific
* device or class of devices to be exposed. This helps solve the problem for
* vfio bus drivers of defining which region indexes correspond to which region
* on the device, without needing to resort to static indexes, as done by
* vfio-pci. For instance, if we were to go back in time, we might remove
* VFIO_PCI_VGA_REGION_INDEX and let vfio-pci simply define that all indexes
* greater than or equal to VFIO_PCI_NUM_REGIONS are device specific and we'd
* make a "VGA" device specific type to describe the VGA access space. This
* means that non-VGA devices wouldn't need to waste this index, and thus the
* address space associated with it due to implementation of device file
* descriptor offsets in vfio-pci.
*
* The current implementation is now part of the user ABI, so we can't use this
* for VGA, but there are other upcoming use cases, such as opregions for Intel
* IGD devices and framebuffers for vGPU devices. We missed VGA, but we'll
* use this for future additions.
*
* The structure below defines version 1 of this capability.
*/
#define VFIO_REGION_INFO_CAP_TYPE 2
struct vfio_region_info_cap_type {
struct vfio_info_cap_header header;
__u32 type; /* global per bus driver */
__u32 subtype; /* type specific */
};
#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE (1 << 31)
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff)
/* 8086 Vendor sub-types */
#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION (1)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3)
/**
* VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
* struct vfio_irq_info)
@@ -336,7 +423,8 @@ enum {
* between described ranges are unimplemented.
*/
VFIO_PCI_VGA_REGION_INDEX,
VFIO_PCI_NUM_REGIONS
VFIO_PCI_NUM_REGIONS = 9 /* Fixed user ABI, region indexes >=9 use */
/* device specific cap to define content. */
};
enum {

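The capability chain described in the comments above is walked by following the next offsets from the fixed struct into the variable part of the INFO buffer. A hedged sketch; vfio_find_cap() is a hypothetical helper, not part of the ABI:

    #include <linux/types.h>
    #include <linux/vfio.h>

    static struct vfio_info_cap_header *
    vfio_find_cap(void *info_buf, __u32 first_cap_offset, __u16 want_id)
    {
        __u32 off = first_cap_offset;    /* relative to start of info buffer */

        while (off) {
            struct vfio_info_cap_header *hdr =
                (struct vfio_info_cap_header *)((char *)info_buf + off);
            if (hdr->id == want_id) {
                return hdr;     /* e.g. VFIO_REGION_INFO_CAP_SPARSE_MMAP */
            }
            off = hdr->next;    /* also relative to info_buf; 0 ends the chain */
        }
        return NULL;
    }
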
linux-headers/linux/vhost.h

@@ -126,6 +126,12 @@ struct vhost_memory {
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
/* Set eventfd to signal an error */
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
/* Set busy loop timeout (in us) */
#define VHOST_SET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x23, \
struct vhost_vring_state)
/* Get busy loop timeout (in us) */
#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \
struct vhost_vring_state)
/* VHOST_NET specific defines */

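Usage sketch for the two new ioctls above: vhost_vring_state carries the vring index and, here, the timeout in microseconds. vhost_fd is assumed to be an open vhost device descriptor:

    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    static int set_busyloop_timeout(int vhost_fd, unsigned int vring,
                                    unsigned int usecs)
    {
        struct vhost_vring_state s = { .index = vring, .num = usecs };
        return ioctl(vhost_fd, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, &s);
    }
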
nbd.c

@@ -634,7 +634,7 @@ ssize_t nbd_send_request(QIOChannel *ioc, struct nbd_request *request)
cpu_to_be64w((uint64_t*)(buf + 16), request->from);
cpu_to_be32w((uint32_t*)(buf + 24), request->len);
TRACE("Sending request to client: "
TRACE("Sending request to server: "
"{ .from = %" PRIu64", .len = %u, .handle = %" PRIu64", .type=%i}",
request->from, request->len, request->handle, request->type);

qemu-char.c

@@ -225,12 +225,12 @@ static void qemu_chr_fe_write_log(CharDriverState *s,
}
while (done < len) {
do {
ret = write(s->logfd, buf + done, len - done);
if (ret == -1 && errno == EAGAIN) {
g_usleep(100);
}
} while (ret == -1 && errno == EAGAIN);
retry:
ret = write(s->logfd, buf + done, len - done);
if (ret == -1 && errno == EAGAIN) {
g_usleep(100);
goto retry;
}
if (ret <= 0) {
return;
@@ -246,12 +246,12 @@ static int qemu_chr_fe_write_buffer(CharDriverState *s, const uint8_t *buf, int
qemu_mutex_lock(&s->chr_write_lock);
while (*offset < len) {
do {
res = s->chr_write(s, buf + *offset, len - *offset);
if (res == -1 && errno == EAGAIN) {
g_usleep(100);
}
} while (res == -1 && errno == EAGAIN);
retry:
res = s->chr_write(s, buf + *offset, len - *offset);
if (res < 0 && errno == EAGAIN) {
g_usleep(100);
goto retry;
}
if (res <= 0) {
break;
@@ -333,12 +333,12 @@ int qemu_chr_fe_read_all(CharDriverState *s, uint8_t *buf, int len)
}
while (offset < len) {
do {
res = s->chr_sync_read(s, buf + offset, len - offset);
if (res == -1 && errno == EAGAIN) {
g_usleep(100);
}
} while (res == -1 && errno == EAGAIN);
retry:
res = s->chr_sync_read(s, buf + offset, len - offset);
if (res == -1 && errno == EAGAIN) {
g_usleep(100);
goto retry;
}
if (res == 0) {
break;
@@ -3081,6 +3081,8 @@ static int tcp_chr_new_client(CharDriverState *chr, QIOChannelSocket *sioc)
s->sioc = sioc;
object_ref(OBJECT(sioc));
qio_channel_set_blocking(s->ioc, false, NULL);
if (s->do_nodelay) {
qio_channel_set_delay(s->ioc, false);
}
@@ -3112,7 +3114,6 @@ static int tcp_chr_add_client(CharDriverState *chr, int fd)
if (!sioc) {
return -1;
}
qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
ret = tcp_chr_new_client(chr, sioc);
object_unref(OBJECT(sioc));
return ret;

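The chardev hunks above replace do/while retry loops whose exit condition re-tested errno after g_usleep(); on OS X g_usleep() can clobber errno, so the loop could exit or spin incorrectly. The goto form inspects errno exactly once, immediately after the failing call. Schematically (fd, buf, len hypothetical):

    /* Broken: errno may have been overwritten by g_usleep() by the time
     * the while-condition reads it again. */
    do {
        ret = write(fd, buf, len);
        if (ret == -1 && errno == EAGAIN) {
            g_usleep(100);
        }
    } while (ret == -1 && errno == EAGAIN);

    /* Fixed: errno is tested only directly after write(). */
    retry:
        ret = write(fd, buf, len);
        if (ret == -1 && errno == EAGAIN) {
            g_usleep(100);
            goto retry;
        }
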
scripts/checkpatch.pl

@@ -212,6 +212,7 @@ our @typeList = (
qr{${Ident}_t},
qr{${Ident}_handler},
qr{${Ident}_handler_fn},
qr{target_(?:u)?long},
);
# This can be modified by sub possible. Since it can be empty, be careful

target-i386/hyperv.c

@@ -44,6 +44,18 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
return -1;
}
return 0;
case KVM_EXIT_HYPERV_HCALL: {
uint16_t code;
code = exit->u.hcall.input & 0xffff;
switch (code) {
case HVCALL_POST_MESSAGE:
case HVCALL_SIGNAL_EVENT:
default:
exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE;
return 0;
}
}
default:
return -1;
}

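For context on where this handler is reached: the x86 exit dispatcher routes KVM_EXIT_HYPERV to kvm_hv_handle_exit(), and the value written to exit->u.hcall.result is what the guest observes when the vCPU re-enters. A sketch of the dispatch site in kvm_arch_handle_exit():

    case KVM_EXIT_HYPERV:
        /* run->hyperv is the struct kvm_hyperv_exit shown in kvm.h above */
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
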
target-i386/kvm.c

@@ -141,6 +141,7 @@ static int kvm_get_tsc(CPUState *cs)
return ret;
}
assert(ret == 1);
env->tsc = msr_data.entries[0].data;
return 0;
}
@@ -917,6 +918,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
has_msr_mtrr = true;
}
if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
has_msr_tsc_aux = false;
}
return 0;
}
@@ -1443,6 +1447,7 @@ static int kvm_put_tscdeadline_msr(X86CPU *cpu)
struct kvm_msr_entry entries[1];
} msr_data;
struct kvm_msr_entry *msrs = msr_data.entries;
int ret;
if (!has_msr_tsc_deadline) {
return 0;
@@ -1454,7 +1459,13 @@
.nmsrs = 1,
};
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
if (ret < 0) {
return ret;
}
assert(ret == 1);
return 0;
}
/*
@@ -1469,6 +1480,11 @@ static int kvm_put_msr_feature_control(X86CPU *cpu)
struct kvm_msrs info;
struct kvm_msr_entry entry;
} msr_data;
int ret;
if (!has_msr_feature_control) {
return 0;
}
kvm_msr_entry_set(&msr_data.entry, MSR_IA32_FEATURE_CONTROL,
cpu->env.msr_ia32_feature_control);
@@ -1477,7 +1493,13 @@
.nmsrs = 1,
};
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
if (ret < 0) {
return ret;
}
assert(ret == 1);
return 0;
}
static int kvm_put_msrs(X86CPU *cpu, int level)
@@ -1489,6 +1511,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
} msr_data;
struct kvm_msr_entry *msrs = msr_data.entries;
int n = 0, i;
int ret;
kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
@@ -1682,8 +1705,13 @@
.nmsrs = n,
};
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
if (ret < 0) {
return ret;
}
assert(ret == n);
return 0;
}
@@ -2052,6 +2080,7 @@ static int kvm_get_msrs(X86CPU *cpu)
return ret;
}
assert(ret == n);
for (i = 0; i < ret; i++) {
uint32_t index = msrs[i].index;
switch (index) {
@@ -2508,7 +2537,7 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
if (level >= KVM_PUT_RESET_STATE && has_msr_feature_control) {
if (level >= KVM_PUT_RESET_STATE) {
ret = kvm_put_msr_feature_control(x86_cpu);
if (ret < 0) {
return ret;

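The recurring pattern in this file exists because KVM_GET_MSRS/KVM_SET_MSRS return the number of MSRs actually processed rather than a plain 0 on success; a short count means the kernel silently stopped at an MSR it would not handle. The new asserts turn that into a loud failure:

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;        /* the ioctl itself failed */
    }
    assert(ret == n);      /* fewer than n: an MSR was silently dropped */
    return 0;
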

@@ -27,9 +27,6 @@
#ifndef AI_ADDRCONFIG
# define AI_ADDRCONFIG 0
#endif
#ifndef AI_V4MAPPED
# define AI_V4MAPPED 0
#endif
#ifndef EAI_ADDRFAMILY
# define EAI_ADDRFAMILY 0
#endif
@@ -42,7 +39,7 @@ static int check_bind(const char *hostname, bool *has_proto)
int ret = -1;
memset(&ai, 0, sizeof(ai));
ai.ai_flags = AI_CANONNAME | AI_V4MAPPED | AI_ADDRCONFIG;
ai.ai_flags = AI_CANONNAME | AI_ADDRCONFIG;
ai.ai_family = AF_UNSPEC;
ai.ai_socktype = SOCK_STREAM;

util/qemu-sockets.c

@@ -29,6 +29,7 @@
#ifndef AI_ADDRCONFIG
# define AI_ADDRCONFIG 0
#endif
#ifndef AI_V4MAPPED
# define AI_V4MAPPED 0
#endif
@@ -354,10 +355,14 @@ static struct addrinfo *inet_parse_connect_saddr(InetSocketAddress *saddr,
struct addrinfo ai, *res;
int rc;
Error *err = NULL;
static int useV4Mapped = 1;
memset(&ai, 0, sizeof(ai));
ai.ai_flags = AI_CANONNAME | AI_V4MAPPED | AI_ADDRCONFIG;
ai.ai_flags = AI_CANONNAME | AI_ADDRCONFIG;
if (atomic_read(&useV4Mapped)) {
ai.ai_flags |= AI_V4MAPPED;
}
ai.ai_family = inet_ai_family_from_address(saddr, &err);
ai.ai_socktype = SOCK_STREAM;
@@ -373,6 +378,18 @@ static struct addrinfo *inet_parse_connect_saddr(InetSocketAddress *saddr,
/* lookup */
rc = getaddrinfo(saddr->host, saddr->port, &ai, &res);
/* At least FreeBSD and OS-X 10.6 declare AI_V4MAPPED but
* then don't implement it in their getaddrinfo(). Detect
* this and retry without the flag since that's preferable
* to a fatal error
*/
if (rc == EAI_BADFLAGS &&
(ai.ai_flags & AI_V4MAPPED)) {
atomic_set(&useV4Mapped, 0);
ai.ai_flags &= ~AI_V4MAPPED;
rc = getaddrinfo(saddr->host, saddr->port, &ai, &res);
}
if (rc != 0) {
error_setg(errp, "address resolution failed for %s:%s: %s",
saddr->host, saddr->port, gai_strerror(rc));
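
A self-contained sketch of the retry logic above, for reference: some libcs (FreeBSD, OS X 10.6) define AI_V4MAPPED yet fail it with EAI_BADFLAGS, so the flag is dropped and the lookup retried once:

    #include <netdb.h>
    #include <string.h>
    #include <sys/socket.h>

    static int resolve(const char *host, const char *port,
                       struct addrinfo **res)
    {
        struct addrinfo ai;
        int rc;

        memset(&ai, 0, sizeof(ai));
        ai.ai_flags = AI_CANONNAME | AI_ADDRCONFIG;
    #ifdef AI_V4MAPPED
        ai.ai_flags |= AI_V4MAPPED;
    #endif
        ai.ai_family = AF_UNSPEC;
        ai.ai_socktype = SOCK_STREAM;

        rc = getaddrinfo(host, port, &ai, res);
    #ifdef AI_V4MAPPED
        if (rc == EAI_BADFLAGS && (ai.ai_flags & AI_V4MAPPED)) {
            ai.ai_flags &= ~AI_V4MAPPED;   /* getaddrinfo() rejected the flag */
            rc = getaddrinfo(host, port, &ai, res);
        }
    #endif
        return rc;
    }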