2008-12-04 20:38:57 +01:00
|
|
|
/*
|
|
|
|
* Virtio Support
|
|
|
|
*
|
|
|
|
* Copyright IBM, Corp. 2007
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Anthony Liguori <aliguori@us.ibm.com>
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2. See
|
|
|
|
* the COPYING file in the top-level directory.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2016-01-26 19:17:07 +01:00
|
|
|
#include "qemu/osdep.h"
|
include/qemu/osdep.h: Don't include qapi/error.h
Commit 57cb38b included qapi/error.h into qemu/osdep.h to get the
Error typedef. Since then, we've moved to include qemu/osdep.h
everywhere. Its file comment explains: "To avoid getting into
possible circular include dependencies, this file should not include
any other QEMU headers, with the exceptions of config-host.h,
compiler.h, os-posix.h and os-win32.h, all of which are doing a
similar job to this file and are under similar constraints."
qapi/error.h doesn't do a similar job, and it doesn't adhere to
similar constraints: it includes qapi-types.h. That's in excess of
100KiB of crap most .c files don't actually need.
Add the typedef to qemu/typedefs.h, and include that instead of
qapi/error.h. Include qapi/error.h in .c files that need it and don't
get it now. Include qapi-types.h in qom/object.h for uint16List.
Update scripts/clean-includes accordingly. Update it further to match
reality: replace config.h by config-target.h, add sysemu/os-posix.h,
sysemu/os-win32.h. Update the list of includes in the qemu/osdep.h
comment quoted above similarly.
This reduces the number of objects depending on qapi/error.h from "all
of them" to less than a third. Unfortunately, the number depending on
qapi-types.h shrinks only a little. More work is needed for that one.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
[Fix compilation without the spice devel packages. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2016-03-14 09:01:28 +01:00
|
|
|
#include "qapi/error.h"
|
2016-01-19 21:51:44 +01:00
|
|
|
#include "qemu-common.h"
|
|
|
|
#include "cpu.h"
|
2010-05-24 14:19:21 +02:00
|
|
|
#include "trace.h"
|
2013-11-15 14:46:38 +01:00
|
|
|
#include "exec/address-spaces.h"
|
2012-12-17 18:20:00 +01:00
|
|
|
#include "qemu/error-report.h"
|
2013-02-05 17:06:20 +01:00
|
|
|
#include "hw/virtio/virtio.h"
|
2012-12-17 18:20:00 +01:00
|
|
|
#include "qemu/atomic.h"
|
2013-02-05 17:06:20 +01:00
|
|
|
#include "hw/virtio/virtio-bus.h"
|
2014-06-24 19:22:30 +02:00
|
|
|
#include "migration/migration.h"
|
2014-06-24 19:40:16 +02:00
|
|
|
#include "hw/virtio/virtio-access.h"
|
2008-12-04 20:38:57 +01:00
|
|
|
|
2013-07-16 14:25:08 +02:00
|
|
|
/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096
|
|
|
|
|
2008-12-04 20:38:57 +01:00
|
|
|
/* One guest-visible buffer descriptor of the legacy "split" vring.
 * Fields are stored guest-endian in memory; vring_desc_read() byte-swaps
 * them into host order on load. */
typedef struct VRingDesc
{
    uint64_t addr;   /* guest-physical address of the buffer */
    uint32_t len;    /* buffer length in bytes */
    uint16_t flags;  /* VRING_DESC_F_* (NEXT / WRITE / INDIRECT) */
    uint16_t next;   /* index of the chained descriptor when F_NEXT is set */
} VRingDesc;
|
|
|
|
|
|
|
|
/* Guest-visible "available" ring header.  The ring itself has vring.num
 * entries and is addressed via offsetof(VRingAvail, ring[i]); the extra
 * slot at ring[vring.num] carries the used_event index when EVENT_IDX is
 * negotiated.
 *
 * ring[] is a C99 flexible array member; the previous ring[0] form is a
 * GNU extension and undefined under strict ISO C.  Layout (and offsetof
 * arithmetic) is unchanged. */
typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
} VRingAvail;
|
|
|
|
|
|
|
|
/* One entry of the "used" ring: which descriptor chain the device consumed
 * and how many bytes it wrote.  Guest-endian in memory; vring_used_write()
 * byte-swaps before storing. */
typedef struct VRingUsedElem
{
    uint32_t id;    /* head index of the completed descriptor chain */
    uint32_t len;   /* total bytes written into the chain's in buffers */
} VRingUsedElem;
|
|
|
|
|
|
|
|
typedef struct VRingUsed
|
|
|
|
{
|
|
|
|
uint16_t flags;
|
|
|
|
uint16_t idx;
|
|
|
|
VRingUsedElem ring[0];
|
|
|
|
} VRingUsed;
|
|
|
|
|
|
|
|
/* Host-side bookkeeping for one vring: its geometry and the guest-physical
 * addresses of the three rings (desc table, avail ring, used ring). */
typedef struct VRing
{
    unsigned int num;          /* current queue size in descriptors */
    unsigned int num_default;  /* default queue size — presumably what reset restores; not used in this chunk */
    unsigned int align;        /* avail/used alignment, see VIRTIO_PCI_VRING_ALIGN */
    hwaddr desc;               /* guest-physical address of the descriptor table */
    hwaddr avail;              /* guest-physical address of the avail ring */
    hwaddr used;               /* guest-physical address of the used ring */
} VRing;
|
|
|
|
|
|
|
|
/* Host state for one virtqueue of a device. */
struct VirtQueue
{
    VRing vring;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    /* Host copy of the used ring's idx field, mirrored by
     * vring_used_idx_set(). */
    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid (cleared when we may have missed
     * a notification window, see virtqueue_flush()). */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    /* Index of this queue within the device's queue array. */
    uint16_t queue_index;

    /* Number of elements popped but not yet returned via the used ring. */
    int inuse;

    /* MSI-X vector for this queue's interrupts. */
    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleOutput handle_aio_output;
    /* Whether the AIO-context handler path is used for this queue. */
    bool use_aio;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};
|
|
|
|
|
|
|
|
/* virt queue functions */
|
2015-06-04 12:34:12 +02:00
|
|
|
/* Recompute the avail/used ring addresses of queue @n from the descriptor
 * table base: avail follows the desc table, used follows avail rounded up
 * to the queue's alignment.  No-op until the guest has set the desc base. */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->desc) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
}
|
|
|
|
|
2016-01-31 11:29:03 +01:00
|
|
|
/* Load descriptor @i of the table at @desc_pa from guest memory into
 * *desc, converting each field from guest to host endianness. */
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            hwaddr desc_pa, int i)
{
    address_space_read(&address_space_memory, desc_pa + i * sizeof(VRingDesc),
                       MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}
|
|
|
|
|
|
|
|
/* Read the guest-written flags field of the avail ring. */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    return virtio_lduw_phys(vq->vdev,
                            vq->vring.avail + offsetof(VRingAvail, flags));
}
|
|
|
|
|
|
|
|
/* Read the avail ring's idx from guest memory, caching it in
 * vq->shadow_avail_idx as a side effect (virtio_queue_empty() consults
 * the shadow to avoid re-reading guest memory). */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
    return vq->shadow_avail_idx;
}
|
|
|
|
|
|
|
|
/* Read entry @i of the avail ring (a descriptor head index). */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr entry = vq->vring.avail + offsetof(VRingAvail, ring[i]);

    return virtio_lduw_phys(vq->vdev, entry);
}
|
|
|
|
|
2015-02-16 22:35:46 +01:00
|
|
|
/* Read the used_event index, which lives in the extra avail-ring slot at
 * ring[vring.num] (VIRTIO_RING_F_EVENT_IDX layout). */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
|
|
|
|
|
2016-01-31 11:29:06 +01:00
|
|
|
/* Store *uelem into slot @i of the used ring, converting to guest
 * endianness in place before the write (so *uelem is clobbered). */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    hwaddr pa;
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
    address_space_write(&address_space_memory, pa, MEMTXATTRS_UNSPECIFIED,
                       (void *)uelem, sizeof(VRingUsedElem));
}
|
|
|
|
|
|
|
|
/* Read the used ring's idx field back from guest memory. */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    return virtio_lduw_phys(vq->vdev,
                            vq->vring.used + offsetof(VRingUsed, idx));
}
|
|
|
|
|
2011-06-12 15:21:57 +02:00
|
|
|
/* Publish @val as the used ring's idx and mirror it into vq->used_idx so
 * later code can consult it without touching guest memory. */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    virtio_stw_phys(vq->vdev, pa, val);
    vq->used_idx = val;
}
|
|
|
|
|
|
|
|
/* OR @mask into the used ring's flags field (read-modify-write of guest
 * memory; e.g. to raise VRING_USED_F_NO_NOTIFY). */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
}
|
|
|
|
|
|
|
|
/* Clear @mask from the used ring's flags field (read-modify-write of
 * guest memory). */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
}
|
|
|
|
|
2015-02-16 22:35:46 +01:00
|
|
|
/* Write the avail_event index (the extra used-ring slot at
 * ring[vring.num], EVENT_IDX layout).  Skipped while notifications are
 * disabled, since the guest need not kick us then anyway. */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys(vq->vdev, pa, val);
}
|
|
|
|
|
2008-12-04 20:38:57 +01:00
|
|
|
/* Enable or disable guest->host notifications for @vq.  With EVENT_IDX
 * negotiated this is done by (re)publishing the avail event; otherwise by
 * toggling VRING_USED_F_NO_NOTIFY in the used ring flags. */
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
|
|
|
|
|
|
|
|
/* A queue is ready once the guest has programmed the avail ring address. */
int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail ? 1 : 0;
}
|
|
|
|
|
2016-01-31 11:29:05 +01:00
|
|
|
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
int virtio_queue_empty(VirtQueue *vq)
{
    /* Fast path: the previously cached avail idx already shows pending
     * buffers, so skip the guest-memory read. */
    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    /* Slow path: refresh the shadow from guest memory and compare. */
    return vring_avail_idx(vq) == vq->last_avail_idx;
}
|
|
|
|
|
2015-09-25 07:21:28 +02:00
|
|
|
/* Unmap all of @elem's scatter/gather mappings.  The device-writable (in)
 * buffers are unmapped dirty up to @len total bytes written; the readonly
 * (out) buffers are unmapped clean.
 *
 * Fix: the out_sg loop body was unbraced, violating the project's
 * always-brace convention (and the style of the in_sg loop above it).
 * Behavior is unchanged. */
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        /* NOTE(review): assumes len >= offset here, i.e. @len does not
         * exceed the total in_sg capacity — confirm against callers. */
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);
    }
}
|
|
|
|
|
2015-09-25 07:21:29 +02:00
|
|
|
/* Return one popped element to the queue unused: rewind last_avail_idx so
 * it will be popped again, drop it from the inuse count, and unmap its
 * buffers (treating up to @len bytes of the in buffers as written). */
void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
                       unsigned int len)
{
    vq->last_avail_idx--;
    vq->inuse--;
    virtqueue_unmap_sg(vq, elem, len);
}
|
|
|
|
|
2016-09-07 17:20:48 +02:00
|
|
|
/* virtqueue_rewind:
|
|
|
|
* @vq: The #VirtQueue
|
|
|
|
* @num: Number of elements to push back
|
|
|
|
*
|
|
|
|
* Pretend that elements weren't popped from the virtqueue. The next
|
|
|
|
* virtqueue_pop() will refetch the oldest element.
|
|
|
|
*
|
|
|
|
* Use virtqueue_discard() instead if you have a VirtQueueElement.
|
|
|
|
*
|
|
|
|
* Returns: true on success, false if @num is greater than the number of in use
|
|
|
|
* elements.
|
|
|
|
*/
|
|
|
|
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
|
|
|
|
{
|
|
|
|
if (num > vq->inuse) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
vq->last_avail_idx -= num;
|
|
|
|
vq->inuse -= num;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-09-25 07:21:28 +02:00
|
|
|
/* Record completion of @elem in the used ring at relative position @idx
 * (0 = next free used slot) with @len bytes written, without publishing a
 * new used idx — callers batch via virtqueue_flush().  Buffers are
 * unmapped here even when the device is broken. */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    /* A broken device must not touch the rings any further. */
    if (unlikely(vq->vdev->broken)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}
|
|
|
|
|
|
|
|
/* Publish @count previously filled used-ring entries to the guest by
 * bumping the used idx, after a write barrier orders the entry writes
 * before the index update.  Invalidates signalled_used when the window
 * [old, new) overran the last signalled value, so the next notification
 * check does not suppress an interrupt.
 *
 * Fix: the trailing `if` body was unbraced, violating the project's
 * always-brace convention used consistently elsewhere in the file.
 * Behavior is unchanged. */
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(vq->vdev->broken)) {
        vq->inuse -= count;
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}
|
|
|
|
|
|
|
|
/* Complete a single element: fill it into the next used slot and
 * immediately publish it (fill + flush of one). */
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
|
|
|
|
|
|
|
|
/* Number of available descriptor heads the guest has published beyond
 * @idx.  Relies on uint16_t wrap-around arithmetic; a count above the
 * ring size means the guest corrupted the ring, which is fatal. */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}
|
|
|
|
|
|
|
|
/* Fetch the descriptor head index the guest advertised at avail position
 * @idx (wrapped into the ring).  A head beyond the ring size is guest
 * ring corruption and is fatal. */
static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}
|
|
|
|
|
2016-01-31 11:29:03 +01:00
|
|
|
/* Follow the descriptor chain one step: if *desc has F_NEXT, load the
 * next descriptor into *desc and return its index; otherwise return @max
 * as the end-of-chain sentinel.  A next index >= @max is fatal. */
static unsigned virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                         hwaddr desc_pa, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return max;
    }

    /* Check they're not leading us off end of descriptors. */
    next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    /* NOTE(review): an smp_wmb() after a load looks odd — a read barrier
     * (or plain compiler barrier) would seem to match the comment's
     * intent; confirm against upstream before changing. */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    vring_desc_read(vdev, desc, desc_pa, next);
    return next;
}
|
|
|
|
|
2012-09-24 20:35:15 +02:00
|
|
|
/* Walk every pending avail chain (without consuming anything) and total
 * the device-writable (in) and device-readable (out) byte counts, stopping
 * early once both @max_in_bytes and @max_out_bytes are satisfied.
 * Indirect descriptor tables are followed; malformed or looping chains
 * are fatal.  Either output pointer may be NULL. */
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        VirtIODevice *vdev = vq->vdev;
        unsigned int max, num_bufs, indirect = 0;
        VRingDesc desc;
        hwaddr desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;
        vring_desc_read(vdev, &desc, desc_pa, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = desc.len / sizeof(VRingDesc);
            desc_pa = desc.addr;
            num_bufs = i = 0;
            vring_desc_read(vdev, &desc, desc_pa, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_read_next_desc(vdev, &desc, desc_pa, max)) != max);

        /* An indirect chain counts as one buffer toward the loop check. */
        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
}
|
2008-12-04 20:38:57 +01:00
|
|
|
|
2012-09-24 20:35:15 +02:00
|
|
|
/* Return non-zero if the queue currently has at least @in_bytes of
 * device-writable and @out_bytes of device-readable buffer space. */
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int avail_in, avail_out;

    virtqueue_get_avail_bytes(vq, &avail_in, &avail_out, in_bytes, out_bytes);
    return avail_in >= in_bytes && avail_out >= out_bytes;
}
|
|
|
|
|
2016-09-21 17:52:21 +02:00
|
|
|
/* Map the guest-physical range [pa, pa+sz) into host iovecs starting at
 * iov[*p_num_sg], recording the guest addresses in addr[].  The mapping
 * may be split into several entries when the range is not host-contiguous.
 * On any failure the device is marked broken via virtio_error() and false
 * is returned; *p_num_sg is always updated to the entries actually mapped
 * so the caller can unmap them. */
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        /* len is updated to the length actually mapped, which may be
         * shorter than requested. */
        iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}
|
|
|
|
|
|
|
|
/* Only used by error code paths before we have a VirtQueueElement (therefore
|
|
|
|
* virtqueue_unmap_sg() can't be used). Assumes buffers weren't written to
|
|
|
|
* yet.
|
|
|
|
*/
|
|
|
|
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
|
|
|
|
struct iovec *iov)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < out_num + in_num; i++) {
|
|
|
|
int is_write = i >= out_num;
|
|
|
|
|
|
|
|
cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
|
|
|
|
iov++;
|
|
|
|
}
|
2016-01-31 11:29:01 +01:00
|
|
|
}
|
|
|
|
|
2015-10-27 09:01:44 +01:00
|
|
|
/* Re-establish host mappings for an iovec whose iov_len values are already
 * set (e.g. restored from migration): each entry's iov_base is recomputed
 * by mapping addr[i].  Any failure or split mapping is fatal. */
static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
                                unsigned int *num_sg, unsigned int max_size,
                                int is_write)
{
    unsigned int i;
    hwaddr len;

    /* Note: this function MUST validate input, some callers
     * are passing in num_sg values received over the network.
     */
    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * When we do, we might be able to re-enable NDEBUG below.
     */
#ifdef NDEBUG
#error building with NDEBUG is not supported
#endif
    assert(*num_sg <= max_size);

    for (i = 0; i < *num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        /* A shorter mapping than requested would silently truncate the
         * element, so treat it as fatal too. */
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}
|
|
|
|
|
2015-10-27 09:01:44 +01:00
|
|
|
/* Remap both scatter/gather lists of @elem (in = device-writable,
 * out = device-readable) after their iov_len/addr fields were restored,
 * e.g. by qemu_get_virtqueue_element(). */
void virtqueue_map(VirtQueueElement *elem)
{
    virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num,
                        VIRTQUEUE_MAX_SIZE, 1);
    virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num,
                        VIRTQUEUE_MAX_SIZE, 0);
}
|
|
|
|
|
|
|
|
/* Allocate a VirtQueueElement (or a device-specific struct of size @sz
 * that embeds one at offset 0) with exactly out_num/in_num entries in its
 * four trailing arrays, laid out in one allocation after the struct:
 * in_addr, out_addr, in_sg, out_sg, each aligned for its element type. */
void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    /* Point the array members into the tail of this same allocation. */
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
|
|
|
|
|
2016-02-04 15:26:51 +01:00
|
|
|
/* Pop the next available element from @vq: fetch the head, walk its
 * descriptor chain (following an indirect table if present), map every
 * buffer into host memory, and return a freshly allocated element of size
 * @sz (>= sizeof(VirtQueueElement)).  Returns NULL when the queue is
 * empty or the device is/becomes broken; recoverable chain errors mark
 * the device broken via virtio_error() and unmap anything mapped so far. */
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem;
    unsigned out_num, in_num;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;

    if (unlikely(vdev->broken)) {
        return NULL;
    }
    if (virtio_queue_empty(vq)) {
        return NULL;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = 0;

    max = vq->vring.num;

    /* More in flight than ring slots means our own accounting broke. */
    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        return NULL;
    }

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    vring_desc_read(vdev, &desc, desc_pa, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        max = desc.len / sizeof(VRingDesc);
        desc_pa = desc.addr;
        i = 0;
        vring_desc_read(vdev, &desc, desc_pa, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            /* The spec requires all readable descriptors before any
             * writable ones. */
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }
    } while ((i = virtqueue_read_next_desc(vdev, &desc, desc_pa, max)) != max);

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    return NULL;
}
|
|
|
|
|
2016-01-31 11:29:00 +01:00
|
|
|
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    /* Fixed-size arrays: this struct's raw bytes ARE the wire format. */
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;
|
|
|
|
|
2016-01-31 11:28:59 +01:00
|
|
|
/* Load one VirtQueueElement from the migration stream.
 *
 * Reads a VirtQueueElementOld (the legacy wire format), allocates a new
 * in-memory element of @sz bytes and converts the data.  iov_base pointers
 * are not transferred; they are reconstructed by virtqueue_map().
 *
 * Returns a heap-allocated element; the caller owns it and must g_free() it.
 */
void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    /* data.in_num/out_num come from the migration stream and are NOT
     * trusted: without this check they would size the allocation below
     * and then index the fixed VIRTQUEUE_MAX_SIZE arrays, allowing a
     * malicious stream to overflow the heap. */
    assert(data.in_num <= VIRTQUEUE_MAX_SIZE);
    assert(data.out_num <= VIRTQUEUE_MAX_SIZE);

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(elem);
    return elem;
}
|
|
|
|
|
|
|
|
/* Save one VirtQueueElement to the migration stream in the legacy
 * VirtQueueElementOld wire layout. */
void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int idx;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    /* Copy guest addresses and segment lengths.  iov_base is left at the
     * zero written by memset: it is rebuilt by virtqueue_map when loading,
     * and saving it would leak the QEMU address space layout. */
    for (idx = 0; idx < elem->in_num; idx++) {
        data.in_addr[idx] = elem->in_addr[idx];
        data.in_sg[idx].iov_len = elem->in_sg[idx].iov_len;
    }
    for (idx = 0; idx < elem->out_num; idx++) {
        data.out_addr[idx] = elem->out_addr[idx];
        data.out_sg[idx].iov_len = elem->out_sg[idx].iov_len;
    }

    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}
|
|
|
|
|
2008-12-04 20:38:57 +01:00
|
|
|
/* virtio device */
|
2009-06-21 18:50:13 +02:00
|
|
|
/* Deliver an interrupt for @vector through the transport's notify hook.
 * Broken devices stay silent. */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *bus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *bus_class = VIRTIO_BUS_GET_CLASS(bus);

    /* A device marked broken must not raise interrupts. */
    if (unlikely(vdev->broken)) {
        return;
    }

    /* The transport may not implement notification at all. */
    if (!bus_class->notify) {
        return;
    }
    bus_class->notify(bus->parent, vector);
}
|
2008-12-04 20:38:57 +01:00
|
|
|
|
2009-05-18 15:51:59 +02:00
|
|
|
/* Re-evaluate the device's interrupt state using the "no vector" path
 * (i.e. the transport's plain INTx-style notification). */
void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
|
|
|
|
|
2015-06-04 12:34:15 +02:00
|
|
|
/* Give the device class a chance to reject the negotiated feature set.
 * Returns 0 when no validation hook exists, otherwise the hook's result. */
static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (!k->validate_features) {
        return 0;
    }
    return k->validate_features(vdev);
}
|
|
|
|
|
|
|
|
/* Set the device status byte.
 *
 * For VIRTIO 1.0 devices, the FEATURES_OK 0->1 transition triggers feature
 * validation; a non-zero return from the validator aborts the status change
 * (vdev->status is left untouched) and is propagated to the caller.
 * The device class's set_status hook runs before the status field is
 * updated, so it can observe the previous status in vdev->status.
 *
 * Returns 0 on success, or the validator's error code.
 */
int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        /* Only validate on the rising edge of FEATURES_OK. */
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }
    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
    return 0;
}
|
|
|
|
|
2014-06-24 19:38:54 +02:00
|
|
|
bool target_words_bigendian(void);
|
|
|
|
static enum virtio_device_endian virtio_default_endian(void)
|
|
|
|
{
|
|
|
|
if (target_words_bigendian()) {
|
|
|
|
return VIRTIO_DEVICE_ENDIAN_BIG;
|
|
|
|
} else {
|
|
|
|
return VIRTIO_DEVICE_ENDIAN_LITTLE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Endianness as seen by the CPU currently executing (current_cpu global).
 * Used for guest-initiated resets, where the vCPU's runtime byte order —
 * which can differ from the compile-time default on bi-endian targets —
 * decides the device's endianness.  Callers must ensure current_cpu is
 * non-NULL (see virtio_reset). */
static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}
|
|
|
|
|
2009-05-18 15:51:59 +02:00
|
|
|
/* Full device reset (registered as a reset handler, hence void *opaque).
 * Order matters: status is driven to 0 first so transport/device hooks see
 * the transition, then endianness is re-latched, then the device class's
 * reset hook runs, and finally all global and per-queue state is cleared. */
void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    /* Clear device-global state; a reset also un-breaks a broken device. */
    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    /* Return every virtqueue to its power-on state, including the ring
     * size (back to num_default) and the in-flight counter. */
    for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
    }
}
|
|
|
|
|
2009-05-18 15:51:59 +02:00
|
|
|
/* Legacy (virtio 0.9) config-space readers.  The device refreshes its
 * config cache via get_config, then the value is read in *target* byte
 * order (ldub/lduw/ldl_p).  Out-of-range reads return all-ones. */

/* Read one byte from legacy config space at @addr. */
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

/* Read a 16-bit word from legacy config space at @addr. */
uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

/* Read a 32-bit word from legacy config space at @addr. */
uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}
|
|
|
|
|
2009-05-18 15:51:59 +02:00
|
|
|
/* Legacy (virtio 0.9) config-space writers.  The value is stored in
 * *target* byte order, then the device is told via set_config (if
 * implemented).  Out-of-range writes are silently dropped. */

/* Write one byte to legacy config space at @addr. */
void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

/* Write a 16-bit word to legacy config space at @addr. */
void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

/* Write a 32-bit word to legacy config space at @addr. */
void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
|
|
|
|
|
2015-06-04 12:34:24 +02:00
|
|
|
/* Modern (virtio 1.0) config-space readers.  Unlike the legacy variants,
 * multi-byte fields are always little-endian (lduw_le_p/ldl_le_p),
 * independent of the target's byte order. */

/* Read one byte from modern config space at @addr. */
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

/* Read a 16-bit little-endian word from modern config space at @addr. */
uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

/* Read a 32-bit little-endian word from modern config space at @addr. */
uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}
|
|
|
|
|
|
|
|
/* Modern (virtio 1.0) config-space writers: always little-endian stores,
 * followed by the device's set_config hook.  Out-of-range writes are
 * silently dropped. */

/* Write one byte to modern config space at @addr. */
void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

/* Write a 16-bit little-endian word to modern config space at @addr. */
void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

/* Write a 32-bit little-endian word to modern config space at @addr. */
void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
|
|
|
|
|
2012-10-23 12:30:10 +02:00
|
|
|
/* Legacy-style ring placement: the guest supplies only the descriptor
 * table address; avail/used are derived from it by
 * virtio_queue_update_rings using the queue's alignment. */
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

/* Return the descriptor-table guest address of queue @n. */
hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

/* Modern-style ring placement: desc/avail/used addresses are set
 * independently, no derivation involved. */
void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
}
|
|
|
|
|
2013-07-16 14:25:07 +02:00
|
|
|
/* Guest-requested resize of queue @n.  Invalid requests are ignored:
 * the size must stay within [0, VIRTQUEUE_MAX_SIZE] and must not flip
 * the queue between existent (num != 0) and nonexistent (num == 0). */
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    if (num < 0 || num > VIRTQUEUE_MAX_SIZE) {
        return;
    }
    if ((num == 0) != (vdev->vq[n].vring.num == 0)) {
        return;
    }
    vdev->vq[n].vring.num = num;
}
|
|
|
|
|
2015-04-23 08:21:46 +02:00
|
|
|
/* First queue on the per-MSI-X-vector list (see virtio_queue_set_vector,
 * which maintains these lists).  NULL when no queue uses @vector. */
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

/* Next queue sharing the same vector, or NULL at the end of the list. */
VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

/* Current ring size of queue @n (0 means the queue does not exist). */
int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}
|
2008-12-04 20:38:57 +01:00
|
|
|
|
2015-05-29 08:15:26 +02:00
|
|
|
/* Count the device's queues.  Queues are allocated contiguously from
 * slot 0, so the count is the index of the first slot with size 0. */
int virtio_get_num_queues(VirtIODevice *vdev)
{
    int count = 0;

    while (count < VIRTIO_QUEUE_MAX &&
           virtio_queue_get_num(vdev, count) != 0) {
        count++;
    }

    return count;
}
|
|
|
|
|
2013-07-16 14:25:08 +02:00
|
|
|
/* Set the vring alignment of queue @n and recompute the derived
 * avail/used addresses.  Only meaningful for legacy devices; rejected
 * (with an error report) for VIRTIO 1.0 devices. */
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtio_queue_update_rings(vdev, n);
}
|
|
|
|
|
2016-04-06 12:16:25 +02:00
|
|
|
/* Dispatch an AioContext-based notification to the queue's aio handler.
 * Mirrors virtio_queue_notify_vq for queues registered via
 * virtio_add_queue_aio. */
static void virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        /* Consistency fix: virtio_queue_notify_vq suppresses handler
         * dispatch for broken devices, but this aio path did not, so a
         * device marked broken could still process requests. */
        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_aio_output(vdev, vq);
    }
}
|
|
|
|
|
2016-04-06 12:16:22 +02:00
|
|
|
/* Dispatch a guest kick to the queue's output handler.  No-op when the
 * queue has no ring configured, has no handler, or the device has been
 * marked broken. */
static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        /* Broken devices must stop processing guest requests. */
        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}
|
|
|
|
|
2009-05-18 15:51:59 +02:00
|
|
|
/* Public entry point for a guest kick on queue @n.
 * NOTE(review): @n is not bounds-checked here; callers (transports) are
 * assumed to pass a valid queue index — confirm at call sites. */
void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}
|
|
|
|
|
2009-06-21 18:50:13 +02:00
|
|
|
/* MSI-X vector bound to queue @n; VIRTIO_NO_VECTOR for an out-of-range
 * queue index. */
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    if (n < VIRTIO_QUEUE_MAX) {
        return vdev->vq[n].vector;
    }
    return VIRTIO_NO_VECTOR;
}
|
|
|
|
|
|
|
|
/* Bind queue @n to MSI-X vector @vector, keeping the per-vector queue
 * lists (walked by virtio_vector_first_queue/next_queue) in sync.
 * Out-of-range @n is ignored, as before. */
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq;

    /* Fix: bounds-check before forming &vdev->vq[n].  The original
     * computed the element address first, which is undefined behavior
     * for n >= VIRTIO_QUEUE_MAX even though it was never dereferenced. */
    if (n >= VIRTIO_QUEUE_MAX) {
        return;
    }

    vq = &vdev->vq[n];

    /* Unlink from the old vector's list (if MSI-X routing is in use)
     * before relinking under the new vector. */
    if (vdev->vector_queues &&
        vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
        QLIST_REMOVE(vq, node);
    }
    vdev->vq[n].vector = vector;
    if (vdev->vector_queues &&
        vector != VIRTIO_NO_VECTOR) {
        QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
    }
}
|
|
|
|
|
2016-07-13 07:09:44 +02:00
|
|
|
static VirtQueue *virtio_add_queue_internal(VirtIODevice *vdev, int queue_size,
|
|
|
|
VirtIOHandleOutput handle_output,
|
|
|
|
bool use_aio)
|
2008-12-04 20:38:57 +01:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2015-05-29 08:15:31 +02:00
|
|
|
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
|
2008-12-04 20:38:57 +01:00
|
|
|
if (vdev->vq[i].vring.num == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-05-29 08:15:31 +02:00
|
|
|
if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
|
2008-12-04 20:38:57 +01:00
|
|
|
abort();
|
|
|
|
|
|
|
|
vdev->vq[i].vring.num = queue_size;
|
2015-09-11 15:16:41 +02:00
|
|
|
vdev->vq[i].vring.num_default = queue_size;
|
2013-07-16 14:25:08 +02:00
|
|
|
vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
|
2008-12-04 20:38:57 +01:00
|
|
|
vdev->vq[i].handle_output = handle_output;
|
2016-04-06 12:16:25 +02:00
|
|
|
vdev->vq[i].handle_aio_output = NULL;
|
2016-07-13 07:09:44 +02:00
|
|
|
vdev->vq[i].use_aio = use_aio;
|
2008-12-04 20:38:57 +01:00
|
|
|
|
|
|
|
return &vdev->vq[i];
|
|
|
|
}
|
|
|
|
|
2016-07-13 07:09:44 +02:00
|
|
|
/* Add a virt queue and mark AIO.
 * An AIO queue will use the AioContext based event interface instead of the
 * default IOHandler and EventNotifier interface.
 */
VirtQueue *virtio_add_queue_aio(VirtIODevice *vdev, int queue_size,
                                VirtIOHandleOutput handle_output)
{
    return virtio_add_queue_internal(vdev, queue_size, handle_output, true);
}

/* Add a normal virt queue (on the contrary to the AIO version above. */
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    return virtio_add_queue_internal(vdev, queue_size, handle_output, false);
}
|
|
|
|
|
2013-01-30 12:12:36 +01:00
|
|
|
/* Remove queue @n by zeroing its size (num == 0 marks a slot as unused,
 * see virtio_add_queue_internal).  Invalid indices abort. */
void virtio_del_queue(VirtIODevice *vdev, int n)
{
    VirtQueue *vq;

    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vq = &vdev->vq[n];
    vq->vring.num = 0;
    vq->vring.num_default = 0;
}
|
|
|
|
|
2010-03-17 12:08:02 +01:00
|
|
|
/* Raise an interrupt for @vq unconditionally (no notification-suppression
 * check — compare virtio_notify, which consults virtio_should_notify). */
void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    /* Bit 0 of ISR = "queue event". */
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}
|
|
|
|
|
2016-02-14 18:17:07 +01:00
|
|
|
/* Decide whether the guest must be interrupted for new used-ring entries,
 * honoring the ring's notification-suppression mechanisms.
 *
 * The smp_mb() is load-bearing: the used-index write must be globally
 * visible before we read the guest's avail flags / used_event, otherwise
 * the guest can re-enable interrupts, see an "empty" used ring, and then
 * miss the interrupt forever (observed in the field with vhost=off). */
bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    /* Without EVENT_IDX, suppression is a single guest-controlled flag. */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    /* EVENT_IDX: notify only if used_idx crossed the guest's used_event
     * threshold since the last signalled value.  An invalid snapshot
     * (e.g. right after reset/migration) forces a notification. */
    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
|
|
|
|
|
|
|
|
/* Interrupt the guest about used-ring progress on @vq, unless the ring's
 * suppression state (see virtio_should_notify) says the guest does not
 * want a notification right now. */
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!virtio_should_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    /* Bit 0 of ISR = "queue event". */
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}
|
|
|
|
|
|
|
|
/* Signal a config-space change to the guest: bump the config generation,
 * set both the queue (0x1) and configuration (0x2) ISR bits, and fire the
 * config vector.  Suppressed until the driver sets DRIVER_OK. */
void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    vdev->isr |= 0x03;
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}
|
|
|
|
|
2014-06-24 19:38:54 +02:00
|
|
|
/* ".needed" predicates for the optional vmstate subsections below: each
 * returns true when its subsection must be included in the outgoing
 * migration stream. */

/* Endianness differs from what the destination would assume by default. */
static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

/* Any feature bits above bit 31 are offered by the device. */
static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

/* Per-queue avail/used addresses must travel for VIRTIO 1.0 devices
 * (they are not derivable from the desc address there). */
static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

/* Some queue was resized away from its default size. */
static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

/* The transport keeps extra migratable state of its own. */
static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
        k->has_extra_state(qbus->parent);
}

/* The device was marked broken; the flag must survive migration. */
static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}
|
|
|
|
|
2016-01-06 13:23:39 +01:00
|
|
|
/* Per-queue element for the "virtio/virtqueues" subsection: migrates the
 * avail and used ring addresses (desc travels in the legacy stream). */
static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection carrying vmstate_virtqueue for all VIRTIO_QUEUE_MAX queues;
 * only emitted for VIRTIO 1.0 devices (virtio_virtqueue_needed). */
static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

/* Per-queue element for the "virtio/ringsize" subsection: the default
 * ring size, needed to restore guest-resized queues correctly. */
static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection emitted only when some queue size differs from its default
 * (virtio_ringsize_needed). */
static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};
|
|
|
|
|
2015-11-06 09:02:44 +01:00
|
|
|
/* VMStateInfo .get: delegate loading of transport-specific extra state to
 * the bus class.  Returns -1 (load failure) when the transport offers no
 * load hook although the subsection was present in the stream. */
static int get_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

/* VMStateInfo .put: delegate saving of transport-specific extra state.
 * NOTE(review): save_extra_state is called without a NULL check; this
 * relies on virtio_extra_state_needed() (has_extra_state) gating the
 * subsection — confirm every transport with has_extra_state also
 * implements save_extra_state. */
static void put_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
}

/* Opaque-blob field description wired into vmstate_virtio_extra_state. */
static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};
|
|
|
|
|
|
|
|
/* Subsection for transport-private state, serialized as a single opaque
 * field via vmstate_info_extra_state; emitted only when the transport
 * reports has_extra_state (virtio_extra_state_needed). */
static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
|
|
|
|
|
2014-06-24 19:38:54 +02:00
|
|
|
/* Subsection: the device's byte-order latch (see virtio_reset), emitted
 * only when it differs from the destination's expected default. */
static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection: full 64-bit guest_features (the legacy stream only carries
 * the low 32 bits — see virtio_save). */
static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection: the "device is broken" flag, emitted only when set. */
static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};
|
|
|
|
|
2014-06-24 19:22:30 +02:00
|
|
|
static const VMStateDescription vmstate_virtio = {
|
|
|
|
.name = "virtio",
|
|
|
|
.version_id = 1,
|
|
|
|
.minimum_version_id = 1,
|
|
|
|
.minimum_version_id_old = 1,
|
|
|
|
.fields = (VMStateField[]) {
|
|
|
|
VMSTATE_END_OF_LIST()
|
2014-06-24 19:38:54 +02:00
|
|
|
},
|
2014-09-23 14:09:54 +02:00
|
|
|
.subsections = (const VMStateDescription*[]) {
|
|
|
|
&vmstate_virtio_device_endian,
|
|
|
|
&vmstate_virtio_64bit_features,
|
2015-08-05 11:50:07 +02:00
|
|
|
&vmstate_virtio_virtqueues,
|
2015-09-11 15:16:41 +02:00
|
|
|
&vmstate_virtio_ringsize,
|
2016-09-21 17:52:20 +02:00
|
|
|
&vmstate_virtio_broken,
|
2015-11-06 09:02:44 +01:00
|
|
|
&vmstate_virtio_extra_state,
|
2014-09-23 14:09:54 +02:00
|
|
|
NULL
|
2014-06-24 19:22:30 +02:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2008-12-04 20:38:57 +01:00
|
|
|
/*
 * Serialize the common virtio device state to the migration stream.
 *
 * The layout is a hand-rolled legacy binary format; the field order here
 * must stay in lockstep with virtio_load().  Subsections handled by
 * vmstate_virtio are appended at the very end.
 */
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    /* Legacy stream only carries the low 32 feature bits; the high bits
     * travel in the "virtio/64bit_features" subsection. */
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    /* Transport-specific config first (e.g. PCI MSI-X state). */
    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    /* Count queues in use: queues are allocated contiguously, so the
     * first one with vring.num == 0 terminates the list. */
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /* XXX virtio-1 devices */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        /* Transport-specific per-queue state (e.g. vector binding). */
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    /* Device-class-specific state (virtio-net, virtio-blk, ...). */
    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
|
|
|
|
|
2016-07-14 19:22:45 +02:00
|
|
|
/* A wrapper for use as a VMState .put function */
|
|
|
|
void virtio_vmstate_save(QEMUFile *f, void *opaque, size_t size)
|
|
|
|
{
|
|
|
|
virtio_save(VIRTIO_DEVICE(opaque), f);
|
|
|
|
}
|
|
|
|
|
2015-06-04 12:34:14 +02:00
|
|
|
/*
 * Apply a guest feature set without checking negotiation state.
 * Bits not offered by the host are silently masked off; the return
 * value reports whether any such bit was requested (-1) or not (0).
 */
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint64_t supported = vdev->host_features;
    bool bad = (val & ~supported) != 0;

    val &= supported;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;

    return bad ? -1 : 0;
}
|
|
|
|
|
2015-06-04 12:34:14 +02:00
|
|
|
/*
 * Negotiate guest features.
 *
 * Returns -EINVAL if feature negotiation has already completed
 * (FEATURES_OK set) — the driver must not change features after that —
 * otherwise delegates to virtio_set_features_nocheck().
 */
int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    bool features_ok = vdev->status & VIRTIO_CONFIG_S_FEATURES_OK;

    /*
     * The driver must not attempt to set features after feature
     * negotiation has finished.
     */
    return features_ok ? -EINVAL : virtio_set_features_nocheck(vdev, val);
}
|
|
|
|
|
2014-06-24 19:15:31 +02:00
|
|
|
/*
 * Deserialize common virtio device state from the migration stream.
 *
 * Must consume fields in exactly the order virtio_save() wrote them:
 * transport config, status/isr/queue_sel, low 32 feature bits, device
 * config space, then per-queue state, device-class state, and finally
 * the vmstate subsections.  Returns 0 on success, negative on error.
 */
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    /* Reject a queue selector beyond the queue array. */
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        /* Runtime notification state is not migrated; reset it. */
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].vring.desc) {
            /* XXX virtio-1 devices */
            virtio_queue_update_rings(vdev, i);
        } else if (vdev->vq[i].last_avail_idx) {
            /* A NULL ring with a non-zero index cannot be valid. */
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    /* Device-class-specific state (mirrors vdc->save in virtio_save). */
    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    /* No endian subsection arrived: fall back to the default. */
    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    /* Sanity-check ring indices now that rings are mapped. */
    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             */
            vdev->vq[i].inuse = vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx;
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }

    return 0;
}
|
|
|
|
|
2013-04-24 10:21:22 +02:00
|
|
|
/*
 * Release resources allocated by virtio_init(): the VM run-state
 * callback, the config space buffer, the virtqueue array and the
 * MSI vector-to-queue map.
 */
void virtio_cleanup(VirtIODevice *vdev)
{
    /* Stop receiving run-state callbacks before tearing anything down. */
    qemu_del_vm_change_state_handler(vdev->vmstate);

    g_free(vdev->vector_queues);
    g_free(vdev->vq);
    g_free(vdev->config);
}
|
|
|
|
|
2011-07-29 19:26:33 +02:00
|
|
|
/*
 * VM run-state callback registered in virtio_init().
 *
 * Tracks whether the backend should be running (VM running AND driver
 * has set DRIVER_OK).  Ordering matters: when starting, the device's
 * status handling runs before the transport is notified; when stopping,
 * the transport is notified first and status handling runs last.
 */
static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}
|
|
|
|
|
2014-09-30 08:10:38 +02:00
|
|
|
/*
 * Common helper for transport proxies' instance_init: initialize the
 * embedded VirtIODevice in place and attach it to the proxy.
 *
 * @proxy_obj: the transport proxy object that embeds the device
 * @data: pointer to the embedded DeviceState storage
 * @vdev_size: size of the embedded device struct
 * @vdev_name: QOM type name to initialize the device as
 */
void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    /* Drop our reference; the child property now keeps the device alive. */
    object_unref(OBJECT(vdev));
    /* Expose the backend's properties on the proxy for the user. */
    qdev_alias_all_properties(vdev, proxy_obj);
}
|
|
|
|
|
2013-01-15 00:08:02 +01:00
|
|
|
/*
 * Initialize the common part of a virtio device.
 *
 * @vdev: device to initialize (QOM object already constructed)
 * @name: device name (pointer is stored, not copied)
 * @device_id: virtio device ID (e.g. VIRTIO_ID_NET)
 * @config_size: size of the device config space; 0 means no config buffer
 *
 * Allocates the virtqueue array (and, if the transport reports MSI
 * vectors, the vector-to-queue map) and registers the VM run-state
 * handler.  Pair with virtio_cleanup().
 */
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    /* Always allocate the maximum number of queues; unused ones keep
     * vring.num == 0. */
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}
|
2008-12-04 20:38:57 +01:00
|
|
|
|
2012-10-23 12:30:10 +02:00
|
|
|
/* Guest-physical address of queue @n's descriptor table. */
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    return vq->vring.desc;
}
|
|
|
|
|
2012-10-23 12:30:10 +02:00
|
|
|
/* Guest-physical address of queue @n's avail ring. */
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    return vq->vring.avail;
}
|
|
|
|
|
2012-10-23 12:30:10 +02:00
|
|
|
/* Guest-physical address of queue @n's used ring. */
hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    return vq->vring.used;
}
|
|
|
|
|
2012-10-23 12:30:10 +02:00
|
|
|
/* Base address of queue @n's ring: same as the descriptor table base. */
hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    return vq->vring.desc;
}
|
|
|
|
|
2012-10-23 12:30:10 +02:00
|
|
|
/* Size in bytes of queue @n's descriptor table. */
hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    unsigned int count = vdev->vq[n].vring.num;

    return sizeof(VRingDesc) * count;
}
|
|
|
|
|
2012-10-23 12:30:10 +02:00
|
|
|
/* Size in bytes of queue @n's avail ring: everything up to ring[] plus
 * one 16-bit entry per descriptor. */
hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    hwaddr entries = sizeof(uint16_t) * vdev->vq[n].vring.num;

    return offsetof(VRingAvail, ring) + entries;
}
|
|
|
|
|
2012-10-23 12:30:10 +02:00
|
|
|
/* Size in bytes of queue @n's used ring: everything up to ring[] plus
 * one used element per descriptor. */
hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    hwaddr entries = sizeof(VRingUsedElem) * vdev->vq[n].vring.num;

    return offsetof(VRingUsed, ring) + entries;
}
|
|
|
|
|
2012-10-23 12:30:10 +02:00
|
|
|
/* Total size in bytes of queue @n's ring area: from the start of the
 * descriptor table to the end of the used ring. */
hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    return vq->vring.used - vq->vring.desc +
           virtio_queue_get_used_size(vdev, n);
}
|
|
|
|
|
|
|
|
/* Host-side consumption index of queue @n's avail ring. */
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    return vq->last_avail_idx;
}
|
|
|
|
|
|
|
|
/* Set queue @n's host-side avail index; the shadow copy is kept in
 * sync so stale cached values are not reused. */
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    VirtQueue *vq = &vdev->vq[n];

    vq->last_avail_idx = idx;
    vq->shadow_avail_idx = idx;
}
|
|
|
|
|
2013-08-12 11:08:09 +02:00
|
|
|
/* Invalidate queue @n's cached signalled-used value so the next
 * notification decision re-reads it. */
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    vq->signalled_used_valid = false;
}
|
|
|
|
|
2010-03-17 12:08:02 +01:00
|
|
|
/* Pointer to queue @n of @vdev. */
VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return &vdev->vq[n];
}
|
|
|
|
|
2013-01-30 12:12:37 +01:00
|
|
|
/* Index of @vq within its device's queue array. */
uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    uint16_t index = vq->queue_index;

    return index;
}
|
|
|
|
|
2012-07-05 17:16:30 +02:00
|
|
|
/* Guest notifier callback: inject an interrupt for the queue if the
 * notifier actually fired. */
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);

    if (!event_notifier_test_and_clear(n)) {
        return;
    }
    virtio_irq(vq);
}
|
|
|
|
|
|
|
|
/*
 * Wire or unwire the guest notifier's fd handler.  When an irqfd is in
 * use the kernel injects interrupts directly, so no userspace handler
 * is installed.
 */
void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    bool need_handler = assign && !with_irqfd;

    event_notifier_set_handler(&vq->guest_notifier, false,
                               need_handler ? virtio_queue_guest_notifier_read
                                            : NULL);

    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}
|
|
|
|
|
2010-03-17 12:08:02 +01:00
|
|
|
/* Accessor for @vq's guest (irq) notifier. */
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    EventNotifier *notifier = &vq->guest_notifier;

    return notifier;
}
|
2012-07-05 17:16:29 +02:00
|
|
|
|
2016-04-06 12:16:25 +02:00
|
|
|
/* AioContext host notifier callback: dispatch the queue's aio handler
 * if the notifier actually fired. */
static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    if (!event_notifier_test_and_clear(n)) {
        return;
    }
    virtio_queue_notify_aio_vq(vq);
}
|
|
|
|
|
2016-02-14 18:17:06 +01:00
|
|
|
/*
 * Install or remove the host notifier's handler in @ctx (dataplane/iothread
 * path).  A non-NULL @handle_output installs it; NULL removes it and drains
 * any pending event so no notification is lost.
 */
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleOutput handle_output)
{
    if (handle_output) {
        /* Record the handler before arming the notifier so a callback
         * firing immediately finds it set. */
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}
|
|
|
|
|
|
|
|
/* Main-loop host notifier callback: run the queue's handler if the
 * notifier actually fired. */
static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    if (!event_notifier_test_and_clear(n)) {
        return;
    }
    virtio_queue_notify_vq(vq);
}
|
|
|
|
|
2012-08-06 15:26:14 +02:00
|
|
|
/*
 * Attach or detach the host notifier's fd handler in the main loop.
 * Queues flagged use_aio register with the main AioContext; others use
 * the plain iohandler interface.  On deassign, drain any pending event.
 */
void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    AioContext *ctx = qemu_get_aio_context();
    if (assign && set_handler) {
        if (vq->use_aio) {
            aio_set_event_notifier(ctx, &vq->host_notifier, true,
                                   virtio_queue_host_notifier_read);
        } else {
            event_notifier_set_handler(&vq->host_notifier, true,
                                       virtio_queue_host_notifier_read);
        }
    } else {
        /* Remove the handler through whichever interface installed it. */
        if (vq->use_aio) {
            aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
        } else {
            event_notifier_set_handler(&vq->host_notifier, true, NULL);
        }
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}
|
|
|
|
|
2010-03-17 12:08:02 +01:00
|
|
|
/* Accessor for @vq's host (kick) notifier. */
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    EventNotifier *notifier = &vq->host_notifier;

    return notifier;
}
|
2013-01-15 00:08:02 +01:00
|
|
|
|
2013-04-30 16:08:48 +02:00
|
|
|
/*
 * Replace the transport-assigned child bus name of @vdev.
 *
 * The new name is duplicated *before* the old one is released, so a
 * caller passing vdev->bus_name itself (aliasing) no longer triggers a
 * use-after-free; the previous free-then-strdup order read freed memory
 * in that case.  A NULL @bus_name is accepted (g_strdup returns NULL).
 */
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    char *new_name = g_strdup(bus_name);

    g_free(vdev->bus_name);
    vdev->bus_name = new_name;
}
|
|
|
|
|
2016-09-21 17:52:19 +02:00
|
|
|
/*
 * Report a device error and mark @vdev broken.
 *
 * For virtio-1 (VERSION_1) devices, additionally set DEVICE_NEEDS_RESET
 * in the status field and raise a config interrupt so the guest learns
 * the device requires a reset.  Legacy devices have no such mechanism,
 * so they are only flagged internally.
 */
void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    vdev->broken = true;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
        virtio_notify_config(vdev);
    }
}
|
|
|
|
|
2013-07-30 00:50:27 +02:00
|
|
|
/*
 * DeviceClass.realize for all virtio devices: run the subclass realize
 * first, then plug the device into its virtio bus.  Errors from either
 * step are propagated to the caller and abort realization.
 */
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    /* Devices should call virtio_init() in their realize callback. */
    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
}
|
|
|
|
|
2013-07-30 00:50:27 +02:00
|
|
|
/*
 * DeviceClass.unrealize for all virtio devices: unplug from the bus,
 * run the subclass unrealize, then release the bus name.
 *
 * NOTE(review): if the subclass unrealize reports an error we return
 * early and vdev->bus_name is never freed — acceptable only if such a
 * failure is fatal to the device anyway; verify callers' expectations.
 */
static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}
|
|
|
|
|
2015-05-26 16:34:47 +02:00
|
|
|
/* Properties common to every virtio device: the user-togglable
 * "common" feature bits backed by host_features. */
static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};
|
|
|
|
|
2013-01-15 00:08:02 +01:00
|
|
|
/*
 * Class init for the abstract TYPE_VIRTIO_DEVICE base: wires the common
 * realize/unrealize hooks, pins devices to the virtio bus and installs
 * the shared feature properties.
 */
static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
}
|
|
|
|
|
|
|
|
/* QOM registration for the abstract virtio device base type; concrete
 * devices (virtio-net, virtio-blk, ...) subclass this. */
static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};
|
|
|
|
|
|
|
|
/* Register the virtio base type with QOM at module-init time. */
static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)