Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

virtio,pc,pci: bugfixes

Tiny fixes: important but mostly obvious ones.  Revert VDPA network sim
for this release as there are questions around its maintainability.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmYU7qcPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRpn/cIAJBWRN67BS5ysdHjK0Hmw1zumbLpK+85wlAv
# dTfmJmUnIV6Ft5yaFFXCpxVH0/lh/vhG2ra5+lu53mX+GMtwjdqk4Sufvo4TukXu
# uweHUqlb4pdL37Yf7Q9N6kSX4Ay3ITEC7N18IvlBU8be5gRhidejMWlKq/gW/1rk
# +mnWeD5Qxs91Lh2pxShcnsRah0D4UY47dNu3VnglC9wYb4fupukGgj0qOnqYDF2K
# tG9Us0grU/qF1FgqWwbrlhOUO1Ntlp4uYn4JNOFhswAFDPm2XXIJRIPUhoYEi9G2
# HhxGSpDjJm8I9BBbllDnQVpIbBFxoG/EiQRT64Nt+rw+Tq01sPA=
# =AZIl
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 09 Apr 2024 08:30:47 BST
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu:
  qdev-monitor: fix error message in find_device_state()
  vhost-user-blk: simplify and fix vhost_user_blk_handle_config_change
  vdpa-dev: Fix the issue of device status not updating when configuration interruption is triggered
  hw/virtio: Fix packed virtqueue flush used_idx
  virtio-snd: rewrite invalid tx/rx message handling
  virtio-snd: Enhance error handling for invalid transfers
  Revert "hw/virtio: Add support for VDPA network simulation devices"

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Committed by Peter Maydell on 2024-04-09 09:51:07 +01:00 as commit bc0cd4ae88
14 changed files with 97 additions and 482 deletions

@@ -2371,11 +2371,6 @@ F: hw/virtio/vhost-user-scmi*
F: include/hw/virtio/vhost-user-scmi.h
F: tests/qtest/libqos/virtio-scmi.*
vdpa-net
M: Hao Chen <chenh@yusur.tech>
S: Maintained
F: docs/system/devices/vdpa-net.rst
virtio-crypto
M: Gonglei <arei.gonglei@huawei.com>
S: Supported

@@ -99,4 +99,3 @@ Emulated Devices
devices/canokey.rst
devices/usb-u2f.rst
devices/igb.rst
devices/vdpa-net.rst

@@ -1,121 +0,0 @@
vdpa net
============
This document explains the setup and usage of the vdpa network device.
The vdpa network device is a paravirtualized vdpa emulate device.
Description
-----------
VDPA net devices support dirty page bitmap mark and vring state saving and recovery.
Users can use this VDPA device for live migration simulation testing in a nested virtualization environment.
Registers layout
----------------
The vdpa device add live migrate registers layout as follow::
Offset Register Name Bitwidth Associated vq
0x0 LM_LOGGING_CTRL 4bits
0x10 LM_BASE_ADDR_LOW 32bits
0x14 LM_BASE_ADDR_HIGH 32bits
0x18 LM_END_ADDR_LOW 32bits
0x1c LM_END_ADDR_HIGH 32bits
0x20 LM_RING_STATE_OFFSET 32bits vq0
0x24 LM_RING_STATE_OFFSET 32bits vq1
0x28 LM_RING_STATE_OFFSET 32bits vq2
......
0x20+1023*4 LM_RING_STATE_OFFSET 32bits vq1023
These registers are extended at the end of the notify bar space.
Architecture diagram
--------------------
::
|------------------------------------------------------------------------|
| guest-L1-user-space |
| |
| |----------------------------------------|
| | [virtio-net driver] |
| | ^ guest-L2-src(iommu=on) |
| |--------------|-------------------------|
| | | qemu-L2-src(viommu) |
| [dpdk-vdpa]<->[vhost socket]<-+->[vhost-user backend(iommu=on)] |
--------------------------------------------------------------------------
--------------------------------------------------------------------------
| ^ guest-L1-kernel-space |
| | |
| [VFIO] |
| ^ |
| | guest-L1-src(iommu=on) |
--------|-----------------------------------------------------------------
--------|-----------------------------------------------------------------
| [vdpa net device(iommu=on)] [manager nic device] |
| | | |
| | | |
| [tap device] qemu-L1-src(viommu) | |
------------------------------------------------+-------------------------
|
|
--------------------- |
| kernel net bridge |<-----
| virbr0 |<----------------------------------
--------------------- |
|
|
-------------------------------------------------------------------------- |
| guest-L1-user-space | |
| | |
| |----------------------------------------| |
| | [virtio-net driver] | |
| | ^ guest-L2-dst(iommu=on) | |
| |--------------|-------------------------| |
| | | qemu-L2-dst(viommu) | |
| [dpdk-vdpa]<->[vhost socket]<-+->[vhost-user backend(iommu=on)] | |
-------------------------------------------------------------------------- |
-------------------------------------------------------------------------- |
| ^ guest-L1-kernel-space | |
| | | |
| [VFIO] | |
| ^ | |
| | guest-L1-dst(iommu=on) | |
--------|----------------------------------------------------------------- |
--------|----------------------------------------------------------------- |
| [vdpa net device(iommu=on)] [manager nic device]----------------+----
| | |
| | |
| [tap device] qemu-L1-dst(viommu) |
--------------------------------------------------------------------------
Device properties
-----------------
The Virtio vdpa device can be configured with the following properties:
* ``vdpa=on`` open vdpa device emulated.
Usages
--------
This patch add virtio sriov support and vdpa live migrate support.
You can open vdpa by set xml file as follow::
<qemu:commandline xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
<qemu:arg value='-device'/>
<qemu:arg value='intel-iommu,intremap=on,device-iotlb=on,aw-bits=48'/>
<qemu:arg value='-netdev'/>
<qemu:arg value='tap,id=hostnet1,script=no,downscript=no,vhost=off'/>
<qemu:arg value='-device'/>
<qemu:arg value='virtio-net-pci,netdev=hostnet1,id=net1,mac=56:4a:b7:4f:4d:a9,bus=pci.6,addr=0x0,iommu_platform=on,ats=on,vdpa=on'/>
</qemu:commandline>
Limitations
-----------
1. Dependent on tap device with param ``vhost=off``.
2. Nested virtualization environment only supports ``q35`` machines.
3. Current only support split vring live migrate.
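
An aside on the "Registers layout" table in the removed document above, kept purely as a reader aid since the file itself is deleted by the revert: the per-queue LM_RING_STATE_OFFSET register sits at 0x20 + 4 * queue index, which is where the 0x20+1023*4 entry for vq1023 comes from. A minimal sketch of that arithmetic (the helper name is made up for illustration, not QEMU code):

#include <stdio.h>

/* Offset of a queue's LM_RING_STATE_OFFSET register, per the removed
 * vdpa-net.rst table; vq_index is 0-based. Illustrative helper only. */
static unsigned int lm_ring_state_offset(unsigned int vq_index)
{
    return 0x20 + 4 * vq_index;   /* vq0 -> 0x20, vq1 -> 0x24, vq1023 -> 0x101c */
}

int main(void)
{
    printf("vq2 state register at notify-bar offset 0x%x\n",
           lm_ring_state_offset(2));
    return 0;
}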

@@ -456,7 +456,6 @@ static uint32_t virtio_snd_pcm_prepare(VirtIOSound *s, uint32_t stream_id)
stream->s = s;
qemu_mutex_init(&stream->queue_mutex);
QSIMPLEQ_INIT(&stream->queue);
QSIMPLEQ_INIT(&stream->invalid);
/*
* stream_id >= s->snd_conf.streams was checked before so this is
@@ -611,9 +610,6 @@ static size_t virtio_snd_pcm_get_io_msgs_count(VirtIOSoundPCMStream *stream)
QSIMPLEQ_FOREACH_SAFE(buffer, &stream->queue, entry, next) {
count += 1;
}
QSIMPLEQ_FOREACH_SAFE(buffer, &stream->invalid, entry, next) {
count += 1;
}
}
return count;
}
@@ -831,25 +827,22 @@ static void virtio_snd_handle_event(VirtIODevice *vdev, VirtQueue *vq)
trace_virtio_snd_handle_event();
}
/*
* Must only be called if vsnd->invalid is not empty.
*/
static inline void empty_invalid_queue(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOSoundPCMBuffer *buffer = NULL;
VirtIOSoundPCMStream *stream = NULL;
virtio_snd_pcm_status resp = { 0 };
VirtIOSound *vsnd = VIRTIO_SND(vdev);
bool any = false;
for (uint32_t i = 0; i < vsnd->snd_conf.streams; i++) {
stream = vsnd->pcm->streams[i];
if (stream) {
any = false;
WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
while (!QSIMPLEQ_EMPTY(&stream->invalid)) {
buffer = QSIMPLEQ_FIRST(&stream->invalid);
if (buffer->vq != vq) {
break;
}
any = true;
g_assert(!QSIMPLEQ_EMPTY(&vsnd->invalid));
while (!QSIMPLEQ_EMPTY(&vsnd->invalid)) {
buffer = QSIMPLEQ_FIRST(&vsnd->invalid);
/* If buffer->vq != vq, our logic is fundamentally wrong, so bail out */
g_assert(buffer->vq == vq);
resp.status = cpu_to_le32(VIRTIO_SND_S_BAD_MSG);
iov_from_buf(buffer->elem->in_sg,
buffer->elem->in_num,
@@ -859,20 +852,12 @@ static inline void empty_invalid_queue(VirtIODevice *vdev, VirtQueue *vq)
virtqueue_push(vq,
buffer->elem,
sizeof(virtio_snd_pcm_status));
QSIMPLEQ_REMOVE_HEAD(&stream->invalid, entry);
QSIMPLEQ_REMOVE_HEAD(&vsnd->invalid, entry);
virtio_snd_pcm_buffer_free(buffer);
}
if (any) {
/*
* Notify vq about virtio_snd_pcm_status responses.
* Buffer responses must be notified separately later.
*/
/* Notify vq about virtio_snd_pcm_status responses. */
virtio_notify(vdev, vq);
}
}
}
}
}
/*
* The tx virtqueue handler. Makes the buffers available to their respective
@@ -883,15 +868,14 @@ static inline void empty_invalid_queue(VirtIODevice *vdev, VirtQueue *vq)
*/
static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOSound *s = VIRTIO_SND(vdev);
VirtIOSoundPCMStream *stream = NULL;
VirtIOSound *vsnd = VIRTIO_SND(vdev);
VirtIOSoundPCMBuffer *buffer;
VirtQueueElement *elem;
size_t msg_sz, size;
virtio_snd_pcm_xfer hdr;
uint32_t stream_id;
/*
* If any of the I/O messages are invalid, put them in stream->invalid and
* If any of the I/O messages are invalid, put them in vsnd->invalid and
* return them after the for loop.
*/
bool must_empty_invalid_queue = false;
@@ -901,7 +885,7 @@ static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq)
}
trace_virtio_snd_handle_tx_xfer();
for (;;) {
for (VirtIOSoundPCMStream *stream = NULL;; stream = NULL) {
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
break;
@@ -917,12 +901,12 @@ static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq)
}
stream_id = le32_to_cpu(hdr.stream_id);
if (stream_id >= s->snd_conf.streams
|| s->pcm->streams[stream_id] == NULL) {
if (stream_id >= vsnd->snd_conf.streams
|| vsnd->pcm->streams[stream_id] == NULL) {
goto tx_err;
}
stream = s->pcm->streams[stream_id];
stream = vsnd->pcm->streams[stream_id];
if (stream->info.direction != VIRTIO_SND_D_OUTPUT) {
goto tx_err;
}
@@ -942,13 +926,11 @@ static void virtio_snd_handle_tx_xfer(VirtIODevice *vdev, VirtQueue *vq)
continue;
tx_err:
WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
must_empty_invalid_queue = true;
buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer));
buffer->elem = elem;
buffer->vq = vq;
QSIMPLEQ_INSERT_TAIL(&stream->invalid, buffer, entry);
}
QSIMPLEQ_INSERT_TAIL(&vsnd->invalid, buffer, entry);
}
if (must_empty_invalid_queue) {
@@ -965,15 +947,14 @@ tx_err:
*/
static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOSound *s = VIRTIO_SND(vdev);
VirtIOSoundPCMStream *stream = NULL;
VirtIOSound *vsnd = VIRTIO_SND(vdev);
VirtIOSoundPCMBuffer *buffer;
VirtQueueElement *elem;
size_t msg_sz, size;
virtio_snd_pcm_xfer hdr;
uint32_t stream_id;
/*
* if any of the I/O messages are invalid, put them in stream->invalid and
* if any of the I/O messages are invalid, put them in vsnd->invalid and
* return them after the for loop.
*/
bool must_empty_invalid_queue = false;
@@ -983,7 +964,7 @@ static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq)
}
trace_virtio_snd_handle_rx_xfer();
for (;;) {
for (VirtIOSoundPCMStream *stream = NULL;; stream = NULL) {
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
break;
@@ -999,12 +980,12 @@ static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq)
}
stream_id = le32_to_cpu(hdr.stream_id);
if (stream_id >= s->snd_conf.streams
|| !s->pcm->streams[stream_id]) {
if (stream_id >= vsnd->snd_conf.streams
|| !vsnd->pcm->streams[stream_id]) {
goto rx_err;
}
stream = s->pcm->streams[stream_id];
stream = vsnd->pcm->streams[stream_id];
if (stream == NULL || stream->info.direction != VIRTIO_SND_D_INPUT) {
goto rx_err;
}
@@ -1021,13 +1002,11 @@ static void virtio_snd_handle_rx_xfer(VirtIODevice *vdev, VirtQueue *vq)
continue;
rx_err:
WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) {
must_empty_invalid_queue = true;
buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer));
buffer->elem = elem;
buffer->vq = vq;
QSIMPLEQ_INSERT_TAIL(&stream->invalid, buffer, entry);
}
QSIMPLEQ_INSERT_TAIL(&vsnd->invalid, buffer, entry);
}
if (must_empty_invalid_queue) {
@@ -1127,6 +1106,7 @@ static void virtio_snd_realize(DeviceState *dev, Error **errp)
virtio_add_queue(vdev, 64, virtio_snd_handle_rx_xfer);
qemu_mutex_init(&vsnd->cmdq_mutex);
QTAILQ_INIT(&vsnd->cmdq);
QSIMPLEQ_INIT(&vsnd->invalid);
for (uint32_t i = 0; i < vsnd->snd_conf.streams; i++) {
status = virtio_snd_set_pcm_params(vsnd, i, &default_params);
@@ -1376,13 +1356,20 @@ static void virtio_snd_unrealize(DeviceState *dev)
static void virtio_snd_reset(VirtIODevice *vdev)
{
VirtIOSound *s = VIRTIO_SND(vdev);
VirtIOSound *vsnd = VIRTIO_SND(vdev);
virtio_snd_ctrl_command *cmd;
WITH_QEMU_LOCK_GUARD(&s->cmdq_mutex) {
while (!QTAILQ_EMPTY(&s->cmdq)) {
cmd = QTAILQ_FIRST(&s->cmdq);
QTAILQ_REMOVE(&s->cmdq, cmd, next);
/*
* Sanity check that the invalid buffer message queue is emptied at the end
* of every virtio_snd_handle_tx_xfer/virtio_snd_handle_rx_xfer call, and
* must be empty otherwise.
*/
g_assert(QSIMPLEQ_EMPTY(&vsnd->invalid));
WITH_QEMU_LOCK_GUARD(&vsnd->cmdq_mutex) {
while (!QTAILQ_EMPTY(&vsnd->cmdq)) {
cmd = QTAILQ_FIRST(&vsnd->cmdq);
QTAILQ_REMOVE(&vsnd->cmdq, cmd, next);
virtio_snd_ctrl_cmd_free(cmd);
}
}
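
The rework above drops the per-stream invalid lists in favour of a single device-level vsnd->invalid queue: the tx/rx handlers park bad messages there while draining the virtqueue, and empty_invalid_queue() then answers them all with VIRTIO_SND_S_BAD_MSG and one notify. A minimal standalone sketch of that flow, with made-up types and a plain linked list standing in for QSIMPLEQ:

#include <stdio.h>
#include <stddef.h>

/* Made-up stand-ins for the QEMU types. */
struct msg { int stream_id; int valid; struct msg *next; };

struct snd_dev {
    struct msg *invalid_head;   /* device-level side queue, normally empty */
    struct msg *invalid_tail;
};

static void park_invalid(struct snd_dev *d, struct msg *m)
{
    m->next = NULL;
    if (d->invalid_tail) {
        d->invalid_tail->next = m;
    } else {
        d->invalid_head = m;
    }
    d->invalid_tail = m;
}

/* Drain the "virtqueue" (an array here): good messages are queued for I/O,
 * bad ones are parked so the pop loop never sees them again, and the guest
 * gets a single error-status pass at the end, mirroring empty_invalid_queue(). */
static void handle_xfer(struct snd_dev *d, struct msg *msgs, int n)
{
    for (int i = 0; i < n; i++) {
        if (!msgs[i].valid) {
            park_invalid(d, &msgs[i]);
            continue;
        }
        printf("stream %d: buffer queued for I/O\n", msgs[i].stream_id);
    }

    if (d->invalid_head) {
        for (struct msg *m = d->invalid_head; m != NULL; m = m->next) {
            printf("stream %d: returned with BAD_MSG status\n", m->stream_id);
        }
        d->invalid_head = d->invalid_tail = NULL;
        printf("one notify covers all error responses\n");
    }
}

int main(void)
{
    struct snd_dev d = { NULL, NULL };
    struct msg msgs[] = { { 0, 1, NULL }, { 7, 0, NULL }, { 1, 1, NULL } };

    handle_xfer(&d, msgs, 3);
    return 0;
}

Because the side queue lives on the device and is only populated inside the handlers, virtio_snd_reset() can assert it is empty, which is exactly what the new g_assert(QSIMPLEQ_EMPTY(&vsnd->invalid)) above checks.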

@@ -91,7 +91,6 @@ static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
static int vhost_user_blk_handle_config_change(struct vhost_dev *dev)
{
int ret;
struct virtio_blk_config blkcfg;
VirtIODevice *vdev = dev->vdev;
VHostUserBlk *s = VHOST_USER_BLK(dev->vdev);
Error *local_err = NULL;
@@ -100,19 +99,15 @@ static int vhost_user_blk_handle_config_change(struct vhost_dev *dev)
return 0;
}
ret = vhost_dev_get_config(dev, (uint8_t *)&blkcfg,
ret = vhost_dev_get_config(dev, (uint8_t *)&s->blkcfg,
vdev->config_len, &local_err);
if (ret < 0) {
error_report_err(local_err);
return ret;
}
/* valid for resize only */
if (blkcfg.capacity != s->blkcfg.capacity) {
s->blkcfg.capacity = blkcfg.capacity;
memcpy(dev->vdev->config, &s->blkcfg, vdev->config_len);
virtio_notify_config(dev->vdev);
}
return 0;
}

@@ -2039,22 +2039,6 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
goto err;
}
/* Mark dirty page's bitmap of guest memory */
if (vdev->lm_logging_ctrl == LM_ENABLE) {
uint64_t chunk = elem->in_addr[i] / VHOST_LOG_CHUNK;
/* Get chunk index */
BitmapMemoryRegionCaches *caches = qatomic_rcu_read(&vdev->caches);
uint64_t index = chunk / 8;
uint64_t shift = chunk % 8;
uint8_t val = 0;
address_space_read_cached(&caches->bitmap, index, &val,
sizeof(val));
val |= 1 << shift;
address_space_write_cached(&caches->bitmap, index, &val,
sizeof(val));
address_space_cache_invalidate(&caches->bitmap, index, sizeof(val));
}
elems[i] = elem;
lens[i] = total;
i++;

@@ -195,7 +195,14 @@ static void
vhost_vdpa_device_get_config(VirtIODevice *vdev, uint8_t *config)
{
VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
int ret;
ret = vhost_dev_get_config(&s->dev, s->config, s->config_size,
NULL);
if (ret < 0) {
error_report("get device config space failed");
return;
}
memcpy(config, s->config, s->config_size);
}

@@ -1442,155 +1442,6 @@ int virtio_pci_add_shm_cap(VirtIOPCIProxy *proxy,
return virtio_pci_add_mem_cap(proxy, &cap.cap);
}
/* Called within call_rcu(). */
static void bitmap_free_region_cache(BitmapMemoryRegionCaches *caches)
{
assert(caches != NULL);
address_space_cache_destroy(&caches->bitmap);
g_free(caches);
}
static void lm_disable(VirtIODevice *vdev)
{
BitmapMemoryRegionCaches *caches;
caches = qatomic_read(&vdev->caches);
qatomic_rcu_set(&vdev->caches, NULL);
if (caches) {
call_rcu(caches, bitmap_free_region_cache, rcu);
}
}
static void lm_enable(VirtIODevice *vdev)
{
BitmapMemoryRegionCaches *old = vdev->caches;
BitmapMemoryRegionCaches *new = NULL;
hwaddr addr, end, size;
int64_t len;
addr = vdev->lm_base_addr_low | ((hwaddr)(vdev->lm_base_addr_high) << 32);
end = vdev->lm_end_addr_low | ((hwaddr)(vdev->lm_end_addr_high) << 32);
size = end - addr;
if (size <= 0) {
error_report("Invalid lm size.");
return;
}
new = g_new0(BitmapMemoryRegionCaches, 1);
len = address_space_cache_init(&new->bitmap, vdev->dma_as, addr, size,
true);
if (len < size) {
virtio_error(vdev, "Cannot map bitmap");
goto err_bitmap;
}
qatomic_rcu_set(&vdev->caches, new);
if (old) {
call_rcu(old, bitmap_free_region_cache, rcu);
}
return;
err_bitmap:
address_space_cache_destroy(&new->bitmap);
g_free(new);
}
static uint64_t virtio_pci_lm_read(void *opaque, hwaddr addr,
unsigned size)
{
VirtIOPCIProxy *proxy = opaque;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
hwaddr offset_end = LM_VRING_STATE_OFFSET +
virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
uint32_t val;
int qid;
if (vdev == NULL) {
return UINT64_MAX;
}
switch (addr) {
case LM_LOGGING_CTRL:
val = vdev->lm_logging_ctrl;
break;
case LM_BASE_ADDR_LOW:
val = vdev->lm_base_addr_low;
break;
case LM_BASE_ADDR_HIGH:
val = vdev->lm_base_addr_high;
break;
case LM_END_ADDR_LOW:
val = vdev->lm_end_addr_low;
break;
case LM_END_ADDR_HIGH:
val = vdev->lm_end_addr_high;
break;
default:
if (addr >= LM_VRING_STATE_OFFSET && addr <= offset_end) {
qid = (addr - LM_VRING_STATE_OFFSET) /
virtio_pci_queue_mem_mult(proxy);
val = virtio_queue_get_vring_states(vdev, qid);
} else
val = 0;
break;
}
return val;
}
static void virtio_pci_lm_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
VirtIOPCIProxy *proxy = opaque;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
hwaddr offset_end = LM_VRING_STATE_OFFSET +
virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
int qid;
if (vdev == NULL) {
return;
}
switch (addr) {
case LM_LOGGING_CTRL:
vdev->lm_logging_ctrl = val;
switch (val) {
case LM_DISABLE:
lm_disable(vdev);
break;
case LM_ENABLE:
lm_enable(vdev);
break;
default:
virtio_error(vdev, "Unsupport LM_LOGGING_CTRL value: %"PRIx64,
val);
break;
};
break;
case LM_BASE_ADDR_LOW:
vdev->lm_base_addr_low = val;
break;
case LM_BASE_ADDR_HIGH:
vdev->lm_base_addr_high = val;
break;
case LM_END_ADDR_LOW:
vdev->lm_end_addr_low = val;
break;
case LM_END_ADDR_HIGH:
vdev->lm_end_addr_high = val;
break;
default:
if (addr >= LM_VRING_STATE_OFFSET && addr <= offset_end) {
qid = (addr - LM_VRING_STATE_OFFSET) /
virtio_pci_queue_mem_mult(proxy);
virtio_queue_set_vring_states(vdev, qid, val);
} else
virtio_error(vdev, "Unsupport addr: %"PRIx64, addr);
break;
}
}
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
unsigned size)
{
@@ -1972,15 +1823,6 @@ static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy,
},
.endianness = DEVICE_LITTLE_ENDIAN,
};
static const MemoryRegionOps lm_ops = {
.read = virtio_pci_lm_read,
.write = virtio_pci_lm_write,
.impl = {
.min_access_size = 1,
.max_access_size = 4,
},
.endianness = DEVICE_LITTLE_ENDIAN,
};
g_autoptr(GString) name = g_string_new(NULL);
g_string_printf(name, "virtio-pci-common-%s", vdev_name);
@@ -2017,14 +1859,6 @@ static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy,
proxy,
name->str,
proxy->notify_pio.size);
if (proxy->flags & VIRTIO_PCI_FLAG_VDPA) {
g_string_printf(name, "virtio-pci-lm-%s", vdev_name);
memory_region_init_io(&proxy->lm.mr, OBJECT(proxy),
&lm_ops,
proxy,
name->str,
proxy->lm.size);
}
}
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
@@ -2187,10 +2021,6 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);
if (proxy->flags & VIRTIO_PCI_FLAG_VDPA) {
memory_region_add_subregion(&proxy->modern_bar,
proxy->lm.offset, &proxy->lm.mr);
}
if (modern_pio) {
memory_region_init(&proxy->io_bar, OBJECT(proxy),
@@ -2260,9 +2090,6 @@ static void virtio_pci_device_unplugged(DeviceState *d)
virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
if (proxy->flags & VIRTIO_PCI_FLAG_VDPA) {
memory_region_del_subregion(&proxy->modern_bar, &proxy->lm.mr);
}
if (modern_pio) {
virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
}
@@ -2317,17 +2144,9 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
/* subclasses can enforce modern, so do this unconditionally */
if (!(proxy->flags & VIRTIO_PCI_FLAG_VDPA)) {
memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
/* PCI BAR regions must be powers of 2 */
pow2ceil(proxy->notify.offset + proxy->notify.size));
} else {
proxy->lm.offset = proxy->notify.offset + proxy->notify.size;
proxy->lm.size = 0x20 + VIRTIO_QUEUE_MAX * 4;
memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
/* PCI BAR regions must be powers of 2 */
pow2ceil(proxy->lm.offset + proxy->lm.size));
}
if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
@@ -2482,8 +2301,6 @@ static Property virtio_pci_properties[] = {
VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_AER_BIT, false),
DEFINE_PROP_BIT("vdpa", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_VDPA_BIT, false),
DEFINE_PROP_END_OF_LIST(),
};

@@ -957,12 +957,20 @@ static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
return;
}
/*
* For an indirect element, 'ndescs' is 1.
* For all other elements, 'ndescs' is the number of descriptors
* chained by NEXT (as set in virtqueue_packed_pop).
* So when an 'elem' is filled into the descriptor ring, its 'idx'
* shall be the value of 'vq->used_idx' plus the accumulated 'ndescs'.
*/
ndescs += vq->used_elems[0].ndescs;
for (i = 1; i < count; i++) {
virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false);
virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
ndescs += vq->used_elems[i].ndescs;
}
virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
ndescs += vq->used_elems[0].ndescs;
vq->inuse -= ndescs;
vq->used_idx += ndescs;
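
The hunk above is the core of the used_idx fix: the index passed to virtqueue_packed_fill_desc() changes from the loop counter i to a running ndescs total, so each flushed element lands at used_idx plus the descriptor count of everything flushed before it, while element 0 (written last so the whole batch becomes visible at once) stays at offset 0. A minimal standalone sketch of that arithmetic, using illustrative names rather than the QEMU structures:

#include <stdio.h>

/* Illustrative stand-in for vq->used_elems[]; not the QEMU type. */
struct used_elem { unsigned int ndescs; };

static void flush_order(unsigned int used_idx,
                        const struct used_elem *elems, unsigned int count)
{
    /* Element 0 is written last, but its descriptors still occupy the first
     * slots, so the running total starts with its ndescs. */
    unsigned int ndescs = elems[0].ndescs;

    for (unsigned int i = 1; i < count; i++) {
        printf("elem %u -> ring slot used_idx + %u\n", i, ndescs);
        ndescs += elems[i].ndescs;
    }
    printf("elem 0 -> ring slot used_idx + 0 (written last)\n");
    printf("used_idx advances from %u to %u\n", used_idx, used_idx + ndescs);
}

int main(void)
{
    /* Three chains of 2, 1 and 3 descriptors. */
    const struct used_elem elems[] = { { 2 }, { 1 }, { 3 } };

    flush_order(10, elems, 3);
    return 0;
}

With chains of 2, 1 and 3 descriptors this writes elements 1 and 2 at offsets 2 and 3 and advances used_idx by 6; the old code passed the loop index instead, so whenever a chain had more than one descriptor the later elements landed in the wrong ring slots.
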
@@ -3368,18 +3376,6 @@ static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
return vdev->vq[n].last_avail_idx;
}
static uint32_t virtio_queue_split_get_vring_states(VirtIODevice *vdev,
int n)
{
struct VirtQueue *vq = &vdev->vq[n];
uint16_t avail, used;
avail = vq->last_avail_idx;
used = vq->used_idx;
return avail | (uint32_t)used << 16;
}
unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
@@ -3389,33 +3385,6 @@ unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
}
}
unsigned int virtio_queue_get_vring_states(VirtIODevice *vdev, int n)
{
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
return -1;
} else {
return virtio_queue_split_get_vring_states(vdev, n);
}
}
static void virtio_queue_split_set_vring_states(VirtIODevice *vdev,
int n, uint32_t idx)
{
struct VirtQueue *vq = &vdev->vq[n];
vq->last_avail_idx = (uint16_t)(idx & 0xffff);
vq->shadow_avail_idx = (uint16_t)(idx & 0xffff);
vq->used_idx = (uint16_t)(idx >> 16);
}
void virtio_queue_set_vring_states(VirtIODevice *vdev, int n, uint32_t idx)
{
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
return;
} else {
virtio_queue_split_set_vring_states(vdev, n, idx);
}
}
static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
int n, unsigned int idx)
{

@@ -151,7 +151,6 @@ struct VirtIOSoundPCMStream {
QemuMutex queue_mutex;
bool active;
QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) queue;
QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) invalid;
};
/*
@@ -223,6 +222,21 @@ struct VirtIOSound {
QemuMutex cmdq_mutex;
QTAILQ_HEAD(, virtio_snd_ctrl_command) cmdq;
bool processing_cmdq;
/*
* Convenience queue to keep track of invalid tx/rx queue messages inside
* the tx/rx callbacks.
*
* In the callbacks as a first step we are emptying the virtqueue to handle
* each message and we cannot add an invalid message back to the queue: we
* would re-process it in subsequent loop iterations.
*
* Instead, we add them to this queue and after finishing examining every
* virtqueue element, we inform the guest for each invalid message.
*
* This queue must be empty at all times except for inside the tx/rx
* callbacks.
*/
QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) invalid;
};
struct virtio_snd_ctrl_command {

@@ -43,7 +43,6 @@ enum {
VIRTIO_PCI_FLAG_INIT_FLR_BIT,
VIRTIO_PCI_FLAG_AER_BIT,
VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT,
VIRTIO_PCI_FLAG_VDPA_BIT,
};
/* Need to activate work-arounds for buggy guests at vmstate load. */
@@ -90,9 +89,6 @@ enum {
#define VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED \
(1 << VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT)
/* VDPA supported flags */
#define VIRTIO_PCI_FLAG_VDPA (1 << VIRTIO_PCI_FLAG_VDPA_BIT)
typedef struct {
MSIMessage msg;
int virq;
@@ -144,7 +140,6 @@ struct VirtIOPCIProxy {
};
VirtIOPCIRegion regs[5];
};
VirtIOPCIRegion lm;
MemoryRegion modern_bar;
MemoryRegion io_bar;
uint32_t legacy_io_bar_idx;

@@ -35,9 +35,6 @@
(0x1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
(0x1ULL << VIRTIO_F_ANY_LAYOUT))
#define LM_DISABLE 0x00
#define LM_ENABLE 0x01
struct VirtQueue;
static inline hwaddr vring_align(hwaddr addr,
@@ -98,11 +95,6 @@ enum virtio_device_endian {
VIRTIO_DEVICE_ENDIAN_BIG,
};
typedef struct BitmapMemoryRegionCaches {
struct rcu_head rcu;
MemoryRegionCache bitmap;
} BitmapMemoryRegionCaches;
/**
* struct VirtIODevice - common VirtIO structure
* @name: name of the device
@@ -136,14 +128,6 @@ struct VirtIODevice
uint32_t generation;
int nvectors;
VirtQueue *vq;
uint8_t lm_logging_ctrl;
uint32_t lm_base_addr_low;
uint32_t lm_base_addr_high;
uint32_t lm_end_addr_low;
uint32_t lm_end_addr_high;
BitmapMemoryRegionCaches *caches;
MemoryListener listener;
uint16_t device_id;
/* @vm_running: current VM running state via virtio_vmstate_change() */
@@ -395,11 +379,8 @@ hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n);
unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
unsigned int virtio_queue_get_vring_states(VirtIODevice *vdev, int n);
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
unsigned int idx);
void virtio_queue_set_vring_states(VirtIODevice *vdev, int n,
unsigned int idx);
void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n);
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n);
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n);

@@ -221,13 +221,6 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_PCI_COMMON_ADM_Q_IDX 60
#define VIRTIO_PCI_COMMON_ADM_Q_NUM 62
#define LM_LOGGING_CTRL 0
#define LM_BASE_ADDR_LOW 4
#define LM_BASE_ADDR_HIGH 8
#define LM_END_ADDR_LOW 12
#define LM_END_ADDR_HIGH 16
#define LM_VRING_STATE_OFFSET 0x20
#endif /* VIRTIO_PCI_NO_MODERN */
/* Admin command status. */

@@ -891,7 +891,7 @@ static DeviceState *find_device_state(const char *id, Error **errp)
dev = (DeviceState *)object_dynamic_cast(obj, TYPE_DEVICE);
if (!dev) {
error_setg(errp, "%s is not a hotpluggable device", id);
error_setg(errp, "%s is not a device", id);
return NULL;
}