Block layer patches

- vhost-user-blk: Fix error handling during initialisation
 - Add test cases for the vhost-user-blk export
 - Fix leaked Transaction objects
 - qcow2: Expose dirty bit in 'qemu-img info'
 -----BEGIN PGP SIGNATURE-----
 
 iQJFBAABCAAvFiEE3D3rFZqa+V09dFb+fwmycsiPL9YFAmCjnaoRHGt3b2xmQHJl
 ZGhhdC5jb20ACgkQfwmycsiPL9ZRDRAAw814/9O2E5dXDK3dZfqHqxWBdvNuPEuj
 LTUSGpuF+cAPAMJhzZm5Hy8n8G4KGwzpqt/vgBSnBAeAjHPBORGu4gEpr0G/GiHk
 OltElQ2GBBtHdixVhhimk7XoaE90Rmonh3uvolzdI8Ej8a2uvVYJcZhOD4JNZiOV
 HweG5SWcL3mvF4S8m0SOTFdlzeyA8NloaDduILiX+GiwiHystWw7e9bMMyP9Mejx
 95Qip9Huco3KFpYSpnGCvhTJ4jZwuQdqKs2d0dJBtIiU5PeRBY4mw4XT8xh6K3SA
 d/DBIDJ0iEQiVT/a2eNVtY/MsOklJYqnGVUWs18piOu1d/r6zQ2whLtIx/H6z3eD
 PLk1/mKNYcHDM6sTdBpliRsBDRJ7yeeaSqUGY3JPbyIhc7A/gqgfkXiRfEvN9lHF
 O/zerX4tgg7HRlqsyEyT937wiT7I8HHhbS0JtC0c5uxwmk4J0L+PUCnFptUtmZim
 iJTO5h90PKldnzKz0VNXgvrvCFTgmyR/aq89VZfafTE4sNSlZOTbasPADcPUUr/V
 Ju5J1r3J3lctjBGAReiTRxeTHyYHP2BEfzBqt3Orf86qrsNuWY3SXbzAEbcT1Pyu
 O6rIOF6B8DWEN5R2krPC/aw4/lXDST1FdVHibrmbUyQaayJrEWwvudqgTzCcfPNr
 c96LQH3gkTY=
 =Xe4O
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging

Block layer patches

- vhost-user-blk: Fix error handling during initialisation
- Add test cases for the vhost-user-blk export
- Fix leaked Transaction objects
- qcow2: Expose dirty bit in 'qemu-img info'

# gpg: Signature made Tue 18 May 2021 11:57:46 BST
# gpg:                using RSA key DC3DEB159A9AF95D3D7456FE7F09B272C88F2FD6
# gpg:                issuer "kwolf@redhat.com"
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74  56FE 7F09 B272 C88F 2FD6

* remotes/kevin/tags/for-upstream:
  vhost-user-blk: Check that num-queues is supported by backend
  virtio: Fail if iommu_platform is requested, but unsupported
  vhost-user-blk: Get more feature flags from vhost device
  vhost-user-blk: Improve error reporting in realize
  vhost-user-blk: Don't reconnect during initialisation
  vhost-user-blk: Make sure to set Error on realize failure
  vhost-user-blk-test: test discard/write zeroes invalid inputs
  tests/qtest: add multi-queue test case to vhost-user-blk-test
  test: new qTest case to test the vhost-user-blk-server
  block/export: improve vu_blk_sect_range_ok()
  block: Fix Transaction leak in bdrv_reopen_multiple()
  block: Fix Transaction leak in bdrv_root_attach_child()
  qcow2: set bdi->is_dirty

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit d874bc0816 by Peter Maydell, 2021-05-19 16:10:35 +01:00
13 changed files with 1230 additions and 60 deletions

MAINTAINERS

@@ -3245,6 +3245,8 @@ F: block/export/vhost-user-blk-server.c
 F: block/export/vhost-user-blk-server.h
 F: include/qemu/vhost-user-server.h
 F: tests/qtest/libqos/vhost-user-blk.c
+F: tests/qtest/libqos/vhost-user-blk.h
+F: tests/qtest/vhost-user-blk-test.c
 F: util/vhost-user-server.c
 FUSE block device exports

block.c

@@ -2916,13 +2916,14 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
                                        child_role, perm, shared_perm, opaque,
                                        &child, tran, errp);
     if (ret < 0) {
-        bdrv_unref(child_bs);
-        return NULL;
+        assert(child == NULL);
+        goto out;
     }
     ret = bdrv_refresh_perms(child_bs, errp);
-    tran_finalize(tran, ret);
+out:
+    tran_finalize(tran, ret);
     bdrv_unref(child_bs);
     return child;
 }
@@ -4049,7 +4050,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
         ret = bdrv_flush(bs_entry->state.bs);
         if (ret < 0) {
             error_setg_errno(errp, -ret, "Error flushing drive");
-            goto cleanup;
+            goto abort;
         }
     }

block/export/vhost-user-blk-server.c

@@ -70,9 +70,16 @@ static void vu_blk_req_complete(VuBlkReq *req)
 static bool vu_blk_sect_range_ok(VuBlkExport *vexp, uint64_t sector,
                                  size_t size)
 {
-    uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
+    uint64_t nb_sectors;
     uint64_t total_sectors;
+    if (size % VIRTIO_BLK_SECTOR_SIZE) {
+        return false;
+    }
+    nb_sectors = size >> VIRTIO_BLK_SECTOR_BITS;
+    QEMU_BUILD_BUG_ON(BDRV_SECTOR_SIZE != VIRTIO_BLK_SECTOR_SIZE);
     if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
         return false;
     }

block/qcow2.c

@@ -5089,6 +5089,7 @@ static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
     BDRVQcow2State *s = bs->opaque;
     bdi->cluster_size = s->cluster_size;
     bdi->vm_state_offset = qcow2_vm_state_offset(s);
+    bdi->is_dirty = s->incompatible_features & QCOW2_INCOMPAT_DIRTY;
     return 0;
 }
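For context, a minimal caller-side sketch (hypothetical, not part of this series) of how the is_dirty value that qcow2_get_info() now fills in can be consumed through the existing bdrv_get_info() API; bs is assumed to be an open qcow2 BlockDriverState:

    /* Hypothetical example: report whether the open qcow2 image was cleanly
     * shut down, based on the BlockDriverInfo filled in by qcow2_get_info(). */
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) == 0 && bdi.is_dirty) {
        /* the incompatible dirty bit is set, e.g. after a crash */
        warn_report("image was not cleanly shut down");
    }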

hw/block/vhost-user-blk.c

@@ -47,9 +47,13 @@ static const int user_feature_bits[] = {
     VIRTIO_RING_F_INDIRECT_DESC,
     VIRTIO_RING_F_EVENT_IDX,
     VIRTIO_F_NOTIFY_ON_EMPTY,
+    VIRTIO_F_RING_PACKED,
+    VIRTIO_F_IOMMU_PLATFORM,
     VHOST_INVALID_FEATURE_BIT
 };
+static void vhost_user_blk_event(void *opaque, QEMUChrEvent event);
 static void vhost_user_blk_update_config(VirtIODevice *vdev, uint8_t *config)
 {
     VHostUserBlk *s = VHOST_USER_BLK(vdev);
@@ -309,7 +313,7 @@ static void vhost_user_blk_reset(VirtIODevice *vdev)
     vhost_dev_free_inflight(s->inflight);
 }
-static int vhost_user_blk_connect(DeviceState *dev)
+static int vhost_user_blk_connect(DeviceState *dev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VHostUserBlk *s = VHOST_USER_BLK(vdev);
@@ -320,6 +324,7 @@ static int vhost_user_blk_connect(DeviceState *dev)
     }
     s->connected = true;
+    s->dev.num_queues = s->num_queues;
     s->dev.nvqs = s->num_queues;
     s->dev.vqs = s->vhost_vqs;
     s->dev.vq_index = 0;
@@ -329,8 +334,7 @@ static int vhost_user_blk_connect(DeviceState *dev)
     ret = vhost_dev_init(&s->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0);
     if (ret < 0) {
-        error_report("vhost-user-blk: vhost initialization failed: %s",
-                     strerror(-ret));
+        error_setg_errno(errp, -ret, "vhost initialization failed");
         return ret;
     }
@@ -338,8 +342,7 @@ static int vhost_user_blk_connect(DeviceState *dev)
     if (virtio_device_started(vdev, vdev->status)) {
         ret = vhost_user_blk_start(vdev);
         if (ret < 0) {
-            error_report("vhost-user-blk: vhost start failed: %s",
-                         strerror(-ret));
+            error_setg_errno(errp, -ret, "vhost start failed");
             return ret;
         }
     }
@@ -362,19 +365,6 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
     vhost_dev_cleanup(&s->dev);
 }
-static void vhost_user_blk_event(void *opaque, QEMUChrEvent event,
-                                 bool realized);
-static void vhost_user_blk_event_realize(void *opaque, QEMUChrEvent event)
-{
-    vhost_user_blk_event(opaque, event, false);
-}
-static void vhost_user_blk_event_oper(void *opaque, QEMUChrEvent event)
-{
-    vhost_user_blk_event(opaque, event, true);
-}
 static void vhost_user_blk_chr_closed_bh(void *opaque)
 {
     DeviceState *dev = opaque;
@@ -382,36 +372,27 @@ static void vhost_user_blk_chr_closed_bh(void *opaque)
     VHostUserBlk *s = VHOST_USER_BLK(vdev);
     vhost_user_blk_disconnect(dev);
-    qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL,
-                             vhost_user_blk_event_oper, NULL, opaque, NULL, true);
+    qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
+                             NULL, opaque, NULL, true);
 }
-static void vhost_user_blk_event(void *opaque, QEMUChrEvent event,
-                                 bool realized)
+static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
 {
     DeviceState *dev = opaque;
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VHostUserBlk *s = VHOST_USER_BLK(vdev);
+    Error *local_err = NULL;
     switch (event) {
     case CHR_EVENT_OPENED:
-        if (vhost_user_blk_connect(dev) < 0) {
+        if (vhost_user_blk_connect(dev, &local_err) < 0) {
+            error_report_err(local_err);
             qemu_chr_fe_disconnect(&s->chardev);
             return;
         }
         break;
     case CHR_EVENT_CLOSED:
-        /*
-         * Closing the connection should happen differently on device
-         * initialization and operation stages.
-         * On initalization, we want to re-start vhost_dev initialization
-         * from the very beginning right away when the connection is closed,
-         * so we clean up vhost_dev on each connection closing.
-         * On operation, we want to postpone vhost_dev cleanup to let the
-         * other code perform its own cleanup sequence using vhost_dev data
-         * (e.g. vhost_dev_set_log).
-         */
-        if (realized && !runstate_check(RUN_STATE_SHUTDOWN)) {
+        if (!runstate_check(RUN_STATE_SHUTDOWN)) {
             /*
              * A close event may happen during a read/write, but vhost
              * code assumes the vhost_dev remains setup, so delay the
@@ -431,8 +412,6 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event,
              * knowing its type (in this case vhost-user).
              */
             s->dev.started = false;
-        } else {
-            vhost_user_blk_disconnect(dev);
         }
         break;
     case CHR_EVENT_BREAK:
@@ -447,11 +426,10 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VHostUserBlk *s = VHOST_USER_BLK(vdev);
-    Error *err = NULL;
     int i, ret;
     if (!s->chardev.chr) {
-        error_setg(errp, "vhost-user-blk: chardev is mandatory");
+        error_setg(errp, "chardev is mandatory");
         return;
     }
@@ -459,16 +437,16 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
         s->num_queues = 1;
     }
     if (!s->num_queues || s->num_queues > VIRTIO_QUEUE_MAX) {
-        error_setg(errp, "vhost-user-blk: invalid number of IO queues");
+        error_setg(errp, "invalid number of IO queues");
         return;
     }
     if (!s->queue_size) {
-        error_setg(errp, "vhost-user-blk: queue size must be non-zero");
+        error_setg(errp, "queue size must be non-zero");
         return;
     }
     if (s->queue_size > VIRTQUEUE_MAX_SIZE) {
-        error_setg(errp, "vhost-user-blk: queue size must not exceed %d",
+        error_setg(errp, "queue size must not exceed %d",
                    VIRTQUEUE_MAX_SIZE);
         return;
     }
@@ -490,34 +468,31 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
     s->vhost_vqs = g_new0(struct vhost_virtqueue, s->num_queues);
     s->connected = false;
-    qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL,
-                             vhost_user_blk_event_realize, NULL, (void *)dev,
-                             NULL, true);
-reconnect:
-    if (qemu_chr_fe_wait_connected(&s->chardev, &err) < 0) {
-        error_report_err(err);
+    if (qemu_chr_fe_wait_connected(&s->chardev, errp) < 0) {
         goto virtio_err;
     }
-    /* check whether vhost_user_blk_connect() failed or not */
-    if (!s->connected) {
-        goto reconnect;
+    if (vhost_user_blk_connect(dev, errp) < 0) {
+        qemu_chr_fe_disconnect(&s->chardev);
+        goto virtio_err;
     }
+    assert(s->connected);
     ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg,
                                sizeof(struct virtio_blk_config));
     if (ret < 0) {
-        error_report("vhost-user-blk: get block config failed");
-        goto reconnect;
+        error_setg(errp, "vhost-user-blk: get block config failed");
+        goto vhost_err;
     }
-    /* we're fully initialized, now we can operate, so change the handler */
+    /* we're fully initialized, now we can operate, so add the handler */
     qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL,
-                             vhost_user_blk_event_oper, NULL, (void *)dev,
+                             vhost_user_blk_event, NULL, (void *)dev,
                              NULL, true);
     return;
+vhost_err:
+    vhost_dev_cleanup(&s->dev);
 virtio_err:
     g_free(s->vhost_vqs);
     s->vhost_vqs = NULL;

hw/virtio/vhost-user.c

@@ -1909,6 +1909,11 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque)
                 return err;
             }
         }
+        if (dev->num_queues && dev->max_queues < dev->num_queues) {
+            error_report("The maximum number of queues supported by the "
+                         "backend is %" PRIu64, dev->max_queues);
+            return -EINVAL;
+        }
         if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
             !(virtio_has_feature(dev->protocol_features,

hw/virtio/virtio-bus.c

@@ -69,6 +69,11 @@ void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp)
         return;
     }
+    if (has_iommu && !virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
+        error_setg(errp, "iommu_platform=true is not supported by the device");
+        return;
+    }
     if (klass->device_plugged != NULL) {
         klass->device_plugged(qbus->parent, &local_err);
     }

include/hw/virtio/vhost.h

@@ -74,6 +74,8 @@ struct vhost_dev {
     int nvqs;
     /* the first virtqueue which would be used by this vhost dev */
     int vq_index;
+    /* if non-zero, minimum required value for max_queues */
+    int num_queues;
     uint64_t features;
     uint64_t acked_features;
     uint64_t backend_features;

tests/qtest/libqos/meson.build

@@ -32,6 +32,7 @@ libqos_srcs = files('../libqtest.c',
         'virtio-9p.c',
         'virtio-balloon.c',
         'virtio-blk.c',
+        'vhost-user-blk.c',
         'virtio-mmio.c',
         'virtio-net.c',
         'virtio-pci.c',

tests/qtest/libqos/vhost-user-blk.c (new file)

@@ -0,0 +1,130 @@
/*
* libqos driver framework
*
* Based on tests/qtest/libqos/virtio-blk.c
*
* Copyright (c) 2020 Coiby Xu <coiby.xu@gmail.com>
*
* Copyright (c) 2018 Emanuele Giuseppe Esposito <e.emanuelegiuseppe@gmail.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
#include "qemu/osdep.h"
#include "libqtest.h"
#include "qemu/module.h"
#include "standard-headers/linux/virtio_blk.h"
#include "vhost-user-blk.h"
#define PCI_SLOT 0x04
#define PCI_FN 0x00
/* virtio-blk-device */
static void *qvhost_user_blk_get_driver(QVhostUserBlk *v_blk,
const char *interface)
{
if (!g_strcmp0(interface, "vhost-user-blk")) {
return v_blk;
}
if (!g_strcmp0(interface, "virtio")) {
return v_blk->vdev;
}
fprintf(stderr, "%s not present in vhost-user-blk-device\n", interface);
g_assert_not_reached();
}
static void *qvhost_user_blk_device_get_driver(void *object,
const char *interface)
{
QVhostUserBlkDevice *v_blk = object;
return qvhost_user_blk_get_driver(&v_blk->blk, interface);
}
static void *vhost_user_blk_device_create(void *virtio_dev,
QGuestAllocator *t_alloc,
void *addr)
{
QVhostUserBlkDevice *vhost_user_blk = g_new0(QVhostUserBlkDevice, 1);
QVhostUserBlk *interface = &vhost_user_blk->blk;
interface->vdev = virtio_dev;
vhost_user_blk->obj.get_driver = qvhost_user_blk_device_get_driver;
return &vhost_user_blk->obj;
}
/* virtio-blk-pci */
static void *qvhost_user_blk_pci_get_driver(void *object, const char *interface)
{
QVhostUserBlkPCI *v_blk = object;
if (!g_strcmp0(interface, "pci-device")) {
return v_blk->pci_vdev.pdev;
}
return qvhost_user_blk_get_driver(&v_blk->blk, interface);
}
static void *vhost_user_blk_pci_create(void *pci_bus, QGuestAllocator *t_alloc,
void *addr)
{
QVhostUserBlkPCI *vhost_user_blk = g_new0(QVhostUserBlkPCI, 1);
QVhostUserBlk *interface = &vhost_user_blk->blk;
QOSGraphObject *obj = &vhost_user_blk->pci_vdev.obj;
virtio_pci_init(&vhost_user_blk->pci_vdev, pci_bus, addr);
interface->vdev = &vhost_user_blk->pci_vdev.vdev;
g_assert_cmphex(interface->vdev->device_type, ==, VIRTIO_ID_BLOCK);
obj->get_driver = qvhost_user_blk_pci_get_driver;
return obj;
}
static void vhost_user_blk_register_nodes(void)
{
/*
* FIXME: every test using these two nodes needs to setup a
* -drive,id=drive0 otherwise QEMU is not going to start.
* Therefore, we do not include "produces" edge for virtio
* and pci-device yet.
*/
char *arg = g_strdup_printf("id=drv0,chardev=char1,addr=%x.%x",
PCI_SLOT, PCI_FN);
QPCIAddress addr = {
.devfn = QPCI_DEVFN(PCI_SLOT, PCI_FN),
};
QOSGraphEdgeOptions opts = { };
/* virtio-blk-device */
/** opts.extra_device_opts = "drive=drive0"; */
qos_node_create_driver("vhost-user-blk-device",
vhost_user_blk_device_create);
qos_node_consumes("vhost-user-blk-device", "virtio-bus", &opts);
qos_node_produces("vhost-user-blk-device", "vhost-user-blk");
/* virtio-blk-pci */
opts.extra_device_opts = arg;
add_qpci_address(&opts, &addr);
qos_node_create_driver("vhost-user-blk-pci", vhost_user_blk_pci_create);
qos_node_consumes("vhost-user-blk-pci", "pci-bus", &opts);
qos_node_produces("vhost-user-blk-pci", "vhost-user-blk");
g_free(arg);
}
libqos_init(vhost_user_blk_register_nodes);

tests/qtest/libqos/vhost-user-blk.h (new file)

@@ -0,0 +1,48 @@
/*
* libqos driver framework
*
* Based on tests/qtest/libqos/virtio-blk.c
*
* Copyright (c) 2020 Coiby Xu <coiby.xu@gmail.com>
*
* Copyright (c) 2018 Emanuele Giuseppe Esposito <e.emanuelegiuseppe@gmail.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
#ifndef TESTS_LIBQOS_VHOST_USER_BLK_H
#define TESTS_LIBQOS_VHOST_USER_BLK_H
#include "qgraph.h"
#include "virtio.h"
#include "virtio-pci.h"
typedef struct QVhostUserBlk QVhostUserBlk;
typedef struct QVhostUserBlkPCI QVhostUserBlkPCI;
typedef struct QVhostUserBlkDevice QVhostUserBlkDevice;
struct QVhostUserBlk {
QVirtioDevice *vdev;
};
struct QVhostUserBlkPCI {
QVirtioPCIDevice pci_vdev;
QVhostUserBlk blk;
};
struct QVhostUserBlkDevice {
QOSGraphObject obj;
QVhostUserBlk blk;
};
#endif

tests/qtest/meson.build

@@ -231,6 +231,9 @@ if have_virtfs
   qos_test_ss.add(files('virtio-9p-test.c'))
 endif
 qos_test_ss.add(when: 'CONFIG_VHOST_USER', if_true: files('vhost-user-test.c'))
+if have_tools and have_vhost_user_blk_server
+  qos_test_ss.add(files('vhost-user-blk-test.c'))
+endif
 tpmemu_files = ['tpm-emu.c', 'tpm-util.c', 'tpm-tests.c']
@@ -269,6 +272,7 @@ foreach dir : target_dirs
   endif
   qtest_env.set('G_TEST_DBUS_DAEMON', meson.source_root() / 'tests/dbus-vmstate-daemon.sh')
   qtest_env.set('QTEST_QEMU_BINARY', './qemu-system-' + target_base)
+  qtest_env.set('QTEST_QEMU_STORAGE_DAEMON_BINARY', './storage-daemon/qemu-storage-daemon')
   foreach test : target_qtests
     # Executables are shared across targets, declare them only the first time we

tests/qtest/vhost-user-blk-test.c (new file)

@@ -0,0 +1,989 @@
/*
* QTest testcase for Vhost-user Block Device
*
* Based on tests/qtest//virtio-blk-test.c
* Copyright (c) 2014 SUSE LINUX Products GmbH
* Copyright (c) 2014 Marc Marí
* Copyright (c) 2020 Coiby Xu
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "libqtest-single.h"
#include "qemu/bswap.h"
#include "qemu/module.h"
#include "standard-headers/linux/virtio_blk.h"
#include "standard-headers/linux/virtio_pci.h"
#include "libqos/qgraph.h"
#include "libqos/vhost-user-blk.h"
#include "libqos/libqos-pc.h"
#define TEST_IMAGE_SIZE (64 * 1024 * 1024)
#define QVIRTIO_BLK_TIMEOUT_US (30 * 1000 * 1000)
#define PCI_SLOT_HP 0x06
typedef struct {
pid_t pid;
} QemuStorageDaemonState;
typedef struct QVirtioBlkReq {
uint32_t type;
uint32_t ioprio;
uint64_t sector;
char *data;
uint8_t status;
} QVirtioBlkReq;
#ifdef HOST_WORDS_BIGENDIAN
static const bool host_is_big_endian = true;
#else
static const bool host_is_big_endian; /* false */
#endif
static inline void virtio_blk_fix_request(QVirtioDevice *d, QVirtioBlkReq *req)
{
if (qvirtio_is_big_endian(d) != host_is_big_endian) {
req->type = bswap32(req->type);
req->ioprio = bswap32(req->ioprio);
req->sector = bswap64(req->sector);
}
}
static inline void virtio_blk_fix_dwz_hdr(QVirtioDevice *d,
struct virtio_blk_discard_write_zeroes *dwz_hdr)
{
if (qvirtio_is_big_endian(d) != host_is_big_endian) {
dwz_hdr->sector = bswap64(dwz_hdr->sector);
dwz_hdr->num_sectors = bswap32(dwz_hdr->num_sectors);
dwz_hdr->flags = bswap32(dwz_hdr->flags);
}
}
static uint64_t virtio_blk_request(QGuestAllocator *alloc, QVirtioDevice *d,
QVirtioBlkReq *req, uint64_t data_size)
{
uint64_t addr;
uint8_t status = 0xFF;
QTestState *qts = global_qtest;
switch (req->type) {
case VIRTIO_BLK_T_IN:
case VIRTIO_BLK_T_OUT:
g_assert_cmpuint(data_size % 512, ==, 0);
break;
case VIRTIO_BLK_T_DISCARD:
case VIRTIO_BLK_T_WRITE_ZEROES:
g_assert_cmpuint(data_size %
sizeof(struct virtio_blk_discard_write_zeroes), ==, 0);
break;
default:
g_assert_cmpuint(data_size, ==, 0);
}
addr = guest_alloc(alloc, sizeof(*req) + data_size);
virtio_blk_fix_request(d, req);
qtest_memwrite(qts, addr, req, 16);
qtest_memwrite(qts, addr + 16, req->data, data_size);
qtest_memwrite(qts, addr + 16 + data_size, &status, sizeof(status));
return addr;
}
static void test_invalid_discard_write_zeroes(QVirtioDevice *dev,
QGuestAllocator *alloc,
QTestState *qts,
QVirtQueue *vq,
uint32_t type)
{
QVirtioBlkReq req;
struct virtio_blk_discard_write_zeroes dwz_hdr;
struct virtio_blk_discard_write_zeroes dwz_hdr2[2];
uint64_t req_addr;
uint32_t free_head;
uint8_t status;
/* More than one dwz is not supported */
req.type = type;
req.data = (char *) dwz_hdr2;
dwz_hdr2[0].sector = 0;
dwz_hdr2[0].num_sectors = 1;
dwz_hdr2[0].flags = 0;
dwz_hdr2[1].sector = 1;
dwz_hdr2[1].num_sectors = 1;
dwz_hdr2[1].flags = 0;
virtio_blk_fix_dwz_hdr(dev, &dwz_hdr2[0]);
virtio_blk_fix_dwz_hdr(dev, &dwz_hdr2[1]);
req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr2));
free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr2), false, true);
qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr2), 1, true,
false);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 16 + sizeof(dwz_hdr2));
g_assert_cmpint(status, ==, VIRTIO_BLK_S_UNSUPP);
guest_free(alloc, req_addr);
/* num_sectors must be less than config->max_write_zeroes_sectors */
req.type = type;
req.data = (char *) &dwz_hdr;
dwz_hdr.sector = 0;
dwz_hdr.num_sectors = 0xffffffff;
dwz_hdr.flags = 0;
virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);
req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));
free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true);
qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr), 1, true,
false);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 16 + sizeof(dwz_hdr));
g_assert_cmpint(status, ==, VIRTIO_BLK_S_IOERR);
guest_free(alloc, req_addr);
/* sector must be less than the device capacity */
req.type = type;
req.data = (char *) &dwz_hdr;
dwz_hdr.sector = TEST_IMAGE_SIZE / 512 + 1;
dwz_hdr.num_sectors = 1;
dwz_hdr.flags = 0;
virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);
req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));
free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true);
qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr), 1, true,
false);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 16 + sizeof(dwz_hdr));
g_assert_cmpint(status, ==, VIRTIO_BLK_S_IOERR);
guest_free(alloc, req_addr);
/* reserved flag bits must be zero */
req.type = type;
req.data = (char *) &dwz_hdr;
dwz_hdr.sector = 0;
dwz_hdr.num_sectors = 1;
dwz_hdr.flags = ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;
virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);
req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));
free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true);
qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr), 1, true,
false);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 16 + sizeof(dwz_hdr));
g_assert_cmpint(status, ==, VIRTIO_BLK_S_UNSUPP);
guest_free(alloc, req_addr);
}
/* Returns the request virtqueue so the caller can perform further tests */
static QVirtQueue *test_basic(QVirtioDevice *dev, QGuestAllocator *alloc)
{
QVirtioBlkReq req;
uint64_t req_addr;
uint64_t capacity;
uint64_t features;
uint32_t free_head;
uint8_t status;
char *data;
QTestState *qts = global_qtest;
QVirtQueue *vq;
features = qvirtio_get_features(dev);
features = features & ~(QVIRTIO_F_BAD_FEATURE |
(1u << VIRTIO_RING_F_INDIRECT_DESC) |
(1u << VIRTIO_RING_F_EVENT_IDX) |
(1u << VIRTIO_BLK_F_SCSI));
qvirtio_set_features(dev, features);
capacity = qvirtio_config_readq(dev, 0);
g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);
vq = qvirtqueue_setup(dev, alloc, 0);
qvirtio_set_driver_ok(dev);
/* Write and read with 3 descriptor layout */
/* Write request */
req.type = VIRTIO_BLK_T_OUT;
req.ioprio = 1;
req.sector = 0;
req.data = g_malloc0(512);
strcpy(req.data, "TEST");
req_addr = virtio_blk_request(alloc, dev, &req, 512);
g_free(req.data);
free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
qvirtqueue_add(qts, vq, req_addr + 16, 512, false, true);
qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 528);
g_assert_cmpint(status, ==, 0);
guest_free(alloc, req_addr);
/* Read request */
req.type = VIRTIO_BLK_T_IN;
req.ioprio = 1;
req.sector = 0;
req.data = g_malloc0(512);
req_addr = virtio_blk_request(alloc, dev, &req, 512);
g_free(req.data);
free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
qvirtqueue_add(qts, vq, req_addr + 16, 512, true, true);
qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 528);
g_assert_cmpint(status, ==, 0);
data = g_malloc0(512);
qtest_memread(qts, req_addr + 16, data, 512);
g_assert_cmpstr(data, ==, "TEST");
g_free(data);
guest_free(alloc, req_addr);
if (features & (1u << VIRTIO_BLK_F_WRITE_ZEROES)) {
struct virtio_blk_discard_write_zeroes dwz_hdr;
void *expected;
/*
* WRITE_ZEROES request on the same sector of previous test where
* we wrote "TEST".
*/
req.type = VIRTIO_BLK_T_WRITE_ZEROES;
req.data = (char *) &dwz_hdr;
dwz_hdr.sector = 0;
dwz_hdr.num_sectors = 1;
dwz_hdr.flags = 0;
virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);
req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));
free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true);
qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr), 1, true,
false);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 16 + sizeof(dwz_hdr));
g_assert_cmpint(status, ==, 0);
guest_free(alloc, req_addr);
/* Read request to check if the sector contains all zeroes */
req.type = VIRTIO_BLK_T_IN;
req.ioprio = 1;
req.sector = 0;
req.data = g_malloc0(512);
req_addr = virtio_blk_request(alloc, dev, &req, 512);
g_free(req.data);
free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
qvirtqueue_add(qts, vq, req_addr + 16, 512, true, true);
qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 528);
g_assert_cmpint(status, ==, 0);
data = g_malloc(512);
expected = g_malloc0(512);
qtest_memread(qts, req_addr + 16, data, 512);
g_assert_cmpmem(data, 512, expected, 512);
g_free(expected);
g_free(data);
guest_free(alloc, req_addr);
test_invalid_discard_write_zeroes(dev, alloc, qts, vq,
VIRTIO_BLK_T_WRITE_ZEROES);
}
if (features & (1u << VIRTIO_BLK_F_DISCARD)) {
struct virtio_blk_discard_write_zeroes dwz_hdr;
req.type = VIRTIO_BLK_T_DISCARD;
req.data = (char *) &dwz_hdr;
dwz_hdr.sector = 0;
dwz_hdr.num_sectors = 1;
dwz_hdr.flags = 0;
virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);
req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));
free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true);
qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr),
1, true, false);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 16 + sizeof(dwz_hdr));
g_assert_cmpint(status, ==, 0);
guest_free(alloc, req_addr);
test_invalid_discard_write_zeroes(dev, alloc, qts, vq,
VIRTIO_BLK_T_DISCARD);
}
if (features & (1u << VIRTIO_F_ANY_LAYOUT)) {
/* Write and read with 2 descriptor layout */
/* Write request */
req.type = VIRTIO_BLK_T_OUT;
req.ioprio = 1;
req.sector = 1;
req.data = g_malloc0(512);
strcpy(req.data, "TEST");
req_addr = virtio_blk_request(alloc, dev, &req, 512);
g_free(req.data);
free_head = qvirtqueue_add(qts, vq, req_addr, 528, false, true);
qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 528);
g_assert_cmpint(status, ==, 0);
guest_free(alloc, req_addr);
/* Read request */
req.type = VIRTIO_BLK_T_IN;
req.ioprio = 1;
req.sector = 1;
req.data = g_malloc0(512);
req_addr = virtio_blk_request(alloc, dev, &req, 512);
g_free(req.data);
free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
qvirtqueue_add(qts, vq, req_addr + 16, 513, true, false);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 528);
g_assert_cmpint(status, ==, 0);
data = g_malloc0(512);
qtest_memread(qts, req_addr + 16, data, 512);
g_assert_cmpstr(data, ==, "TEST");
g_free(data);
guest_free(alloc, req_addr);
}
return vq;
}
static void basic(void *obj, void *data, QGuestAllocator *t_alloc)
{
QVhostUserBlk *blk_if = obj;
QVirtQueue *vq;
vq = test_basic(blk_if->vdev, t_alloc);
qvirtqueue_cleanup(blk_if->vdev->bus, vq, t_alloc);
}
static void indirect(void *obj, void *u_data, QGuestAllocator *t_alloc)
{
QVirtQueue *vq;
QVhostUserBlk *blk_if = obj;
QVirtioDevice *dev = blk_if->vdev;
QVirtioBlkReq req;
QVRingIndirectDesc *indirect;
uint64_t req_addr;
uint64_t capacity;
uint64_t features;
uint32_t free_head;
uint8_t status;
char *data;
QTestState *qts = global_qtest;
features = qvirtio_get_features(dev);
g_assert_cmphex(features & (1u << VIRTIO_RING_F_INDIRECT_DESC), !=, 0);
features = features & ~(QVIRTIO_F_BAD_FEATURE |
(1u << VIRTIO_RING_F_EVENT_IDX) |
(1u << VIRTIO_BLK_F_SCSI));
qvirtio_set_features(dev, features);
capacity = qvirtio_config_readq(dev, 0);
g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);
vq = qvirtqueue_setup(dev, t_alloc, 0);
qvirtio_set_driver_ok(dev);
/* Write request */
req.type = VIRTIO_BLK_T_OUT;
req.ioprio = 1;
req.sector = 0;
req.data = g_malloc0(512);
strcpy(req.data, "TEST");
req_addr = virtio_blk_request(t_alloc, dev, &req, 512);
g_free(req.data);
indirect = qvring_indirect_desc_setup(qts, dev, t_alloc, 2);
qvring_indirect_desc_add(dev, qts, indirect, req_addr, 528, false);
qvring_indirect_desc_add(dev, qts, indirect, req_addr + 528, 1, true);
free_head = qvirtqueue_add_indirect(qts, vq, indirect);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 528);
g_assert_cmpint(status, ==, 0);
g_free(indirect);
guest_free(t_alloc, req_addr);
/* Read request */
req.type = VIRTIO_BLK_T_IN;
req.ioprio = 1;
req.sector = 0;
req.data = g_malloc0(512);
strcpy(req.data, "TEST");
req_addr = virtio_blk_request(t_alloc, dev, &req, 512);
g_free(req.data);
indirect = qvring_indirect_desc_setup(qts, dev, t_alloc, 2);
qvring_indirect_desc_add(dev, qts, indirect, req_addr, 16, false);
qvring_indirect_desc_add(dev, qts, indirect, req_addr + 16, 513, true);
free_head = qvirtqueue_add_indirect(qts, vq, indirect);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 528);
g_assert_cmpint(status, ==, 0);
data = g_malloc0(512);
qtest_memread(qts, req_addr + 16, data, 512);
g_assert_cmpstr(data, ==, "TEST");
g_free(data);
g_free(indirect);
guest_free(t_alloc, req_addr);
qvirtqueue_cleanup(dev->bus, vq, t_alloc);
}
static void idx(void *obj, void *u_data, QGuestAllocator *t_alloc)
{
QVirtQueue *vq;
QVhostUserBlkPCI *blk = obj;
QVirtioPCIDevice *pdev = &blk->pci_vdev;
QVirtioDevice *dev = &pdev->vdev;
QVirtioBlkReq req;
uint64_t req_addr;
uint64_t capacity;
uint64_t features;
uint32_t free_head;
uint32_t write_head;
uint32_t desc_idx;
uint8_t status;
char *data;
QOSGraphObject *blk_object = obj;
QPCIDevice *pci_dev = blk_object->get_driver(blk_object, "pci-device");
QTestState *qts = global_qtest;
if (qpci_check_buggy_msi(pci_dev)) {
return;
}
qpci_msix_enable(pdev->pdev);
qvirtio_pci_set_msix_configuration_vector(pdev, t_alloc, 0);
features = qvirtio_get_features(dev);
features = features & ~(QVIRTIO_F_BAD_FEATURE |
(1u << VIRTIO_RING_F_INDIRECT_DESC) |
(1u << VIRTIO_F_NOTIFY_ON_EMPTY) |
(1u << VIRTIO_BLK_F_SCSI));
qvirtio_set_features(dev, features);
capacity = qvirtio_config_readq(dev, 0);
g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);
vq = qvirtqueue_setup(dev, t_alloc, 0);
qvirtqueue_pci_msix_setup(pdev, (QVirtQueuePCI *)vq, t_alloc, 1);
qvirtio_set_driver_ok(dev);
/*
* libvhost-user signals the call fd in VHOST_USER_SET_VRING_CALL, make
* sure to wait for the isr here so we don't race and confuse it later on.
*/
qvirtio_wait_queue_isr(qts, dev, vq, QVIRTIO_BLK_TIMEOUT_US);
/* Write request */
req.type = VIRTIO_BLK_T_OUT;
req.ioprio = 1;
req.sector = 0;
req.data = g_malloc0(512);
strcpy(req.data, "TEST");
req_addr = virtio_blk_request(t_alloc, dev, &req, 512);
g_free(req.data);
free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
qvirtqueue_add(qts, vq, req_addr + 16, 512, false, true);
qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false);
qvirtqueue_kick(qts, dev, vq, free_head);
qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
/* Write request */
req.type = VIRTIO_BLK_T_OUT;
req.ioprio = 1;
req.sector = 1;
req.data = g_malloc0(512);
strcpy(req.data, "TEST");
req_addr = virtio_blk_request(t_alloc, dev, &req, 512);
g_free(req.data);
/* Notify after processing the third request */
qvirtqueue_set_used_event(qts, vq, 2);
free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
qvirtqueue_add(qts, vq, req_addr + 16, 512, false, true);
qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false);
qvirtqueue_kick(qts, dev, vq, free_head);
write_head = free_head;
/* No notification expected */
status = qvirtio_wait_status_byte_no_isr(qts, dev,
vq, req_addr + 528,
QVIRTIO_BLK_TIMEOUT_US);
g_assert_cmpint(status, ==, 0);
guest_free(t_alloc, req_addr);
/* Read request */
req.type = VIRTIO_BLK_T_IN;
req.ioprio = 1;
req.sector = 1;
req.data = g_malloc0(512);
req_addr = virtio_blk_request(t_alloc, dev, &req, 512);
g_free(req.data);
free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
qvirtqueue_add(qts, vq, req_addr + 16, 512, true, true);
qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false);
qvirtqueue_kick(qts, dev, vq, free_head);
/* We get just one notification for both requests */
qvirtio_wait_used_elem(qts, dev, vq, write_head, NULL,
QVIRTIO_BLK_TIMEOUT_US);
g_assert(qvirtqueue_get_buf(qts, vq, &desc_idx, NULL));
g_assert_cmpint(desc_idx, ==, free_head);
status = readb(req_addr + 528);
g_assert_cmpint(status, ==, 0);
data = g_malloc0(512);
qtest_memread(qts, req_addr + 16, data, 512);
g_assert_cmpstr(data, ==, "TEST");
g_free(data);
guest_free(t_alloc, req_addr);
/* End test */
qpci_msix_disable(pdev->pdev);
qvirtqueue_cleanup(dev->bus, vq, t_alloc);
}
static void pci_hotplug(void *obj, void *data, QGuestAllocator *t_alloc)
{
QVirtioPCIDevice *dev1 = obj;
QVirtioPCIDevice *dev;
QTestState *qts = dev1->pdev->bus->qts;
/* plug secondary disk */
qtest_qmp_device_add(qts, "vhost-user-blk-pci", "drv1",
"{'addr': %s, 'chardev': 'char2'}",
stringify(PCI_SLOT_HP) ".0");
dev = virtio_pci_new(dev1->pdev->bus,
&(QPCIAddress) { .devfn = QPCI_DEVFN(PCI_SLOT_HP, 0)
});
g_assert_nonnull(dev);
g_assert_cmpint(dev->vdev.device_type, ==, VIRTIO_ID_BLOCK);
qvirtio_pci_device_disable(dev);
qos_object_destroy((QOSGraphObject *)dev);
/* unplug secondary disk */
qpci_unplug_acpi_device_test(qts, "drv1", PCI_SLOT_HP);
}
static void multiqueue(void *obj, void *data, QGuestAllocator *t_alloc)
{
QVirtioPCIDevice *pdev1 = obj;
QVirtioDevice *dev1 = &pdev1->vdev;
QVirtioPCIDevice *pdev8;
QVirtioDevice *dev8;
QTestState *qts = pdev1->pdev->bus->qts;
uint64_t features;
uint16_t num_queues;
/*
* The primary device has 1 queue and VIRTIO_BLK_F_MQ is not enabled. The
* VIRTIO specification allows VIRTIO_BLK_F_MQ to be enabled when there is
* only 1 virtqueue, but --device vhost-user-blk-pci doesn't do this (which
* is also spec-compliant).
*/
features = qvirtio_get_features(dev1);
g_assert_cmpint(features & (1u << VIRTIO_BLK_F_MQ), ==, 0);
features = features & ~(QVIRTIO_F_BAD_FEATURE |
(1u << VIRTIO_RING_F_INDIRECT_DESC) |
(1u << VIRTIO_F_NOTIFY_ON_EMPTY) |
(1u << VIRTIO_BLK_F_SCSI));
qvirtio_set_features(dev1, features);
/* Hotplug a secondary device with 8 queues */
qtest_qmp_device_add(qts, "vhost-user-blk-pci", "drv1",
"{'addr': %s, 'chardev': 'char2', 'num-queues': 8}",
stringify(PCI_SLOT_HP) ".0");
pdev8 = virtio_pci_new(pdev1->pdev->bus,
&(QPCIAddress) {
.devfn = QPCI_DEVFN(PCI_SLOT_HP, 0)
});
g_assert_nonnull(pdev8);
g_assert_cmpint(pdev8->vdev.device_type, ==, VIRTIO_ID_BLOCK);
qos_object_start_hw(&pdev8->obj);
dev8 = &pdev8->vdev;
features = qvirtio_get_features(dev8);
g_assert_cmpint(features & (1u << VIRTIO_BLK_F_MQ),
==,
(1u << VIRTIO_BLK_F_MQ));
features = features & ~(QVIRTIO_F_BAD_FEATURE |
(1u << VIRTIO_RING_F_INDIRECT_DESC) |
(1u << VIRTIO_F_NOTIFY_ON_EMPTY) |
(1u << VIRTIO_BLK_F_SCSI) |
(1u << VIRTIO_BLK_F_MQ));
qvirtio_set_features(dev8, features);
num_queues = qvirtio_config_readw(dev8,
offsetof(struct virtio_blk_config, num_queues));
g_assert_cmpint(num_queues, ==, 8);
qvirtio_pci_device_disable(pdev8);
qos_object_destroy(&pdev8->obj);
/* unplug secondary disk */
qpci_unplug_acpi_device_test(qts, "drv1", PCI_SLOT_HP);
}
/*
* Check that setting the vring addr on a non-existent virtqueue does
* not crash.
*/
static void test_nonexistent_virtqueue(void *obj, void *data,
QGuestAllocator *t_alloc)
{
QVhostUserBlkPCI *blk = obj;
QVirtioPCIDevice *pdev = &blk->pci_vdev;
QPCIBar bar0;
QPCIDevice *dev;
dev = qpci_device_find(pdev->pdev->bus, QPCI_DEVFN(4, 0));
g_assert(dev != NULL);
qpci_device_enable(dev);
bar0 = qpci_iomap(dev, 0, NULL);
qpci_io_writeb(dev, bar0, VIRTIO_PCI_QUEUE_SEL, 2);
qpci_io_writel(dev, bar0, VIRTIO_PCI_QUEUE_PFN, 1);
g_free(dev);
}
static const char *qtest_qemu_storage_daemon_binary(void)
{
const char *qemu_storage_daemon_bin;
qemu_storage_daemon_bin = getenv("QTEST_QEMU_STORAGE_DAEMON_BINARY");
if (!qemu_storage_daemon_bin) {
fprintf(stderr, "Environment variable "
"QTEST_QEMU_STORAGE_DAEMON_BINARY required\n");
exit(0);
}
return qemu_storage_daemon_bin;
}
/* g_test_queue_destroy() cleanup function for files */
static void destroy_file(void *path)
{
unlink(path);
g_free(path);
qos_invalidate_command_line();
}
static char *drive_create(void)
{
int fd, ret;
/** vhost-user-blk won't recognize drive located in /tmp */
char *t_path = g_strdup("qtest.XXXXXX");
/** Create a temporary raw image */
fd = mkstemp(t_path);
g_assert_cmpint(fd, >=, 0);
ret = ftruncate(fd, TEST_IMAGE_SIZE);
g_assert_cmpint(ret, ==, 0);
close(fd);
g_test_queue_destroy(destroy_file, t_path);
return t_path;
}
static char *create_listen_socket(int *fd)
{
int tmp_fd;
char *path;
/* No race because our pid makes the path unique */
path = g_strdup_printf("/tmp/qtest-%d-sock.XXXXXX", getpid());
tmp_fd = mkstemp(path);
g_assert_cmpint(tmp_fd, >=, 0);
close(tmp_fd);
unlink(path);
*fd = qtest_socket_server(path);
g_test_queue_destroy(destroy_file, path);
return path;
}
/*
* g_test_queue_destroy() and qtest_add_abrt_handler() cleanup function for
* qemu-storage-daemon.
*/
static void quit_storage_daemon(void *data)
{
QemuStorageDaemonState *qsd = data;
int wstatus;
pid_t pid;
/*
* If we were invoked as a g_test_queue_destroy() cleanup function we need
* to remove the abrt handler to avoid being called again if the code below
* aborts. Also, we must not leave the abrt handler installed after
* cleanup.
*/
qtest_remove_abrt_handler(data);
/* Before quitting storage-daemon, quit qemu to avoid dubious messages */
qtest_kill_qemu(global_qtest);
kill(qsd->pid, SIGTERM);
pid = waitpid(qsd->pid, &wstatus, 0);
g_assert_cmpint(pid, ==, qsd->pid);
if (!WIFEXITED(wstatus)) {
fprintf(stderr, "%s: expected qemu-storage-daemon to exit\n",
__func__);
abort();
}
if (WEXITSTATUS(wstatus) != 0) {
fprintf(stderr, "%s: expected qemu-storage-daemon to exit "
"successfully, got %d\n",
__func__, WEXITSTATUS(wstatus));
abort();
}
g_free(data);
}
static void start_vhost_user_blk(GString *cmd_line, int vus_instances,
int num_queues)
{
const char *vhost_user_blk_bin = qtest_qemu_storage_daemon_binary();
int i;
gchar *img_path;
GString *storage_daemon_command = g_string_new(NULL);
QemuStorageDaemonState *qsd;
g_string_append_printf(storage_daemon_command,
"exec %s ",
vhost_user_blk_bin);
g_string_append_printf(cmd_line,
" -object memory-backend-memfd,id=mem,size=256M,share=on "
" -M memory-backend=mem -m 256M ");
for (i = 0; i < vus_instances; i++) {
int fd;
char *sock_path = create_listen_socket(&fd);
/* create image file */
img_path = drive_create();
g_string_append_printf(storage_daemon_command,
"--blockdev driver=file,node-name=disk%d,filename=%s "
"--export type=vhost-user-blk,id=disk%d,addr.type=unix,addr.path=%s,"
"node-name=disk%i,writable=on,num-queues=%d ",
i, img_path, i, sock_path, i, num_queues);
g_string_append_printf(cmd_line, "-chardev socket,id=char%d,path=%s ",
i + 1, sock_path);
}
g_test_message("starting vhost-user backend: %s",
storage_daemon_command->str);
pid_t pid = fork();
if (pid == 0) {
/*
* Close standard file descriptors so tap-driver.pl pipe detects when
* our parent terminates.
*/
close(0);
close(1);
open("/dev/null", O_RDONLY);
open("/dev/null", O_WRONLY);
execlp("/bin/sh", "sh", "-c", storage_daemon_command->str, NULL);
exit(1);
}
g_string_free(storage_daemon_command, true);
qsd = g_new(QemuStorageDaemonState, 1);
qsd->pid = pid;
/* Make sure qemu-storage-daemon is stopped */
qtest_add_abrt_handler(quit_storage_daemon, qsd);
g_test_queue_destroy(quit_storage_daemon, qsd);
}
static void *vhost_user_blk_test_setup(GString *cmd_line, void *arg)
{
start_vhost_user_blk(cmd_line, 1, 1);
return arg;
}
/*
* Setup for hotplug.
*
* Since the vhost-user server only serves one vhost-user client at a time,
* a second export is needed for the device that the hotplug test adds.
*
*/
static void *vhost_user_blk_hotplug_test_setup(GString *cmd_line, void *arg)
{
/* "-chardev socket,id=char2" is used for pci_hotplug*/
start_vhost_user_blk(cmd_line, 2, 1);
return arg;
}
static void *vhost_user_blk_multiqueue_test_setup(GString *cmd_line, void *arg)
{
start_vhost_user_blk(cmd_line, 2, 8);
return arg;
}
static void register_vhost_user_blk_test(void)
{
QOSGraphTestOptions opts = {
.before = vhost_user_blk_test_setup,
};
/*
* tests for vhost-user-blk and vhost-user-blk-pci
* The tests are borrowed from tests/virtio-blk-test.c. But some tests
* regarding block_resize don't work for vhost-user-blk.
* vhost-user-blk device doesn't have -drive, so tests containing
* block_resize are also abandoned,
* - config
* - resize
*/
qos_add_test("basic", "vhost-user-blk", basic, &opts);
qos_add_test("indirect", "vhost-user-blk", indirect, &opts);
qos_add_test("idx", "vhost-user-blk-pci", idx, &opts);
qos_add_test("nxvirtq", "vhost-user-blk-pci",
test_nonexistent_virtqueue, &opts);
opts.before = vhost_user_blk_hotplug_test_setup;
qos_add_test("hotplug", "vhost-user-blk-pci", pci_hotplug, &opts);
opts.before = vhost_user_blk_multiqueue_test_setup;
qos_add_test("multiqueue", "vhost-user-blk-pci", multiqueue, &opts);
}
libqos_init(register_vhost_user_blk_test);