pc,pci,virtio: features, fixes, tests

vhost user rng
 vdpa multiqueue
 Fixes, cleanups, new tests all over the place.
 
 Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 
 iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmFv7PAPHG1zdEByZWRo
 YXQuY29tAAoJECgfDbjSjVRpcgcIAIlw7wmyX6Z70aXhtcF5vH2tF2Q3ttx+4URT
 lCnTlRogQe2m5fIZSPWmQLj7Zd7GHdNVR6W9QflflPIjRt5EqNPx5CpVnJVRnjEZ
 ILWLGPRa9/Pm2JvBW/+hAp97JdJpRElMWf6NZcE1PTqvb91OmS+FspZ0W5T6fLgZ
 ljC2YaOOriJQdesyQECxtvYFlecFxglGSA3ecvNwwTiwIEG/zV5XJqA8h+nSJX5+
 DjPHsVk2oareGQ8pT3ChoAnodfwLzxaFQsdC/FzzIqdLWFL45g7XGtcexc2IRzw7
 H02Z2gNKv2iYv0qAaJlnCFKFx1dwwnGCAmF22xpByvOzXK8Ua6c=
 =+FJA
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

pc,pci,virtio: features, fixes, tests

vhost user rng
vdpa multiqueue
Fixes, cleanups, new tests all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Wed 20 Oct 2021 03:18:24 AM PDT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]

* remotes/mst/tags/for_upstream: (44 commits)
  tests/acpi/bios-tables-test: update DSDT blob for multifunction bridge test
  tests/acpi/pcihp: add unit tests for hotplug on multifunction bridges for q35
  tests/acpi/bios-tables-test: add and allow changes to a new q35 DSDT table blob
  pci: fix PCI resource reserve capability on BE
  vhost-vdpa: multiqueue support
  virtio-net: vhost control virtqueue support
  vhost: record the last virtqueue index for the virtio device
  virtio-net: use "queue_pairs" instead of "queues" when possible
  vhost-net: control virtqueue support
  net: introduce control client
  vhost-vdpa: let net_vhost_vdpa_init() returns NetClientState *
  vhost-vdpa: prepare for the multiqueue support
  vhost-vdpa: classify one time request
  vhost-vdpa: open device fd in net_init_vhost_vdpa()
  bios-tables-test: don't disassemble empty files
  rebuild-expected-aml.sh: allow partial target list
  qdev/qbus: remove failover specific code
  vhost-user-blk-test: pass vhost-user socket fds to QSD
  failover: fix a regression introduced by JSON'ification of -device
  vhost-user: fix duplicated notifier MR init
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
Richard Henderson 2021-10-20 06:10:51 -07:00
commit afc9fcde55
42 changed files with 1075 additions and 233 deletions

View File

@ -88,3 +88,4 @@ Emulated Devices
devices/usb.rst
devices/vhost-user.rst
devices/virtio-pmem.rst
devices/vhost-user-rng.rst

View File

@ -0,0 +1,39 @@
QEMU vhost-user-rng - RNG emulation
===================================
Background
----------
What follows builds on the material presented in vhost-user.rst - it should
be reviewed before moving forward with the content in this file.
Description
-----------
The vhost-user-rng device implementation was designed to work with a random
number generator daemon such as the one found in the vhost-device crate of
the rust-vmm project available on github [1].
[1]. https://github.com/rust-vmm/vhost-device
Examples
--------
The daemon should be started first:
::
host# vhost-device-rng --socket-path=rng.sock -c 1 -m 512 -p 1000
The QEMU invocation needs to create a chardev socket the device can
use to communicate as well as share the guest's memory over a memfd.
::
host# qemu-system \
-chardev socket,path=$(PATH)/rng.sock,id=rng0 \
-device vhost-user-rng-pci,chardev=rng0 \
-m 4096 \
-object memory-backend-file,id=mem,size=4G,mem-path=/dev/shm,share=on \
-numa node,memdev=mem \
...

View File

@ -33,13 +33,13 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
int vhost_net_start(VirtIODevice *dev,
NetClientState *ncs,
int total_queues)
int data_queue_pairs, int cvq)
{
return -ENOSYS;
}
void vhost_net_stop(VirtIODevice *dev,
NetClientState *ncs,
int total_queues)
int data_queue_pairs, int cvq)
{
}

View File

@ -231,9 +231,11 @@ fail:
return NULL;
}
static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index)
static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index,
int last_index)
{
net->dev.vq_index = vq_index;
net->dev.last_index = last_index;
}
static int vhost_net_start_one(struct vhost_net *net,
@ -315,25 +317,37 @@ static void vhost_net_stop_one(struct vhost_net *net,
}
int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
int total_queues)
int data_queue_pairs, int cvq)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
int total_notifiers = data_queue_pairs * 2 + cvq;
VirtIONet *n = VIRTIO_NET(dev);
int nvhosts = data_queue_pairs + cvq;
struct vhost_net *net;
int r, e, i;
int r, e, i, last_index = data_queue_pairs * 2;
NetClientState *peer;
if (!cvq) {
last_index -= 1;
}
if (!k->set_guest_notifiers) {
error_report("binding does not support guest notifiers");
return -ENOSYS;
}
for (i = 0; i < total_queues; i++) {
for (i = 0; i < nvhosts; i++) {
if (i < data_queue_pairs) {
peer = qemu_get_peer(ncs, i);
} else { /* Control Virtqueue */
peer = qemu_get_peer(ncs, n->max_queue_pairs);
}
peer = qemu_get_peer(ncs, i);
net = get_vhost_net(peer);
vhost_net_set_vq_index(net, i * 2);
vhost_net_set_vq_index(net, i * 2, last_index);
/* Suppress the masking guest notifiers on vhost user
* because vhost user doesn't interrupt masking/unmasking
@ -344,14 +358,18 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
}
}
r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
if (r < 0) {
error_report("Error binding guest notifier: %d", -r);
goto err;
}
for (i = 0; i < total_queues; i++) {
peer = qemu_get_peer(ncs, i);
for (i = 0; i < nvhosts; i++) {
if (i < data_queue_pairs) {
peer = qemu_get_peer(ncs, i);
} else {
peer = qemu_get_peer(ncs, n->max_queue_pairs);
}
r = vhost_net_start_one(get_vhost_net(peer), dev);
if (r < 0) {
@ -375,7 +393,7 @@ err_start:
peer = qemu_get_peer(ncs , i);
vhost_net_stop_one(get_vhost_net(peer), dev);
}
e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
e = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
if (e < 0) {
fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
fflush(stderr);
@ -385,18 +403,27 @@ err:
}
void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
int total_queues)
int data_queue_pairs, int cvq)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
VirtIONet *n = VIRTIO_NET(dev);
NetClientState *peer;
int total_notifiers = data_queue_pairs * 2 + cvq;
int nvhosts = data_queue_pairs + cvq;
int i, r;
for (i = 0; i < total_queues; i++) {
vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
for (i = 0; i < nvhosts; i++) {
if (i < data_queue_pairs) {
peer = qemu_get_peer(ncs, i);
} else {
peer = qemu_get_peer(ncs, n->max_queue_pairs);
}
vhost_net_stop_one(get_vhost_net(peer), dev);
}
r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
r = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
if (r < 0) {
fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
fflush(stderr);

View File

@ -54,7 +54,7 @@
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
/* for now, only allow larger queues; with virtio-1, guest can downsize */
/* for now, only allow larger queue_pairs; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
@ -131,7 +131,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
int ret = 0;
memset(&netcfg, 0 , sizeof(struct virtio_net_config));
virtio_stw_p(vdev, &netcfg.status, n->status);
virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queue_pairs);
virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
memcpy(netcfg.mac, n->mac, ETH_ALEN);
virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
@ -243,7 +243,8 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
NetClientState *nc = qemu_get_queue(n->nic);
int queues = n->multiqueue ? n->max_queues : 1;
int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
int cvq = n->max_ncs - n->max_queue_pairs;
if (!get_vhost_net(nc->peer)) {
return;
@ -266,7 +267,7 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
/* Any packets outstanding? Purge them to avoid touching rings
* when vhost is running.
*/
for (i = 0; i < queues; i++) {
for (i = 0; i < queue_pairs; i++) {
NetClientState *qnc = qemu_get_subqueue(n->nic, i);
/* Purge both directions: TX and RX. */
@ -285,14 +286,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
}
n->vhost_started = 1;
r = vhost_net_start(vdev, n->nic->ncs, queues);
r = vhost_net_start(vdev, n->nic->ncs, queue_pairs, cvq);
if (r < 0) {
error_report("unable to start vhost net: %d: "
"falling back on userspace virtio", -r);
n->vhost_started = 0;
}
} else {
vhost_net_stop(vdev, n->nic->ncs, queues);
vhost_net_stop(vdev, n->nic->ncs, queue_pairs, cvq);
n->vhost_started = 0;
}
}
@ -309,11 +310,11 @@ static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
}
static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
int queues, bool enable)
int queue_pairs, bool enable)
{
int i;
for (i = 0; i < queues; i++) {
for (i = 0; i < queue_pairs; i++) {
if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
enable) {
while (--i >= 0) {
@ -330,7 +331,7 @@ static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
int queues = n->multiqueue ? n->max_queues : 1;
int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
if (virtio_net_started(n, status)) {
/* Before using the device, we tell the network backend about the
@ -339,14 +340,14 @@ static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
* virtio-net code.
*/
n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
queues, true);
queue_pairs, true);
} else if (virtio_net_started(n, vdev->status)) {
/* After using the device, we need to reset the network backend to
* the default (guest native endianness), otherwise the guest may
* lose network connectivity if it is rebooted into a different
* endianness.
*/
virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
virtio_net_set_vnet_endian(vdev, n->nic->ncs, queue_pairs, false);
}
}
@ -368,12 +369,12 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
virtio_net_vnet_endian_status(n, status);
virtio_net_vhost_status(n, status);
for (i = 0; i < n->max_queues; i++) {
for (i = 0; i < n->max_queue_pairs; i++) {
NetClientState *ncs = qemu_get_subqueue(n->nic, i);
bool queue_started;
q = &n->vqs[i];
if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
if ((!n->multiqueue && i != 0) || i >= n->curr_queue_pairs) {
queue_status = 0;
} else {
queue_status = status;
@ -540,7 +541,7 @@ static void virtio_net_reset(VirtIODevice *vdev)
n->nouni = 0;
n->nobcast = 0;
/* multiqueue is disabled by default */
n->curr_queues = 1;
n->curr_queue_pairs = 1;
timer_del(n->announce_timer.tm);
n->announce_timer.round = 0;
n->status &= ~VIRTIO_NET_S_ANNOUNCE;
@ -556,7 +557,7 @@ static void virtio_net_reset(VirtIODevice *vdev)
memset(n->vlans, 0, MAX_VLAN >> 3);
/* Flush any async TX */
for (i = 0; i < n->max_queues; i++) {
for (i = 0; i < n->max_queue_pairs; i++) {
NetClientState *nc = qemu_get_subqueue(n->nic, i);
if (nc->peer) {
@ -610,7 +611,7 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
sizeof(struct virtio_net_hdr);
}
for (i = 0; i < n->max_queues; i++) {
for (i = 0; i < n->max_queue_pairs; i++) {
nc = qemu_get_subqueue(n->nic, i);
if (peer_has_vnet_hdr(n) &&
@ -655,7 +656,7 @@ static int peer_attach(VirtIONet *n, int index)
return 0;
}
if (n->max_queues == 1) {
if (n->max_queue_pairs == 1) {
return 0;
}
@ -681,7 +682,7 @@ static int peer_detach(VirtIONet *n, int index)
return tap_disable(nc->peer);
}
static void virtio_net_set_queues(VirtIONet *n)
static void virtio_net_set_queue_pairs(VirtIONet *n)
{
int i;
int r;
@ -690,8 +691,8 @@ static void virtio_net_set_queues(VirtIONet *n)
return;
}
for (i = 0; i < n->max_queues; i++) {
if (i < n->curr_queues) {
for (i = 0; i < n->max_queue_pairs; i++) {
if (i < n->curr_queue_pairs) {
r = peer_attach(n, i);
assert(!r);
} else {
@ -905,7 +906,7 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
virtio_net_apply_guest_offloads(n);
}
for (i = 0; i < n->max_queues; i++) {
for (i = 0; i < n->max_queue_pairs; i++) {
NetClientState *nc = qemu_get_subqueue(n->nic, i);
if (!get_vhost_net(nc->peer)) {
@ -1232,7 +1233,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
VirtIODevice *vdev = VIRTIO_DEVICE(n);
struct virtio_net_rss_config cfg;
size_t s, offset = 0, size_get;
uint16_t queues, i;
uint16_t queue_pairs, i;
struct {
uint16_t us;
uint8_t b;
@ -1274,7 +1275,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
}
n->rss_data.default_queue = do_rss ?
virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0;
if (n->rss_data.default_queue >= n->max_queues) {
if (n->rss_data.default_queue >= n->max_queue_pairs) {
err_msg = "Invalid default queue";
err_value = n->rss_data.default_queue;
goto error;
@ -1303,14 +1304,14 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
size_get = sizeof(temp);
s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get);
if (s != size_get) {
err_msg = "Can't get queues";
err_msg = "Can't get queue_pairs";
err_value = (uint32_t)s;
goto error;
}
queues = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queues;
if (queues == 0 || queues > n->max_queues) {
err_msg = "Invalid number of queues";
err_value = queues;
queue_pairs = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queue_pairs;
if (queue_pairs == 0 || queue_pairs > n->max_queue_pairs) {
err_msg = "Invalid number of queue_pairs";
err_value = queue_pairs;
goto error;
}
if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
@ -1325,7 +1326,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
}
if (!temp.b && !n->rss_data.hash_types) {
virtio_net_disable_rss(n);
return queues;
return queue_pairs;
}
offset += size_get;
size_get = temp.b;
@ -1358,7 +1359,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
trace_virtio_net_rss_enable(n->rss_data.hash_types,
n->rss_data.indirections_len,
temp.b);
return queues;
return queue_pairs;
error:
trace_virtio_net_rss_error(err_msg, err_value);
virtio_net_disable_rss(n);
@ -1369,15 +1370,15 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
struct iovec *iov, unsigned int iov_cnt)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
uint16_t queues;
uint16_t queue_pairs;
virtio_net_disable_rss(n);
if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) {
queues = virtio_net_handle_rss(n, iov, iov_cnt, false);
return queues ? VIRTIO_NET_OK : VIRTIO_NET_ERR;
queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, false);
return queue_pairs ? VIRTIO_NET_OK : VIRTIO_NET_ERR;
}
if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
queues = virtio_net_handle_rss(n, iov, iov_cnt, true);
queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, true);
} else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
struct virtio_net_ctrl_mq mq;
size_t s;
@ -1388,24 +1389,24 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
if (s != sizeof(mq)) {
return VIRTIO_NET_ERR;
}
queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
queue_pairs = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
} else {
return VIRTIO_NET_ERR;
}
if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
queues > n->max_queues ||
if (queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
queue_pairs > n->max_queue_pairs ||
!n->multiqueue) {
return VIRTIO_NET_ERR;
}
n->curr_queues = queues;
/* stop the backend before changing the number of queues to avoid handling a
n->curr_queue_pairs = queue_pairs;
/* stop the backend before changing the number of queue_pairs to avoid handling a
* disabled queue */
virtio_net_set_status(vdev, vdev->status);
virtio_net_set_queues(n);
virtio_net_set_queue_pairs(n);
return VIRTIO_NET_OK;
}
@ -1483,7 +1484,7 @@ static bool virtio_net_can_receive(NetClientState *nc)
return false;
}
if (nc->queue_index >= n->curr_queues) {
if (nc->queue_index >= n->curr_queue_pairs) {
return false;
}
@ -2763,11 +2764,11 @@ static void virtio_net_del_queue(VirtIONet *n, int index)
virtio_del_queue(vdev, index * 2 + 1);
}
static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
static void virtio_net_change_num_queue_pairs(VirtIONet *n, int new_max_queue_pairs)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
int old_num_queues = virtio_get_num_queues(vdev);
int new_num_queues = new_max_queues * 2 + 1;
int new_num_queues = new_max_queue_pairs * 2 + 1;
int i;
assert(old_num_queues >= 3);
@ -2800,12 +2801,12 @@ static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
int max = multiqueue ? n->max_queues : 1;
int max = multiqueue ? n->max_queue_pairs : 1;
n->multiqueue = multiqueue;
virtio_net_change_num_queues(n, max);
virtio_net_change_num_queue_pairs(n, max);
virtio_net_set_queues(n);
virtio_net_set_queue_pairs(n);
}
static int virtio_net_post_load_device(void *opaque, int version_id)
@ -2838,7 +2839,7 @@ static int virtio_net_post_load_device(void *opaque, int version_id)
*/
n->saved_guest_offloads = n->curr_guest_offloads;
virtio_net_set_queues(n);
virtio_net_set_queue_pairs(n);
/* Find the first multicast entry in the saved MAC filter */
for (i = 0; i < n->mac_table.in_use; i++) {
@ -2851,7 +2852,7 @@ static int virtio_net_post_load_device(void *opaque, int version_id)
/* nc.link_down can't be migrated, so infer link_down according
* to link status bit in n->status */
link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
for (i = 0; i < n->max_queues; i++) {
for (i = 0; i < n->max_queue_pairs; i++) {
qemu_get_subqueue(n->nic, i)->link_down = link_down;
}
@ -2916,9 +2917,9 @@ static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
},
};
static bool max_queues_gt_1(void *opaque, int version_id)
static bool max_queue_pairs_gt_1(void *opaque, int version_id)
{
return VIRTIO_NET(opaque)->max_queues > 1;
return VIRTIO_NET(opaque)->max_queue_pairs > 1;
}
static bool has_ctrl_guest_offloads(void *opaque, int version_id)
@ -2943,13 +2944,13 @@ static bool mac_table_doesnt_fit(void *opaque, int version_id)
struct VirtIONetMigTmp {
VirtIONet *parent;
VirtIONetQueue *vqs_1;
uint16_t curr_queues_1;
uint16_t curr_queue_pairs_1;
uint8_t has_ufo;
uint32_t has_vnet_hdr;
};
/* The 2nd and subsequent tx_waiting flags are loaded later than
* the 1st entry in the queues and only if there's more than one
* the 1st entry in the queue_pairs and only if there's more than one
* entry. We use the tmp mechanism to calculate a temporary
* pointer and count and also validate the count.
*/
@ -2959,9 +2960,9 @@ static int virtio_net_tx_waiting_pre_save(void *opaque)
struct VirtIONetMigTmp *tmp = opaque;
tmp->vqs_1 = tmp->parent->vqs + 1;
tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
if (tmp->parent->curr_queues == 0) {
tmp->curr_queues_1 = 0;
tmp->curr_queue_pairs_1 = tmp->parent->curr_queue_pairs - 1;
if (tmp->parent->curr_queue_pairs == 0) {
tmp->curr_queue_pairs_1 = 0;
}
return 0;
@ -2974,9 +2975,9 @@ static int virtio_net_tx_waiting_pre_load(void *opaque)
/* Reuse the pointer setup from save */
virtio_net_tx_waiting_pre_save(opaque);
if (tmp->parent->curr_queues > tmp->parent->max_queues) {
error_report("virtio-net: curr_queues %x > max_queues %x",
tmp->parent->curr_queues, tmp->parent->max_queues);
if (tmp->parent->curr_queue_pairs > tmp->parent->max_queue_pairs) {
error_report("virtio-net: curr_queue_pairs %x > max_queue_pairs %x",
tmp->parent->curr_queue_pairs, tmp->parent->max_queue_pairs);
return -EINVAL;
}
@ -2990,7 +2991,7 @@ static const VMStateDescription vmstate_virtio_net_tx_waiting = {
.pre_save = virtio_net_tx_waiting_pre_save,
.fields = (VMStateField[]) {
VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
curr_queues_1,
curr_queue_pairs_1,
vmstate_virtio_net_queue_tx_waiting,
struct VirtIONetQueue),
VMSTATE_END_OF_LIST()
@ -3132,9 +3133,9 @@ static const VMStateDescription vmstate_virtio_net_device = {
VMSTATE_UINT8(nobcast, VirtIONet),
VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
vmstate_virtio_net_has_ufo),
VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
VMSTATE_SINGLE_TEST(max_queue_pairs, VirtIONet, max_queue_pairs_gt_1, 0,
vmstate_info_uint16_equal, uint16_t),
VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
VMSTATE_UINT16_TEST(curr_queue_pairs, VirtIONet, max_queue_pairs_gt_1),
VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
vmstate_virtio_net_tx_waiting),
VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
@ -3299,20 +3300,42 @@ static bool failover_hide_primary_device(DeviceListener *listener,
if (!device_opts) {
return false;
}
standby_id = qdict_get_try_str(device_opts, "failover_pair_id");
if (!qdict_haskey(device_opts, "failover_pair_id")) {
return false;
}
if (!qdict_haskey(device_opts, "id")) {
error_setg(errp, "Device with failover_pair_id needs to have id");
return false;
}
standby_id = qdict_get_str(device_opts, "failover_pair_id");
if (g_strcmp0(standby_id, n->netclient_name) != 0) {
return false;
}
/*
* The hide helper can be called several times for a given device.
* Check there is only one primary for a virtio-net device but
* don't duplicate the qdict several times if it's called for the same
* device.
*/
if (n->primary_opts) {
error_setg(errp, "Cannot attach more than one primary device to '%s'",
n->netclient_name);
return false;
const char *old, *new;
/* devices with failover_pair_id always have an id */
old = qdict_get_str(n->primary_opts, "id");
new = qdict_get_str(device_opts, "id");
if (strcmp(old, new) != 0) {
error_setg(errp, "Cannot attach more than one primary device to "
"'%s': '%s' and '%s'", n->netclient_name, old, new);
return false;
}
} else {
n->primary_opts = qdict_clone_shallow(device_opts);
n->primary_opts_from_json = from_json;
}
n->primary_opts = qdict_clone_shallow(device_opts);
n->primary_opts_from_json = from_json;
/* failover_primary_hidden is set during feature negotiation */
return qatomic_read(&n->failover_primary_hidden);
}
@ -3389,16 +3412,30 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
return;
}
n->max_queues = MAX(n->nic_conf.peers.queues, 1);
if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
n->max_ncs = MAX(n->nic_conf.peers.queues, 1);
/*
* Figure out the datapath queue pairs since the backend could
* provide control queue via peers as well.
*/
if (n->nic_conf.peers.queues) {
for (i = 0; i < n->max_ncs; i++) {
if (n->nic_conf.peers.ncs[i]->is_datapath) {
++n->max_queue_pairs;
}
}
}
n->max_queue_pairs = MAX(n->max_queue_pairs, 1);
if (n->max_queue_pairs * 2 + 1 > VIRTIO_QUEUE_MAX) {
error_setg(errp, "Invalid number of queue pairs (= %" PRIu32 "), "
"must be a positive integer less than %d.",
n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
n->max_queue_pairs, (VIRTIO_QUEUE_MAX - 1) / 2);
virtio_cleanup(vdev);
return;
}
n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
n->curr_queues = 1;
n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queue_pairs);
n->curr_queue_pairs = 1;
n->tx_timeout = n->net_conf.txtimer;
if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
@ -3412,7 +3449,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
n->net_conf.tx_queue_size);
for (i = 0; i < n->max_queues; i++) {
for (i = 0; i < n->max_queue_pairs; i++) {
virtio_net_add_queue(n, i);
}
@ -3436,13 +3473,13 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
object_get_typename(OBJECT(dev)), dev->id, n);
}
for (i = 0; i < n->max_queues; i++) {
for (i = 0; i < n->max_queue_pairs; i++) {
n->nic->ncs[i].do_not_pad = true;
}
peer_test_vnet_hdr(n);
if (peer_has_vnet_hdr(n)) {
for (i = 0; i < n->max_queues; i++) {
for (i = 0; i < n->max_queue_pairs; i++) {
qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
}
n->host_hdr_len = sizeof(struct virtio_net_hdr);
@ -3484,7 +3521,7 @@ static void virtio_net_device_unrealize(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIONet *n = VIRTIO_NET(dev);
int i, max_queues;
int i, max_queue_pairs;
if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
virtio_net_unload_ebpf(n);
@ -3509,12 +3546,12 @@ static void virtio_net_device_unrealize(DeviceState *dev)
assert(n->primary_opts == NULL);
}
max_queues = n->multiqueue ? n->max_queues : 1;
for (i = 0; i < max_queues; i++) {
max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
for (i = 0; i < max_queue_pairs; i++) {
virtio_net_del_queue(n, i);
}
/* delete also control vq */
virtio_del_queue(vdev, max_queues * 2);
virtio_del_queue(vdev, max_queue_pairs * 2);
qemu_announce_timer_del(&n->announce_timer, false);
g_free(n->vqs);
qemu_del_nic(n->nic);

View File

@ -448,11 +448,11 @@ int pci_bridge_qemu_reserve_cap_init(PCIDevice *dev, int cap_offset,
PCIBridgeQemuCap cap = {
.len = cap_len,
.type = REDHAT_PCI_CAP_RESOURCE_RESERVE,
.bus_res = res_reserve.bus,
.io = res_reserve.io,
.mem = res_reserve.mem_non_pref,
.mem_pref_32 = res_reserve.mem_pref_32,
.mem_pref_64 = res_reserve.mem_pref_64
.bus_res = cpu_to_le32(res_reserve.bus),
.io = cpu_to_le64(res_reserve.io),
.mem = cpu_to_le32(res_reserve.mem_non_pref),
.mem_pref_32 = cpu_to_le32(res_reserve.mem_pref_32),
.mem_pref_64 = cpu_to_le64(res_reserve.mem_pref_64)
};
int offset = pci_add_capability(dev, PCI_CAP_ID_VNDR,

View File

@ -63,3 +63,8 @@ config VHOST_USER_I2C
bool
default y
depends on VIRTIO && VHOST_USER
config VHOST_USER_RNG
bool
default y
depends on VIRTIO && VHOST_USER

View File

@ -27,6 +27,8 @@ virtio_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu.c'))
virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c'))
virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c'))
virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_I2C'], if_true: files('vhost-user-i2c-pci.c'))
virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c'))
virtio_ss.add(when: ['CONFIG_VHOST_USER_RNG', 'CONFIG_VIRTIO_PCI'], if_true: files('vhost-user-rng-pci.c'))
virtio_pci_ss = ss.source_set()
virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c'))

View File

@ -52,6 +52,7 @@ vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index:
vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
vhost_vdpa_set_owner(void *dev) "dev: %p"
vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64
vhost_vdpa_get_iova_range(void *dev, uint64_t first, uint64_t last) "dev: %p first: 0x%"PRIx64" last: 0x%"PRIx64
# virtio.c
virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"

View File

@ -0,0 +1,79 @@
/*
* Vhost-user RNG virtio device PCI glue
*
* Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/vhost-user-rng.h"
#include "virtio-pci.h"
/*
 * PCI proxy object: wraps the vhost-user RNG virtio device so it can be
 * exposed on a PCI bus via the generic virtio-pci transport.
 */
struct VHostUserRNGPCI {
    VirtIOPCIProxy parent_obj; /* QOM parent; must be the first member */
    VHostUserRNG vdev;         /* the embedded virtio device proper */
};
typedef struct VHostUserRNGPCI VHostUserRNGPCI;

/* "-base" type name; virtio-pci derives the user-visible variants from it */
#define TYPE_VHOST_USER_RNG_PCI "vhost-user-rng-pci-base"

DECLARE_INSTANCE_CHECKER(VHostUserRNGPCI, VHOST_USER_RNG_PCI,
                         TYPE_VHOST_USER_RNG_PCI)
/*
 * User-configurable properties of the PCI proxy.  "vectors" is the MSI-X
 * vector count; left unspecified here, a default is chosen at realize time.
 */
static Property vhost_user_rng_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Realize the PCI proxy: pick an MSI-X vector count if the user did not
 * set one, then realize the embedded virtio RNG device on the proxy's bus.
 * Errors from qdev_realize() are propagated through errp.
 */
static void vhost_user_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VHostUserRNGPCI *rng_pci = VHOST_USER_RNG_PCI(vpci_dev);
    DeviceState *virtio_dev = DEVICE(&rng_pci->vdev);

    /* The device has a single virtqueue, so one vector is enough. */
    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = 1;
    }

    qdev_realize(virtio_dev, BUS(&vpci_dev->bus), errp);
}
/*
 * Class init for the PCI proxy: wire up the realize hook, register the
 * properties, and fill in the PCI identification fields.
 */
static void vhost_user_rng_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = vhost_user_rng_pci_realize;
    /*
     * NOTE(review): DEVICE_CATEGORY_INPUT looks inherited from the
     * vhost-user-i2c template this file was based on; an RNG device would
     * normally be DEVICE_CATEGORY_MISC — confirm the intended category.
     */
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
    device_class_set_props(dc, vhost_user_rng_pci_properties);
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}
/*
 * Instance init: construct the embedded TYPE_VHOST_USER_RNG child object
 * inside the proxy, as is standard for virtio-pci wrapper devices.
 */
static void vhost_user_rng_pci_instance_init(Object *obj)
{
    VHostUserRNGPCI *dev = VHOST_USER_RNG_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_USER_RNG);
}
/*
 * Type registration record.  Only a non-transitional name is provided:
 * vhost-user devices are virtio-1 (modern) only.
 */
static const VirtioPCIDeviceTypeInfo vhost_user_rng_pci_info = {
    .base_name = TYPE_VHOST_USER_RNG_PCI,
    .non_transitional_name = "vhost-user-rng-pci",
    .instance_size = sizeof(VHostUserRNGPCI),
    .instance_init = vhost_user_rng_pci_instance_init,
    .class_init = vhost_user_rng_pci_class_init,
};
/* Register the PCI proxy type with QOM at startup. */
static void vhost_user_rng_pci_register(void)
{
    virtio_pci_types_register(&vhost_user_rng_pci_info);
}

type_init(vhost_user_rng_pci_register);

289
hw/virtio/vhost-user-rng.c Normal file
View File

@ -0,0 +1,289 @@
/*
* Vhost-user RNG virtio device
*
* Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
*
* Implementation seriously tailored on vhost-user-i2c.c
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/vhost-user-rng.h"
#include "qemu/error-report.h"
#include "standard-headers/linux/virtio_ids.h"
/*
 * Bring up the vhost-user backend for the RNG device.
 *
 * The ordering is significant: host notifiers are enabled first, guest
 * notifiers are then bound at the bus level, and only afterwards is the
 * vhost device started.  The error labels unwind in the reverse order.
 * Failures are reported but not propagated — the device simply stays down.
 */
static void vu_rng_start(VirtIODevice *vdev)
{
    VHostUserRNG *rng = VHOST_USER_RNG(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;
    int i;

    /* The transport must support guest notifiers (e.g. virtio-pci does). */
    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return;
    }

    ret = vhost_dev_enable_notifiers(&rng->vhost_dev, vdev);
    if (ret < 0) {
        error_report("Error enabling host notifiers: %d", -ret);
        return;
    }

    ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, true);
    if (ret < 0) {
        error_report("Error binding guest notifier: %d", -ret);
        goto err_host_notifiers;
    }

    /* Pass through whatever feature set the guest negotiated. */
    rng->vhost_dev.acked_features = vdev->guest_features;
    ret = vhost_dev_start(&rng->vhost_dev, vdev);
    if (ret < 0) {
        error_report("Error starting vhost-user-rng: %d", -ret);
        goto err_guest_notifiers;
    }

    /*
     * guest_notifier_mask/pending not used yet, so just unmask
     * everything here. virtio-pci will do the right thing by
     * enabling/disabling irqfd.
     */
    for (i = 0; i < rng->vhost_dev.nvqs; i++) {
        vhost_virtqueue_mask(&rng->vhost_dev, vdev, i, false);
    }

    return;

err_guest_notifiers:
    k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false);
err_host_notifiers:
    vhost_dev_disable_notifiers(&rng->vhost_dev, vdev);
}
/*
 * Tear down the vhost-user backend: stop the vhost device, unbind the
 * guest notifiers, then disable the host notifiers — the exact reverse
 * of vu_rng_start().  A notifier-cleanup failure aborts the teardown
 * early (host notifiers are then intentionally left enabled).
 */
static void vu_rng_stop(VirtIODevice *vdev)
{
    VHostUserRNG *rng = VHOST_USER_RNG(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;

    if (!k->set_guest_notifiers) {
        return;
    }

    vhost_dev_stop(&rng->vhost_dev, vdev);

    ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false);
    if (ret < 0) {
        error_report("vhost guest notifier cleanup failed: %d", ret);
        return;
    }

    vhost_dev_disable_notifiers(&rng->vhost_dev, vdev);
}
/*
 * VirtioDeviceClass::set_status hook — start or stop the vhost backend so
 * that it tracks the guest's DRIVER_OK bit (and the VM running state).
 */
static void vu_rng_set_status(VirtIODevice *vdev, uint8_t status)
{
    VHostUserRNG *rng = VHOST_USER_RNG(vdev);
    bool start_requested = (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
                           vdev->vm_running;

    /* Already in the requested state: nothing to do */
    if (rng->vhost_dev.started == start_requested) {
        return;
    }

    if (start_requested) {
        vu_rng_start(vdev);
    } else {
        vu_rng_stop(vdev);
    }
}
/*
 * VirtioDeviceClass::get_features hook — the device defines no feature
 * bits of its own yet, so the offered set is returned untouched.
 */
static uint64_t vu_rng_get_features(VirtIODevice *vdev,
                                    uint64_t requested_features, Error **errp)
{
    uint64_t accepted = requested_features; /* pass through unchanged */

    return accepted;
}
static void vu_rng_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    /*
     * Intentionally empty: the vhost-user daemon services the request
     * queue.  Virtio core can still invoke this handler on its cleanup
     * path, so the stub must exist.
     */
}
/* Forward guest-notifier masking for virtqueue @idx to the vhost backend */
static void vu_rng_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
{
    vhost_virtqueue_mask(&VHOST_USER_RNG(vdev)->vhost_dev, vdev, idx, mask);
}
/* Ask the vhost backend whether virtqueue @idx has a pending notification */
static bool vu_rng_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    return vhost_virtqueue_pending(&VHOST_USER_RNG(vdev)->vhost_dev, idx);
}
/* Chardev OPENED handler: mark the backend connected and resume vhost */
static void vu_rng_connect(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserRNG *rng = VHOST_USER_RNG(vdev);

    if (!rng->connected) {
        rng->connected = true;

        /* Restore vhost state if the guest had already started the device */
        if (virtio_device_started(vdev, vdev->status)) {
            vu_rng_start(vdev);
        }
    }
}
/* Chardev CLOSED handler: mark the backend disconnected and stop vhost */
static void vu_rng_disconnect(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserRNG *rng = VHOST_USER_RNG(vdev);

    if (rng->connected) {
        rng->connected = false;

        /* Tear the vhost state down only if it was actually running */
        if (rng->vhost_dev.started) {
            vu_rng_stop(vdev);
        }
    }
}
/*
 * Chardev event callback registered in realize: tracks the daemon's
 * connect/disconnect transitions.  All other events are ignored.
 */
static void vu_rng_event(void *opaque, QEMUChrEvent event)
{
    DeviceState *dev = opaque;

    if (event == CHR_EVENT_OPENED) {
        vu_rng_connect(dev);
    } else if (event == CHR_EVENT_CLOSED) {
        vu_rng_disconnect(dev);
    }
    /* CHR_EVENT_BREAK / CHR_EVENT_MUX_IN / CHR_EVENT_MUX_OUT: ignored */
}
/*
 * Realize: validate the chardev, initialise vhost-user state, create the
 * virtio device with its single request queue and initialise the vhost
 * device.  On failure, everything created so far is torn down.
 *
 * Fix: the vqs array allocated with g_new0() was leaked on the
 * vhost_dev_init() failure path; it is now freed there.
 */
static void vu_rng_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserRNG *rng = VHOST_USER_RNG(dev);
    int ret;

    if (!rng->chardev.chr) {
        error_setg(errp, "missing chardev");
        return;
    }

    if (!vhost_user_init(&rng->vhost_user, &rng->chardev, errp)) {
        return;
    }

    virtio_init(vdev, "vhost-user-rng", VIRTIO_ID_RNG, 0);

    rng->req_vq = virtio_add_queue(vdev, 4, vu_rng_handle_output);
    if (!rng->req_vq) {
        error_setg_errno(errp, -1, "virtio_add_queue() failed");
        goto virtio_add_queue_failed;
    }

    rng->vhost_dev.nvqs = 1;
    rng->vhost_dev.vqs = g_new0(struct vhost_virtqueue, rng->vhost_dev.nvqs);
    /*
     * NOTE(review): vhost_dev_init() receives errp and the failure branch
     * sets errp again via error_setg_errno() — confirm vhost_dev_init()
     * does not always populate errp on failure, otherwise this would trip
     * the Error API's "already set" assertion.
     */
    ret = vhost_dev_init(&rng->vhost_dev, &rng->vhost_user,
                         VHOST_BACKEND_TYPE_USER, 0, errp);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "vhost_dev_init() failed");
        goto vhost_dev_init_failed;
    }

    /* React to daemon connect/disconnect from now on */
    qemu_chr_fe_set_handlers(&rng->chardev, NULL, NULL, vu_rng_event, NULL,
                             dev, NULL, true);

    return;

vhost_dev_init_failed:
    /* Fix: free the vqs array that was previously leaked on this path */
    g_free(rng->vhost_dev.vqs);
    rng->vhost_dev.vqs = NULL;
    virtio_delete_queue(rng->req_vq);
virtio_add_queue_failed:
    virtio_cleanup(vdev);
    vhost_user_cleanup(&rng->vhost_user);
}
/*
 * Unrealize: force the device to a stopped state, then release the
 * vhost/virtio resources in reverse order of their creation in
 * vu_rng_device_realize().
 */
static void vu_rng_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserRNG *rng = VHOST_USER_RNG(dev);

    /* status 0 makes vu_rng_set_status() stop the backend if running */
    vu_rng_set_status(vdev, 0);

    vhost_dev_cleanup(&rng->vhost_dev);
    g_free(rng->vhost_dev.vqs);
    rng->vhost_dev.vqs = NULL;
    virtio_delete_queue(rng->req_vq);
    virtio_cleanup(vdev);
    vhost_user_cleanup(&rng->vhost_user);
}
/* No guest-visible state is migrated yet: mark the device unmigratable */
static const VMStateDescription vu_rng_vmstate = {
    .name = "vhost-user-rng",
    .unmigratable = 1,
};

/* User-configurable properties: the vhost-user daemon chardev socket */
static Property vu_rng_properties[] = {
    DEFINE_PROP_CHR("chardev", VHostUserRNG, chardev),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Class init: wire up device properties, migration blocker and the
 * virtio device callbacks implemented above.
 */
static void vu_rng_class_init(ObjectClass *klass, void *data)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, vu_rng_properties);
    dc->vmsd = &vu_rng_vmstate;
    /* NOTE(review): INPUT category mirrors vhost-user-i2c — confirm fit */
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);

    vdc->realize = vu_rng_device_realize;
    vdc->unrealize = vu_rng_device_unrealize;
    vdc->get_features = vu_rng_get_features;
    vdc->set_status = vu_rng_set_status;
    vdc->guest_notifier_mask = vu_rng_guest_notifier_mask;
    vdc->guest_notifier_pending = vu_rng_guest_notifier_pending;
}
/* QOM type registration for the vhost-user-rng virtio device */
static const TypeInfo vu_rng_info = {
    .name = TYPE_VHOST_USER_RNG,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VHostUserRNG),
    .class_init = vu_rng_class_init,
};

static void vu_rng_register_types(void)
{
    type_register_static(&vu_rng_info);
}

type_init(vu_rng_register_types)

View File

@ -1526,8 +1526,9 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
user, queue_idx);
memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
page_size, addr);
if (!n->mr.ram) /* Don't init again after suspend. */
memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
page_size, addr);
g_free(name);
if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {

View File

@ -24,19 +24,49 @@
#include "trace.h"
#include "qemu-common.h"
static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
/*
 * Return one past the end of the section. Be careful with uint64_t
 * conversions!
 */
static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
{
return (!memory_region_is_ram(section->mr) &&
!memory_region_is_iommu(section->mr)) ||
/* vhost-vDPA doesn't allow MMIO to be mapped */
memory_region_is_ram_device(section->mr) ||
/*
* Sizing an enabled 64-bit BAR can cause spurious mappings to
* addresses in the upper part of the 64-bit address space. These
* are never accessed by the CPU and beyond the address width of
* some IOMMU hardware. TODO: VDPA should tell us the IOMMU width.
*/
section->offset_within_address_space & (1ULL << 63);
Int128 llend = int128_make64(section->offset_within_address_space);
llend = int128_add(llend, section->size);
llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
return llend;
}
static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
uint64_t iova_min,
uint64_t iova_max)
{
Int128 llend;
if ((!memory_region_is_ram(section->mr) &&
!memory_region_is_iommu(section->mr)) ||
memory_region_is_protected(section->mr) ||
/* vhost-vDPA doesn't allow MMIO to be mapped */
memory_region_is_ram_device(section->mr)) {
return true;
}
if (section->offset_within_address_space < iova_min) {
error_report("RAM section out of device range (min=0x%" PRIx64
", addr=0x%" HWADDR_PRIx ")",
iova_min, section->offset_within_address_space);
return true;
}
llend = vhost_vdpa_section_end(section);
if (int128_gt(llend, int128_make64(iova_max))) {
error_report("RAM section out of device range (max=0x%" PRIx64
", end addr=0x%" PRIx64 ")",
iova_max, int128_get64(llend));
return true;
}
return false;
}
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
@ -148,7 +178,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
void *vaddr;
int ret;
if (vhost_vdpa_listener_skipped_section(section)) {
if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
v->iova_range.last)) {
return;
}
@ -159,10 +190,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
}
iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
llend = int128_make64(section->offset_within_address_space);
llend = int128_add(llend, section->size);
llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
llend = vhost_vdpa_section_end(section);
if (int128_ge(int128_make64(iova), llend)) {
return;
}
@ -209,7 +237,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
Int128 llend, llsize;
int ret;
if (vhost_vdpa_listener_skipped_section(section)) {
if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
v->iova_range.last)) {
return;
}
@ -220,9 +249,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
}
iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
llend = int128_make64(section->offset_within_address_space);
llend = int128_add(llend, section->size);
llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
llend = vhost_vdpa_section_end(section);
trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));
@ -279,6 +306,26 @@ static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
}
static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
{
int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
&v->iova_range);
if (ret != 0) {
v->iova_range.first = 0;
v->iova_range.last = UINT64_MAX;
}
trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
v->iova_range.last);
}
static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
{
struct vhost_vdpa *v = dev->opaque;
return v->index != 0;
}
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
struct vhost_vdpa *v;
@ -291,6 +338,12 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
v->listener = vhost_vdpa_memory_listener;
v->msg_type = VHOST_IOTLB_MSG_V2;
vhost_vdpa_get_iova_range(v);
if (vhost_vdpa_one_time_request(dev)) {
return 0;
}
vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
VIRTIO_CONFIG_S_DRIVER);
@ -401,6 +454,10 @@ static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
struct vhost_memory *mem)
{
if (vhost_vdpa_one_time_request(dev)) {
return 0;
}
trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
@ -424,6 +481,11 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev,
uint64_t features)
{
int ret;
if (vhost_vdpa_one_time_request(dev)) {
return 0;
}
trace_vhost_vdpa_set_features(dev, features);
ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
uint8_t status = 0;
@ -448,9 +510,12 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
}
features &= f;
r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
if (r) {
return -EFAULT;
if (vhost_vdpa_one_time_request(dev)) {
r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
if (r) {
return -EFAULT;
}
}
dev->backend_cap = features;
@ -481,8 +546,8 @@ static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
return idx - dev->vq_index;
trace_vhost_vdpa_get_vq_index(dev, idx, idx);
return idx;
}
static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
@ -559,11 +624,21 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
struct vhost_vdpa *v = dev->opaque;
trace_vhost_vdpa_dev_start(dev, started);
if (started) {
vhost_vdpa_host_notifiers_init(dev);
vhost_vdpa_set_vring_ready(dev);
} else {
vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
}
if (dev->vq_index + dev->nvqs != dev->last_index) {
return 0;
}
if (started) {
uint8_t status = 0;
memory_listener_register(&v->listener, &address_space_memory);
vhost_vdpa_host_notifiers_init(dev);
vhost_vdpa_set_vring_ready(dev);
vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);
@ -572,7 +647,6 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
vhost_vdpa_reset_device(dev);
vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
VIRTIO_CONFIG_S_DRIVER);
vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
memory_listener_unregister(&v->listener);
return 0;
@ -582,6 +656,10 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
struct vhost_log *log)
{
if (vhost_vdpa_one_time_request(dev)) {
return 0;
}
trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
log->log);
return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
@ -647,6 +725,10 @@ static int vhost_vdpa_get_features(struct vhost_dev *dev,
static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
if (vhost_vdpa_one_time_request(dev)) {
return 0;
}
trace_vhost_vdpa_set_owner(dev);
return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

View File

@ -98,9 +98,7 @@ static void virtio_iommu_pci_instance_init(Object *obj)
}
static const VirtioPCIDeviceTypeInfo virtio_iommu_pci_info = {
.base_name = TYPE_VIRTIO_IOMMU_PCI,
.generic_name = "virtio-iommu-pci",
.non_transitional_name = "virtio-iommu-pci-non-transitional",
.generic_name = TYPE_VIRTIO_IOMMU_PCI,
.instance_size = sizeof(VirtIOIOMMUPCI),
.instance_init = virtio_iommu_pci_instance_init,
.class_init = virtio_iommu_pci_class_init,

View File

@ -0,0 +1,33 @@
/*
* Vhost-user RNG virtio device
*
* Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
/*
 * Fix: the previous guard `_QEMU_VHOST_USER_RNG_H` (leading underscore
 * followed by an uppercase letter) is a reserved identifier per the C
 * standard; renamed to a non-reserved form.
 */
#ifndef QEMU_VHOST_USER_RNG_H
#define QEMU_VHOST_USER_RNG_H

#include "hw/virtio/virtio.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
#include "chardev/char-fe.h"

#define TYPE_VHOST_USER_RNG "vhost-user-rng"
OBJECT_DECLARE_SIMPLE_TYPE(VHostUserRNG, VHOST_USER_RNG)

/* Per-instance state of the vhost-user-rng virtio device */
struct VHostUserRNG {
    /*< private >*/
    VirtIODevice parent;
    CharBackend chardev;              /* socket to the vhost-user daemon */
    /* NOTE(review): vhost_vq appears unused by the .c file — confirm */
    struct vhost_virtqueue *vhost_vq;
    struct vhost_dev vhost_dev;       /* vhost backend device state */
    VhostUserState vhost_user;        /* vhost-user protocol state */
    VirtQueue *req_vq;                /* single request virtqueue */
    bool connected;                   /* daemon chardev currently open */

    /*< public >*/
};

#endif /* QEMU_VHOST_USER_RNG_H */

View File

@ -13,6 +13,7 @@
#define HW_VIRTIO_VHOST_VDPA_H
#include "hw/virtio/virtio.h"
#include "standard-headers/linux/vhost_types.h"
typedef struct VhostVDPAHostNotifier {
MemoryRegion mr;
@ -21,9 +22,11 @@ typedef struct VhostVDPAHostNotifier {
typedef struct vhost_vdpa {
int device_fd;
int index;
uint32_t msg_type;
bool iotlb_batch_begin_sent;
MemoryListener listener;
struct vhost_vdpa_iova_range iova_range;
struct vhost_dev *dev;
VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
} VhostVDPA;

View File

@ -74,6 +74,8 @@ struct vhost_dev {
unsigned int nvqs;
/* the first virtqueue which would be used by this vhost dev */
int vq_index;
/* the last vq index for the virtio device (not vhost) */
int last_index;
/* if non-zero, minimum required value for max_queues */
int num_queues;
uint64_t features;

View File

@ -26,7 +26,7 @@
#include "qom/object.h"
#define TYPE_VIRTIO_IOMMU "virtio-iommu-device"
#define TYPE_VIRTIO_IOMMU_PCI "virtio-iommu-device-base"
#define TYPE_VIRTIO_IOMMU_PCI "virtio-iommu-pci"
OBJECT_DECLARE_SIMPLE_TYPE(VirtIOIOMMU, VIRTIO_IOMMU)
#define TYPE_VIRTIO_IOMMU_MEMORY_REGION "virtio-iommu-memory-region"

View File

@ -194,8 +194,9 @@ struct VirtIONet {
NICConf nic_conf;
DeviceState *qdev;
int multiqueue;
uint16_t max_queues;
uint16_t curr_queues;
uint16_t max_queue_pairs;
uint16_t curr_queue_pairs;
uint16_t max_ncs;
size_t config_size;
char *netclient_name;
char *netclient_type;

View File

@ -105,6 +105,7 @@ struct NetClientState {
int vnet_hdr_len;
bool is_netdev;
bool do_not_pad; /* do not pad to the minimum ethernet frame length */
bool is_datapath;
QTAILQ_HEAD(, NetFilterState) filters;
};
@ -136,6 +137,10 @@ NetClientState *qemu_new_net_client(NetClientInfo *info,
NetClientState *peer,
const char *model,
const char *name);
NetClientState *qemu_new_net_control_client(NetClientInfo *info,
NetClientState *peer,
const char *model,
const char *name);
NICState *qemu_new_nic(NetClientInfo *info,
NICConf *conf,
const char *model,

View File

@ -21,8 +21,10 @@ typedef struct VhostNetOptions {
uint64_t vhost_net_get_max_queues(VHostNetState *net);
struct vhost_net *vhost_net_init(VhostNetOptions *options);
int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
int data_queue_pairs, int cvq);
void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
int data_queue_pairs, int cvq);
void vhost_net_cleanup(VHostNetState *net);

View File

@ -75,6 +75,12 @@ else
kvm_targets = []
endif
kvm_targets_c = ''
if not get_option('kvm').disabled() and targetos == 'linux'
kvm_targets_c = '"' + '" ,"'.join(kvm_targets) + '"'
endif
config_host_data.set('CONFIG_KVM_TARGETS', kvm_targets_c)
accelerator_targets = { 'CONFIG_KVM': kvm_targets }
if cpu in ['aarch64']

View File

@ -239,7 +239,8 @@ static void qemu_net_client_setup(NetClientState *nc,
NetClientState *peer,
const char *model,
const char *name,
NetClientDestructor *destructor)
NetClientDestructor *destructor,
bool is_datapath)
{
nc->info = info;
nc->model = g_strdup(model);
@ -258,6 +259,7 @@ static void qemu_net_client_setup(NetClientState *nc,
nc->incoming_queue = qemu_new_net_queue(qemu_deliver_packet_iov, nc);
nc->destructor = destructor;
nc->is_datapath = is_datapath;
QTAILQ_INIT(&nc->filters);
}
@ -272,7 +274,23 @@ NetClientState *qemu_new_net_client(NetClientInfo *info,
nc = g_malloc0(info->size);
qemu_net_client_setup(nc, info, peer, model, name,
qemu_net_client_destructor);
qemu_net_client_destructor, true);
return nc;
}
NetClientState *qemu_new_net_control_client(NetClientInfo *info,
NetClientState *peer,
const char *model,
const char *name)
{
NetClientState *nc;
assert(info->size >= sizeof(NetClientState));
nc = g_malloc0(info->size);
qemu_net_client_setup(nc, info, peer, model, name,
qemu_net_client_destructor, false);
return nc;
}
@ -297,7 +315,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
for (i = 0; i < queues; i++) {
qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
NULL);
NULL, true);
nic->ncs[i].queue_index = i;
}

View File

@ -18,6 +18,7 @@
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
@ -51,6 +52,14 @@ const int vdpa_feature_bits[] = {
VIRTIO_NET_F_HOST_UFO,
VIRTIO_NET_F_MRG_RXBUF,
VIRTIO_NET_F_MTU,
VIRTIO_NET_F_CTRL_RX,
VIRTIO_NET_F_CTRL_RX_EXTRA,
VIRTIO_NET_F_CTRL_VLAN,
VIRTIO_NET_F_GUEST_ANNOUNCE,
VIRTIO_NET_F_CTRL_MAC_ADDR,
VIRTIO_NET_F_RSS,
VIRTIO_NET_F_MQ,
VIRTIO_NET_F_CTRL_VQ,
VIRTIO_F_IOMMU_PLATFORM,
VIRTIO_F_RING_PACKED,
VIRTIO_NET_F_RSS,
@ -81,7 +90,8 @@ static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
return ret;
}
static int vhost_vdpa_add(NetClientState *ncs, void *be)
static int vhost_vdpa_add(NetClientState *ncs, void *be,
int queue_pair_index, int nvqs)
{
VhostNetOptions options;
struct vhost_net *net = NULL;
@ -94,7 +104,7 @@ static int vhost_vdpa_add(NetClientState *ncs, void *be)
options.net_backend = ncs;
options.opaque = be;
options.busyloop_timeout = 0;
options.nvqs = 2;
options.nvqs = nvqs;
net = vhost_net_init(&options);
if (!net) {
@ -169,36 +179,125 @@ static NetClientInfo net_vhost_vdpa_info = {
.check_peer_type = vhost_vdpa_check_peer_type,
};
static int net_vhost_vdpa_init(NetClientState *peer, const char *device,
const char *name, const char *vhostdev)
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
const char *device,
const char *name,
int vdpa_device_fd,
int queue_pair_index,
int nvqs,
bool is_datapath)
{
NetClientState *nc = NULL;
VhostVDPAState *s;
int vdpa_device_fd = -1;
int ret = 0;
assert(name);
nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device, name);
if (is_datapath) {
nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
name);
} else {
nc = qemu_new_net_control_client(&net_vhost_vdpa_info, peer,
device, name);
}
snprintf(nc->info_str, sizeof(nc->info_str), TYPE_VHOST_VDPA);
s = DO_UPCAST(VhostVDPAState, nc, nc);
vdpa_device_fd = qemu_open_old(vhostdev, O_RDWR);
if (vdpa_device_fd == -1) {
return -errno;
}
s->vhost_vdpa.device_fd = vdpa_device_fd;
ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa);
s->vhost_vdpa.index = queue_pair_index;
ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
if (ret) {
qemu_close(vdpa_device_fd);
qemu_del_net_client(nc);
return NULL;
}
return ret;
return nc;
}
/*
 * Probe the vhost-vDPA device for its maximum number of queue pairs.
 *
 * Sets *has_cvq to 1 when the device offers VIRTIO_NET_F_CTRL_VQ,
 * 0 otherwise.  Returns the device's max_virtqueue_pairs config field
 * when VIRTIO_NET_F_MQ is offered, 1 otherwise.
 *
 * Fix: `config` (g_malloc0) was leaked on both the VHOST_VDPA_GET_CONFIG
 * failure path and the success path; it is now freed on both.
 */
static int vhost_vdpa_get_max_queue_pairs(int fd, int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    struct vhost_vdpa_config *config;
    __virtio16 *max_queue_pairs;
    uint64_t features;
    int ret;

    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
    if (ret) {
        error_setg(errp, "Fail to query features from vhost-vDPA device");
        return ret;
    }

    *has_cvq = (features & (1ULL << VIRTIO_NET_F_CTRL_VQ)) ? 1 : 0;

    if (features & (1ULL << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            g_free(config); /* fix: was leaked on this error path */
            /*
             * NOTE(review): ioctl() returns -1 here, so -ret is +1 and the
             * caller's `< 0` check never fires — confirm intended sign.
             */
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;
        ret = lduw_le_p(max_queue_pairs);
        g_free(config); /* fix: was leaked on the success path too */
        return ret;
    }

    return 1;
}
/*
 * Netdev init entry point for vhost-vdpa: open the vDPA device, probe the
 * number of queue pairs, create one data net client per queue pair and,
 * when the device offers a control virtqueue, one extra control client.
 *
 * Fixes: the error path deleted only ncs[0], leaking every other client
 * created before the failure — it now deletes all of them; missing braces
 * added on the single-statement `if` bodies (QEMU coding style).
 */
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    int vdpa_device_fd;
    NetClientState **ncs, *nc;
    int queue_pairs, i, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;

    vdpa_device_fd = qemu_open_old(opts->vhostdev, O_RDWR);
    if (vdpa_device_fd == -1) {
        return -errno;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    /* One data client per queue pair, each driving 2 vqs (rx + tx) */
    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true);
        if (!ncs[i]) {
            goto err;
        }
    }

    /* Control virtqueue gets its own single-vq, non-datapath client */
    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false);
        if (!nc) {
            goto err;
        }
    }

    g_free(ncs);
    return 0;

err:
    /* Fix: delete every client created so far, not just ncs[0] */
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }
    qemu_close(vdpa_device_fd);
    g_free(ncs);
    return -1;
}

View File

@ -639,19 +639,13 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts,
}
}
if (qdict_haskey(opts, "failover_pair_id")) {
if (!qdict_haskey(opts, "id")) {
error_setg(errp, "Device with failover_pair_id don't have id");
return NULL;
}
if (qdev_should_hide_device(opts, from_json, errp)) {
if (bus && !qbus_is_hotpluggable(bus)) {
error_setg(errp, QERR_BUS_NO_HOTPLUG, bus->name);
}
return NULL;
} else if (*errp) {
return NULL;
if (qdev_should_hide_device(opts, from_json, errp)) {
if (bus && !qbus_is_hotpluggable(bus)) {
error_setg(errp, QERR_BUS_NO_HOTPLUG, bus->name);
}
return NULL;
} else if (*errp) {
return NULL;
}
if (phase_check(PHASE_MACHINE_READY) && bus && !qbus_is_hotpluggable(bus)) {

View File

@ -816,6 +816,7 @@ vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
shadow_regions[j].gpa = dev->regions[i].gpa;
shadow_regions[j].size = dev->regions[i].size;
shadow_regions[j].qva = dev->regions[i].qva;
shadow_regions[j].mmap_addr = dev->regions[i].mmap_addr;
shadow_regions[j].mmap_offset = dev->regions[i].mmap_offset;
j++;
} else {

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -12,7 +12,7 @@
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
qemu_bins="./qemu-system-x86_64 ./qemu-system-aarch64"
qemu_arches="x86_64 aarch64"
if [ ! -e "tests/qtest/bios-tables-test" ]; then
echo "Test: bios-tables-test is required! Run make check before this script."
@ -20,6 +20,26 @@ if [ ! -e "tests/qtest/bios-tables-test" ]; then
exit 1;
fi
if grep TARGET_DIRS= config-host.mak; then
for arch in $qemu_arches; do
if grep TARGET_DIRS= config-host.mak | grep "$arch"-softmmu;
then
qemu_bins="$qemu_bins ./qemu-system-$arch"
fi
done
else
echo "config-host.mak missing!"
echo "Run this script from the build directory."
exit 1;
fi
if [ -z "$qemu_bins" ]; then
echo "Only the following architectures are currently supported: $qemu_arches"
echo "None of these configured!"
echo "To fix, run configure --target-list=x86_64-softmmu,aarch64-softmmu"
exit 1;
fi
for qemu in $qemu_bins; do
if [ ! -e $qemu ]; then
echo "Run 'make' to build the following QEMU executables: $qemu_bins"

View File

@ -98,6 +98,20 @@ void acpi_fetch_table(QTestState *qts, uint8_t **aml, uint32_t *aml_len,
ACPI_ASSERT_CMP(**aml, sig);
}
if (verify_checksum) {
if (acpi_calc_checksum(*aml, *aml_len)) {
gint fd, ret;
char *fname = NULL;
GError *error = NULL;
fprintf(stderr, "Invalid '%.4s'(%d)\n", *aml, *aml_len);
fd = g_file_open_tmp("malformed-XXXXXX.dat", &fname, &error);
g_assert_no_error(error);
fprintf(stderr, "Dumping invalid table into '%s'\n", fname);
ret = qemu_write_full(fd, *aml, *aml_len);
g_assert(ret == *aml_len);
close(fd);
g_free(fname);
}
g_assert(!acpi_calc_checksum(*aml, *aml_len));
}
}

View File

@ -26,21 +26,6 @@
" 'arguments': { 'type': 'full', "
#define QUERY_TAIL "}}"
static bool kvm_enabled(QTestState *qts)
{
QDict *resp, *qdict;
bool enabled;
resp = qtest_qmp(qts, "{ 'execute': 'query-kvm' }");
g_assert(qdict_haskey(resp, "return"));
qdict = qdict_get_qdict(resp, "return");
g_assert(qdict_haskey(qdict, "enabled"));
enabled = qdict_get_bool(qdict, "enabled");
qobject_unref(resp);
return enabled;
}
static QDict *do_query_no_props(QTestState *qts, const char *cpu_type)
{
return qtest_qmp(qts, QUERY_HEAD "'model': { 'name': %s }"
@ -506,14 +491,6 @@ static void test_query_cpu_model_expansion_kvm(const void *data)
qts = qtest_init(MACHINE_KVM "-cpu max");
/*
* These tests target the 'host' CPU type, so KVM must be enabled.
*/
if (!kvm_enabled(qts)) {
qtest_quit(qts);
return;
}
/* Enabling and disabling kvm-no-adjvtime should always work. */
assert_has_feature_disabled(qts, "host", "kvm-no-adjvtime");
assert_set_feature(qts, "host", "kvm-no-adjvtime", true);
@ -637,7 +614,11 @@ int main(int argc, char **argv)
* order avoid attempting to run an AArch32 QEMU with KVM on
* AArch64 hosts. That won't work and isn't easy to detect.
*/
if (g_str_equal(qtest_get_arch(), "aarch64")) {
if (g_str_equal(qtest_get_arch(), "aarch64") && qtest_has_accel("kvm")) {
/*
* This tests target the 'host' CPU type, so register it only if
* KVM is available.
*/
qtest_add_data_func("/arm/kvm/query-cpu-model-expansion",
NULL, test_query_cpu_model_expansion_kvm);
}

View File

@ -271,19 +271,28 @@ static void dump_aml_files(test_data *data, bool rebuild)
}
}
static bool create_tmp_asl(AcpiSdtTable *sdt)
{
GError *error = NULL;
gint fd;
fd = g_file_open_tmp("asl-XXXXXX.dsl", &sdt->asl_file, &error);
g_assert_no_error(error);
close(fd);
return false;
}
static bool load_asl(GArray *sdts, AcpiSdtTable *sdt)
{
AcpiSdtTable *temp;
GError *error = NULL;
GString *command_line = g_string_new(iasl);
gint fd;
gchar *out, *out_err;
gboolean ret;
int i;
fd = g_file_open_tmp("asl-XXXXXX.dsl", &sdt->asl_file, &error);
g_assert_no_error(error);
close(fd);
create_tmp_asl(sdt);
/* build command line */
g_string_append_printf(command_line, " -p %s ", sdt->asl_file);
@ -463,11 +472,20 @@ static void test_acpi_asl(test_data *data)
err = load_asl(data->tables, sdt);
asl = normalize_asl(sdt->asl);
exp_err = load_asl(exp_data.tables, exp_sdt);
exp_asl = normalize_asl(exp_sdt->asl);
/*
* If expected file is empty - it's likely that it was a stub just
* created for step 1 above: we do want to decompile the actual one.
*/
if (exp_sdt->aml_len) {
exp_err = load_asl(exp_data.tables, exp_sdt);
exp_asl = normalize_asl(exp_sdt->asl);
} else {
exp_err = create_tmp_asl(exp_sdt);
exp_asl = g_string_new("");
}
/* TODO: check for warnings */
g_assert(!err || exp_err);
g_assert(!err || exp_err || !exp_sdt->aml_len);
if (g_strcmp0(asl->str, exp_asl->str)) {
sdt->tmp_files_retain = true;
@ -722,13 +740,6 @@ static void test_acpi_one(const char *params, test_data *data)
char *args;
bool use_uefi = data->uefi_fl1 && data->uefi_fl2;
#ifndef CONFIG_TCG
if (data->tcg_only) {
g_test_skip("TCG disabled, skipping ACPI tcg_only test");
return;
}
#endif /* CONFIG_TCG */
args = test_acpi_create_args(data, params, use_uefi);
data->qts = qtest_init(args);
test_acpi_load_tables(data, use_uefi);
@ -859,6 +870,23 @@ static void test_acpi_q35_tcg_bridge(void)
free_test_data(&data);
}
static void test_acpi_q35_multif_bridge(void)
{
test_data data = {
.machine = MACHINE_Q35,
.variant = ".multi-bridge",
};
test_acpi_one("-device pcie-root-port,id=pcie-root-port-0,"
"multifunction=on,"
"port=0x0,chassis=1,addr=0x2,bus=pcie.0 "
"-device pcie-root-port,id=pcie-root-port-1,"
"port=0x1,chassis=2,addr=0x3.0x1,bus=pcie.0 "
"-device virtio-balloon,id=balloon0,"
"bus=pcie.0,addr=0x4.0x2",
&data);
free_test_data(&data);
}
static void test_acpi_q35_tcg_mmio64(void)
{
test_data data = {
@ -1033,6 +1061,19 @@ static void test_acpi_q35_tcg_numamem(void)
free_test_data(&data);
}
static void test_acpi_q35_kvm_xapic(void)
{
test_data data;
memset(&data, 0, sizeof(data));
data.machine = MACHINE_Q35;
data.variant = ".xapic";
test_acpi_one(" -object memory-backend-ram,id=ram0,size=128M"
" -numa node -numa node,memdev=ram0"
" -machine kernel-irqchip=on -smp 1,maxcpus=288", &data);
free_test_data(&data);
}
static void test_acpi_q35_tcg_nosmm(void)
{
test_data data;
@ -1077,6 +1118,30 @@ static void test_acpi_q35_tcg_nohpet(void)
free_test_data(&data);
}
static void test_acpi_q35_kvm_dmar(void)
{
test_data data;
memset(&data, 0, sizeof(data));
data.machine = MACHINE_Q35;
data.variant = ".dmar";
test_acpi_one("-machine kernel-irqchip=split -accel kvm"
" -device intel-iommu,intremap=on,device-iotlb=on", &data);
free_test_data(&data);
}
static void test_acpi_q35_tcg_ivrs(void)
{
test_data data;
memset(&data, 0, sizeof(data));
data.machine = MACHINE_Q35;
data.variant = ".ivrs";
data.tcg_only = true,
test_acpi_one(" -device amd-iommu", &data);
free_test_data(&data);
}
static void test_acpi_piix4_tcg_numamem(void)
{
test_data data;
@ -1393,9 +1458,6 @@ static void test_acpi_virt_tcg(void)
.scan_len = 128ULL * 1024 * 1024,
};
test_acpi_one("-cpu cortex-a57", &data);
free_test_data(&data);
data.smbios_cpu_max_speed = 2900;
data.smbios_cpu_curr_speed = 2700;
test_acpi_one("-cpu cortex-a57 "
@ -1509,6 +1571,8 @@ static void test_acpi_oem_fields_virt(void)
int main(int argc, char *argv[])
{
const char *arch = qtest_get_arch();
const bool has_kvm = qtest_has_accel("kvm");
const bool has_tcg = qtest_has_accel("tcg");
int ret;
g_test_init(&argc, &argv, NULL);
@ -1534,6 +1598,7 @@ int main(int argc, char *argv[])
test_acpi_piix4_no_acpi_pci_hotplug);
qtest_add_func("acpi/q35", test_acpi_q35_tcg);
qtest_add_func("acpi/q35/bridge", test_acpi_q35_tcg_bridge);
qtest_add_func("acpi/q35/multif-bridge", test_acpi_q35_multif_bridge);
qtest_add_func("acpi/q35/mmio64", test_acpi_q35_tcg_mmio64);
qtest_add_func("acpi/piix4/ipmi", test_acpi_piix4_tcg_ipmi);
qtest_add_func("acpi/q35/ipmi", test_acpi_q35_tcg_ipmi);
@ -1564,15 +1629,24 @@ int main(int argc, char *argv[])
qtest_add_func("acpi/microvm/rtc", test_acpi_microvm_rtc_tcg);
qtest_add_func("acpi/microvm/ioapic2", test_acpi_microvm_ioapic2_tcg);
qtest_add_func("acpi/microvm/oem-fields", test_acpi_oem_fields_microvm);
if (strcmp(arch, "x86_64") == 0) {
qtest_add_func("acpi/microvm/pcie", test_acpi_microvm_pcie_tcg);
if (has_tcg) {
qtest_add_func("acpi/q35/ivrs", test_acpi_q35_tcg_ivrs);
if (strcmp(arch, "x86_64") == 0) {
qtest_add_func("acpi/microvm/pcie", test_acpi_microvm_pcie_tcg);
}
}
if (has_kvm) {
qtest_add_func("acpi/q35/kvm/xapic", test_acpi_q35_kvm_xapic);
qtest_add_func("acpi/q35/kvm/dmar", test_acpi_q35_kvm_dmar);
}
} else if (strcmp(arch, "aarch64") == 0) {
qtest_add_func("acpi/virt", test_acpi_virt_tcg);
qtest_add_func("acpi/virt/numamem", test_acpi_virt_tcg_numamem);
qtest_add_func("acpi/virt/memhp", test_acpi_virt_tcg_memhp);
qtest_add_func("acpi/virt/pxb", test_acpi_virt_tcg_pxb);
qtest_add_func("acpi/virt/oem-fields", test_acpi_oem_fields_virt);
if (has_tcg) {
qtest_add_func("acpi/virt", test_acpi_virt_tcg);
qtest_add_func("acpi/virt/numamem", test_acpi_virt_tcg_numamem);
qtest_add_func("acpi/virt/memhp", test_acpi_virt_tcg_memhp);
qtest_add_func("acpi/virt/pxb", test_acpi_virt_tcg_pxb);
qtest_add_func("acpi/virt/oem-fields", test_acpi_oem_fields_virt);
}
}
ret = g_test_run();
boot_sector_cleanup(disk);

View File

@@ -588,6 +588,14 @@ bool qtest_big_endian(QTestState *s);
*/
const char *qtest_get_arch(void);
/**
* qtest_has_accel:
* @accel_name: Accelerator name to check for.
*
* Returns: true if the accelerator is built in and, for KVM, usable on
* this host (the target architecture is a KVM target and /dev/kvm is
* accessible).
*/
bool qtest_has_accel(const char *accel_name);
/**
* qtest_add_func:
* @str: Test case path.

View File

@@ -922,6 +922,33 @@ const char *qtest_get_arch(void)
return end + 1;
}
/*
 * qtest_has_accel:
 * @accel_name: accelerator to probe; only "tcg" and "kvm" are handled,
 *              any other name aborts via g_assert_not_reached().
 *
 * Returns: true when the named accelerator is available to tests:
 * for "tcg", that it was compiled in (CONFIG_TCG); for "kvm", that the
 * current target architecture is in the compiled-in KVM target list
 * AND the host's /dev/kvm is read/write accessible.
 */
bool qtest_has_accel(const char *accel_name)
{
    if (g_str_equal(accel_name, "tcg")) {
        /* TCG availability is a pure build-time property. */
#if defined(CONFIG_TCG)
        return true;
#else
        return false;
#endif
    } else if (g_str_equal(accel_name, "kvm")) {
        int i;
        const char *arch = qtest_get_arch();
        const char *targets[] = { CONFIG_KVM_TARGETS };

        for (i = 0; i < ARRAY_SIZE(targets); i++) {
            /*
             * Prefix match: targets[] entries are presumably of the
             * form "<arch>-softmmu", so comparing the first
             * strlen(arch) bytes selects the current target.
             * NOTE(review): an arch name that is a prefix of another
             * (e.g. "arm" vs "armeb") would also match — confirm the
             * CONFIG_KVM_TARGETS naming rules make this safe.
             */
            if (!strncmp(targets[i], arch, strlen(arch))) {
                /* Built in for this target; now require a usable host KVM. */
                if (!access("/dev/kvm", R_OK | W_OK)) {
                    return true;
                }
            }
        }
    } else {
        /* not implemented */
        g_assert_not_reached();
    }
    /* KVM target not built in, or /dev/kvm not accessible. */
    return false;
}
bool qtest_get_irq(QTestState *s, int num)
{
/* dummy operation in order to make sure irq is up to date */

View File

@@ -1420,6 +1420,7 @@ static bool kvm_dirty_ring_supported(void)
int main(int argc, char **argv)
{
char template[] = "/tmp/migration-test-XXXXXX";
const bool has_kvm = qtest_has_accel("kvm");
int ret;
g_test_init(&argc, &argv, NULL);
@@ -1434,8 +1435,7 @@ int main(int argc, char **argv)
* some reason)
*/
if (g_str_equal(qtest_get_arch(), "ppc64") &&
(access("/sys/module/kvm_hv", F_OK) ||
access("/dev/kvm", R_OK | W_OK))) {
(!has_kvm || access("/sys/module/kvm_hv", F_OK))) {
g_test_message("Skipping test: kvm_hv not available");
return g_test_run();
}
@@ -1444,16 +1444,9 @@ int main(int argc, char **argv)
* Similar to ppc64, s390x seems to be touchy with TCG, so disable it
* there until the problems are resolved
*/
if (g_str_equal(qtest_get_arch(), "s390x")) {
#if defined(HOST_S390X)
if (access("/dev/kvm", R_OK | W_OK)) {
g_test_message("Skipping test: kvm not available");
return g_test_run();
}
#else
g_test_message("Skipping test: Need s390x host to work properly");
if (g_str_equal(qtest_get_arch(), "s390x") && !has_kvm) {
g_test_message("Skipping test: s390x host with KVM is required");
return g_test_run();
#endif
}
tmpfs = mkdtemp(template);

View File

@@ -906,9 +906,9 @@ static void start_vhost_user_blk(GString *cmd_line, int vus_instances,
img_path = drive_create();
g_string_append_printf(storage_daemon_command,
"--blockdev driver=file,node-name=disk%d,filename=%s "
"--export type=vhost-user-blk,id=disk%d,addr.type=unix,addr.path=%s,"
"--export type=vhost-user-blk,id=disk%d,addr.type=fd,addr.str=%d,"
"node-name=disk%i,writable=on,num-queues=%d ",
i, img_path, i, sock_path, i, num_queues);
i, img_path, i, fd, i, num_queues);
g_string_append_printf(cmd_line, "-chardev socket,id=char%d,path=%s ",
i + 1, sock_path);