2014-06-10 12:03:23 +02:00
|
|
|
/*
|
|
|
|
* QTest testcase for the vhost-user
|
|
|
|
*
|
|
|
|
* Copyright (c) 2014 Virtual Open Systems Sarl.
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
|
|
|
* See the COPYING file in the top-level directory.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2016-02-08 19:08:51 +01:00
|
|
|
#include "qemu/osdep.h"
|
2014-06-19 17:07:59 +02:00
|
|
|
|
2019-09-03 07:50:26 +02:00
|
|
|
#include "libqtest-single.h"
|
2016-10-22 11:52:55 +02:00
|
|
|
#include "qapi/error.h"
|
2018-02-01 12:18:39 +01:00
|
|
|
#include "qapi/qmp/qdict.h"
|
2016-12-12 18:22:24 +01:00
|
|
|
#include "qemu/config-file.h"
|
2014-06-10 12:03:23 +02:00
|
|
|
#include "qemu/option.h"
|
2015-10-09 17:17:39 +02:00
|
|
|
#include "qemu/range.h"
|
2016-06-22 19:11:19 +02:00
|
|
|
#include "qemu/sockets.h"
|
2017-01-26 15:26:44 +01:00
|
|
|
#include "chardev/char-fe.h"
|
2018-02-15 22:25:49 +01:00
|
|
|
#include "qemu/memfd.h"
|
2019-05-23 16:35:07 +02:00
|
|
|
#include "qemu/module.h"
|
2014-06-10 12:03:23 +02:00
|
|
|
#include "sysemu/sysemu.h"
|
2016-09-02 20:59:43 +02:00
|
|
|
#include "libqos/libqos.h"
|
|
|
|
#include "libqos/pci-pc.h"
|
|
|
|
#include "libqos/virtio-pci.h"
|
2014-06-10 12:03:23 +02:00
|
|
|
|
2016-09-09 13:34:44 +02:00
|
|
|
#include "libqos/malloc-pc.h"
|
2022-08-02 11:50:04 +02:00
|
|
|
#include "libqos/qgraph_internal.h"
|
2016-09-09 13:34:44 +02:00
|
|
|
#include "hw/virtio/virtio-net.h"
|
|
|
|
|
2019-02-14 18:35:52 +01:00
|
|
|
#include "standard-headers/linux/vhost_types.h"
|
|
|
|
#include "standard-headers/linux/virtio_ids.h"
|
|
|
|
#include "standard-headers/linux/virtio_net.h"
|
2022-08-02 11:50:10 +02:00
|
|
|
#include "standard-headers/linux/virtio_gpio.h"
|
2023-06-28 12:05:24 +02:00
|
|
|
#include "standard-headers/linux/virtio_scmi.h"
|
2019-02-14 18:35:52 +01:00
|
|
|
|
|
|
|
#ifdef CONFIG_LINUX
|
2014-06-10 12:03:23 +02:00
|
|
|
#include <sys/vfs.h>
|
2019-02-14 18:35:52 +01:00
|
|
|
#endif
|
2014-06-10 12:03:23 +02:00
|
|
|
|
vhost-user-test: Fix 'make check' broken on glib < 2.26
After commit 89b516d8, some logics is turbid and
breaks 'make check' as below errors:
tests/vhost-user-test.c: In function '_cond_wait_until':
tests/vhost-user-test.c:154: error: 'G_TIME_SPAN_SECOND' undeclared (first use in this function)
tests/vhost-user-test.c:154: error: (Each undeclared identifier is reported only once
tests/vhost-user-test.c:154: error: for each function it appears in.)
tests/vhost-user-test.c: In function 'read_guest_mem':
tests/vhost-user-test.c:192: warning: implicit declaration of function 'g_get_monotonic_time'
tests/vhost-user-test.c:192: warning: nested extern declaration of 'g_get_monotonic_time'
tests/vhost-user-test.c:192: error: 'G_TIME_SPAN_SECOND' undeclared (first use in this function)
make: *** [tests/vhost-user-test.o] Error 1
First, vhost-usr-test.c rely on glib-compat.h because
of using G_TIME_SPAN_SECOND [glib < 2.26] and g_get_monotonic_time(),
but vhost-usr-test.c defined QEMU_GLIB_COMPAT_H, which make
glib-compat.h will not be included.
Second, if we remove QEMU_GLIB_COMPAT_H definability in
vhost-usr-test.c, then we will get below warnings:
tests/vhost-user-test.c: In function 'read_guest_mem':
tests/vhost-user-test.c:190: warning: passing argument 1 of 'g_mutex_lock' from incompatible pointer type
tests/vhost-user-test.c:234: warning: passing argument 1 of 'g_mutex_unlock' from incompatible pointer type
That's because glib-compat.h redefine the g_mutex_lock/unlock
function. Those functions' arguments is CompatGMutex/CompatGCond,
but vhost-user-test.c is using GMutex/GCond, which cause the type
is not consistent.
We can rerealize those functions of vhost-user-test.c,
which need a lots of patches. Let's simply address it, and
leave this file alone.
Signed-off-by: Gonglei <arei.gonglei@huawei.com>
Message-id: 1415149259-6188-1-git-send-email-arei.gonglei@huawei.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2014-11-05 02:00:59 +01:00
|
|
|
|
2018-02-15 22:25:49 +01:00
|
|
|
#define QEMU_CMD_MEM " -m %d -object memory-backend-file,id=mem,size=%dM," \
|
2014-06-10 12:03:23 +02:00
|
|
|
"mem-path=%s,share=on -numa node,memdev=mem"
|
2018-02-15 22:25:49 +01:00
|
|
|
#define QEMU_CMD_MEMFD " -m %d -object memory-backend-memfd,id=mem,size=%dM," \
|
|
|
|
" -numa node,memdev=mem"
|
2016-06-06 18:45:08 +02:00
|
|
|
#define QEMU_CMD_CHR " -chardev socket,id=%s,path=%s%s"
|
2022-02-10 15:52:50 +01:00
|
|
|
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce=on"
|
2014-06-10 12:03:23 +02:00
|
|
|
|
|
|
|
#define HUGETLBFS_MAGIC 0x958458f6
|
|
|
|
|
|
|
|
/*********** FROM hw/virtio/vhost-user.c *************************************/
|
|
|
|
|
|
|
|
#define VHOST_MEMORY_MAX_NREGIONS 8
|
2017-12-21 22:21:23 +01:00
|
|
|
#define VHOST_MAX_VIRTQUEUES 0x100
|
2014-06-10 12:03:23 +02:00
|
|
|
|
2015-09-24 18:22:01 +02:00
|
|
|
#define VHOST_USER_F_PROTOCOL_FEATURES 30
|
2022-08-02 11:50:10 +02:00
|
|
|
#define VIRTIO_F_VERSION_1 32
|
|
|
|
|
2016-09-09 13:34:44 +02:00
|
|
|
#define VHOST_USER_PROTOCOL_F_MQ 0
|
2015-10-09 17:17:39 +02:00
|
|
|
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
|
2018-12-03 16:32:22 +01:00
|
|
|
#define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN 6
|
2022-08-02 11:50:10 +02:00
|
|
|
#define VHOST_USER_PROTOCOL_F_CONFIG 9
|
2015-10-09 17:17:39 +02:00
|
|
|
|
|
|
|
#define VHOST_LOG_PAGE 0x1000
|
2015-09-24 18:22:01 +02:00
|
|
|
|
2014-06-10 12:03:23 +02:00
|
|
|
/*
 * Request codes of the vhost-user protocol, mirroring the master-side
 * definitions in hw/virtio/vhost-user.c.  Only the subset handled by
 * chr_read() below is actually exercised by these tests; the numeric
 * values are part of the wire protocol and must not change.
 */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    /* note the gap: requests 19..23 are not used by these tests */
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_MAX
} VhostUserRequest;
|
|
|
|
|
|
|
|
/* One guest memory region as described in a SET_MEM_TABLE message. */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;   /* GPA where the region starts */
    uint64_t memory_size;       /* region length in bytes */
    uint64_t userspace_addr;    /* QEMU's virtual address of the region */
    uint64_t mmap_offset;       /* offset of the region inside the shared fd */
} VhostUserMemoryRegion;
|
|
|
|
|
|
|
|
/* Memory table payload of a SET_MEM_TABLE message. */
typedef struct VhostUserMemory {
    uint32_t nregions;          /* number of valid entries in regions[] */
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;
|
|
|
|
|
2015-11-11 15:26:02 +01:00
|
|
|
/* Payload of a SET_LOG_BASE message describing the dirty-log mmap area. */
typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;
|
|
|
|
|
2014-06-10 12:03:23 +02:00
|
|
|
/*
 * On-the-wire vhost-user message: fixed header (request/flags/size)
 * followed by a request-specific payload.  Packed because the layout
 * is shared with the QEMU master over the socket.
 */
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
    } payload;
} QEMU_PACKED VhostUserMsg;
|
|
|
|
|
|
|
|
static VhostUserMsg m __attribute__ ((unused));
|
|
|
|
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
|
|
|
|
+ sizeof(m.flags) \
|
|
|
|
+ sizeof(m.size))
|
|
|
|
|
|
|
|
#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)
|
|
|
|
|
|
|
|
/* The version of the protocol we support */
|
|
|
|
#define VHOST_USER_VERSION (0x1)
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
2016-09-09 13:34:45 +02:00
|
|
|
/*
 * State machine for the reconnect/flags tests; stored in
 * TestServer::test_flags and advanced by chr_read()/chr_event().
 */
enum {
    TEST_FLAGS_OK,
    TEST_FLAGS_DISCONNECT,
    TEST_FLAGS_BAD,
    TEST_FLAGS_END,
};
|
|
|
|
|
2020-09-11 10:39:45 +02:00
|
|
|
/* Device flavours a TestServer can emulate (vhost_user_ops::type). */
enum {
    VHOST_USER_NET,
    VHOST_USER_GPIO,
    VHOST_USER_SCMI,
};
|
|
|
|
|
2015-10-09 17:17:37 +02:00
|
|
|
/*
 * State of one emulated vhost-user backend.  The chardev runs in its
 * own GMainContext/thread; data_mutex + data_cond synchronize the test
 * thread with message processing in chr_read().
 */
typedef struct TestServer {
    gchar *socket_path;         /* unix socket the backend listens on */
    gchar *mig_path;            /* unix socket used by migration tests */
    gchar *chr_name;            /* chardev id ("chr-<name>") */
    gchar *tmpfs;               /* per-server temporary directory */
    CharBackend chr;            /* frontend side of the backend chardev */
    int fds_num;                /* number of valid entries in fds[] */
    int fds[VHOST_MEMORY_MAX_NREGIONS]; /* region fds from SET_MEM_TABLE */
    VhostUserMemory memory;     /* last memory table received */
    GMainContext *context;      /* context the chardev is dispatched in */
    GMainLoop *loop;
    GThread *thread;            /* runs thread_function()/the loop above */
    GMutex data_mutex;          /* protects all fields below and above */
    GCond data_cond;            /* broadcast when state changes arrive */
    int log_fd;                 /* dirty-log fd from SET_LOG_BASE, -1 if none */
    uint64_t rings;             /* bitmask of started vrings (by index) */
    bool test_fail;             /* force a disconnect on next message */
    int test_flags;             /* TEST_FLAGS_* state machine */
    int queues;                 /* queue count reported via GET_QUEUE_NUM */
    struct vhost_user_ops *vu_ops; /* per-device-type hooks */
} TestServer;
|
2014-06-19 17:07:59 +02:00
|
|
|
|
2020-09-11 10:39:45 +02:00
|
|
|
/*
 * Per-device-type customization hooks.  append_opts builds the QEMU
 * command line; the remaining callbacks let a device override how the
 * corresponding vhost-user commands are answered (get_features is
 * mandatory, the others optional — see chr_read()).
 */
struct vhost_user_ops {
    /* Device types. */
    int type;
    /* Append the device's chardev/netdev options to the QEMU command line. */
    void (*append_opts)(TestServer *s, GString *cmd_line,
                        const char *chr_opts);

    /* VHOST-USER commands. */
    uint64_t (*get_features)(TestServer *s);
    void (*set_features)(TestServer *s, CharBackend *chr,
                         VhostUserMsg *msg);
    void (*get_protocol_features)(TestServer *s,
                                  CharBackend *chr, VhostUserMsg *msg);
};
|
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
static const char *init_hugepagefs(void);
|
2020-09-11 10:39:45 +02:00
|
|
|
static TestServer *test_server_new(const gchar *name,
|
|
|
|
struct vhost_user_ops *ops);
|
2018-02-01 14:27:56 +01:00
|
|
|
static void test_server_free(TestServer *server);
|
|
|
|
static void test_server_listen(TestServer *server);
|
|
|
|
|
2018-02-15 22:25:49 +01:00
|
|
|
/* Memory backend selection: auto-detect memfd support, force it, or forbid it. */
enum test_memfd {
    TEST_MEMFD_AUTO,
    TEST_MEMFD_YES,
    TEST_MEMFD_NO,
};
|
|
|
|
|
2020-09-11 10:39:45 +02:00
|
|
|
/*
 * Append the chardev + vhost-user netdev options for this server to
 * the QEMU command line.  chr_opts carries extra chardev flags such as
 * ",server=on,wait=off".
 */
static void append_vhost_net_opts(TestServer *s, GString *cmd_line,
                                  const char *chr_opts)
{
    g_string_append_printf(cmd_line, QEMU_CMD_CHR QEMU_CMD_NETDEV,
                           s->chr_name, s->socket_path,
                           chr_opts, s->chr_name);
}
|
|
|
|
|
2022-08-02 11:50:10 +02:00
|
|
|
/*
 * For GPIO there are no other magic devices we need to add (like
 * block or netdev) so all we need to worry about is the vhost-user
 * chardev socket.
 */
static void append_vhost_gpio_opts(TestServer *s, GString *cmd_line,
                                   const char *chr_opts)
{
    g_string_append_printf(cmd_line, QEMU_CMD_CHR,
                           s->chr_name, s->socket_path,
                           chr_opts);
}
|
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
/*
 * Append the guest-memory backend options (size in MiB) to the QEMU
 * command line.  TEST_MEMFD_AUTO resolves to memfd when the host
 * supports sealing; otherwise a file-backed region is used, preferring
 * hugetlbfs (QTEST_HUGETLBFS_PATH) and falling back to the server's
 * tmpfs directory.
 */
static void append_mem_opts(TestServer *server, GString *cmd_line,
                            int size, enum test_memfd memfd)
{
    if (memfd == TEST_MEMFD_AUTO) {
        memfd = qemu_memfd_check(MFD_ALLOW_SEALING) ? TEST_MEMFD_YES
                                                    : TEST_MEMFD_NO;
    }

    if (memfd == TEST_MEMFD_YES) {
        g_string_append_printf(cmd_line, QEMU_CMD_MEMFD, size, size);
    } else {
        /* GNU "a ?: b" — use hugepagefs if available, else the tmpfs dir */
        const char *root = init_hugepagefs() ? : server->tmpfs;

        g_string_append_printf(cmd_line, QEMU_CMD_MEM, size, size, root);
    }
}
|
|
|
|
|
2018-12-03 16:32:23 +01:00
|
|
|
/*
 * Block (up to 5 seconds) until chr_read() has received a
 * SET_MEM_TABLE message and stored the region fds, then verify a
 * region starting at GPA 0 exists.  Returns true if such a region was
 * found; otherwise the test is skipped and false is returned.
 */
static bool wait_for_fds(TestServer *s)
{
    gint64 end_time;
    bool got_region;
    int i;

    g_mutex_lock(&s->data_mutex);

    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (!s->fds_num) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(s->fds_num);
            break;
        }
    }

    /* check for sanity */
    g_assert_cmpint(s->fds_num, >, 0);
    g_assert_cmpint(s->fds_num, ==, s->memory.nregions);

    g_mutex_unlock(&s->data_mutex);

    /* the test relies on a region mapped at GPA 0x0 — look for one */
    got_region = false;
    for (i = 0; i < s->memory.nregions; ++i) {
        VhostUserMemoryRegion *reg = &s->memory.regions[i];
        if (reg->guest_phys_addr == 0) {
            got_region = true;
            break;
        }
    }
    if (!got_region) {
        g_test_skip("No memory at address 0x0");
    }
    return got_region;
}
|
|
|
|
|
2018-12-03 16:32:24 +01:00
|
|
|
/*
 * Verify that guest memory shared over vhost-user matches what the
 * guest itself sees: mmap each region fd that covers GPA 0x0 and
 * compare its first 1024 bytes against qtest_readb() of the same GPAs.
 */
static void read_guest_mem_server(QTestState *qts, TestServer *s)
{
    uint8_t *mmap_base, *guest_mem;
    int i, j;
    size_t size;

    g_mutex_lock(&s->data_mutex);

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {

        /* We'll check only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        mmap_base = mmap(0, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, s->fds[i], 0);

        g_assert(mmap_base != MAP_FAILED);
        /* region data starts mmap_offset bytes into the mapping */
        guest_mem = mmap_base + s->memory.regions[i].mmap_offset;

        for (j = 0; j < 1024; j++) {
            uint32_t a = qtest_readb(qts, s->memory.regions[i].guest_phys_addr + j);
            uint32_t b = guest_mem[j];

            g_assert_cmpint(a, ==, b);
        }

        /*
         * Unmap from the mapping base with the full mapped size; the
         * previous code passed the offset-adjusted pointer, which is
         * not page-aligned when mmap_offset != 0, so munmap() failed
         * silently and leaked the mapping.
         */
        munmap(mmap_base, size);
    }

    g_mutex_unlock(&s->data_mutex);
}
|
|
|
|
|
|
|
|
static void *thread_function(void *data)
|
|
|
|
{
|
2015-11-27 15:41:18 +01:00
|
|
|
GMainLoop *loop = data;
|
2014-06-10 12:03:23 +02:00
|
|
|
g_main_loop_run(loop);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Chardev read watermark: always ask for exactly one message header;
 * chr_read() pulls any payload itself with qemu_chr_fe_read_all().
 */
static int chr_can_read(void *opaque)
{
    return VHOST_USER_HDR_SIZE;
}
|
|
|
|
|
|
|
|
/*
 * Core message dispatcher of the fake vhost-user backend.  Called by
 * the chardev layer with exactly one message header (see
 * chr_can_read()); reads any payload, handles the request, and
 * broadcasts data_cond whenever state the test thread waits on
 * (memory table, log fd, ring bits) changes.  Runs in the server's
 * GMainContext thread under data_mutex.
 */
static void chr_read(void *opaque, const uint8_t *buf, int size)
{
    g_autoptr(GError) err = NULL;
    TestServer *s = opaque;
    CharBackend *chr = &s->chr;
    VhostUserMsg msg;
    uint8_t *p = (uint8_t *) &msg;
    int fd = -1;

    /* failure-injection mode: drop the connection once, then behave */
    if (s->test_fail) {
        qemu_chr_fe_disconnect(chr);
        /* now switch to non-failure */
        s->test_fail = false;
    }

    if (size != VHOST_USER_HDR_SIZE) {
        qos_printf("%s: Wrong message size received %d\n", __func__, size);
        return;
    }

    g_mutex_lock(&s->data_mutex);
    memcpy(p, buf, VHOST_USER_HDR_SIZE);

    /* pull the payload announced by the header, if any */
    if (msg.size) {
        p += VHOST_USER_HDR_SIZE;
        size = qemu_chr_fe_read_all(chr, p, msg.size);
        if (size != msg.size) {
            qos_printf("%s: Wrong message size received %d != %d\n",
                       __func__, size, msg.size);
            goto out;
        }
    }

    switch (msg.request) {
    case VHOST_USER_GET_FEATURES:
        /* Mandatory for tests to define get_features */
        g_assert(s->vu_ops->get_features);

        /* send back features to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);

        /* TEST_FLAGS_BAD: report no features to provoke a failure path */
        if (s->test_flags >= TEST_FLAGS_BAD) {
            msg.payload.u64 = 0;
            s->test_flags = TEST_FLAGS_END;
        } else {
            msg.payload.u64 = s->vu_ops->get_features(s);
        }

        qemu_chr_fe_write_all(chr, (uint8_t *) &msg,
                              VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_FEATURES:
        if (s->vu_ops->set_features) {
            s->vu_ops->set_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_SET_OWNER:
        /*
         * We don't need to do anything here, the remote is just
         * letting us know it is in charge. Just log it.
         */
        qos_printf("set_owner: start of session\n");
        break;

    case VHOST_USER_GET_PROTOCOL_FEATURES:
        if (s->vu_ops->get_protocol_features) {
            s->vu_ops->get_protocol_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_GET_CONFIG:
        /*
         * Treat GET_CONFIG as a NOP and just reply and let the guest
         * consider we have updated its memory. Tests currently don't
         * require working configs.
         */
        msg.flags |= VHOST_USER_REPLY_MASK;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_PROTOCOL_FEATURES:
        /*
         * We did set VHOST_USER_F_PROTOCOL_FEATURES so its valid for
         * the remote end to send this. There is no handshake reply so
         * just log the details for debugging.
         */
        qos_printf("set_protocol_features: 0x%"PRIx64 "\n", msg.payload.u64);
        break;

    /*
     * A real vhost-user backend would actually set the size and
     * address of the vrings but we can simply report them.
     */
    case VHOST_USER_SET_VRING_NUM:
        qos_printf("set_vring_num: %d/%d\n",
                   msg.payload.state.index, msg.payload.state.num);
        break;
    case VHOST_USER_SET_VRING_ADDR:
        qos_printf("set_vring_addr: 0x%"PRIx64"/0x%"PRIx64"/0x%"PRIx64"\n",
                   msg.payload.addr.avail_user_addr,
                   msg.payload.addr.desc_user_addr,
                   msg.payload.addr.used_user_addr);
        break;

    case VHOST_USER_GET_VRING_BASE:
        /* send back vring base to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.state);
        msg.payload.state.num = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);

        /* mark the ring stopped and wake any waiter (e.g. wait_for_rings_started) */
        assert(msg.payload.state.index < s->queues * 2);
        s->rings &= ~(0x1ULL << msg.payload.state.index);
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_MEM_TABLE:
        /* received the mem table */
        memcpy(&s->memory, &msg.payload.memory, sizeof(msg.payload.memory));
        s->fds_num = qemu_chr_fe_get_msgfds(chr, s->fds,
                                            G_N_ELEMENTS(s->fds));

        /* signal the test that it can continue */
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_KICK:
    case VHOST_USER_SET_VRING_CALL:
        /* consume the fd */
        qemu_chr_fe_get_msgfds(chr, &fd, 1);
        /*
         * This is a non-blocking eventfd.
         * The receive function forces it to be blocking,
         * so revert it back to non-blocking.
         */
        g_unix_set_fd_nonblocking(fd, true, &err);
        g_assert_no_error(err);
        break;

    case VHOST_USER_SET_LOG_BASE:
        /* replace any previously received log fd */
        if (s->log_fd != -1) {
            close(s->log_fd);
            s->log_fd = -1;
        }
        qemu_chr_fe_get_msgfds(chr, &s->log_fd, 1);
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE);

        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_BASE:
        /* mark the ring started */
        assert(msg.payload.state.index < s->queues * 2);
        s->rings |= 0x1ULL << msg.payload.state.index;
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_GET_QUEUE_NUM:
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);
        msg.payload.u64 = s->queues;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_VRING_ENABLE:
        /*
         * Another case we ignore as we don't need to respond. With a
         * fully functioning vhost-user we would enable/disable the
         * vring monitoring.
         */
        qos_printf("set_vring(%d)=%s\n", msg.payload.state.index,
                   msg.payload.state.num ? "enabled" : "disabled");
        break;

    default:
        qos_printf("vhost-user: un-handled message: %d\n", msg.request);
        break;
    }

out:
    g_mutex_unlock(&s->data_mutex);
}
|
|
|
|
|
2019-02-14 18:35:55 +01:00
|
|
|
/*
 * Resolve and validate the hugetlbfs directory named by the
 * QTEST_HUGETLBFS_PATH environment variable.  The result is cached on
 * first success.  Returns NULL when the variable is unset or on
 * non-Linux hosts; fails the test (and returns NULL) when the path is
 * set but unusable or not on hugetlbfs.
 */
static const char *init_hugepagefs(void)
{
#ifdef CONFIG_LINUX
    static const char *hugepagefs;
    const char *path = getenv("QTEST_HUGETLBFS_PATH");
    struct statfs fs;
    int ret;

    if (hugepagefs) {
        return hugepagefs;
    }
    if (!path) {
        return NULL;
    }

    if (access(path, R_OK | W_OK | X_OK)) {
        qos_printf("access on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    /* statfs can be interrupted by signals — retry on EINTR */
    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        qos_printf("statfs on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        qos_printf("Warning: path not on HugeTLBFS: %s", path);
        g_test_fail();
        return NULL;
    }

    hugepagefs = path;
    return hugepagefs;
#else
    return NULL;
#endif
}
|
2014-06-10 12:03:23 +02:00
|
|
|
|
2020-09-11 10:39:45 +02:00
|
|
|
/*
 * Allocate and initialize a TestServer: private GMainContext + loop
 * running on its own thread (so the chardev can operate while the test
 * thread blocks), a fresh temporary directory for the sockets, and the
 * synchronization primitives.  Ownership of all resources is released
 * by test_server_free().
 */
static TestServer *test_server_new(const gchar *name,
                                   struct vhost_user_ops *ops)
{
    TestServer *server = g_new0(TestServer, 1);
    g_autofree const char *tmpfs = NULL;
    GError *err = NULL;

    server->context = g_main_context_new();
    server->loop = g_main_loop_new(server->context, FALSE);

    /* run the main loop thread so the chardev may operate */
    server->thread = g_thread_new(NULL, thread_function, server->loop);

    tmpfs = g_dir_make_tmp("vhost-test-XXXXXX", &err);
    if (!tmpfs) {
        g_test_message("Can't create temporary directory in %s: %s",
                       g_get_tmp_dir(), err->message);
        g_error_free(err);
    }
    g_assert(tmpfs);

    server->tmpfs = g_strdup(tmpfs);
    server->socket_path = g_strdup_printf("%s/%s.sock", tmpfs, name);
    server->mig_path = g_strdup_printf("%s/%s.mig", tmpfs, name);
    server->chr_name = g_strdup_printf("chr-%s", name);

    g_mutex_init(&server->data_mutex);
    g_cond_init(&server->data_cond);

    server->log_fd = -1;      /* no dirty-log fd received yet */
    server->queues = 1;       /* default single queue pair */
    server->vu_ops = ops;

    return server;
}
|
|
|
|
|
chardev: Use QEMUChrEvent enum in IOEventHandler typedef
The Chardev events are listed in the QEMUChrEvent enum.
By using the enum in the IOEventHandler typedef we:
- make the IOEventHandler type more explicit (this handler
process out-of-band information, while the IOReadHandler
is in-band),
- help static code analyzers.
This patch was produced with the following spatch script:
@match@
expression backend, opaque, context, set_open;
identifier fd_can_read, fd_read, fd_event, be_change;
@@
qemu_chr_fe_set_handlers(backend, fd_can_read, fd_read, fd_event,
be_change, opaque, context, set_open);
@depends on match@
identifier opaque, event;
identifier match.fd_event;
@@
static
-void fd_event(void *opaque, int event)
+void fd_event(void *opaque, QEMUChrEvent event)
{
...
}
Then the typedef was modified manually in
include/chardev/char-fe.h.
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Corey Minyard <cminyard@mvista.com>
Acked-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-Id: <20191218172009.8868-15-philmd@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2019-12-18 18:20:09 +01:00
|
|
|
/*
 * Chardev event hook: when a disconnect arrives while the flags test
 * is in its final state, record success by resetting to TEST_FLAGS_OK.
 * All other events are ignored.
 */
static void chr_event(void *opaque, QEMUChrEvent event)
{
    TestServer *server = opaque;

    if (event != CHR_EVENT_CLOSED) {
        return;
    }
    if (server->test_flags == TEST_FLAGS_END) {
        server->test_flags = TEST_FLAGS_OK;
    }
}
|
|
|
|
|
2016-06-06 18:45:08 +02:00
|
|
|
/*
 * Create the unix-socket chardev backing this server and wire up the
 * chr_can_read/chr_read/chr_event handlers on the server's own
 * GMainContext.  opt carries extra socket options such as
 * ",server=on,wait=off".
 */
static void test_server_create_chr(TestServer *server, const gchar *opt)
{
    g_autofree gchar *chr_path = g_strdup_printf("unix:%s%s",
                                                 server->socket_path, opt);
    Chardev *chr;

    chr = qemu_chr_new(server->chr_name, chr_path, server->context);
    g_assert(chr);

    qemu_chr_fe_init(&server->chr, chr, &error_abort);
    qemu_chr_fe_set_handlers(&server->chr, chr_can_read, chr_read,
                             chr_event, NULL, server, server->context, true);
}
|
|
|
|
|
|
|
|
/* Start the server's chardev in listening mode without blocking for a peer. */
static void test_server_listen(TestServer *server)
{
    test_server_create_chr(server, ",server=on,wait=off");
}
|
|
|
|
|
2019-02-14 18:35:54 +01:00
|
|
|
/*
 * Tear down a TestServer created by test_server_new(): stop the loop
 * thread first (so no handler races the teardown), remove the socket
 * files and temporary directory, release the chardev, close all
 * received fds, and free every allocation.
 */
static void test_server_free(TestServer *server)
{
    int i, ret;

    /* finish the helper thread and dispatch pending sources */
    g_main_loop_quit(server->loop);
    g_thread_join(server->thread);
    while (g_main_context_pending(NULL)) {
        g_main_context_iteration(NULL, TRUE);
    }

    unlink(server->socket_path);
    g_free(server->socket_path);

    unlink(server->mig_path);
    g_free(server->mig_path);

    /* rmdir only succeeds once the sockets above are gone */
    ret = rmdir(server->tmpfs);
    if (ret != 0) {
        g_test_message("unable to rmdir: path (%s): %s",
                       server->tmpfs, strerror(errno));
    }
    g_free(server->tmpfs);

    qemu_chr_fe_deinit(&server->chr, true);

    /* close region fds received via SET_MEM_TABLE */
    for (i = 0; i < server->fds_num; i++) {
        close(server->fds[i]);
    }

    if (server->log_fd != -1) {
        close(server->log_fd);
    }

    g_free(server->chr_name);

    g_main_loop_unref(server->loop);
    g_main_context_unref(server->context);
    g_cond_clear(&server->data_cond);
    g_mutex_clear(&server->data_mutex);
    g_free(server);
}
|
|
|
|
|
2015-10-09 17:17:39 +02:00
|
|
|
/*
 * Block (up to five seconds) until the slave has received a dirty-log
 * fd from the master; assert if the timeout elapses first.
 */
static void wait_for_log_fd(TestServer *s)
{
    gint64 deadline;

    g_mutex_lock(&s->data_mutex);
    deadline = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (s->log_fd == -1 &&
           g_cond_wait_until(&s->data_cond, &s->data_mutex, deadline)) {
        /* woken up: loop around and re-check the predicate */
    }
    /* either the fd arrived, or the timeout fired and this fails */
    g_assert(s->log_fd != -1);
    g_mutex_unlock(&s->data_mutex);
}
|
|
|
|
|
2016-01-22 16:09:21 +01:00
|
|
|
/*
 * Seed the guest memory region that starts at guest physical address 0
 * with a recognisable pattern (seed + index) so the migration
 * destination can verify the data arrived intact.
 */
static void write_guest_mem(TestServer *s, uint32_t seed)
{
    uint32_t *base, *guest_mem;
    int i, j;
    size_t size;

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {

        /* We'll write only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        base = mmap(0, size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, s->fds[i], 0);

        g_assert(base != MAP_FAILED);
        /* the usable region begins mmap_offset bytes into the mapping */
        guest_mem = base + s->memory.regions[i].mmap_offset /
            sizeof(*guest_mem);

        for (j = 0; j < 256; j++) {
            guest_mem[j] = seed + j;
        }

        /*
         * Unmap from the mapping base with the full mapped size.
         * Previously this unmapped the offset-adjusted pointer with
         * only memory_size: for a non-page-aligned mmap_offset that
         * munmap() would fail (unchecked) and leak the mapping.
         */
        munmap(base, size);
        break;
    }
}
|
|
|
|
|
|
|
|
/*
 * Size, in bytes, of the dirty-log bitmap needed to cover all announced
 * memory regions (one bit per VHOST_LOG_PAGE).
 */
static guint64 get_log_size(TestServer *s)
{
    guint64 size = 0;
    int idx;

    for (idx = 0; idx < s->memory.nregions; ++idx) {
        const VhostUserMemoryRegion *region = &s->memory.regions[idx];
        guint64 end = range_get_last(region->guest_phys_addr,
                                     region->memory_size);

        size = MAX(size, end / (8 * VHOST_LOG_PAGE) + 1);
    }

    return size;
}
|
|
|
|
|
2015-10-09 17:17:40 +02:00
|
|
|
/*
 * Custom GSource used by the migration test: watches a source and a
 * destination TestServer so the check callback can assert their rings
 * are never active simultaneously.
 */
typedef struct TestMigrateSource {
    GSource source;     /* must be first: embedded GSource header */
    TestServer *src;    /* migration source back-end */
    TestServer *dest;   /* migration destination back-end */
} TestMigrateSource;
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
test_migrate_source_check(GSource *source)
|
|
|
|
{
|
|
|
|
TestMigrateSource *t = (TestMigrateSource *)source;
|
2015-11-26 14:14:02 +01:00
|
|
|
gboolean overlap = t->src->rings && t->dest->rings;
|
2015-10-09 17:17:40 +02:00
|
|
|
|
|
|
|
g_assert(!overlap);
|
|
|
|
|
|
|
|
return FALSE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Only the check callback is needed; prepare/dispatch stay NULL. */
GSourceFuncs test_migrate_source_funcs = {
    .check = test_migrate_source_check,
};
|
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
/*
 * g_test_queue_destroy() hook: invalidate the cached qos command line
 * (it references this server's socket path) and free the server.
 */
static void vhost_user_test_cleanup(void *s)
{
    TestServer *server = s;

    qos_invalidate_command_line();
    test_server_free(server);
}
|
|
|
|
|
|
|
|
/*
 * qos-graph "before" hook: create a listening vhost-user stub slave,
 * append the matching memory and device options to QEMU's command line
 * and queue automatic cleanup.  Returns the server as the test arg.
 */
static void *vhost_user_test_setup(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);
    test_server_listen(server);

    /* TEST_MEMFD_AUTO: let the helper pick the memory backend */
    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_AUTO);
    server->vu_ops->append_opts(server, cmd_line, "");

    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}
|
2017-12-21 22:21:22 +01:00
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
/*
 * Like vhost_user_test_setup() but forces memfd-backed guest memory
 * (TEST_MEMFD_YES) instead of letting the helper choose.
 */
static void *vhost_user_test_setup_memfd(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);
    test_server_listen(server);

    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_YES);
    server->vu_ops->append_opts(server, cmd_line, "");

    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}
|
2017-12-21 22:21:22 +01:00
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
/*
 * Basic test: wait for the guest-memory fds from the master, then
 * verify guest memory contents through the stub slave.
 */
static void test_read_guest_mem(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *server = arg;

    if (!wait_for_fds(server)) {
        /* fds never arrived; nothing to verify here */
        return;
    }

    read_guest_mem_server(global_qtest, server);
}
|
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
static void test_migrate(void *obj, void *arg, QGuestAllocator *alloc)
|
2015-10-09 17:17:39 +02:00
|
|
|
{
|
2018-11-15 10:08:28 +01:00
|
|
|
TestServer *s = arg;
|
2020-01-16 15:07:36 +01:00
|
|
|
TestServer *dest;
|
|
|
|
GString *dest_cmdline;
|
|
|
|
char *uri;
|
2018-11-15 10:08:28 +01:00
|
|
|
QTestState *to;
|
2015-10-09 17:17:40 +02:00
|
|
|
GSource *source;
|
2015-10-09 17:17:39 +02:00
|
|
|
QDict *rsp;
|
|
|
|
guint8 *log;
|
|
|
|
guint64 size;
|
|
|
|
|
2018-12-03 16:32:23 +01:00
|
|
|
if (!wait_for_fds(s)) {
|
2018-11-15 10:08:28 +01:00
|
|
|
return;
|
2018-12-03 16:32:23 +01:00
|
|
|
}
|
|
|
|
|
2020-09-11 10:39:45 +02:00
|
|
|
dest = test_server_new("dest", s->vu_ops);
|
2020-01-16 15:07:36 +01:00
|
|
|
dest_cmdline = g_string_new(qos_get_current_command_line());
|
|
|
|
uri = g_strdup_printf("%s%s", "unix:", dest->mig_path);
|
|
|
|
|
2015-10-09 17:17:39 +02:00
|
|
|
size = get_log_size(s);
|
2018-11-15 10:50:15 +01:00
|
|
|
g_assert_cmpint(size, ==, (256 * 1024 * 1024) / (VHOST_LOG_PAGE * 8));
|
2015-10-09 17:17:39 +02:00
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
test_server_listen(dest);
|
|
|
|
g_string_append_printf(dest_cmdline, " -incoming %s", uri);
|
|
|
|
append_mem_opts(dest, dest_cmdline, 256, TEST_MEMFD_AUTO);
|
2020-09-11 10:39:45 +02:00
|
|
|
dest->vu_ops->append_opts(dest, dest_cmdline, "");
|
2018-11-15 10:08:28 +01:00
|
|
|
to = qtest_init(dest_cmdline->str);
|
|
|
|
|
|
|
|
/* This would be where you call qos_allocate_objects(to, NULL), if you want
|
|
|
|
* to talk to the QVirtioNet object on the destination.
|
|
|
|
*/
|
2015-10-09 17:17:39 +02:00
|
|
|
|
2015-10-09 17:17:40 +02:00
|
|
|
source = g_source_new(&test_migrate_source_funcs,
|
|
|
|
sizeof(TestMigrateSource));
|
|
|
|
((TestMigrateSource *)source)->src = s;
|
|
|
|
((TestMigrateSource *)source)->dest = dest;
|
2019-02-14 18:35:54 +01:00
|
|
|
g_source_attach(source, s->context);
|
2015-10-09 17:17:40 +02:00
|
|
|
|
2015-10-09 17:17:39 +02:00
|
|
|
/* slow down migration to have time to fiddle with log */
|
|
|
|
/* TODO: qtest could learn to break on some places */
|
2021-02-19 19:40:12 +01:00
|
|
|
rsp = qmp("{ 'execute': 'migrate-set-parameters',"
|
|
|
|
"'arguments': { 'max-bandwidth': 10 } }");
|
2015-10-09 17:17:39 +02:00
|
|
|
g_assert(qdict_haskey(rsp, "return"));
|
2018-04-19 17:01:43 +02:00
|
|
|
qobject_unref(rsp);
|
2015-10-09 17:17:39 +02:00
|
|
|
|
2018-08-06 08:53:33 +02:00
|
|
|
rsp = qmp("{ 'execute': 'migrate', 'arguments': { 'uri': %s } }", uri);
|
2015-10-09 17:17:39 +02:00
|
|
|
g_assert(qdict_haskey(rsp, "return"));
|
2018-04-19 17:01:43 +02:00
|
|
|
qobject_unref(rsp);
|
2015-10-09 17:17:39 +02:00
|
|
|
|
|
|
|
wait_for_log_fd(s);
|
|
|
|
|
|
|
|
log = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, s->log_fd, 0);
|
|
|
|
g_assert(log != MAP_FAILED);
|
|
|
|
|
|
|
|
/* modify first page */
|
|
|
|
write_guest_mem(s, 0x42);
|
|
|
|
log[0] = 1;
|
|
|
|
munmap(log, size);
|
|
|
|
|
|
|
|
/* speed things up */
|
2021-02-19 19:40:12 +01:00
|
|
|
rsp = qmp("{ 'execute': 'migrate-set-parameters',"
|
|
|
|
"'arguments': { 'max-bandwidth': 0 } }");
|
2015-10-09 17:17:39 +02:00
|
|
|
g_assert(qdict_haskey(rsp, "return"));
|
2018-04-19 17:01:43 +02:00
|
|
|
qobject_unref(rsp);
|
2015-10-09 17:17:39 +02:00
|
|
|
|
|
|
|
qmp_eventwait("STOP");
|
2018-12-03 16:32:24 +01:00
|
|
|
qtest_qmp_eventwait(to, "RESUME");
|
2015-10-09 17:17:39 +02:00
|
|
|
|
2018-12-03 16:32:24 +01:00
|
|
|
g_assert(wait_for_fds(dest));
|
|
|
|
read_guest_mem_server(to, dest);
|
2015-10-09 17:17:39 +02:00
|
|
|
|
2015-10-09 17:17:40 +02:00
|
|
|
g_source_destroy(source);
|
|
|
|
g_source_unref(source);
|
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
qtest_quit(to);
|
2015-10-09 17:17:39 +02:00
|
|
|
test_server_free(dest);
|
2015-11-27 15:41:19 +01:00
|
|
|
g_free(uri);
|
2020-01-16 15:07:36 +01:00
|
|
|
g_string_free(dest_cmdline, true);
|
2015-10-09 17:17:39 +02:00
|
|
|
}
|
|
|
|
|
2016-06-06 18:45:08 +02:00
|
|
|
/*
 * Block (up to five seconds) until exactly @count vring bits are set
 * in s->rings; assert on timeout.
 */
static void wait_for_rings_started(TestServer *s, size_t count)
{
    gint64 deadline;

    g_mutex_lock(&s->data_mutex);
    deadline = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (ctpop64(s->rings) != count &&
           g_cond_wait_until(&s->data_cond, &s->data_mutex, deadline)) {
        /* woken up: loop around and re-check the ring count */
    }
    /* passes trivially on success, reports the mismatch on timeout */
    g_assert_cmpint(ctpop64(s->rings), ==, count);
    g_mutex_unlock(&s->data_mutex);
}
|
|
|
|
|
2017-09-05 20:06:02 +02:00
|
|
|
/*
 * Connect the test chardev as a client, retrying every second
 * (",reconnect=1") until the QEMU side accepts.
 */
static inline void test_server_connect(TestServer *server)
{
    test_server_create_chr(server, ",reconnect=1");
}
|
|
|
|
|
2016-06-06 18:45:08 +02:00
|
|
|
static gboolean
|
|
|
|
reconnect_cb(gpointer user_data)
|
|
|
|
{
|
|
|
|
TestServer *s = user_data;
|
|
|
|
|
2016-10-22 11:52:55 +02:00
|
|
|
qemu_chr_fe_disconnect(&s->chr);
|
2016-06-06 18:45:08 +02:00
|
|
|
|
|
|
|
return FALSE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Helper thread: give QEMU a moment to start listening, then connect
 * the test chardev as a client.
 */
static gpointer
connect_thread(gpointer data)
{
    TestServer *s = data;

    /* wait for qemu to start before first try, to avoid extra warnings */
    g_usleep(G_USEC_PER_SEC);
    test_server_connect(s);

    return NULL;
}
|
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
/*
 * "before" hook for the reconnect test: QEMU acts as the socket server
 * (",server=on") while a helper thread connects to it as a client.
 */
static void *vhost_user_test_setup_reconnect(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("reconnect", arg);

    g_thread_new("connect", connect_thread, s);
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}
|
|
|
|
|
|
|
|
/*
 * Verify the vhost-user back-end recovers after a forced chardev
 * disconnect: fds and both rings must come back after reconnection.
 */
static void test_reconnect(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    GSource *src;

    if (!wait_for_fds(s)) {
        return;
    }

    wait_for_rings_started(s, 2);

    /* reconnect */
    s->fds_num = 0;
    s->rings = 0;
    /* schedule the disconnect on the server's own main context */
    src = g_idle_source_new();
    g_source_set_callback(src, reconnect_cb, s, NULL);
    g_source_attach(src, s->context);
    g_source_unref(src);
    g_assert(wait_for_fds(s));
    wait_for_rings_started(s, 2);
}
|
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
/*
 * "before" hook for the connect-fail test: the stub slave is told to
 * fail the connection (test_fail) so QEMU must cope gracefully.
 */
static void *vhost_user_test_setup_connect_fail(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("connect-fail", arg);

    s->test_fail = true;

    g_thread_new("connect", connect_thread, s);
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}
|
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
/*
 * "before" hook for the flags-mismatch test: the stub slave disconnects
 * during feature negotiation (TEST_FLAGS_DISCONNECT) to provoke a
 * mid-handshake failure.
 */
static void *vhost_user_test_setup_flags_mismatch(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("flags-mismatch", arg);

    s->test_flags = TEST_FLAGS_DISCONNECT;

    g_thread_new("connect", connect_thread, s);
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}
|
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
/*
 * Minimal liveness check used by the failure-injection tests: despite
 * the injected trouble, QEMU must still deliver the fds and start both
 * rings eventually.
 */
static void test_vhost_user_started(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;

    if (!wait_for_fds(s)) {
        return;
    }
    wait_for_rings_started(s, 2);
}
|
2016-06-06 18:45:08 +02:00
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
/*
 * "before" hook for the multiqueue test: regular setup plus two queue
 * pairs and MSI-X vectors sized as queues * 2 + 2.
 */
static void *vhost_user_test_setup_multiqueue(GString *cmd_line, void *arg)
{
    TestServer *s = vhost_user_test_setup(cmd_line, arg);

    s->queues = 2;
    g_string_append_printf(cmd_line,
                           " -set netdev.hs0.queues=%d"
                           " -global virtio-net-pci.vectors=%d",
                           s->queues, s->queues * 2 + 2);

    return s;
}
|
2016-09-09 13:34:44 +02:00
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
/* Multiqueue test body: both rings of every queue pair must start. */
static void test_multiqueue(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;

    wait_for_rings_started(s, s->queues * 2);
}
|
|
|
|
|
2022-08-02 11:50:09 +02:00
|
|
|
|
|
|
|
/*
 * Feature bits advertised by the stub net device: dirty logging and
 * protocol-feature negotiation, plus MQ when more than one queue pair
 * is configured.
 */
static uint64_t vu_net_get_features(TestServer *s)
{
    uint64_t feat = (0x1ULL << VHOST_F_LOG_ALL) |
                    (0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES);

    return s->queues > 1 ? feat | (0x1ULL << VIRTIO_NET_F_MQ) : feat;
}
|
|
|
|
|
2020-09-11 10:39:45 +02:00
|
|
|
/*
 * SET_FEATURES handler for the stub net device: the master must have
 * negotiated protocol features; optionally inject a disconnect when
 * the flags-mismatch test requested it.
 */
static void vu_net_set_features(TestServer *s, CharBackend *chr,
                                VhostUserMsg *msg)
{
    g_assert(msg->payload.u64 & (0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES));
    if (s->test_flags == TEST_FLAGS_DISCONNECT) {
        qemu_chr_fe_disconnect(chr);
        /* record that the injected disconnect actually happened */
        s->test_flags = TEST_FLAGS_BAD;
    }
}
|
|
|
|
|
|
|
|
static void vu_net_get_protocol_features(TestServer *s, CharBackend *chr,
|
|
|
|
VhostUserMsg *msg)
|
|
|
|
{
|
|
|
|
/* send back features to qemu */
|
|
|
|
msg->flags |= VHOST_USER_REPLY_MASK;
|
|
|
|
msg->size = sizeof(m.payload.u64);
|
|
|
|
msg->payload.u64 = 1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD;
|
|
|
|
msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_CROSS_ENDIAN;
|
|
|
|
if (s->queues > 1) {
|
|
|
|
msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_MQ;
|
|
|
|
}
|
|
|
|
qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Each VHOST-USER device should have its ops structure defined. */
static struct vhost_user_ops g_vu_net_ops = {
    .type = VHOST_USER_NET,

    /* builds the "-chardev ... -netdev vhost-user ..." options */
    .append_opts = append_vhost_net_opts,

    .get_features = vu_net_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_net_get_protocol_features,
};
|
|
|
|
|
2018-11-15 10:08:28 +01:00
|
|
|
/*
 * Register all virtio-net vhost-user tests with the qos graph.  Each
 * test runs in a subprocess and gets its own stub slave via "before".
 */
static void register_vhost_user_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_net_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("vhost-user/read-guest-mem/memfile",
                 "virtio-net",
                 test_read_guest_mem, &opts);

    /* memfd-backed variant only where sealing is available */
    if (qemu_memfd_check(MFD_ALLOW_SEALING)) {
        opts.before = vhost_user_test_setup_memfd;
        qos_add_test("vhost-user/read-guest-mem/memfd",
                     "virtio-net",
                     test_read_guest_mem, &opts);
    }

    /* NOTE(review): if the memfd branch ran, the migrate test inherits
     * the memfd setup hook — looks intentional, confirm if changing */
    qos_add_test("vhost-user/migrate",
                 "virtio-net",
                 test_migrate, &opts);

    opts.before = vhost_user_test_setup_reconnect;
    qos_add_test("vhost-user/reconnect", "virtio-net",
                 test_reconnect, &opts);

    opts.before = vhost_user_test_setup_connect_fail;
    qos_add_test("vhost-user/connect-fail", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_flags_mismatch;
    qos_add_test("vhost-user/flags-mismatch", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_multiqueue;
    opts.edge.extra_device_opts = "mq=on";
    qos_add_test("vhost-user/multiqueue",
                 "virtio-net",
                 test_multiqueue, &opts);
}
libqos_init(register_vhost_user_test);
|
2022-08-02 11:50:10 +02:00
|
|
|
|
|
|
|
/*
 * Feature bits for the stub GPIO device: virtio 1.0, IRQ support and
 * vhost-user protocol-feature negotiation.
 */
static uint64_t vu_gpio_get_features(TestServer *s)
{
    uint64_t features = 0x1ULL << VIRTIO_F_VERSION_1;

    features |= 0x1ULL << VIRTIO_GPIO_F_IRQ;
    features |= 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    return features;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This stub can't handle all the message types but we should reply
|
|
|
|
* that we support VHOST_USER_PROTOCOL_F_CONFIG as gpio would use it
|
|
|
|
* talking to a read vhost-user daemon.
|
|
|
|
*/
|
|
|
|
static void vu_gpio_get_protocol_features(TestServer *s, CharBackend *chr,
|
|
|
|
VhostUserMsg *msg)
|
|
|
|
{
|
|
|
|
/* send back features to qemu */
|
|
|
|
msg->flags |= VHOST_USER_REPLY_MASK;
|
|
|
|
msg->size = sizeof(m.payload.u64);
|
|
|
|
msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
|
|
|
|
|
|
|
|
qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Ops for the stub vhost-user-gpio slave. */
static struct vhost_user_ops g_vu_gpio_ops = {
    .type = VHOST_USER_GPIO,

    .append_opts = append_vhost_gpio_opts,

    .get_features = vu_gpio_get_features,
    /* set_features is shared with the net stub: it only checks the
     * protocol-features bit and the disconnect injection flag */
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_gpio_get_protocol_features,
};
|
|
|
|
|
|
|
|
/* Register the vhost-user-gpio smoke test with the qos graph. */
static void register_vhost_gpio_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_gpio_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("read-guest-mem/memfile",
                 "vhost-user-gpio", test_read_guest_mem, &opts);
}
libqos_init(register_vhost_gpio_test);
|
2023-06-28 12:05:24 +02:00
|
|
|
|
|
|
|
/*
 * Feature bits for the stub SCMI device: virtio 1.0, P2A channels and
 * vhost-user protocol-feature negotiation.
 */
static uint64_t vu_scmi_get_features(TestServer *s)
{
    uint64_t features = 0x1ULL << VIRTIO_F_VERSION_1;

    features |= 0x1ULL << VIRTIO_SCMI_F_P2A_CHANNELS;
    features |= 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    return features;
}
|
|
|
|
|
|
|
|
static void vu_scmi_get_protocol_features(TestServer *s, CharBackend *chr,
|
|
|
|
VhostUserMsg *msg)
|
|
|
|
{
|
|
|
|
msg->flags |= VHOST_USER_REPLY_MASK;
|
|
|
|
msg->size = sizeof(m.payload.u64);
|
|
|
|
msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_MQ;
|
|
|
|
|
|
|
|
qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Ops for the stub vhost-user-scmi slave. */
static struct vhost_user_ops g_vu_scmi_ops = {
    .type = VHOST_USER_SCMI,

    /* NOTE(review): reuses the gpio option appender — presumably both
     * devices take plain generic vhost-user-device options; confirm */
    .append_opts = append_vhost_gpio_opts,

    .get_features = vu_scmi_get_features,
    /* shared with the net stub: protocol-features check only */
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_scmi_get_protocol_features,
};
|
|
|
|
|
|
|
|
/* Register the vhost-user-scmi smoke test with the qos graph. */
static void register_vhost_scmi_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_scmi_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("scmi/read-guest-mem/memfile",
                 "vhost-user-scmi", test_read_guest_mem, &opts);
}
libqos_init(register_vhost_scmi_test);
|