/*
 * vhost-user
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-net.h"
#include "chardev/char-fe.h"
#include "sysemu/kvm.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "trace.h"

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>

#include "standard-headers/linux/vhost_types.h"

#ifdef CONFIG_LINUX
#include <linux/userfaultfd.h>
#endif

#define VHOST_MEMORY_MAX_NREGIONS    8
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_USER_SLAVE_MAX_FDS     8

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)

typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE = 28,
    VHOST_USER_POSTCOPY_LISTEN = 29,
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_RESET_DEVICE = 34,
    VHOST_USER_MAX
} VhostUserRequest;

typedef enum VhostUserSlaveRequest {
    VHOST_USER_SLAVE_NONE = 0,
    VHOST_USER_SLAVE_IOTLB_MSG = 1,
    VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_SLAVE_MAX
} VhostUserSlaveRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserConfig {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;

#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN 512
#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN 64

typedef struct VhostUserCryptoSession {
    /* session id for success, -1 on errors */
    int64_t session_id;
    CryptoDevBackendSymSessionInfo session_setup_data;
    uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
    uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
} VhostUserCryptoSession;

static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size) \
                                   + sizeof(c.flags))

typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;

typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;

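/*
 * Every vhost-user message on the wire is a fixed-size header followed by
 * an optional, request-specific payload of hdr.size bytes (one member of
 * the union below).
 */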
typedef struct {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
} QEMU_PACKED VhostUserHeader;

typedef union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
    uint64_t u64;
    struct vhost_vring_state state;
    struct vhost_vring_addr addr;
    VhostUserMemory memory;
    VhostUserLog log;
    struct vhost_iotlb_msg iotlb;
    VhostUserConfig config;
    VhostUserCryptoSession session;
    VhostUserVringArea area;
    VhostUserInflight inflight;
} VhostUserPayload;

typedef struct VhostUserMsg {
    VhostUserHeader hdr;
    VhostUserPayload payload;
} QEMU_PACKED VhostUserMsg;

static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)

struct vhost_user {
    struct vhost_dev *dev;
    /* Shared between vhost devs of the same virtio device */
    VhostUserState *user;
    int slave_fd;
    NotifierWithReturn postcopy_notifier;
    struct PostCopyFD postcopy_fd;
    uint64_t postcopy_client_bases[VHOST_MEMORY_MAX_NREGIONS];
    /* Length of the region_rb and region_rb_offset arrays */
    size_t region_rb_len;
    /* RAMBlock associated with a given region */
    RAMBlock **region_rb;
    /* The offset from the start of the RAMBlock to the start of the
     * vhost region.
     */
    ram_addr_t *region_rb_offset;

    /* True once we've entered postcopy_listen */
    bool postcopy_listen;
};

static bool ioeventfd_enabled(void)
{
    return !kvm_enabled() || kvm_eventfds_enabled();
}

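/*
 * Read the fixed-size header of a reply from the backend and check that it
 * carries the expected version and reply flags.
 */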
static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", r, size, msg->hdr.request);
        return -1;
    }

    /* validate received flags */
    if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                     " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
                     VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        return -1;
    }

    return 0;
}

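/*
 * Read a complete reply from the backend: the header first, then
 * msg->hdr.size bytes of payload, validated against the maximum payload
 * size.
 */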
static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size;

    if (vhost_user_read_header(dev, msg) < 0) {
        return -1;
    }

    /* validate message size is sane */
    if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg->hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        return -1;
    }

    if (msg->hdr.size) {
        p += VHOST_USER_HDR_SIZE;
        size = msg->hdr.size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->hdr.size);
            return -1;
        }
    }

    return 0;
}

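/*
 * Wait for the REPLY_ACK answer to a message sent with
 * VHOST_USER_NEED_REPLY_MASK set; a non-zero u64 payload from the backend
 * is treated as failure.
 */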
static int process_message_reply(struct vhost_dev *dev,
                                 const VhostUserMsg *msg)
{
    VhostUserMsg msg_reply;

    if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return 0;
    }

    if (vhost_user_read(dev, &msg_reply) < 0) {
        return -1;
    }

    if (msg_reply.hdr.request != msg->hdr.request) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     msg->hdr.request, msg_reply.hdr.request);
        return -1;
    }

    return msg_reply.payload.u64 ? -1 : 0;
}

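/*
 * Requests that apply to the whole device rather than to an individual
 * vring; they are only sent for the first queue pair (vq_index == 0).
 */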
static bool vhost_user_one_time_request(VhostUserRequest request)
{
    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_RESET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_QUEUE_NUM:
    case VHOST_USER_NET_SET_MTU:
        return true;
    default:
        return false;
    }
}

/* most non-init callers ignore the error */
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
                            int *fds, int fd_num)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;

    /*
     * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
     * we just need to send it once, the first time. Later requests of
     * this kind are simply ignored.
     */
    if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
        msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        return 0;
    }

    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        return -1;
    }

    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
    if (ret != size) {
        error_report("Failed to write msg."
                     " Wrote %d instead of %d.", ret, size);
        return -1;
    }

    return 0;
}

int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GPU_SET_SOCKET,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, &fd, 1);
}

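/*
 * Pass the dirty-log region to the backend. When
 * VHOST_USER_PROTOCOL_F_LOG_SHMFD was negotiated, the log is shared via the
 * mmap fd and the backend acknowledges the message before the log is used.
 */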
static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    size_t fd_num = 0;
    bool shmfd = virtio_has_feature(dev->protocol_features,
                                    VHOST_USER_PROTOCOL_F_LOG_SHMFD);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_LOG_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.log.mmap_size = log->size * sizeof(*(log->log)),
        .payload.log.mmap_offset = 0,
        .hdr.size = sizeof(msg.payload.log),
    };

    if (shmfd && log->fd != -1) {
        fds[fd_num++] = log->fd;
    }

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    if (shmfd) {
        msg.hdr.size = 0;
        if (vhost_user_read(dev, &msg) < 0) {
            return -1;
        }

        if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
            error_report("Received unexpected msg type. "
                         "Expected %d received %d",
                         VHOST_USER_SET_LOG_BASE, msg.hdr.request);
            return -1;
        }
    }

    return 0;
}

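/*
 * Postcopy variant of SET_MEM_TABLE: the backend replies with the addresses
 * at which it mapped each region, which are recorded in
 * postcopy_client_bases so that shared page faults can be resolved.
 */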
static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
                                             struct vhost_memory *mem)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    int i, fd;
    size_t fd_num = 0;
    VhostUserMsg msg_reply;
    int region_i, msg_i;

    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_MEM_TABLE,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (u->region_rb_len < dev->mem->nregions) {
        u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
        u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
                                      dev->mem->nregions);
        memset(&(u->region_rb[u->region_rb_len]), '\0',
               sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
        memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
               sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
        u->region_rb_len = dev->mem->nregions;
    }

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        ram_addr_t offset;
        MemoryRegion *mr;

        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
                                     &offset);
        fd = memory_region_get_fd(mr);
        if (fd > 0) {
            trace_vhost_user_set_mem_table_withfd(fd_num, mr->name,
                                                  reg->memory_size,
                                                  reg->guest_phys_addr,
                                                  reg->userspace_addr, offset);
            u->region_rb_offset[i] = offset;
            u->region_rb[i] = mr->ram_block;
            msg.payload.memory.regions[fd_num].userspace_addr =
                reg->userspace_addr;
            msg.payload.memory.regions[fd_num].memory_size = reg->memory_size;
            msg.payload.memory.regions[fd_num].guest_phys_addr =
                reg->guest_phys_addr;
            msg.payload.memory.regions[fd_num].mmap_offset = offset;
            assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
            fds[fd_num++] = fd;
        } else {
            u->region_rb_offset[i] = 0;
            u->region_rb[i] = NULL;
        }
    }

    msg.payload.memory.nregions = fd_num;

    if (!fd_num) {
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -1;
    }

    msg.hdr.size = sizeof(msg.payload.memory.nregions);
    msg.hdr.size += sizeof(msg.payload.memory.padding);
    msg.hdr.size += fd_num * sizeof(VhostUserMemoryRegion);

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg_reply) < 0) {
        return -1;
    }

    if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
        error_report("%s: Received unexpected msg type. "
                     "Expected %d received %d", __func__,
                     VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
        return -1;
    }
    /* We're using the same structure, just reusing one of the
     * fields, so it should be the same size.
     */
    if (msg_reply.hdr.size != msg.hdr.size) {
        error_report("%s: Unexpected size for postcopy reply "
                     "%d vs %d", __func__, msg_reply.hdr.size, msg.hdr.size);
        return -1;
    }

    memset(u->postcopy_client_bases, 0,
           sizeof(uint64_t) * VHOST_MEMORY_MAX_NREGIONS);

    /* They're in the same order as the regions that were sent,
     * but some of the regions were skipped (above) if they
     * didn't have fds.
     */
    for (msg_i = 0, region_i = 0;
         region_i < dev->mem->nregions;
         region_i++) {
        if (msg_i < fd_num &&
            msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
            dev->mem->regions[region_i].guest_phys_addr) {
            u->postcopy_client_bases[region_i] =
                msg_reply.payload.memory.regions[msg_i].userspace_addr;
            trace_vhost_user_set_mem_table_postcopy(
                msg_reply.payload.memory.regions[msg_i].userspace_addr,
                msg.payload.memory.regions[msg_i].userspace_addr,
                msg_i, region_i);
            msg_i++;
        }
    }
    if (msg_i != fd_num) {
        error_report("%s: postcopy reply not fully consumed "
                     "%d vs %zd",
                     __func__, msg_i, fd_num);
        return -1;
    }
    /* Now we've registered this with the postcopy code, we ack to the
     * client, because now we're in a position to deal with any faults
     * it generates.
     */
    /* TODO: Use this for failure cases as well with a bad value */
    msg.hdr.size = sizeof(msg.payload.u64);
    msg.payload.u64 = 0; /* OK */
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    int i, fd;
    size_t fd_num = 0;
    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    if (do_postcopy) {
        /* Postcopy has enough differences that it's best done in its own
         * version
         */
        return vhost_user_set_mem_table_postcopy(dev, mem);
    }

    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_MEM_TABLE,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        ram_addr_t offset;
        MemoryRegion *mr;

        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
                                     &offset);
        fd = memory_region_get_fd(mr);
        if (fd > 0) {
            if (fd_num == VHOST_MEMORY_MAX_NREGIONS) {
                error_report("Failed preparing vhost-user memory table msg");
                return -1;
            }
            msg.payload.memory.regions[fd_num].userspace_addr =
                reg->userspace_addr;
            msg.payload.memory.regions[fd_num].memory_size = reg->memory_size;
            msg.payload.memory.regions[fd_num].guest_phys_addr =
                reg->guest_phys_addr;
            msg.payload.memory.regions[fd_num].mmap_offset = offset;
            fds[fd_num++] = fd;
        }
    }

    msg.payload.memory.nregions = fd_num;

    if (!fd_num) {
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -1;
    }

    msg.hdr.size = sizeof(msg.payload.memory.nregions);
    msg.hdr.size += sizeof(msg.payload.memory.padding);
    msg.hdr.size += fd_num * sizeof(VhostUserMemoryRegion);

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}

static int vhost_user_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ADDR,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.addr = *addr,
        .hdr.size = sizeof(msg.payload.addr),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_endian(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    bool cross_endian = virtio_has_feature(dev->protocol_features,
                                           VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    if (!cross_endian) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_set_vring(struct vhost_dev *dev,
                           unsigned long int request,
                           struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}

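/*
 * Host notifier MMIO mappings are removed before GET_VRING_BASE and
 * restored on SET_VRING_BASE, so the backend-provided notifier page is only
 * installed while the ring is running.
 */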
static void vhost_user_host_notifier_restore(struct vhost_dev *dev,
                                             int queue_idx)
{
    struct vhost_user *u = dev->opaque;
    VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
    VirtIODevice *vdev = dev->vdev;

    if (n->addr && !n->set) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true);
        n->set = true;
    }
}

static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
                                            int queue_idx)
{
    struct vhost_user *u = dev->opaque;
    VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
    VirtIODevice *vdev = dev->vdev;

    if (n->addr && n->set) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
        n->set = false;
    }
}

static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    vhost_user_host_notifier_restore(dev, ring->index);

    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}

static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
{
    int i;

    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        return -1;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = enable,
        };

        vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
    }

    return 0;
}

static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_VRING_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    vhost_user_host_notifier_remove(dev, ring->index);

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *ring = msg.payload.state;

    return 0;
}

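/*
 * Send a vring file descriptor (kick or call eventfd) to the backend. When
 * no usable fd is available, the NOFD flag is set in the payload instead.
 */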
static int vhost_set_vring_file(struct vhost_dev *dev,
                                VhostUserRequest request,
                                struct vhost_vring_file *file)
{
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (ioeventfd_enabled() && file->fd > 0) {
        fds[fd_num++] = file->fd;
    } else {
        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
    }

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}

static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}

static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = u64,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
}

static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != request) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     request, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.u64)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *u64 = msg.payload.u64;

    return 0;
}

static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    return vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features);
}

static int vhost_user_set_owner(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_OWNER,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

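/*
 * Reset the backend: VHOST_USER_RESET_DEVICE when that protocol feature was
 * negotiated, otherwise the legacy VHOST_USER_RESET_OWNER.
 */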
static int vhost_user_reset_device(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    msg.hdr.request = virtio_has_feature(dev->protocol_features,
                                         VHOST_USER_PROTOCOL_F_RESET_DEVICE)
        ? VHOST_USER_RESET_DEVICE
        : VHOST_USER_RESET_OWNER;

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
{
    int ret = -1;

    if (!dev->config_ops) {
        return -1;
    }

    if (dev->config_ops->vhost_dev_config_notifier) {
        ret = dev->config_ops->vhost_dev_config_notifier(dev);
    }

    return ret;
}

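/*
 * Map the page supplied by the backend for a vring host notifier and
 * install it as a RAM-device MemoryRegion for the given queue, replacing
 * any previous mapping.
 */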
static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
                                                       VhostUserVringArea *area,
                                                       int fd)
{
    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
    size_t page_size = qemu_real_host_page_size;
    struct vhost_user *u = dev->opaque;
    VhostUserState *user = u->user;
    VirtIODevice *vdev = dev->vdev;
    VhostUserHostNotifier *n;
    void *addr;
    char *name;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
        return -1;
    }

    n = &user->notifier[queue_idx];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }

    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
        return 0;
    }

    /* Sanity check. */
    if (area->size != page_size) {
        return -1;
    }

    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, area->offset);
    if (addr == MAP_FAILED) {
        return -1;
    }

    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
                           user, queue_idx);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        munmap(addr, page_size);
        return -1;
    }

    n->addr = addr;
    n->set = true;

    return 0;
}

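/*
 * Handler for backend-initiated requests arriving on the slave channel:
 * reads the header and payload from u->slave_fd together with any passed
 * file descriptors, dispatches the request, and sends a reply when the
 * NEED_REPLY flag is set.
 */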
static void slave_read(void *opaque)
{
    struct vhost_dev *dev = opaque;
    struct vhost_user *u = dev->opaque;
    VhostUserHeader hdr = { 0, };
    VhostUserPayload payload = { 0, };
    int size, ret = 0;
    struct iovec iov;
    struct msghdr msgh;
    int fd[VHOST_USER_SLAVE_MAX_FDS];
    char control[CMSG_SPACE(sizeof(fd))];
    struct cmsghdr *cmsg;
    int i, fdsize = 0;

    memset(&msgh, 0, sizeof(msgh));
    msgh.msg_iov = &iov;
    msgh.msg_iovlen = 1;
    msgh.msg_control = control;
    msgh.msg_controllen = sizeof(control);

    memset(fd, -1, sizeof(fd));

    /* Read header */
    iov.iov_base = &hdr;
    iov.iov_len = VHOST_USER_HDR_SIZE;

    do {
        size = recvmsg(u->slave_fd, &msgh, 0);
    } while (size < 0 && (errno == EINTR || errno == EAGAIN));

    if (size != VHOST_USER_HDR_SIZE) {
        error_report("Failed to read from slave.");
        goto err;
    }

    if (msgh.msg_flags & MSG_CTRUNC) {
        error_report("Truncated message.");
        goto err;
    }

    for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
        if (cmsg->cmsg_level == SOL_SOCKET &&
            cmsg->cmsg_type == SCM_RIGHTS) {
            fdsize = cmsg->cmsg_len - CMSG_LEN(0);
            memcpy(fd, CMSG_DATA(cmsg), fdsize);
            break;
        }
    }

    if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        goto err;
    }

    /* Read payload */
    do {
        size = read(u->slave_fd, &payload, hdr.size);
    } while (size < 0 && (errno == EINTR || errno == EAGAIN));

    if (size != hdr.size) {
        error_report("Failed to read payload from slave.");
        goto err;
    }

    switch (hdr.request) {
    case VHOST_USER_SLAVE_IOTLB_MSG:
        ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
        break;
    case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
        ret = vhost_user_slave_handle_config_change(dev);
        break;
    case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
        ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
                                                          fd[0]);
        break;
    default:
        error_report("Received unexpected msg type.");
        ret = -EINVAL;
    }

    /* Close the remaining file descriptors. */
    for (i = 0; i < fdsize; i++) {
        if (fd[i] != -1) {
            close(fd[i]);
        }
    }

    /*
     * REPLY_ACK feature handling. Other reply types have to be managed
     * directly in their request handlers.
     */
    if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        struct iovec iovec[2];

        hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        hdr.flags |= VHOST_USER_REPLY_MASK;

        payload.u64 = !!ret;
        hdr.size = sizeof(payload.u64);

        iovec[0].iov_base = &hdr;
        iovec[0].iov_len = VHOST_USER_HDR_SIZE;
        iovec[1].iov_base = &payload;
        iovec[1].iov_len = hdr.size;

        do {
            size = writev(u->slave_fd, iovec, ARRAY_SIZE(iovec));
        } while (size < 0 && (errno == EINTR || errno == EAGAIN));

        if (size != VHOST_USER_HDR_SIZE + hdr.size) {
            error_report("Failed to send msg reply to slave.");
            goto err;
        }
    }

    return;

err:
    qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
    close(u->slave_fd);
    u->slave_fd = -1;
    for (i = 0; i < fdsize; i++) {
        if (fd[i] != -1) {
            close(fd[i]);
        }
    }
    return;
}

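/*
 * Create the socketpair used for backend-initiated requests and hand one
 * end to the backend with VHOST_USER_SET_SLAVE_REQ_FD; the other end is
 * watched by slave_read().
 */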
static int vhost_setup_slave_channel(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
        .hdr.flags = VHOST_USER_VERSION,
    };
    struct vhost_user *u = dev->opaque;
    int sv[2], ret = 0;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
        return 0;
    }

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_report("socketpair() failed");
        return -1;
    }

    u->slave_fd = sv[0];
    qemu_set_fd_handler(u->slave_fd, slave_read, NULL, dev);

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, &sv[1], 1);
    if (ret) {
        goto out;
    }

    if (reply_supported) {
        ret = process_message_reply(dev, &msg);
    }

out:
    close(sv[1]);
    if (ret) {
        qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
        close(u->slave_fd);
        u->slave_fd = -1;
    }

    return ret;
}

#ifdef CONFIG_LINUX
/*
 * Called back from the postcopy fault thread when a fault is received on our
 * ufd.
 * TODO: This is Linux specific
 */
static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
                                             void *ufd)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    struct uffd_msg *msg = ufd;
    uint64_t faultaddr = msg->arg.pagefault.address;
    RAMBlock *rb = NULL;
    uint64_t rb_offset;
    int i;

    trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
                                            dev->mem->nregions);
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        trace_vhost_user_postcopy_fault_handler_loop(i,
                u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
        if (faultaddr >= u->postcopy_client_bases[i]) {
            /* Offset of the fault address in the vhost region */
            uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
            if (region_offset < dev->mem->regions[i].memory_size) {
                rb_offset = region_offset + u->region_rb_offset[i];
                trace_vhost_user_postcopy_fault_handler_found(i,
                        region_offset, rb_offset);
                rb = u->region_rb[i];
                return postcopy_request_shared_page(pcfd, rb, faultaddr,
                                                    rb_offset);
            }
        }
    }
    error_report("%s: Failed to find region for fault %" PRIx64,
                 __func__, faultaddr);
    return -1;
}

static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
                                     uint64_t offset)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    int i;

    trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);

    if (!u) {
        return 0;
    }
    /* Translate the offset into an address in the client's address space */
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        if (u->region_rb[i] == rb &&
            offset >= u->region_rb_offset[i] &&
            offset < (u->region_rb_offset[i] +
                      dev->mem->regions[i].memory_size)) {
            uint64_t client_addr = (offset - u->region_rb_offset[i]) +
                                   u->postcopy_client_bases[i];
            trace_vhost_user_postcopy_waker_found(client_addr);
            return postcopy_wake_shared(pcfd, client_addr, rb);
        }
    }

    trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
    return 0;
}
#endif

/*
 * Called at the start of an inbound postcopy on reception of the
 * 'advise' command.
 */
static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
{
#ifdef CONFIG_LINUX
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ufd;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_advise to vhost");
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_setg(errp, "Failed to get postcopy_advise reply from vhost");
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
        error_setg(errp, "Unexpected msg type. Expected %d received %d",
                   VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size) {
        error_setg(errp, "Received bad msg size.");
        return -1;
    }
    ufd = qemu_chr_fe_get_msgfd(chr);
    if (ufd < 0) {
        error_setg(errp, "%s: Failed to get ufd", __func__);
        return -1;
    }
    qemu_set_nonblock(ufd);

    /* register ufd with userfault thread */
    u->postcopy_fd.fd = ufd;
    u->postcopy_fd.data = dev;
    u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
    u->postcopy_fd.waker = vhost_user_postcopy_waker;
    u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
    postcopy_register_shared_ufd(&u->postcopy_fd);
    return 0;
#else
    error_setg(errp, "Postcopy not supported on non-Linux systems");
    return -1;
#endif
}

/*
 * Called at the switch to postcopy on reception of the 'listen' command.
 */
static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
{
    struct vhost_user *u = dev->opaque;
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };
    u->postcopy_listen = true;
    trace_vhost_user_postcopy_listen();
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_listen to vhost");
        return -1;
    }

    ret = process_message_reply(dev, &msg);
    if (ret) {
        error_setg(errp, "Failed to receive reply to postcopy_listen");
        return ret;
    }

    return 0;
}

/*
 * Called at the end of postcopy
 */
static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_END,
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };
    int ret;
    struct vhost_user *u = dev->opaque;

    trace_vhost_user_postcopy_end_entry();
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_end to vhost");
        return -1;
    }

    ret = process_message_reply(dev, &msg);
    if (ret) {
        error_setg(errp, "Failed to receive reply to postcopy_end");
        return ret;
    }
    postcopy_unregister_shared_ufd(&u->postcopy_fd);
    close(u->postcopy_fd.fd);
    u->postcopy_fd.handler = NULL;

    trace_vhost_user_postcopy_end_exit();

    return 0;
}

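/*
 * Dispatch postcopy notifications to the handlers above: check for
 * VHOST_USER_PROTOCOL_F_PAGEFAULT support on probe, and forward the
 * advise/listen/end phases to the backend.
 */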
static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
                                        void *opaque)
{
    struct PostcopyNotifyData *pnd = opaque;
    struct vhost_user *u = container_of(notifier, struct vhost_user,
                                        postcopy_notifier);
    struct vhost_dev *dev = u->dev;

    switch (pnd->reason) {
    case POSTCOPY_NOTIFY_PROBE:
        if (!virtio_has_feature(dev->protocol_features,
                                VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
            /* TODO: Get the device name into this error somehow */
            error_setg(pnd->errp,
                       "vhost-user backend not capable of postcopy");
            return -ENOENT;
        }
        break;

    case POSTCOPY_NOTIFY_INBOUND_ADVISE:
        return vhost_user_postcopy_advise(dev, pnd->errp);

    case POSTCOPY_NOTIFY_INBOUND_LISTEN:
        return vhost_user_postcopy_listen(dev, pnd->errp);

    case POSTCOPY_NOTIFY_INBOUND_END:
        return vhost_user_postcopy_end(dev, pnd->errp);

    default:
        /* We ignore notifications we don't know */
        break;
    }

    return 0;
}

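/*
 * Initialise the vhost-user backend: negotiate features and protocol
 * features, set up the slave channel and register for postcopy
 * notifications.
 */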
static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque)
{
    uint64_t features, protocol_features;
    struct vhost_user *u;
    int err;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = g_new0(struct vhost_user, 1);
    u->user = opaque;
    u->slave_fd = -1;
    u->dev = dev;
    dev->opaque = u;

    err = vhost_user_get_features(dev, &features);
    if (err < 0) {
        return err;
    }

    if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

        err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
                                 &protocol_features);
        if (err < 0) {
            return err;
        }

        dev->protocol_features =
            protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;

        if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
            /* Don't acknowledge CONFIG feature if device doesn't support it */
            dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
        } else if (!(protocol_features &
                    (1ULL << VHOST_USER_PROTOCOL_F_CONFIG))) {
            error_report("Device expects VHOST_USER_PROTOCOL_F_CONFIG "
                         "but backend does not support it.");
            return -1;
        }

        err = vhost_user_set_protocol_features(dev, dev->protocol_features);
        if (err < 0) {
            return err;
        }

        /* query the max queues we support if backend supports Multiple Queue */
        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
            err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
                                     &dev->max_queues);
            if (err < 0) {
                return err;
            }
        }

        if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
                !(virtio_has_feature(dev->protocol_features,
                                     VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
                  virtio_has_feature(dev->protocol_features,
                                     VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
            error_report("IOMMU support requires reply-ack and "
                         "slave-req protocol features.");
            return -1;
        }
    }

    if (dev->migration_blocker == NULL &&
        !virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
        error_setg(&dev->migration_blocker,
                   "Migration disabled: vhost-user backend lacks "
                   "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
    }

    err = vhost_setup_slave_channel(dev);
    if (err < 0) {
        return err;
    }

    u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
    postcopy_add_notifier(&u->postcopy_notifier);

    return 0;
}

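/*
 * Undo what vhost_user_backend_init() did: drop the postcopy notifier and
 * userfault fd, close the slave channel and free the per-device state.
 */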
static int vhost_user_backend_cleanup(struct vhost_dev *dev)
{
    struct vhost_user *u;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = dev->opaque;
    if (u->postcopy_notifier.notify) {
        postcopy_remove_notifier(&u->postcopy_notifier);
        u->postcopy_notifier.notify = NULL;
    }
    u->postcopy_listen = false;
    if (u->postcopy_fd.handler) {
        postcopy_unregister_shared_ufd(&u->postcopy_fd);
        close(u->postcopy_fd.fd);
        u->postcopy_fd.handler = NULL;
    }
    if (u->slave_fd >= 0) {
        qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
        close(u->slave_fd);
        u->slave_fd = -1;
    }
    g_free(u->region_rb);
    u->region_rb = NULL;
    g_free(u->region_rb_offset);
    u->region_rb_offset = NULL;
    u->region_rb_len = 0;
    g_free(u);
    dev->opaque = 0;

    return 0;
}

static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx;
}

static int vhost_user_memslots_limit(struct vhost_dev *dev)
{
    return VHOST_MEMORY_MAX_NREGIONS;
}

static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    return virtio_has_feature(dev->protocol_features,
                              VHOST_USER_PROTOCOL_F_LOG_SHMFD);
}

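/*
 * After migration, ask the backend to announce the guest's new location by
 * sending a RARP for the given MAC address, unless the guest can announce
 * itself via VIRTIO_NET_F_GUEST_ANNOUNCE.
 */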
static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
{
    VhostUserMsg msg = { };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    /* If guest supports GUEST_ANNOUNCE do nothing */
    if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
        return 0;
    }

    /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
    if (virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_RARP)) {
        msg.hdr.request = VHOST_USER_SEND_RARP;
        msg.hdr.flags = VHOST_USER_VERSION;
        memcpy((char *)&msg.payload.u64, mac_addr, 6);
        msg.hdr.size = sizeof(msg.payload.u64);

        return vhost_user_write(dev, &msg, NULL, 0);
    }
    return -1;
}

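/*
 * Two host address ranges may only be merged into one vhost memory region
 * if they are backed by the same file descriptor.
 */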
static bool vhost_user_can_merge(struct vhost_dev *dev,
                                 uint64_t start1, uint64_t size1,
                                 uint64_t start2, uint64_t size2)
{
    ram_addr_t offset;
    int mfd, rfd;
    MemoryRegion *mr;

    mr = memory_region_from_host((void *)(uintptr_t)start1, &offset);
    mfd = memory_region_get_fd(mr);

    mr = memory_region_from_host((void *)(uintptr_t)start2, &offset);
    rfd = memory_region_get_fd(mr);

    return mfd == rfd;
}

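/*
 * Tell the backend about the configured MTU; needs the
 * VHOST_USER_PROTOCOL_F_NET_MTU protocol feature and, when reply-ack is
 * available, waits for the backend to acknowledge that the MTU is valid.
 */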
static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
{
    VhostUserMsg msg;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
        return 0;
    }

    msg.hdr.request = VHOST_USER_NET_SET_MTU;
    msg.payload.u64 = mtu;
    msg.hdr.size = sizeof(msg.payload.u64);
    msg.hdr.flags = VHOST_USER_VERSION;
    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    /* If reply_ack supported, slave has to ack specified MTU is valid */
    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}

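/*
 * Forward an IOTLB update/invalidation message to the backend and wait for
 * its reply-ack.
 */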
static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
                                            struct vhost_iotlb_msg *imsg)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_IOTLB_MSG,
        .hdr.size = sizeof(msg.payload.iotlb),
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .payload.iotlb = *imsg,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -EFAULT;
    }

    return process_message_reply(dev, &msg);
}


static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
{
    /* No-op as the receive channel is not dedicated to IOTLB messages. */
}

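/*
 * Read the device config space from the backend; requires the
 * VHOST_USER_PROTOCOL_F_CONFIG protocol feature.
 */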
static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        return -1;
    }

    if (config_len > VHOST_USER_MAX_CONFIG_SIZE) {
        return -1;
    }

    msg.payload.config.offset = 0;
    msg.payload.config.size = config_len;
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_CONFIG, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
        error_report("Received bad msg size.");
        return -1;
    }

    memcpy(config, msg.payload.config.region, config_len);

    return 0;
}

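/* Write (part of) the device config space to the backend. */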
static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size, uint32_t flags)
{
    uint8_t *p;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        return -1;
    }

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (size > VHOST_USER_MAX_CONFIG_SIZE) {
        return -1;
    }

    msg.payload.config.offset = offset;
    msg.payload.config.size = size;
    msg.payload.config.flags = flags;
    p = msg.payload.config.region;
    memcpy(p, data, size);

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}

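/*
 * Create a symmetric crypto session in the backend
 * (VHOST_USER_PROTOCOL_F_CRYPTO_SESSION) and return the session id it
 * allocated.
 */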
static int vhost_user_crypto_create_session(struct vhost_dev *dev,
                                            void *session_info,
                                            uint64_t *session_id)
{
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    CryptoDevBackendSymSessionInfo *sess_info = session_info;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.session),
    };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    memcpy(&msg.payload.session.session_setup_data, sess_info,
           sizeof(CryptoDevBackendSymSessionInfo));
    if (sess_info->key_len) {
        memcpy(&msg.payload.session.key, sess_info->cipher_key,
               sess_info->key_len);
    }
    if (sess_info->auth_key_len > 0) {
        memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
               sess_info->auth_key_len);
    }
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_report("vhost_user_write() returned -1, create session failed");
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_report("vhost_user_read() returned -1, create session failed");
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.session)) {
        error_report("Received bad msg size.");
        return -1;
    }

    if (msg.payload.session.session_id < 0) {
        error_report("Bad session id: %" PRId64 "",
                     msg.payload.session.session_id);
        return -1;
    }
    *session_id = msg.payload.session.session_id;

    return 0;
}

static int
vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
{
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.u64),
    };
    msg.payload.u64 = session_id;

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_report("vhost_user_write() returned -1, close session failed");
        return -1;
    }

    return 0;
}

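/*
 * Only memory sections backed by a file descriptor can be shared with the
 * backend, so filter out everything else.
 */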
static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
                                          MemoryRegionSection *section)
{
    bool result;

    result = memory_region_get_fd(section->mr) >= 0;

    return result;
}

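/*
 * Ask the backend for the shared memory region that tracks in-flight I/O
 * and map it, so that pending requests can be recovered after a backend
 * restart.
 */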
static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
                                      uint16_t queue_size,
                                      struct vhost_inflight *inflight)
{
    void *addr;
    int fd;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.inflight)) {
        error_report("Received bad msg size.");
        return -1;
    }

    if (!msg.payload.inflight.mmap_size) {
        return 0;
    }

    fd = qemu_chr_fe_get_msgfd(chr);
    if (fd < 0) {
        error_report("Failed to get mem fd");
        return -1;
    }

    addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, msg.payload.inflight.mmap_offset);

    if (addr == MAP_FAILED) {
        error_report("Failed to mmap mem fd");
        close(fd);
        return -1;
    }

    inflight->addr = addr;
    inflight->fd = fd;
    inflight->size = msg.payload.inflight.mmap_size;
    inflight->offset = msg.payload.inflight.mmap_offset;
    inflight->queue_size = queue_size;

    return 0;
}

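/*
 * Hand a previously obtained inflight region back to the backend so it can
 * resume the requests recorded there.
 */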
static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
                                      struct vhost_inflight *inflight)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.mmap_size = inflight->size,
        .payload.inflight.mmap_offset = inflight->offset,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = inflight->queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) {
        return -1;
    }

    return 0;
}

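/* Bind a VhostUserState to its chardev; fails if it is already in use. */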
bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
{
    if (user->chr) {
        error_setg(errp, "Cannot initialize vhost-user state");
        return false;
    }
    user->chr = chr;
    return true;
}

void vhost_user_cleanup(VhostUserState *user)
{
    int i;

    if (!user->chr) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (user->notifier[i].addr) {
            object_unparent(OBJECT(&user->notifier[i].mr));
            munmap(user->notifier[i].addr, qemu_real_host_page_size);
            user->notifier[i].addr = NULL;
        }
    }
    user->chr = NULL;
}

const VhostOps user_ops = {
        .backend_type = VHOST_BACKEND_TYPE_USER,
        .vhost_backend_init = vhost_user_backend_init,
        .vhost_backend_cleanup = vhost_user_backend_cleanup,
        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
        .vhost_set_log_base = vhost_user_set_log_base,
        .vhost_set_mem_table = vhost_user_set_mem_table,
        .vhost_set_vring_addr = vhost_user_set_vring_addr,
        .vhost_set_vring_endian = vhost_user_set_vring_endian,
        .vhost_set_vring_num = vhost_user_set_vring_num,
        .vhost_set_vring_base = vhost_user_set_vring_base,
        .vhost_get_vring_base = vhost_user_get_vring_base,
        .vhost_set_vring_kick = vhost_user_set_vring_kick,
        .vhost_set_vring_call = vhost_user_set_vring_call,
        .vhost_set_features = vhost_user_set_features,
        .vhost_get_features = vhost_user_get_features,
        .vhost_set_owner = vhost_user_set_owner,
        .vhost_reset_device = vhost_user_reset_device,
        .vhost_get_vq_index = vhost_user_get_vq_index,
        .vhost_set_vring_enable = vhost_user_set_vring_enable,
        .vhost_requires_shm_log = vhost_user_requires_shm_log,
        .vhost_migration_done = vhost_user_migration_done,
        .vhost_backend_can_merge = vhost_user_can_merge,
        .vhost_net_set_mtu = vhost_user_net_set_mtu,
        .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
        .vhost_get_config = vhost_user_get_config,
        .vhost_set_config = vhost_user_set_config,
        .vhost_crypto_create_session = vhost_user_crypto_create_session,
        .vhost_crypto_close_session = vhost_user_crypto_close_session,
        .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
        .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
        .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
};