vhost-user: fully use new backend/frontend naming

Slave/master nomenclature was replaced with backend/frontend in commit
1fc19b6527 ("vhost-user: Adopt new backend naming").

This patch replaces all remaining uses of master and slave in the
codebase.

Signed-off-by: Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org>
Message-Id: <20230613080849.2115347-1-manos.pitsidianakis@linaro.org>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Author: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
Date: 2023-06-13 11:08:48 +03:00
Committed by: Michael S. Tsirkin <mst@redhat.com>
parent 535a3d9a32
commit f8ed3648b5
12 changed files with 74 additions and 72 deletions

@@ -167,7 +167,7 @@ vu_blk_set_config(VuDev *vu_dev, const uint8_t *data,
uint8_t wce;
/* don't support live migration */
if (flags != VHOST_SET_CONFIG_TYPE_MASTER) {
if (flags != VHOST_SET_CONFIG_TYPE_FRONTEND) {
return -EINVAL;
}

@@ -421,7 +421,7 @@ vub_set_config(VuDev *vu_dev, const uint8_t *data,
int fd;
/* don't support live migration */
if (flags != VHOST_SET_CONFIG_TYPE_MASTER) {
if (flags != VHOST_SET_CONFIG_TYPE_FRONTEND) {
return -1;
}

@@ -81,7 +81,7 @@ static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
ret = vhost_dev_set_config(&s->dev, &blkcfg->wce,
offsetof(struct virtio_blk_config, wce),
sizeof(blkcfg->wce),
VHOST_SET_CONFIG_TYPE_MASTER);
VHOST_SET_CONFIG_TYPE_FRONTEND);
if (ret) {
error_report("set device config space failed");
return;

@@ -452,7 +452,7 @@ vhost_user_gpu_set_config(VirtIODevice *vdev,
ret = vhost_dev_set_config(&g->vhost->dev, config_data,
0, sizeof(struct virtio_gpu_config),
VHOST_SET_CONFIG_TYPE_MASTER);
VHOST_SET_CONFIG_TYPE_FRONTEND);
if (ret) {
error_report("vhost-user-gpu: set device config space failed");
return;

@@ -69,7 +69,7 @@ static void vhost_input_set_config(VirtIODevice *vdev,
ret = vhost_dev_set_config(&vhi->vhost->dev, config_data,
0, sizeof(virtio_input_config),
VHOST_SET_CONFIG_TYPE_MASTER);
VHOST_SET_CONFIG_TYPE_FRONTEND);
if (ret) {
error_report("vhost-user-input: set device config space failed");
return;

@@ -211,7 +211,7 @@ static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
vhost_net_set_config(get_vhost_net(nc->peer),
(uint8_t *)&netcfg, 0, n->config_size,
VHOST_SET_CONFIG_TYPE_MASTER);
VHOST_SET_CONFIG_TYPE_FRONTEND);
}
}
@@ -3733,7 +3733,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
struct virtio_net_config netcfg = {};
memcpy(&netcfg.mac, &n->nic_conf.macaddr, ETH_ALEN);
vhost_net_set_config(get_vhost_net(nc->peer),
(uint8_t *)&netcfg, 0, ETH_ALEN, VHOST_SET_CONFIG_TYPE_MASTER);
(uint8_t *)&netcfg, 0, ETH_ALEN, VHOST_SET_CONFIG_TYPE_FRONTEND);
}
QTAILQ_INIT(&n->rsc_chains);
n->qdev = dev;

@@ -203,7 +203,7 @@ vhost_vdpa_device_set_config(VirtIODevice *vdev, const uint8_t *config)
int ret;
ret = vhost_dev_set_config(&s->dev, s->config, 0, s->config_size,
VHOST_SET_CONFIG_TYPE_MASTER);
VHOST_SET_CONFIG_TYPE_FRONTEND);
if (ret) {
error_report("set device config space failed");
return;

@@ -124,13 +124,13 @@ typedef enum VhostUserRequest {
VHOST_USER_MAX
} VhostUserRequest;
typedef enum VhostUserSlaveRequest {
typedef enum VhostUserBackendRequest {
VHOST_USER_BACKEND_NONE = 0,
VHOST_USER_BACKEND_IOTLB_MSG = 1,
VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2,
VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3,
VHOST_USER_BACKEND_MAX
} VhostUserSlaveRequest;
} VhostUserBackendRequest;
typedef struct VhostUserMemoryRegion {
uint64_t guest_phys_addr;
@@ -245,8 +245,8 @@ struct vhost_user {
struct vhost_dev *dev;
/* Shared between vhost devs of the same virtio device */
VhostUserState *user;
QIOChannel *slave_ioc;
GSource *slave_src;
QIOChannel *backend_ioc;
GSource *backend_src;
NotifierWithReturn postcopy_notifier;
struct PostCopyFD postcopy_fd;
uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
@@ -1495,7 +1495,7 @@ static int vhost_user_reset_device(struct vhost_dev *dev)
return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
static int vhost_user_backend_handle_config_change(struct vhost_dev *dev)
{
if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
return -ENOSYS;
@@ -1532,7 +1532,7 @@ static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u,
return n;
}
static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
static int vhost_user_backend_handle_vring_host_notifier(struct vhost_dev *dev,
VhostUserVringArea *area,
int fd)
{
@@ -1594,16 +1594,16 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
return 0;
}
static void close_slave_channel(struct vhost_user *u)
static void close_backend_channel(struct vhost_user *u)
{
g_source_destroy(u->slave_src);
g_source_unref(u->slave_src);
u->slave_src = NULL;
object_unref(OBJECT(u->slave_ioc));
u->slave_ioc = NULL;
g_source_destroy(u->backend_src);
g_source_unref(u->backend_src);
u->backend_src = NULL;
object_unref(OBJECT(u->backend_ioc));
u->backend_ioc = NULL;
}
static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
static gboolean backend_read(QIOChannel *ioc, GIOCondition condition,
gpointer opaque)
{
struct vhost_dev *dev = opaque;
@@ -1645,10 +1645,10 @@ static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
break;
case VHOST_USER_BACKEND_CONFIG_CHANGE_MSG:
ret = vhost_user_slave_handle_config_change(dev);
ret = vhost_user_backend_handle_config_change(dev);
break;
case VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG:
ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
ret = vhost_user_backend_handle_vring_host_notifier(dev, &payload.area,
fd ? fd[0] : -1);
break;
default:
@@ -1684,7 +1684,7 @@ static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
goto fdcleanup;
err:
close_slave_channel(u);
close_backend_channel(u);
rc = G_SOURCE_REMOVE;
fdcleanup:
@@ -1696,7 +1696,7 @@ fdcleanup:
return rc;
}
static int vhost_setup_slave_channel(struct vhost_dev *dev)
static int vhost_setup_backend_channel(struct vhost_dev *dev)
{
VhostUserMsg msg = {
.hdr.request = VHOST_USER_SET_BACKEND_REQ_FD,
@@ -1725,10 +1725,10 @@ static int vhost_setup_slave_channel(struct vhost_dev *dev)
error_report_err(local_err);
return -ECONNREFUSED;
}
u->slave_ioc = ioc;
u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
u->backend_ioc = ioc;
u->backend_src = qio_channel_add_watch_source(u->backend_ioc,
G_IO_IN | G_IO_HUP,
slave_read, dev, NULL, NULL);
backend_read, dev, NULL, NULL);
if (reply_supported) {
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
@@ -1746,7 +1746,7 @@ static int vhost_setup_slave_channel(struct vhost_dev *dev)
out:
close(sv[1]);
if (ret) {
close_slave_channel(u);
close_backend_channel(u);
}
return ret;
@@ -2072,7 +2072,7 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
error_setg(errp, "IOMMU support requires reply-ack and "
"slave-req protocol features.");
"backend-req protocol features.");
return -EINVAL;
}
@@ -2108,7 +2108,7 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
}
if (dev->vq_index == 0) {
err = vhost_setup_slave_channel(dev);
err = vhost_setup_backend_channel(dev);
if (err < 0) {
error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
return -EPROTO;
@@ -2138,8 +2138,8 @@ static int vhost_user_backend_cleanup(struct vhost_dev *dev)
close(u->postcopy_fd.fd);
u->postcopy_fd.handler = NULL;
}
if (u->slave_ioc) {
close_slave_channel(u);
if (u->backend_ioc) {
close_backend_channel(u);
}
g_free(u->region_rb);
u->region_rb = NULL;
@@ -2235,7 +2235,7 @@ static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
return ret;
}
/* If reply_ack supported, slave has to ack specified MTU is valid */
/* If reply_ack supported, backend has to ack specified MTU is valid */
if (reply_supported) {
return process_message_reply(dev, &msg);
}

@@ -117,7 +117,7 @@ static const qmp_virtio_feature_map_t vhost_user_protocol_map[] = {
"VHOST_USER_PROTOCOL_F_CONFIG: Vhost-user messaging for virtio "
"device configuration space supported"),
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD, \
"VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD: Slave fd communication "
"VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD: Backend fd communication "
"channel supported"),
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_HOST_NOTIFIER, \
"VHOST_USER_PROTOCOL_F_HOST_NOTIFIER: Host notifiers for specified "

@@ -22,7 +22,7 @@ typedef enum VhostBackendType {
} VhostBackendType;
typedef enum VhostSetConfigType {
VHOST_SET_CONFIG_TYPE_MASTER = 0,
VHOST_SET_CONFIG_TYPE_FRONTEND = 0,
VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
} VhostSetConfigType;

@@ -421,8 +421,8 @@ vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
}
/*
* Processes a reply on the slave channel.
* Entered with slave_mutex held and releases it before exit.
* Processes a reply on the backend channel.
* Entered with backend_mutex held and releases it before exit.
* Returns true on success.
*/
static bool
@@ -436,7 +436,7 @@ vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
goto out;
}
if (!vu_message_read_default(dev, dev->slave_fd, &msg_reply)) {
if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
goto out;
}
@@ -449,7 +449,7 @@ vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
result = msg_reply.payload.u64 == 0;
out:
pthread_mutex_unlock(&dev->slave_mutex);
pthread_mutex_unlock(&dev->backend_mutex);
return result;
}
@@ -1393,13 +1393,13 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
return false;
}
pthread_mutex_lock(&dev->slave_mutex);
if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
pthread_mutex_unlock(&dev->slave_mutex);
pthread_mutex_lock(&dev->backend_mutex);
if (!vu_message_write(dev, dev->backend_fd, &vmsg)) {
pthread_mutex_unlock(&dev->backend_mutex);
return false;
}
/* Also unlocks the slave_mutex */
/* Also unlocks the backend_mutex */
return vu_process_message_reply(dev, &vmsg);
}
@@ -1463,7 +1463,7 @@ vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
* a device implementation can return it in its callback
* (get_protocol_features) if it wants to use this for
* simulation, but it is otherwise not desirable (if even
* implemented by the master.)
* implemented by the frontend.)
*/
uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
@@ -1508,7 +1508,7 @@ vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
* of the other features are required.
* Theoretically, one could use only kick messages, or do them without
* having F_REPLY_ACK, but too many (possibly pending) messages on the
* socket will eventually cause the master to hang, to avoid this in
* socket will eventually cause the frontend to hang, to avoid this in
* scenarios where not desired enforce that the settings are in a way
* that actually enables the simulation case.
*/
@@ -1550,18 +1550,18 @@ vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
}
static bool
vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
vu_set_backend_req_fd(VuDev *dev, VhostUserMsg *vmsg)
{
if (vmsg->fd_num != 1) {
vu_panic(dev, "Invalid slave_req_fd message (%d fd's)", vmsg->fd_num);
vu_panic(dev, "Invalid backend_req_fd message (%d fd's)", vmsg->fd_num);
return false;
}
if (dev->slave_fd != -1) {
close(dev->slave_fd);
if (dev->backend_fd != -1) {
close(dev->backend_fd);
}
dev->slave_fd = vmsg->fds[0];
DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);
dev->backend_fd = vmsg->fds[0];
DPRINT("Got backend_fd: %d\n", vmsg->fds[0]);
return false;
}
@@ -1577,7 +1577,7 @@ vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
}
if (ret) {
/* resize to zero to indicate an error to master */
/* resize to zero to indicate an error to frontend */
vmsg->size = 0;
}
@@ -1917,7 +1917,7 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
case VHOST_USER_SET_VRING_ENABLE:
return vu_set_vring_enable_exec(dev, vmsg);
case VHOST_USER_SET_BACKEND_REQ_FD:
return vu_set_slave_req_fd(dev, vmsg);
return vu_set_backend_req_fd(dev, vmsg);
case VHOST_USER_GET_CONFIG:
return vu_get_config(dev, vmsg);
case VHOST_USER_SET_CONFIG:
@@ -2038,11 +2038,11 @@ vu_deinit(VuDev *dev)
}
vu_close_log(dev);
if (dev->slave_fd != -1) {
close(dev->slave_fd);
dev->slave_fd = -1;
if (dev->backend_fd != -1) {
close(dev->backend_fd);
dev->backend_fd = -1;
}
pthread_mutex_destroy(&dev->slave_mutex);
pthread_mutex_destroy(&dev->backend_mutex);
if (dev->sock != -1) {
close(dev->sock);
@@ -2080,8 +2080,8 @@ vu_init(VuDev *dev,
dev->remove_watch = remove_watch;
dev->iface = iface;
dev->log_call_fd = -1;
pthread_mutex_init(&dev->slave_mutex, NULL);
dev->slave_fd = -1;
pthread_mutex_init(&dev->backend_mutex, NULL);
dev->backend_fd = -1;
dev->max_queues = max_queues;
dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
@@ -2439,9 +2439,9 @@ static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;
}
vu_message_write(dev, dev->slave_fd, &vmsg);
vu_message_write(dev, dev->backend_fd, &vmsg);
if (ack) {
vu_message_read_default(dev, dev->slave_fd, &vmsg);
vu_message_read_default(dev, dev->backend_fd, &vmsg);
}
return;
}
@@ -2468,7 +2468,7 @@ void vu_config_change_msg(VuDev *dev)
.flags = VHOST_USER_VERSION,
};
vu_message_write(dev, dev->slave_fd, &vmsg);
vu_message_write(dev, dev->backend_fd, &vmsg);
}
static inline void

@@ -39,7 +39,7 @@
#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
typedef enum VhostSetConfigType {
VHOST_SET_CONFIG_TYPE_MASTER = 0,
VHOST_SET_CONFIG_TYPE_FRONTEND = 0,
VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
} VhostSetConfigType;
@@ -112,7 +112,7 @@ typedef enum VhostUserRequest {
VHOST_USER_MAX
} VhostUserRequest;
typedef enum VhostUserSlaveRequest {
typedef enum VhostUserBackendRequest {
VHOST_USER_BACKEND_NONE = 0,
VHOST_USER_BACKEND_IOTLB_MSG = 1,
VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2,
@@ -120,7 +120,7 @@ typedef enum VhostUserSlaveRequest {
VHOST_USER_BACKEND_VRING_CALL = 4,
VHOST_USER_BACKEND_VRING_ERR = 5,
VHOST_USER_BACKEND_MAX
} VhostUserSlaveRequest;
} VhostUserBackendRequest;
typedef struct VhostUserMemoryRegion {
uint64_t guest_phys_addr;
@@ -296,8 +296,10 @@ typedef struct VuVirtqInflight {
* Zero value indicates a vm reset happened. */
uint16_t version;
/* The size of VuDescStateSplit array. It's equal to the virtqueue
* size. Slave could get it from queue size field of VhostUserInflight. */
/*
* The size of VuDescStateSplit array. It's equal to the virtqueue size.
* Backend could get it from queue size field of VhostUserInflight.
*/
uint16_t desc_num;
/* The head of list that track the last batch of used descriptors. */
@@ -384,9 +386,9 @@ struct VuDev {
VuVirtq *vq;
VuDevInflightInfo inflight_info;
int log_call_fd;
/* Must be held while using slave_fd */
pthread_mutex_t slave_mutex;
int slave_fd;
/* Must be held while using backend_fd */
pthread_mutex_t backend_mutex;
int backend_fd;
uint64_t log_size;
uint8_t *log_table;
uint64_t features;
@@ -445,7 +447,7 @@ typedef struct VuVirtqElement {
* vu_init:
* @dev: a VuDev context
* @max_queues: maximum number of virtqueues
* @socket: the socket connected to vhost-user master
* @socket: the socket connected to vhost-user frontend
* @panic: a panic callback
* @set_watch: a set_watch callback
* @remove_watch: a remove_watch callback