libvhost-user: implement in-band notifications

Add support for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS, but
as it's not desired by default, don't enable it unless the device
implementation opts in by returning it from its protocol features
callback.
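
For illustration, a minimal sketch of such an opt-in (the callback and
iface names "my_*" are hypothetical; get_protocol_features is the
relevant VuDevIface member):

    #include "libvhost-user.h"

    /* Hypothetical device callback: returning the bit here is what
     * enables in-band notifications, since libvhost-user ORs the
     * callback's result into the features reported to the master. */
    static uint64_t
    my_get_protocol_features(VuDev *dev)
    {
        return 1ULL << VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS;
    }

    static const VuDevIface my_iface = {
        /* ... other device callbacks ... */
        .get_protocol_features = my_get_protocol_features,
    };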

Note that I updated vu_set_vring_err_exec(), but didn't add sending
of the VHOST_USER_SLAVE_VRING_ERR message, as nothing writes to the
err_fd today either.
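
(For reference, actually sending it would presumably mirror the
VRING_CALL message constructed in the .c hunk below; this is purely a
hypothetical sketch, not part of this commit:)

    /* Hypothetical, library-internal: report a ring error for vq
     * in-band instead of writing to err_fd. Not implemented here. */
    static void sketch_send_vring_err(VuDev *dev, VuVirtq *vq)
    {
        VhostUserMsg vmsg = {
            .request = VHOST_USER_SLAVE_VRING_ERR,
            .flags = VHOST_USER_VERSION,
            .size = sizeof(vmsg.payload.state),
            .payload.state = {
                .index = vq - dev->vq,
            },
        };

        vu_message_write(dev, dev->slave_fd, &vmsg);
    }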

This also adds vu_queue_notify_sync(), which can be used to force
a synchronous notification when in-band notifications are supported.
I had previously left out the slave->master direction handling of
F_REPLY_ACK; this adds code to support it as well.
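
A usage sketch (device-side; complete_request and its arguments are
made-up names, vu_queue_push and vu_queue_notify_sync are the real API):

    /* Sketch: push a used element, then notify. With in-band
     * notifications negotiated and no call_fd set, vu_queue_notify_sync()
     * sends VHOST_USER_SLAVE_VRING_CALL and, if F_REPLY_ACK was also
     * negotiated, waits for the master's reply before returning. */
    static void complete_request(VuDev *dev, VuVirtq *vq,
                                 VuVirtqElement *elem, unsigned int len)
    {
        vu_queue_push(dev, vq, elem, len);
        vu_queue_notify_sync(dev, vq);
    }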

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Message-Id: <20200123081708.7817-7-johannes@sipsolutions.net>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -136,6 +136,7 @@ vu_request_to_string(unsigned int req)
         REQ(VHOST_USER_GET_INFLIGHT_FD),
         REQ(VHOST_USER_SET_INFLIGHT_FD),
         REQ(VHOST_USER_GPU_SET_SOCKET),
+        REQ(VHOST_USER_VRING_KICK),
         REQ(VHOST_USER_MAX),
     };
 #undef REQ
@@ -163,7 +164,10 @@ vu_panic(VuDev *dev, const char *msg, ...)
 
     dev->panic(dev, buf);
     free(buf);
-    /* FIXME: find a way to call virtio_error? */
+    /*
+     * FIXME:
+     * find a way to call virtio_error, or perhaps close the connection?
+     */
 }
 
 /* Translate guest physical address to our virtual address. */
@@ -1203,6 +1207,14 @@ vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
 static bool
 vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 {
+    /*
+     * Note that we support, but intentionally do not set,
+     * VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. This means that
+     * a device implementation can return it in its callback
+     * (get_protocol_features) if it wants to use this for
+     * simulation, but it is otherwise not desirable (if even
+     * implemented by the master).
+     */
     uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
                         1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
                         1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
@@ -1235,6 +1247,25 @@ vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 
     dev->protocol_features = vmsg->payload.u64;
 
+    if (vu_has_protocol_feature(dev,
+                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
+        (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ) ||
+         !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
+        /*
+         * The use case for using messages for kick/call is simulation, to
+         * make the kick and call synchronous. To actually get that
+         * behaviour, both of the other features are required.
+         * Theoretically, one could use only kick messages, or do them
+         * without having F_REPLY_ACK, but too many (possibly pending)
+         * messages on the socket would eventually cause the master to
+         * hang; to avoid this in scenarios where it is not desired,
+         * enforce settings that actually enable the simulation case.
+         */
+        vu_panic(dev,
+                 "F_IN_BAND_NOTIFICATIONS requires F_SLAVE_REQ && F_REPLY_ACK");
+        return false;
+    }
+
     if (dev->iface->set_protocol_features) {
         dev->iface->set_protocol_features(dev, features);
     }
@@ -1495,6 +1526,34 @@ vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
     return false;
 }
 
+static bool
+vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg)
+{
+    unsigned int index = vmsg->payload.state.index;
+
+    if (index >= dev->max_queues) {
+        vu_panic(dev, "Invalid queue index: %u", index);
+        return false;
+    }
+
+    DPRINT("Got kick message: handler:%p idx:%d\n",
+           dev->vq[index].handler, index);
+
+    if (!dev->vq[index].started) {
+        dev->vq[index].started = true;
+
+        if (dev->iface->queue_set_started) {
+            dev->iface->queue_set_started(dev, index, true);
+        }
+    }
+
+    if (dev->vq[index].handler) {
+        dev->vq[index].handler(dev, index);
+    }
+
+    return false;
+}
+
 static bool
 vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
 {
@@ -1577,6 +1636,8 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
         return vu_get_inflight_fd(dev, vmsg);
     case VHOST_USER_SET_INFLIGHT_FD:
         return vu_set_inflight_fd(dev, vmsg);
+    case VHOST_USER_VRING_KICK:
+        return vu_handle_vring_kick(dev, vmsg);
     default:
         vmsg_close_fds(vmsg);
         vu_panic(dev, "Unhandled request: %d", vmsg->request);
@@ -2038,8 +2099,7 @@ vring_notify(VuDev *dev, VuVirtq *vq)
     return !v || vring_need_event(vring_get_used_event(vq), new, old);
 }
 
-void
-vu_queue_notify(VuDev *dev, VuVirtq *vq)
+static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
 {
     if (unlikely(dev->broken) ||
         unlikely(!vq->vring.avail)) {
@@ -2051,11 +2111,48 @@ vu_queue_notify(VuDev *dev, VuVirtq *vq)
         return;
     }
 
+    if (vq->call_fd < 0 &&
+        vu_has_protocol_feature(dev,
+                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
+        vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
+        VhostUserMsg vmsg = {
+            .request = VHOST_USER_SLAVE_VRING_CALL,
+            .flags = VHOST_USER_VERSION,
+            .size = sizeof(vmsg.payload.state),
+            .payload.state = {
+                .index = vq - dev->vq,
+            },
+        };
+        bool ack = sync &&
+                   vu_has_protocol_feature(dev,
+                                           VHOST_USER_PROTOCOL_F_REPLY_ACK);
+
+        if (ack) {
+            vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;
+        }
+
+        vu_message_write(dev, dev->slave_fd, &vmsg);
+        if (ack) {
+            vu_message_read(dev, dev->slave_fd, &vmsg);
+        }
+        return;
+    }
+
     if (eventfd_write(vq->call_fd, 1) < 0) {
         vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
     }
 }
 
+void vu_queue_notify(VuDev *dev, VuVirtq *vq)
+{
+    _vu_queue_notify(dev, vq, false);
+}
+
+void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq)
+{
+    _vu_queue_notify(dev, vq, true);
+}
+
 static inline void
 vring_used_flags_set_bit(VuVirtq *vq, int mask)
 {

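For context, the master-to-slave direction uses the same payload layout:
an in-band kick is a VHOST_USER_VRING_KICK message carrying the vring
state with the queue index, which vu_handle_vring_kick() above decodes
via vmsg->payload.state.index. An illustrative master-side encoding
(the send helper is hypothetical, not part of libvhost-user):

    /* Hypothetical master-side code: encode an in-band kick for queue
     * `index`; send_to_slave() stands in for the master's socket write. */
    VhostUserMsg kick = {
        .request = VHOST_USER_VRING_KICK,
        .flags = VHOST_USER_VERSION,
        .size = sizeof(kick.payload.state),
        .payload.state = {
            .index = index,
        },
    };
    send_to_slave(&kick);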
--- a/contrib/libvhost-user/libvhost-user.h
+++ b/contrib/libvhost-user/libvhost-user.h
@@ -54,6 +54,7 @@ enum VhostUserProtocolFeature {
     VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
     VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
     VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
+    VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
 
     VHOST_USER_PROTOCOL_F_MAX
 };
@@ -95,6 +96,7 @@ typedef enum VhostUserRequest {
     VHOST_USER_GET_INFLIGHT_FD = 31,
     VHOST_USER_SET_INFLIGHT_FD = 32,
     VHOST_USER_GPU_SET_SOCKET = 33,
+    VHOST_USER_VRING_KICK = 35,
     VHOST_USER_MAX
 } VhostUserRequest;
@@ -103,6 +105,8 @@ typedef enum VhostUserSlaveRequest {
     VHOST_USER_SLAVE_IOTLB_MSG = 1,
     VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
     VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
+    VHOST_USER_SLAVE_VRING_CALL = 4,
+    VHOST_USER_SLAVE_VRING_ERR = 5,
     VHOST_USER_SLAVE_MAX
 } VhostUserSlaveRequest;
@@ -528,6 +532,16 @@ bool vu_queue_empty(VuDev *dev, VuVirtq *vq);
  */
 void vu_queue_notify(VuDev *dev, VuVirtq *vq);
 
+/**
+ * vu_queue_notify_sync:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Request to notify the queue via callfd (skipped if unnecessary), or
+ * synchronously via an in-band message if supported.
+ */
+void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq);
+
 /**
  * vu_queue_pop:
  * @dev: a VuDev context