hw/virtio: generalise CHR_EVENT_CLOSED handling

..and use for both vhost-user-blk and vhost-user-gpio. This avoids
the circular close by deferring shutdown due to disconnection until a
later point. vhost-user-blk already had this mechanism in place so
generalise it as a vhost-user helper function and use for both blk and
gpio devices.

While we are at it we also fix up vhost-user-gpio to re-establish the
event handler after close down so we can reconnect later.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Raphael Norwitz <raphael.norwitz@nutanix.com>
Message-Id: <20221130112439.2527228-5-alex.bennee@linaro.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
Alex Bennée 2022-11-30 11:24:38 +00:00 committed by Michael S. Tsirkin
parent 060f4a9440
commit 71e076a07d
4 changed files with 104 additions and 37 deletions

View File

@ -369,17 +369,10 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
vhost_user_blk_stop(vdev); vhost_user_blk_stop(vdev);
vhost_dev_cleanup(&s->dev); vhost_dev_cleanup(&s->dev);
}
static void vhost_user_blk_chr_closed_bh(void *opaque) /* Re-instate the event handler for new connections */
{
DeviceState *dev = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserBlk *s = VHOST_USER_BLK(vdev);
vhost_user_blk_disconnect(dev);
qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event, qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
NULL, opaque, NULL, true); NULL, dev, NULL, true);
} }
static void vhost_user_blk_event(void *opaque, QEMUChrEvent event) static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
@ -398,33 +391,9 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
} }
break; break;
case CHR_EVENT_CLOSED: case CHR_EVENT_CLOSED:
if (!runstate_check(RUN_STATE_SHUTDOWN)) { /* defer close until later to avoid circular close */
/* vhost_user_async_close(dev, &s->chardev, &s->dev,
* A close event may happen during a read/write, but vhost vhost_user_blk_disconnect);
* code assumes the vhost_dev remains setup, so delay the
* stop & clear.
*/
AioContext *ctx = qemu_get_current_aio_context();
qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL, NULL,
NULL, NULL, false);
aio_bh_schedule_oneshot(ctx, vhost_user_blk_chr_closed_bh, opaque);
/*
* Move vhost device to the stopped state. The vhost-user device
* will be clean up and disconnected in BH. This can be useful in
* the vhost migration code. If disconnect was caught there is an
* option for the general vhost code to get the dev state without
* knowing its type (in this case vhost-user).
*
* FIXME: this is sketchy to be reaching into vhost_dev
* now because we are forcing something that implies we
* have executed vhost_dev_stop() but that won't happen
* until vhost_user_blk_stop() gets called from the bh.
* Really this state check should be tracked locally.
*/
s->dev.started = false;
}
break; break;
case CHR_EVENT_BREAK: case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN: case CHR_EVENT_MUX_IN:

View File

@ -233,6 +233,8 @@ static int vu_gpio_connect(DeviceState *dev, Error **errp)
return 0; return 0;
} }
static void vu_gpio_event(void *opaque, QEMUChrEvent event);
static void vu_gpio_disconnect(DeviceState *dev) static void vu_gpio_disconnect(DeviceState *dev)
{ {
VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@ -245,6 +247,11 @@ static void vu_gpio_disconnect(DeviceState *dev)
vu_gpio_stop(vdev); vu_gpio_stop(vdev);
vhost_dev_cleanup(&gpio->vhost_dev); vhost_dev_cleanup(&gpio->vhost_dev);
/* Re-instate the event handler for new connections */
qemu_chr_fe_set_handlers(&gpio->chardev,
NULL, NULL, vu_gpio_event,
NULL, dev, NULL, true);
} }
static void vu_gpio_event(void *opaque, QEMUChrEvent event) static void vu_gpio_event(void *opaque, QEMUChrEvent event)
@ -262,7 +269,9 @@ static void vu_gpio_event(void *opaque, QEMUChrEvent event)
} }
break; break;
case CHR_EVENT_CLOSED: case CHR_EVENT_CLOSED:
vu_gpio_disconnect(dev); /* defer close until later to avoid circular close */
vhost_user_async_close(dev, &gpio->chardev, &gpio->vhost_dev,
vu_gpio_disconnect);
break; break;
case CHR_EVENT_BREAK: case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN: case CHR_EVENT_MUX_IN:

View File

@ -21,6 +21,7 @@
#include "qemu/error-report.h" #include "qemu/error-report.h"
#include "qemu/main-loop.h" #include "qemu/main-loop.h"
#include "qemu/sockets.h" #include "qemu/sockets.h"
#include "sysemu/runstate.h"
#include "sysemu/cryptodev.h" #include "sysemu/cryptodev.h"
#include "migration/migration.h" #include "migration/migration.h"
#include "migration/postcopy-ram.h" #include "migration/postcopy-ram.h"
@ -2670,6 +2671,76 @@ void vhost_user_cleanup(VhostUserState *user)
user->chr = NULL; user->chr = NULL;
} }
/*
 * State captured when CHR_EVENT_CLOSED is seen and handed to the
 * bottom half, so the device-specific disconnect can run outside the
 * chardev event callback (avoiding a circular close). Freed by the
 * bottom half after it runs.
 */
typedef struct {
vu_async_close_fn cb;    /* device-specific disconnect callback */
DeviceState *dev;        /* device passed through to @cb */
CharBackend *cd;         /* chardev the connection dropped on */
struct vhost_dev *vhost; /* common vhost state, checked before @cb runs */
} VhostAsyncCallback;
/*
 * Bottom half scheduled by vhost_user_async_close(): completes the
 * deferred disconnect by invoking the device-specific callback, then
 * frees the callback data allocated by vhost_user_async_close().
 */
static void vhost_user_async_close_bh(void *opaque)
{
VhostAsyncCallback *data = opaque;
struct vhost_dev *vhost = data->vhost;
/*
 * If the vhost_dev has been cleared in the meantime there is
 * nothing left to do as some other path has completed the
 * cleanup.
 */
if (vhost->vdev) {
data->cb(data->dev);
}
g_free(data);
}
/*
 * We only schedule the work if the machine is running. If suspended
 * we want to keep all the in-flight data as is for migration
 * purposes.
 */
void vhost_user_async_close(DeviceState *d,
CharBackend *chardev, struct vhost_dev *vhost,
vu_async_close_fn cb)
{
/*
 * @d: device whose backend connection dropped (passed to @cb)
 * @chardev: the CharBackend the CHR_EVENT_CLOSED came from
 * @vhost: common vhost state; marked stopped here, checked in the BH
 * @cb: device-specific disconnect handler, run later from a BH
 */
if (!runstate_check(RUN_STATE_SHUTDOWN)) {
/*
 * A close event may happen during a read/write, but vhost
 * code assumes the vhost_dev remains setup, so delay the
 * stop & clear.
 */
AioContext *ctx = qemu_get_current_aio_context();
VhostAsyncCallback *data = g_new0(VhostAsyncCallback, 1);
/* Save data for the callback */
data->cb = cb;
data->dev = d;
data->cd = chardev;
data->vhost = vhost;
/* Disable any further notifications on the chardev */
qemu_chr_fe_set_handlers(chardev,
NULL, NULL, NULL, NULL, NULL, NULL,
false);
/* data is freed by vhost_user_async_close_bh() once it has run */
aio_bh_schedule_oneshot(ctx, vhost_user_async_close_bh, data);
/*
 * Move vhost device to the stopped state. The vhost-user device
 * will be clean up and disconnected in BH. This can be useful in
 * the vhost migration code. If disconnect was caught there is an
 * option for the general vhost code to get the dev state without
 * knowing its type (in this case vhost-user).
 *
 * Note if the vhost device is fully cleared by the time we
 * execute the bottom half we won't continue with the cleanup.
 */
vhost->started = false;
}
}
static int vhost_user_dev_start(struct vhost_dev *dev, bool started) static int vhost_user_dev_start(struct vhost_dev *dev, bool started)
{ {
if (!virtio_has_feature(dev->protocol_features, if (!virtio_has_feature(dev->protocol_features,

View File

@ -68,4 +68,22 @@ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp);
*/ */
void vhost_user_cleanup(VhostUserState *user); void vhost_user_cleanup(VhostUserState *user);
/**
* vhost_user_async_close() - cleanup vhost-user post connection drop
* @d: DeviceState for the associated device (passed to callback)
* @chardev: the CharBackend associated with the connection
* @vhost: the common vhost device
* @cb: the user callback function to complete the clean-up
*
* This function is used to handle the shutdown of a vhost-user
* connection to a backend. We handle this centrally to make sure we
* do all the steps and handle potential races due to VM shutdowns.
* Once the connection is disabled we schedule a bottom half which
* invokes @cb to complete the device-specific clean-up, unless some
* other path has already cleared the vhost_dev in the meantime.
*/
typedef void (*vu_async_close_fn)(DeviceState *cb);
void vhost_user_async_close(DeviceState *d,
CharBackend *chardev, struct vhost_dev *vhost,
vu_async_close_fn cb);
#endif #endif