virtio-scsi: implement BlockDevOps->drained_begin()

The virtio-scsi Host Bus Adapter provides access to devices on a SCSI bus. Those SCSI devices typically have a BlockBackend. When the BlockBackend enters a drained section, the SCSI device must temporarily stop submitting new I/O requests.

Implement this behavior by temporarily stopping virtio-scsi virtqueue processing when one of the SCSI devices enters a drained section. The new scsi_device_drained_begin() API allows scsi-disk to message the virtio-scsi HBA.

scsi_device_drained_begin() uses a drain counter so that multiple SCSI devices can have overlapping drained sections. The HBA only sees one pair of .drained_begin/end() calls.

After this commit, virtio-scsi no longer depends on hw/virtio's ioeventfd aio_set_event_notifier(is_external=true). This commit is a step towards removing the aio_disable_external() API.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230516190238.8401-19-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
parent 1665d9326f
commit 766aa2de0f
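For illustration only, here is a standalone sketch of the drain-counter pattern described in the commit message. It is not QEMU code: demo_bus, device_drained_begin()/device_drained_end() and the hba_* callbacks are hypothetical stand-ins for SCSIBus, scsi_device_drained_begin()/scsi_device_drained_end() and the SCSIBusInfo callbacks. It shows how overlapping drained sections from several devices collapse into the single begin/end pair that the HBA sees.

/* Standalone sketch of the drain-counter pattern: several devices on one
 * bus may begin/end draining in overlapping fashion, but the HBA callback
 * fires only on the first begin and the last end. Not QEMU code. */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

struct demo_bus {
    int drain_count;                 /* number of devices currently drained */
    void (*drained_begin)(void);     /* HBA callback, may be NULL */
    void (*drained_end)(void);
};

static void hba_drained_begin(void) { printf("HBA: stop virtqueue processing\n"); }
static void hba_drained_end(void)   { printf("HBA: resume virtqueue processing\n"); }

static void device_drained_begin(struct demo_bus *bus)
{
    assert(bus->drain_count < INT_MAX);
    if (bus->drain_count++ == 0 && bus->drained_begin) {
        bus->drained_begin();        /* only the first begin reaches the HBA */
    }
}

static void device_drained_end(struct demo_bus *bus)
{
    assert(bus->drain_count > 0);
    if (bus->drain_count-- == 1 && bus->drained_end) {
        bus->drained_end();          /* only the last end reaches the HBA */
    }
}

int main(void)
{
    struct demo_bus bus = {
        .drained_begin = hba_drained_begin,
        .drained_end = hba_drained_end,
    };

    device_drained_begin(&bus);      /* disk A drains -> HBA notified   */
    device_drained_begin(&bus);      /* disk B drains -> no extra call  */
    device_drained_end(&bus);        /* disk B done   -> HBA still quiesced */
    device_drained_end(&bus);        /* disk A done   -> HBA notified   */
    return 0;
}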
hw/scsi/scsi-bus.c
@@ -1669,6 +1669,46 @@ void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
     scsi_device_set_ua(sdev, sense);
 }
 
+void scsi_device_drained_begin(SCSIDevice *sdev)
+{
+    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
+    if (!bus) {
+        return;
+    }
+
+    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
+    assert(bus->drain_count < INT_MAX);
+
+    /*
+     * Multiple BlockBackends can be on a SCSIBus and each may begin/end
+     * draining at any time. Keep a counter so HBAs only see begin/end once.
+     */
+    if (bus->drain_count++ == 0) {
+        trace_scsi_bus_drained_begin(bus, sdev);
+        if (bus->info->drained_begin) {
+            bus->info->drained_begin(bus);
+        }
+    }
+}
+
+void scsi_device_drained_end(SCSIDevice *sdev)
+{
+    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
+    if (!bus) {
+        return;
+    }
+
+    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
+    assert(bus->drain_count > 0);
+
+    if (bus->drain_count-- == 1) {
+        trace_scsi_bus_drained_end(bus, sdev);
+        if (bus->info->drained_end) {
+            bus->info->drained_end(bus);
+        }
+    }
+}
+
 static char *scsibus_get_dev_path(DeviceState *dev)
 {
     SCSIDevice *d = SCSI_DEVICE(dev);
hw/scsi/scsi-disk.c
@@ -2360,6 +2360,20 @@ static void scsi_disk_reset(DeviceState *dev)
     s->qdev.scsi_version = s->qdev.default_scsi_version;
 }
 
+static void scsi_disk_drained_begin(void *opaque)
+{
+    SCSIDiskState *s = opaque;
+
+    scsi_device_drained_begin(&s->qdev);
+}
+
+static void scsi_disk_drained_end(void *opaque)
+{
+    SCSIDiskState *s = opaque;
+
+    scsi_device_drained_end(&s->qdev);
+}
+
 static void scsi_disk_resize_cb(void *opaque)
 {
     SCSIDiskState *s = opaque;
@@ -2414,16 +2428,19 @@ static bool scsi_cd_is_medium_locked(void *opaque)
 }
 
 static const BlockDevOps scsi_disk_removable_block_ops = {
-    .change_media_cb = scsi_cd_change_media_cb,
+    .change_media_cb  = scsi_cd_change_media_cb,
+    .drained_begin    = scsi_disk_drained_begin,
+    .drained_end      = scsi_disk_drained_end,
     .eject_request_cb = scsi_cd_eject_request_cb,
-    .is_tray_open = scsi_cd_is_tray_open,
     .is_medium_locked = scsi_cd_is_medium_locked,
+    .is_tray_open     = scsi_cd_is_tray_open,
 
-    .resize_cb = scsi_disk_resize_cb,
+    .resize_cb        = scsi_disk_resize_cb,
 };
 
 static const BlockDevOps scsi_disk_block_ops = {
-    .resize_cb = scsi_disk_resize_cb,
+    .drained_begin = scsi_disk_drained_begin,
+    .drained_end   = scsi_disk_drained_end,
+    .resize_cb     = scsi_disk_resize_cb,
 };
 
 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
hw/scsi/trace-events
@@ -6,6 +6,8 @@ scsi_req_cancel(int target, int lun, int tag) "target %d lun %d tag %d"
 scsi_req_data(int target, int lun, int tag, int len) "target %d lun %d tag %d len %d"
 scsi_req_data_canceled(int target, int lun, int tag, int len) "target %d lun %d tag %d len %d"
 scsi_req_dequeue(int target, int lun, int tag) "target %d lun %d tag %d"
+scsi_bus_drained_begin(void *bus, void *sdev) "bus %p sdev %p"
+scsi_bus_drained_end(void *bus, void *sdev) "bus %p sdev %p"
 scsi_req_continue(int target, int lun, int tag) "target %d lun %d tag %d"
 scsi_req_continue_canceled(int target, int lun, int tag) "target %d lun %d tag %d"
 scsi_req_parsed(int target, int lun, int tag, int cmd, int mode, int xfer) "target %d lun %d tag %d command %d dir %d length %d"
hw/scsi/virtio-scsi-dataplane.c
@@ -158,14 +158,16 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)
     s->dataplane_starting = false;
     s->dataplane_started = true;
 
-    aio_context_acquire(s->ctx);
-    virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
-    virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx);
+    if (s->bus.drain_count == 0) {
+        aio_context_acquire(s->ctx);
+        virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
+        virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx);
 
-    for (i = 0; i < vs->conf.num_queues; i++) {
-        virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);
+        for (i = 0; i < vs->conf.num_queues; i++) {
+            virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);
+        }
+        aio_context_release(s->ctx);
     }
-    aio_context_release(s->ctx);
     return 0;
 
 fail_host_notifiers:
@@ -211,7 +213,9 @@ void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
     }
     s->dataplane_stopping = true;
 
-    aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s);
+    if (s->bus.drain_count == 0) {
+        aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s);
+    }
 
     blk_drain_all(); /* ensure there are no in-flight requests */
 
hw/scsi/virtio-scsi.c
@@ -1117,6 +1117,42 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
     }
 }
 
+/* Suspend virtqueue ioeventfd processing during drain */
+static void virtio_scsi_drained_begin(SCSIBus *bus)
+{
+    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
+    VirtIODevice *vdev = VIRTIO_DEVICE(s);
+    uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED +
+                            s->parent_obj.conf.num_queues;
+
+    if (!s->dataplane_started) {
+        return;
+    }
+
+    for (uint32_t i = 0; i < total_queues; i++) {
+        VirtQueue *vq = virtio_get_queue(vdev, i);
+        virtio_queue_aio_detach_host_notifier(vq, s->ctx);
+    }
+}
+
+/* Resume virtqueue ioeventfd processing after drain */
+static void virtio_scsi_drained_end(SCSIBus *bus)
+{
+    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
+    VirtIODevice *vdev = VIRTIO_DEVICE(s);
+    uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED +
+                            s->parent_obj.conf.num_queues;
+
+    if (!s->dataplane_started) {
+        return;
+    }
+
+    for (uint32_t i = 0; i < total_queues; i++) {
+        VirtQueue *vq = virtio_get_queue(vdev, i);
+        virtio_queue_aio_attach_host_notifier(vq, s->ctx);
+    }
+}
+
 static struct SCSIBusInfo virtio_scsi_scsi_info = {
     .tcq = true,
     .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
@@ -1131,6 +1167,8 @@ static struct SCSIBusInfo virtio_scsi_scsi_info = {
     .get_sg_list = virtio_scsi_get_sg_list,
     .save_request = virtio_scsi_save_request,
     .load_request = virtio_scsi_load_request,
+    .drained_begin = virtio_scsi_drained_begin,
+    .drained_end = virtio_scsi_drained_end,
 };
 
 void virtio_scsi_common_realize(DeviceState *dev,
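virtio_scsi_drained_begin() above suspends request processing by detaching the virtqueue host notifiers (ioeventfds) from the AioContext, and virtio_scsi_drained_end() re-attaches them so deferred guest kicks are handled again. The following standalone Linux sketch shows the same detach/re-attach idea with a plain eventfd and epoll standing in for the ioeventfd and the AioContext; it is an illustration, not QEMU code, and error handling is omitted for brevity.

/* Standalone Linux sketch: pending work on an eventfd is not serviced while
 * the fd is deregistered from the event loop, and is picked up again once
 * the fd is re-registered, mirroring detach/attach of a host notifier. */
#include <stdint.h>
#include <stdio.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <unistd.h>

static int poll_once(int epfd, int timeout_ms)
{
    struct epoll_event ev;
    return epoll_wait(epfd, &ev, 1, timeout_ms);   /* number of ready fds */
}

int main(void)
{
    int efd = eventfd(0, EFD_NONBLOCK);
    int epfd = epoll_create1(0);
    struct epoll_event ev = { .events = EPOLLIN, .data.fd = efd };
    uint64_t one = 1;

    epoll_ctl(epfd, EPOLL_CTL_ADD, efd, &ev);           /* "attach notifier" */
    write(efd, &one, sizeof(one));                      /* guest kicks the queue */
    printf("attached:   %d event(s)\n", poll_once(epfd, 0));   /* -> 1 */

    /* drained_begin(): detach so the pending kick is not processed */
    epoll_ctl(epfd, EPOLL_CTL_DEL, efd, NULL);
    write(efd, &one, sizeof(one));                      /* another kick arrives */
    printf("detached:   %d event(s)\n", poll_once(epfd, 10));  /* -> 0 */

    /* drained_end(): re-attach and the deferred kicks are handled */
    epoll_ctl(epfd, EPOLL_CTL_ADD, efd, &ev);
    printf("reattached: %d event(s)\n", poll_once(epfd, 0));   /* -> 1 */

    close(epfd);
    close(efd);
    return 0;
}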
include/hw/scsi/scsi.h
@@ -133,6 +133,16 @@ struct SCSIBusInfo {
     void (*save_request)(QEMUFile *f, SCSIRequest *req);
     void *(*load_request)(QEMUFile *f, SCSIRequest *req);
     void (*free_request)(SCSIBus *bus, void *priv);
+
+    /*
+     * Temporarily stop submitting new requests between drained_begin() and
+     * drained_end(). Called from the main loop thread with the BQL held.
+     *
+     * Implement these callbacks if request processing is triggered by a file
+     * descriptor like an EventNotifier. Otherwise set them to NULL.
+     */
+    void (*drained_begin)(SCSIBus *bus);
+    void (*drained_end)(SCSIBus *bus);
 };
 
 #define TYPE_SCSI_BUS "SCSI"
@@ -144,6 +154,8 @@ struct SCSIBus {
 
     SCSISense unit_attention;
     const SCSIBusInfo *info;
+
+    int drain_count; /* protected by BQL */
 };
 
 /**
@@ -213,6 +225,8 @@ void scsi_req_cancel_complete(SCSIRequest *req);
 void scsi_req_cancel(SCSIRequest *req);
 void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier);
 void scsi_req_retry(SCSIRequest *req);
+void scsi_device_drained_begin(SCSIDevice *sdev);
+void scsi_device_drained_end(SCSIDevice *sdev);
 void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense);
 void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense);
 void scsi_device_report_change(SCSIDevice *dev, SCSISense sense);