/*
 * Virtio SCSI HBA
 *
 * Copyright IBM, Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *   Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *   Paolo Bonzini      <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/virtio/virtio-scsi.h"
#include "migration/qemu-file-types.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "trace.h"

typedef struct VirtIOSCSIReq {
    /*
     * Note:
     * - fields up to resp_iov are initialized by virtio_scsi_init_req;
     * - fields after resp_iov are zeroed by virtio_scsi_init_req.
     */
    VirtQueueElement elem;

    VirtIOSCSI *dev;
    VirtQueue *vq;
    QEMUSGList qsgl;
    QEMUIOVector resp_iov;

    /* Used for two-stage request submission and TMFs deferred to BH */
    QTAILQ_ENTRY(VirtIOSCSIReq) next;

    /* Used for cancellation of request during TMFs */
    int remaining;

    SCSIRequest *sreq;
    size_t resp_size;
    enum SCSIXferMode mode;
    union {
        VirtIOSCSICmdResp     cmd;
        VirtIOSCSICtrlTMFResp tmf;
        VirtIOSCSICtrlANResp  an;
        VirtIOSCSIEvent       event;
    } resp;
    union {
        VirtIOSCSICmdReq      cmd;
        VirtIOSCSICtrlTMFReq  tmf;
        VirtIOSCSICtrlANReq   an;
    } req;
} VirtIOSCSIReq;
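
/*
 * virtio-scsi addresses devices with an 8-byte LUN field: byte 0 is fixed to
 * 1, byte 1 selects the target, and bytes 2-3 carry a single-level LUN in
 * SAM flat addressing. The 0x3FFF mask below strips the addressing-method
 * bits.
 */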
static inline int virtio_scsi_get_lun(uint8_t *lun)
{
    return ((lun[2] << 8) | lun[3]) & 0x3FFF;
}

static inline SCSIDevice *virtio_scsi_device_get(VirtIOSCSI *s, uint8_t *lun)
{
    if (lun[0] != 1) {
        return NULL;
    }
    if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) {
        return NULL;
    }
    return scsi_device_get(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
}

static void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    const size_t zero_skip =
        offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);

    req->vq = vq;
    req->dev = s;
    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, vdev->dma_as);
    qemu_iovec_init(&req->resp_iov, 1);
    memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
}

static void virtio_scsi_free_req(VirtIOSCSIReq *req)
{
    qemu_iovec_destroy(&req->resp_iov);
    qemu_sglist_destroy(&req->qsgl);
    g_free(req);
}
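
/*
 * Copy the response header into the guest's buffers, push the element onto
 * the virtqueue and notify the guest (through the irqfd path when dataplane
 * is active), then drop the reference to the SCSI-layer request and free req.
 */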
static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    VirtQueue *vq = req->vq;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
    virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
    if (s->dataplane_started && !s->dataplane_fenced) {
        virtio_notify_irqfd(vdev, vq);
    } else {
        virtio_notify(vdev, vq);
    }

    if (req->sreq) {
        req->sreq->hba_private = NULL;
        scsi_req_unref(req->sreq);
    }
    virtio_scsi_free_req(req);
}

static void virtio_scsi_complete_req_bh(void *opaque)
{
    VirtIOSCSIReq *req = opaque;

    virtio_scsi_complete_req(req);
}

/*
 * Called from virtio_scsi_do_one_tmf_bh() in main loop thread. The main loop
 * thread cannot touch the virtqueue since that could race with an IOThread.
 */
static void virtio_scsi_complete_req_from_main_loop(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;

    if (!s->ctx || s->ctx == qemu_get_aio_context()) {
        /* No need to schedule a BH when there is no IOThread */
        virtio_scsi_complete_req(req);
    } else {
        /* Run request completion in the IOThread */
        aio_wait_bh_oneshot(s->ctx, virtio_scsi_complete_req_bh, req);
    }
}

static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
    virtqueue_detach_element(req->vq, &req->elem, 0);
    virtio_scsi_free_req(req);
}
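
/*
 * Append an iovec array to the request's DMA scatter/gather list, skipping
 * the first "skip" bytes (the virtio-scsi header), and return the number of
 * bytes actually added.
 */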
static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
                              hwaddr *addr, int num, size_t skip)
{
    QEMUSGList *qsgl = &req->qsgl;
    size_t copied = 0;

    while (num) {
        if (skip >= iov->iov_len) {
            skip -= iov->iov_len;
        } else {
            qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip);
            copied += iov->iov_len - skip;
            skip = 0;
        }
        iov++;
        addr++;
        num--;
    }

    assert(skip == 0);
    return copied;
}
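
/*
 * Read the fixed-size request header from the driver-provided out buffers,
 * map the start of the in buffers as the response header (resp_iov) and add
 * the remaining buffers to the data scatter/gather list. Returns 0 on
 * success, -EINVAL if a header is too short and -ENOTSUP for bidirectional
 * transfers.
 */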
static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
                                 unsigned req_size, unsigned resp_size)
{
    VirtIODevice *vdev = (VirtIODevice *) req->dev;
    size_t in_size, out_size;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &req->req, req_size) < req_size) {
        return -EINVAL;
    }

    if (qemu_iovec_concat_iov(&req->resp_iov,
                              req->elem.in_sg, req->elem.in_num, 0,
                              resp_size) < resp_size) {
        return -EINVAL;
    }

    req->resp_size = resp_size;

    /* Old BIOSes left some padding by mistake after the req_size/resp_size.
     * As a workaround, always consider the first buffer as the virtio-scsi
     * request/response, making the payload start at the second element
     * of the iovec.
     *
     * The actual length of the response header, stored in req->resp_size,
     * does not change.
     *
     * TODO: always disable this workaround for virtio 1.0 devices.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
        if (req->elem.out_num) {
            req_size = req->elem.out_sg[0].iov_len;
        }
        if (req->elem.in_num) {
            resp_size = req->elem.in_sg[0].iov_len;
        }
    }

    out_size = qemu_sgl_concat(req, req->elem.out_sg,
                               &req->elem.out_addr[0], req->elem.out_num,
                               req_size);
    in_size = qemu_sgl_concat(req, req->elem.in_sg,
                              &req->elem.in_addr[0], req->elem.in_num,
                              resp_size);

    if (out_size && in_size) {
        return -ENOTSUP;
    }

    if (out_size) {
        req->mode = SCSI_XFER_TO_DEV;
    } else if (in_size) {
        req->mode = SCSI_XFER_FROM_DEV;
    }

    return 0;
}

static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
    VirtIOSCSIReq *req;

    req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
    if (!req) {
        return NULL;
    }
    virtio_scsi_init_req(s, vq, req);
    return req;
}

static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    VirtIOSCSIReq *req = sreq->hba_private;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev);
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    uint32_t n = virtio_get_queue_index(req->vq) - VIRTIO_SCSI_VQ_NUM_FIXED;

    assert(n < vs->conf.num_queues);
    qemu_put_be32s(f, &n);
    qemu_put_virtqueue_element(vdev, f, &req->elem);
}

static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOSCSIReq *req;
    uint32_t n;

    qemu_get_be32s(f, &n);
    assert(n < vs->conf.num_queues);
    req = qemu_get_virtqueue_element(vdev, f,
                                     sizeof(VirtIOSCSIReq) + vs->cdb_size);
    virtio_scsi_init_req(s, vs->cmd_vqs[n], req);

    if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                              sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
        error_report("invalid SCSI request migration data");
        exit(1);
    }

    scsi_req_ref(sreq);
    req->sreq = sreq;
    if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
        assert(req->sreq->cmd.mode == req->mode);
    }
    return req;
}
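
/*
 * Cancellation bookkeeping for TMFs: one notifier is allocated per command
 * being cancelled, and the TMF request is completed only when the last
 * outstanding cancellation has finished ("remaining" drops to zero).
 */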
typedef struct {
    Notifier        notifier;
    VirtIOSCSIReq  *tmf_req;
} VirtIOSCSICancelNotifier;

static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
    VirtIOSCSICancelNotifier *n = container_of(notifier,
                                               VirtIOSCSICancelNotifier,
                                               notifier);

    if (--n->tmf_req->remaining == 0) {
        VirtIOSCSIReq *req = n->tmf_req;

        trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                   req->req.tmf.tag, req->resp.tmf.response);
        virtio_scsi_complete_req(req);
    }
    g_free(n);
}

static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
{
    if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
        assert(blk_get_aio_context(d->conf.blk) == s->ctx);
    }
}
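
/*
 * Performs the actual LUN or I_T nexus reset for a deferred TMF. Runs in the
 * main loop thread via the virtio_scsi_do_tmf_bh() bottom half; completion is
 * then pushed back through virtio_scsi_complete_req_from_main_loop().
 */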
static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
    BusChild *kid;
    int target;

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
        if (!d) {
            req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
            goto out;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
            goto out;
        }
        qatomic_inc(&s->resetting);
        device_cold_reset(&d->qdev);
        qatomic_dec(&s->resetting);
        break;

    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        target = req->req.tmf.lun[1];
        qatomic_inc(&s->resetting);

        rcu_read_lock();
        QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
            SCSIDevice *d1 = SCSI_DEVICE(kid->child);
            if (d1->channel == 0 && d1->id == target) {
                device_cold_reset(&d1->qdev);
            }
        }
        rcu_read_unlock();

        qatomic_dec(&s->resetting);
        break;

    default:
        g_assert_not_reached();
        break;
    }

out:
    object_unref(OBJECT(d));
    virtio_scsi_complete_req_from_main_loop(req);
}

/* Some TMFs must be processed from the main loop thread */
static void virtio_scsi_do_tmf_bh(void *opaque)
{
    VirtIOSCSI *s = opaque;
    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
    VirtIOSCSIReq *req;
    VirtIOSCSIReq *tmp;

    GLOBAL_STATE_CODE();

    WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
        QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
            QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
            QTAILQ_INSERT_TAIL(&reqs, req, next);
        }

        qemu_bh_delete(s->tmf_bh);
        s->tmf_bh = NULL;
    }

    QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) {
        QTAILQ_REMOVE(&reqs, req, next);
        virtio_scsi_do_one_tmf_bh(req);
    }
}

static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
{
    VirtIOSCSIReq *req;
    VirtIOSCSIReq *tmp;

    GLOBAL_STATE_CODE();

    /* Called after ioeventfd has been stopped, so tmf_bh_lock is not needed */
    if (s->tmf_bh) {
        qemu_bh_delete(s->tmf_bh);
        s->tmf_bh = NULL;
    }

    QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
        QTAILQ_REMOVE(&s->tmf_bh_list, req, next);

        /* SAM-6 6.3.2 Hard reset */
        req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE;
        virtio_scsi_complete_req(req);
    }
}

static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;

    WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
        QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);

        if (!s->tmf_bh) {
            s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
            qemu_bh_schedule(s->tmf_bh);
        }
    }
}

/* Return 0 if the request is ready to be completed and return to guest;
 * -EINPROGRESS if the request is submitted and will be completed later, in the
 * case of async cancellation. */
static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
    SCSIRequest *r, *next;
    int ret = 0;

    virtio_scsi_ctx_check(s, d);
    /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
    req->resp.tmf.response = VIRTIO_SCSI_S_OK;

    /*
     * req->req.tmf has the QEMU_PACKED attribute. Don't use virtio_tswap32s()
     * to avoid compiler errors.
     */
    req->req.tmf.subtype =
        virtio_tswap32(VIRTIO_DEVICE(s), req->req.tmf.subtype);

    trace_virtio_scsi_tmf_req(virtio_scsi_get_lun(req->req.tmf.lun),
                              req->req.tmf.tag, req->req.tmf.subtype);

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;
            if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
                break;
            }
        }
        if (r) {
            /*
             * Assert that the request has not been completed yet, we
             * check for it in the loop above.
             */
            assert(r->hba_private);
            if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
                /* "If the specified command is present in the task set, then
                 * return a service response set to FUNCTION SUCCEEDED".
                 */
                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
            } else {
                VirtIOSCSICancelNotifier *notifier;

                req->remaining = 1;
                notifier = g_new(VirtIOSCSICancelNotifier, 1);
                notifier->tmf_req = req;
                notifier->notifier.notify = virtio_scsi_cancel_notify;
                scsi_req_cancel_async(r, &notifier->notifier);
                ret = -EINPROGRESS;
            }
        }
        break;

    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        virtio_scsi_defer_tmf_to_bh(req);
        ret = -EINPROGRESS;
        break;

    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
         * This way, if the bus starts calling back to the notifiers
         * even before we finish the loop, virtio_scsi_cancel_notify
         * will not complete the TMF too early.
         */
        req->remaining = 1;
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            if (r->hba_private) {
                if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
                    /* "If there is any command present in the task set, then
                     * return a service response set to FUNCTION SUCCEEDED".
                     */
                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
                    break;
                } else {
                    VirtIOSCSICancelNotifier *notifier;

                    req->remaining++;
                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
                    notifier->notifier.notify = virtio_scsi_cancel_notify;
                    notifier->tmf_req = req;
                    scsi_req_cancel_async(r, &notifier->notifier);
                }
            }
        }
        if (--req->remaining > 0) {
            ret = -EINPROGRESS;
        }
        break;

    case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
    default:
        req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
        break;
    }

    object_unref(OBJECT(d));
    return ret;

incorrect_lun:
    req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
    object_unref(OBJECT(d));
    return ret;

fail:
    req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
    object_unref(OBJECT(d));
    return ret;
}
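
/*
 * The control virtqueue carries task management (TMF) and asynchronous
 * notification (AN) requests. The 32-bit type field is read first to decide
 * how the rest of the header must be parsed.
 */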
static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = (VirtIODevice *)s;
    uint32_t type;
    int r = 0;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &type, sizeof(type)) < sizeof(type)) {
        virtio_scsi_bad_req(req);
        return;
    }

    virtio_tswap32s(vdev, &type);
    if (type == VIRTIO_SCSI_T_TMF) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
                                  sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            r = virtio_scsi_do_tmf(s, req);
        }

    } else if (type == VIRTIO_SCSI_T_AN_QUERY ||
               type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
                                  sizeof(VirtIOSCSICtrlANResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            req->req.an.event_requested =
                virtio_tswap32(VIRTIO_DEVICE(s), req->req.an.event_requested);
            trace_virtio_scsi_an_req(virtio_scsi_get_lun(req->req.an.lun),
                                     req->req.an.event_requested);
            req->resp.an.event_actual = 0;
            req->resp.an.response = VIRTIO_SCSI_S_OK;
        }
    }
    if (r == 0) {
        if (type == VIRTIO_SCSI_T_TMF)
            trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                       req->req.tmf.tag,
                                       req->resp.tmf.response);
        else if (type == VIRTIO_SCSI_T_AN_QUERY ||
                 type == VIRTIO_SCSI_T_AN_SUBSCRIBE)
            trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
                                      req->resp.an.response);
        virtio_scsi_complete_req(req);
    } else {
        assert(r == -EINPROGRESS);
    }
}

static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req;

    while ((req = virtio_scsi_pop_req(s, vq))) {
        virtio_scsi_handle_ctrl_req(s, req);
    }
}

/*
 * If dataplane is configured but not yet started, do so now and return true on
 * success.
 *
 * Dataplane is started by the core virtio code but virtqueue handler functions
 * can also be invoked when a guest kicks before DRIVER_OK, so this helper
 * function helps us deal with manually starting ioeventfd in that case.
 */
static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s)
{
    if (!s->ctx || s->dataplane_started) {
        return false;
    }

    virtio_device_start_ioeventfd(&s->parent_obj.parent_obj);
    return !s->dataplane_fenced;
}

static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_handle_ctrl_vq(s, vq);
}

static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
{
    trace_virtio_scsi_cmd_resp(virtio_scsi_get_lun(req->req.cmd.lun),
                               req->req.cmd.tag,
                               req->resp.cmd.response,
                               req->resp.cmd.status);
    /* Sense data is not in req->resp and is copied separately
     * in virtio_scsi_command_complete.
     */
    req->resp_size = sizeof(VirtIOSCSICmdResp);
    virtio_scsi_complete_req(req);
}
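
/* Translate a host_status error from the SCSI layer into the corresponding
 * virtio-scsi response code.
 */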
static void virtio_scsi_command_failed(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.status = GOOD;
    switch (r->host_status) {
    case SCSI_HOST_NO_LUN:
        req->resp.cmd.response = VIRTIO_SCSI_S_INCORRECT_LUN;
        break;
    case SCSI_HOST_BUSY:
        req->resp.cmd.response = VIRTIO_SCSI_S_BUSY;
        break;
    case SCSI_HOST_TIME_OUT:
    case SCSI_HOST_ABORTED:
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
        break;
    case SCSI_HOST_BAD_RESPONSE:
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        break;
    case SCSI_HOST_RESET:
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
        break;
    case SCSI_HOST_TRANSPORT_DISRUPTED:
        req->resp.cmd.response = VIRTIO_SCSI_S_TRANSPORT_FAILURE;
        break;
    case SCSI_HOST_TARGET_FAILURE:
        req->resp.cmd.response = VIRTIO_SCSI_S_TARGET_FAILURE;
        break;
    case SCSI_HOST_RESERVATION_ERROR:
        req->resp.cmd.response = VIRTIO_SCSI_S_NEXUS_FAILURE;
        break;
    case SCSI_HOST_ALLOCATION_FAILURE:
    case SCSI_HOST_MEDIUM_ERROR:
    case SCSI_HOST_ERROR:
    default:
        req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
        break;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_command_complete(SCSIRequest *r, size_t resid)
{
    VirtIOSCSIReq *req = r->hba_private;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];
    uint32_t sense_len;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.response = VIRTIO_SCSI_S_OK;
    req->resp.cmd.status = r->status;
    if (req->resp.cmd.status == GOOD) {
        req->resp.cmd.resid = virtio_tswap32(vdev, resid);
    } else {
        req->resp.cmd.resid = 0;
        sense_len = scsi_req_get_sense(r, sense, sizeof(sense));
        sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd));
        qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd),
                            sense, sense_len);
        req->resp.cmd.sense_len = virtio_tswap32(vdev, sense_len);
    }
    virtio_scsi_complete_cmd_req(req);
}

static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                 uint8_t *buf, size_t buf_len,
                                 void *hba_private)
{
    VirtIOSCSIReq *req = hba_private;

    if (cmd->len == 0) {
        cmd->len = MIN(VIRTIO_SCSI_CDB_DEFAULT_SIZE, SCSI_CMD_BUF_SIZE);
        memcpy(cmd->buf, buf, cmd->len);
    }

    /* Extract the direction and mode directly from the request, for
     * host device passthrough.
     */
    cmd->xfer = req->qsgl.size;
    cmd->mode = req->mode;
    return 0;
}

static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    return &req->qsgl;
}

static void virtio_scsi_request_cancelled(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (!req) {
        return;
    }
    if (qatomic_read(&req->dev->resetting)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
    } else {
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
    req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
    virtio_scsi_complete_cmd_req(req);
}
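
/*
 * Command requests are handled in two stages: _prepare() parses and validates
 * the request and creates the SCSIRequest, while _submit() hands it to the
 * SCSI layer. The defer_call_begin()/defer_call_end() bracket around the two
 * stages lets work triggered by submission be batched across requests.
 */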
static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
|
2014-08-06 07:35:00 +02:00
|
|
|
{
|
2023-10-16 17:00:29 +02:00
|
|
|
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
|
2014-08-06 07:35:00 +02:00
|
|
|
SCSIDevice *d;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
|
|
|
|
sizeof(VirtIOSCSICmdResp) + vs->sense_size);
|
|
|
|
if (rc < 0) {
|
|
|
|
if (rc == -ENOTSUP) {
|
|
|
|
virtio_scsi_fail_cmd_req(req);
|
virtio-scsi: convert virtio_scsi_bad_req() to use virtio_error()
The virtio_scsi_bad_req() function is called when a guest sends a
request with missing or ill-sized headers. This generally happens
when the virtio_scsi_parse_req() function returns an error.
With this patch, virtio_scsi_bad_req() will mark the device as broken,
detach the request from the virtqueue and free it, instead of forcing
QEMU to exit.
In nearly all locations where virtio_scsi_bad_req() is called, the only
thing to do next is to return to the caller.
The virtio_scsi_handle_cmd_req_prepare() function is an exception though.
It is called in a loop by virtio_scsi_handle_cmd_vq() and passed requests
freshly popped from a cmd virtqueue; virtio_scsi_handle_cmd_req_prepare()
does some sanity checks on the request and returns a boolean flag to
indicate whether the request should be queued or not. In the latter case,
virtio_scsi_handle_cmd_req_prepare() has detected a non-fatal error and
sent a response back to the guest.
We have now a new condition to take into account: the device is broken
and should stop all processing.
The return value of virtio_scsi_handle_cmd_req_prepare() is hence changed
to an int. A return value of zero means that the request should be queued.
Other non-fatal error cases where the request shoudn't be queued return
a negative errno (values are vaguely inspired by the error condition, but
the only goal here is to discriminate the case we're interested in).
And finally, if virtio_scsi_bad_req() was called, -EINVAL is returned. In
this case, virtio_scsi_handle_cmd_vq() detaches and frees already queued
requests, instead of submitting them.
Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2016-09-30 17:13:40 +02:00
|
|
|
return -ENOTSUP;
|
2014-08-06 07:35:00 +02:00
|
|
|
} else {
|
virtio-scsi: convert virtio_scsi_bad_req() to use virtio_error()
The virtio_scsi_bad_req() function is called when a guest sends a
request with missing or ill-sized headers. This generally happens
when the virtio_scsi_parse_req() function returns an error.
With this patch, virtio_scsi_bad_req() will mark the device as broken,
detach the request from the virtqueue and free it, instead of forcing
QEMU to exit.
In nearly all locations where virtio_scsi_bad_req() is called, the only
thing to do next is to return to the caller.
The virtio_scsi_handle_cmd_req_prepare() function is an exception though.
It is called in a loop by virtio_scsi_handle_cmd_vq() and passed requests
freshly popped from a cmd virtqueue; virtio_scsi_handle_cmd_req_prepare()
does some sanity checks on the request and returns a boolean flag to
indicate whether the request should be queued or not. In the latter case,
virtio_scsi_handle_cmd_req_prepare() has detected a non-fatal error and
sent a response back to the guest.
We have now a new condition to take into account: the device is broken
and should stop all processing.
The return value of virtio_scsi_handle_cmd_req_prepare() is hence changed
to an int. A return value of zero means that the request should be queued.
Other non-fatal error cases where the request shoudn't be queued return
a negative errno (values are vaguely inspired by the error condition, but
the only goal here is to discriminate the case we're interested in).
And finally, if virtio_scsi_bad_req() was called, -EINVAL is returned. In
this case, virtio_scsi_handle_cmd_vq() detaches and frees already queued
requests, instead of submitting them.
Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2016-09-30 17:13:40 +02:00
|
|
|
virtio_scsi_bad_req(req);
|
|
|
|
return -EINVAL;
|
2014-08-06 07:35:00 +02:00
|
|
|
}
|
|
|
|
}
|
2020-11-16 19:31:12 +01:00
|
|
|
trace_virtio_scsi_cmd_req(virtio_scsi_get_lun(req->req.cmd.lun),
|
|
|
|
req->req.cmd.tag, req->req.cmd.cdb[0]);
|
2014-08-06 07:35:00 +02:00
|
|
|
|
2020-10-06 14:39:03 +02:00
|
|
|
d = virtio_scsi_device_get(s, req->req.cmd.lun);
|
2014-08-06 07:35:00 +02:00
|
|
|
if (!d) {
|
|
|
|
req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
|
|
|
|
virtio_scsi_complete_cmd_req(req);
|
virtio-scsi: convert virtio_scsi_bad_req() to use virtio_error()
The virtio_scsi_bad_req() function is called when a guest sends a
request with missing or ill-sized headers. This generally happens
when the virtio_scsi_parse_req() function returns an error.
With this patch, virtio_scsi_bad_req() will mark the device as broken,
detach the request from the virtqueue and free it, instead of forcing
QEMU to exit.
In nearly all locations where virtio_scsi_bad_req() is called, the only
thing to do next is to return to the caller.
The virtio_scsi_handle_cmd_req_prepare() function is an exception though.
It is called in a loop by virtio_scsi_handle_cmd_vq() and passed requests
freshly popped from a cmd virtqueue; virtio_scsi_handle_cmd_req_prepare()
does some sanity checks on the request and returns a boolean flag to
indicate whether the request should be queued or not. In the latter case,
virtio_scsi_handle_cmd_req_prepare() has detected a non-fatal error and
sent a response back to the guest.
We have now a new condition to take into account: the device is broken
and should stop all processing.
The return value of virtio_scsi_handle_cmd_req_prepare() is hence changed
to an int. A return value of zero means that the request should be queued.
Other non-fatal error cases where the request shoudn't be queued return
a negative errno (values are vaguely inspired by the error condition, but
the only goal here is to discriminate the case we're interested in).
And finally, if virtio_scsi_bad_req() was called, -EINVAL is returned. In
this case, virtio_scsi_handle_cmd_vq() detaches and frees already queued
requests, instead of submitting them.
Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2016-09-30 17:13:40 +02:00
|
|
|
return -ENOENT;
|
2014-08-06 07:35:00 +02:00
|
|
|
}
|
2016-09-14 12:17:04 +02:00
|
|
|
virtio_scsi_ctx_check(s, d);
|
2014-08-06 07:35:00 +02:00
|
|
|
req->sreq = scsi_req_new(d, req->req.cmd.tag,
|
|
|
|
virtio_scsi_get_lun(req->req.cmd.lun),
|
2022-08-17 07:34:58 +02:00
|
|
|
req->req.cmd.cdb, vs->cdb_size, req);
|
2014-08-06 07:35:00 +02:00
|
|
|
|
|
|
|
if (req->sreq->cmd.mode != SCSI_XFER_NONE
|
|
|
|
&& (req->sreq->cmd.mode != req->mode ||
|
|
|
|
req->sreq->cmd.xfer > req->qsgl.size)) {
|
|
|
|
req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
|
|
|
|
virtio_scsi_complete_cmd_req(req);
|
2020-10-06 14:39:03 +02:00
|
|
|
object_unref(OBJECT(d));
|
virtio-scsi: convert virtio_scsi_bad_req() to use virtio_error()
The virtio_scsi_bad_req() function is called when a guest sends a
request with missing or ill-sized headers. This generally happens
when the virtio_scsi_parse_req() function returns an error.
With this patch, virtio_scsi_bad_req() will mark the device as broken,
detach the request from the virtqueue and free it, instead of forcing
QEMU to exit.
In nearly all locations where virtio_scsi_bad_req() is called, the only
thing to do next is to return to the caller.
The virtio_scsi_handle_cmd_req_prepare() function is an exception though.
It is called in a loop by virtio_scsi_handle_cmd_vq() and passed requests
freshly popped from a cmd virtqueue; virtio_scsi_handle_cmd_req_prepare()
does some sanity checks on the request and returns a boolean flag to
indicate whether the request should be queued or not. In the latter case,
virtio_scsi_handle_cmd_req_prepare() has detected a non-fatal error and
sent a response back to the guest.
We have now a new condition to take into account: the device is broken
and should stop all processing.
The return value of virtio_scsi_handle_cmd_req_prepare() is hence changed
to an int. A return value of zero means that the request should be queued.
Other non-fatal error cases where the request shoudn't be queued return
a negative errno (values are vaguely inspired by the error condition, but
the only goal here is to discriminate the case we're interested in).
And finally, if virtio_scsi_bad_req() was called, -EINVAL is returned. In
this case, virtio_scsi_handle_cmd_vq() detaches and frees already queued
requests, instead of submitting them.
Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2016-09-30 17:13:40 +02:00
|
|
|
return -ENOBUFS;
|
2014-08-06 07:35:00 +02:00
|
|
|
}
|
2014-09-23 09:49:29 +02:00
|
|
|
scsi_req_ref(req->sreq);
|
2023-09-13 22:00:42 +02:00
|
|
|
defer_call_begin();
|
2020-10-06 14:39:03 +02:00
|
|
|
object_unref(OBJECT(d));
|
2016-09-30 17:13:40 +02:00
|
|
|
return 0;
|
2014-09-23 09:49:27 +02:00
|
|
|
}
|
2014-08-06 07:35:00 +02:00
|
|
|
|
2016-04-06 12:16:27 +02:00
|
|
|
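/*
 * Second stage: hand a prepared request to the SCSI layer and start I/O.
 * This balances the defer_call_begin() and scsi_req_ref() taken during the
 * prepare stage.
 */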
static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
|
2014-09-23 09:49:27 +02:00
|
|
|
{
|
2014-10-08 01:19:00 +02:00
|
|
|
SCSIRequest *sreq = req->sreq;
|
|
|
|
if (scsi_req_enqueue(sreq)) {
|
|
|
|
scsi_req_continue(sreq);
|
2014-08-06 07:35:00 +02:00
|
|
|
}
|
2023-09-13 22:00:42 +02:00
|
|
|
defer_call_end();
|
2014-10-08 01:19:00 +02:00
|
|
|
scsi_req_unref(sreq);
|
2014-08-06 07:35:00 +02:00
|
|
|
}
|
|
|
|
|
2022-04-27 16:35:40 +02:00
|
|
|
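/*
 * Two-stage command handling: pop and prepare every available request with
 * guest notifications suppressed, queueing the good ones locally. If a
 * malformed request marks the device broken (-EINVAL), drop everything
 * already queued; otherwise submit the queued requests in a second pass.
 */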
static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
|
2011-02-11 09:40:59 +01:00
|
|
|
{
|
2014-09-23 09:49:28 +02:00
|
|
|
VirtIOSCSIReq *req, *next;
|
2016-12-01 20:26:47 +01:00
|
|
|
int ret = 0;
|
2019-12-09 22:09:57 +01:00
|
|
|
bool suppress_notifications = virtio_queue_get_notification(vq);
|
2016-09-30 17:13:40 +02:00
|
|
|
|
2014-09-23 09:49:28 +02:00
|
|
|
QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
|
2011-02-13 11:55:52 +01:00
|
|
|
|
2016-12-01 20:26:47 +01:00
|
|
|
do {
|
2019-12-09 22:09:57 +01:00
|
|
|
if (suppress_notifications) {
|
|
|
|
virtio_queue_set_notification(vq, 0);
|
|
|
|
}
|
2016-12-01 20:26:47 +01:00
|
|
|
|
|
|
|
while ((req = virtio_scsi_pop_req(s, vq))) {
|
|
|
|
ret = virtio_scsi_handle_cmd_req_prepare(s, req);
|
|
|
|
if (!ret) {
|
|
|
|
QTAILQ_INSERT_TAIL(&reqs, req, next);
|
|
|
|
} else if (ret == -EINVAL) {
|
|
|
|
/* The device is broken and shouldn't process any request */
|
|
|
|
while (!QTAILQ_EMPTY(&reqs)) {
|
|
|
|
req = QTAILQ_FIRST(&reqs);
|
|
|
|
QTAILQ_REMOVE(&reqs, req, next);
|
2023-09-13 22:00:42 +02:00
|
|
|
defer_call_end();
|
2016-12-01 20:26:47 +01:00
|
|
|
scsi_req_unref(req->sreq);
|
|
|
|
virtqueue_detach_element(req->vq, &req->elem, 0);
|
|
|
|
virtio_scsi_free_req(req);
|
|
|
|
}
|
2016-09-30 17:13:40 +02:00
|
|
|
}
|
2014-09-23 09:49:27 +02:00
|
|
|
}
|
2016-12-01 20:26:47 +01:00
|
|
|
|
2019-12-09 22:09:57 +01:00
|
|
|
if (suppress_notifications) {
|
|
|
|
virtio_queue_set_notification(vq, 1);
|
|
|
|
}
|
2016-12-01 20:26:47 +01:00
|
|
|
} while (ret != -EINVAL && !virtio_queue_empty(vq));
|
2014-09-23 09:49:28 +02:00
|
|
|
|
|
|
|
QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
|
|
|
|
virtio_scsi_handle_cmd_req_submit(s, req);
|
|
|
|
}
|
2011-02-11 09:40:59 +01:00
|
|
|
}
|
|
|
|
|
2016-04-06 12:16:27 +02:00
|
|
|
static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
|
|
|
|
{
|
|
|
|
/* use non-QOM casts in the data path */
|
|
|
|
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
|
|
|
|
|
2022-04-27 16:35:36 +02:00
|
|
|
if (virtio_scsi_defer_to_dataplane(s)) {
|
|
|
|
return;
|
2016-04-06 12:16:27 +02:00
|
|
|
}
|
2022-04-27 16:35:36 +02:00
|
|
|
|
2016-04-06 12:16:27 +02:00
|
|
|
virtio_scsi_handle_cmd_vq(s, vq);
|
|
|
|
}
|
|
|
|
|
2011-02-11 09:40:59 +01:00
|
|
|
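/* Fill the virtio-scsi config space in the byte order expected by the guest */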
static void virtio_scsi_get_config(VirtIODevice *vdev,
|
|
|
|
uint8_t *config)
|
|
|
|
{
|
|
|
|
VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
|
2013-03-29 02:08:15 +01:00
|
|
|
VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev);
|
2011-02-11 09:40:59 +01:00
|
|
|
|
2014-06-24 19:48:53 +02:00
|
|
|
virtio_stl_p(vdev, &scsiconf->num_queues, s->conf.num_queues);
|
2019-12-20 15:09:04 +01:00
|
|
|
virtio_stl_p(vdev, &scsiconf->seg_max,
|
|
|
|
s->conf.seg_max_adjust ? s->conf.virtqueue_size - 2 : 128 - 2);
|
2014-06-24 19:48:53 +02:00
|
|
|
virtio_stl_p(vdev, &scsiconf->max_sectors, s->conf.max_sectors);
|
|
|
|
virtio_stl_p(vdev, &scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
|
|
|
|
virtio_stl_p(vdev, &scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
|
|
|
|
virtio_stl_p(vdev, &scsiconf->sense_size, s->sense_size);
|
|
|
|
virtio_stl_p(vdev, &scsiconf->cdb_size, s->cdb_size);
|
|
|
|
virtio_stw_p(vdev, &scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
|
|
|
|
virtio_stw_p(vdev, &scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
|
|
|
|
virtio_stl_p(vdev, &scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
|
2011-02-11 09:40:59 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
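/*
 * Only sense_size and cdb_size are guest-writable; out-of-range values mark
 * the device broken instead of being applied.
 */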
static void virtio_scsi_set_config(VirtIODevice *vdev,
|
|
|
|
const uint8_t *config)
|
|
|
|
{
|
|
|
|
VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
|
2013-03-29 02:08:15 +01:00
|
|
|
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
|
2011-02-11 09:40:59 +01:00
|
|
|
|
2014-06-24 19:48:53 +02:00
|
|
|
if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
|
|
|
|
(uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
|
2016-09-30 17:13:48 +02:00
|
|
|
virtio_error(vdev,
|
|
|
|
"bad data written to virtio-scsi configuration space");
|
|
|
|
return;
|
2011-02-11 09:40:59 +01:00
|
|
|
}
|
|
|
|
|
2014-06-24 19:48:53 +02:00
|
|
|
vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
|
|
|
|
vs->cdb_size = virtio_ldl_p(vdev, &scsiconf->cdb_size);
|
2011-02-11 09:40:59 +01:00
|
|
|
}
|
|
|
|
|
2015-06-01 10:45:40 +02:00
|
|
|
static uint64_t virtio_scsi_get_features(VirtIODevice *vdev,
|
2015-07-27 11:49:19 +02:00
|
|
|
uint64_t requested_features,
|
|
|
|
Error **errp)
|
2011-02-11 09:40:59 +01:00
|
|
|
{
|
2015-04-28 13:51:13 +02:00
|
|
|
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
|
|
|
|
|
|
|
|
/* First, merge in all of the virtio-scsi features supported by the host */
|
|
|
|
requested_features |= s->host_features;
|
2011-02-11 09:40:59 +01:00
|
|
|
return requested_features;
|
|
|
|
}
|
|
|
|
|
|
|
|
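/*
 * Cancel deferred TMF work, cold-reset the SCSI bus while s->resetting is
 * raised, and restore the default sense/CDB sizes.
 */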
static void virtio_scsi_reset(VirtIODevice *vdev)
|
|
|
|
{
|
2013-03-29 02:08:15 +01:00
|
|
|
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
|
|
|
|
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
|
2011-02-11 09:40:59 +01:00
|
|
|
|
2016-10-21 22:48:10 +02:00
|
|
|
assert(!s->dataplane_started);
|
2023-02-21 22:22:18 +01:00
|
|
|
|
|
|
|
virtio_scsi_reset_tmf_bh(s);
|
|
|
|
|
|
|
|
qatomic_inc(&s->resetting);
|
2022-10-13 18:06:22 +02:00
|
|
|
bus_cold_reset(BUS(&s->bus));
|
2023-02-21 22:22:18 +01:00
|
|
|
qatomic_dec(&s->resetting);
|
2013-01-10 15:49:08 +01:00
|
|
|
|
2015-03-11 14:31:29 +01:00
|
|
|
vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
|
|
|
|
vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
|
2012-07-16 14:50:27 +02:00
|
|
|
s->events_dropped = false;
|
2011-02-11 09:40:59 +01:00
|
|
|
}
|
|
|
|
|
2023-05-16 21:02:21 +02:00
|
|
|
typedef struct {
|
|
|
|
uint32_t event;
|
|
|
|
uint32_t reason;
|
|
|
|
union {
|
|
|
|
/* Used by messages specific to a device */
|
|
|
|
struct {
|
|
|
|
uint32_t id;
|
|
|
|
uint32_t lun;
|
|
|
|
} address;
|
|
|
|
};
|
|
|
|
} VirtIOSCSIEventInfo;
|
|
|
|
|
|
|
|
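/*
 * Report an event to the guest: pop a buffer from the event virtqueue (or
 * record that the event was dropped), fold in VIRTIO_SCSI_T_EVENTS_MISSED if
 * earlier events were lost, and encode the LUN as in REPORT LUNS.
 */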
static void virtio_scsi_push_event(VirtIOSCSI *s,
|
|
|
|
const VirtIOSCSIEventInfo *info)
|
2012-06-20 08:47:11 +02:00
|
|
|
{
|
2013-03-29 02:08:15 +01:00
|
|
|
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
|
2014-05-16 17:44:06 +02:00
|
|
|
VirtIOSCSIReq *req;
|
2012-06-20 08:47:11 +02:00
|
|
|
VirtIOSCSIEvent *evt;
|
2013-03-21 15:15:18 +01:00
|
|
|
VirtIODevice *vdev = VIRTIO_DEVICE(s);
|
2023-05-16 21:02:21 +02:00
|
|
|
uint32_t event = info->event;
|
|
|
|
uint32_t reason = info->reason;
|
2012-06-20 08:47:11 +02:00
|
|
|
|
2013-03-21 15:15:18 +01:00
|
|
|
if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
|
2012-10-08 16:50:51 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-02-14 18:17:10 +01:00
|
|
|
req = virtio_scsi_pop_req(s, vs->event_vq);
|
2012-07-02 10:47:35 +02:00
|
|
|
if (!req) {
|
|
|
|
s->events_dropped = true;
|
virtio-scsi: Fix acquire/release in dataplane handlers
After the AioContext lock push down, there is a race between
virtio_scsi_dataplane_start and those "assert(s->ctx &&
s->dataplane_started)", because the latter isn't wrapped in
aio_context_acquire.
Reproducer is simply booting a Fedora guest with an empty
virtio-scsi-dataplane controller:
qemu-system-x86_64 \
-drive if=none,id=root,format=raw,file=Fedora-Cloud-Base-25-1.3.x86_64.raw \
-device virtio-scsi \
-device scsi-disk,drive=root,bootindex=1 \
-object iothread,id=io \
-device virtio-scsi-pci,iothread=io \
-net user,hostfwd=tcp::10022-:22 -net nic,model=virtio -m 2048 \
--enable-kvm
Fix this by moving acquire/release pairs from virtio_scsi_handle_*_vq to
their callers - and wrap the broken assertions in acquire/release as well.
Signed-off-by: Fam Zheng <famz@redhat.com>
Message-Id: <20170317061447.16243-3-famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2017-03-17 07:14:47 +01:00
|
|
|
return;
|
2012-07-02 10:47:35 +02:00
|
|
|
}
|
2012-06-20 08:47:11 +02:00
|
|
|
|
2012-07-02 10:47:35 +02:00
|
|
|
if (s->events_dropped) {
|
|
|
|
event |= VIRTIO_SCSI_T_EVENTS_MISSED;
|
|
|
|
s->events_dropped = false;
|
|
|
|
}
|
|
|
|
|
2014-06-30 17:33:18 +02:00
|
|
|
if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
|
2016-09-30 17:13:40 +02:00
|
|
|
virtio_scsi_bad_req(req);
|
2017-03-17 07:14:47 +01:00
|
|
|
return;
|
2012-07-02 10:47:35 +02:00
|
|
|
}
|
|
|
|
|
2014-06-10 16:40:31 +02:00
|
|
|
evt = &req->resp.event;
|
2012-07-02 10:47:35 +02:00
|
|
|
memset(evt, 0, sizeof(VirtIOSCSIEvent));
|
2014-06-30 17:17:17 +02:00
|
|
|
evt->event = virtio_tswap32(vdev, event);
|
|
|
|
evt->reason = virtio_tswap32(vdev, reason);
|
2023-05-16 21:02:21 +02:00
|
|
|
if (event != VIRTIO_SCSI_T_EVENTS_MISSED) {
|
2012-06-20 08:47:11 +02:00
|
|
|
evt->lun[0] = 1;
|
2023-05-16 21:02:21 +02:00
|
|
|
evt->lun[1] = info->address.id;
|
2012-06-20 08:47:11 +02:00
|
|
|
|
|
|
|
/* Linux wants us to keep the same encoding we use for REPORT LUNS. */
|
2023-05-16 21:02:21 +02:00
|
|
|
if (info->address.lun >= 256) {
|
|
|
|
evt->lun[2] = (info->address.lun >> 8) | 0x40;
|
2012-06-20 08:47:11 +02:00
|
|
|
}
|
2023-05-16 21:02:21 +02:00
|
|
|
evt->lun[3] = info->address.lun & 0xFF;
|
2012-07-02 10:47:35 +02:00
|
|
|
}
|
2020-11-16 19:31:12 +01:00
|
|
|
trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);
|
2023-05-16 21:02:21 +02:00
|
|
|
|
2012-07-02 10:47:35 +02:00
|
|
|
virtio_scsi_complete_req(req);
|
|
|
|
}
|
|
|
|
|
2022-04-27 16:35:38 +02:00
|
|
|
static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
|
2016-04-06 12:16:27 +02:00
|
|
|
{
|
|
|
|
if (s->events_dropped) {
|
2023-05-16 21:02:21 +02:00
|
|
|
VirtIOSCSIEventInfo info = {
|
|
|
|
.event = VIRTIO_SCSI_T_NO_EVENT,
|
|
|
|
};
|
|
|
|
virtio_scsi_push_event(s, &info);
|
2016-04-06 12:16:27 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-07-02 10:47:35 +02:00
|
|
|
static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
|
|
|
|
{
|
2013-03-21 15:15:18 +01:00
|
|
|
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
|
2012-07-02 10:47:35 +02:00
|
|
|
|
2022-04-27 16:35:36 +02:00
|
|
|
if (virtio_scsi_defer_to_dataplane(s)) {
|
|
|
|
return;
|
2012-06-20 08:47:11 +02:00
|
|
|
}
|
2022-04-27 16:35:36 +02:00
|
|
|
|
2016-04-06 12:16:27 +02:00
|
|
|
virtio_scsi_handle_event_vq(s, vq);
|
2012-06-20 08:47:11 +02:00
|
|
|
}
|
|
|
|
|
2012-07-16 14:22:52 +02:00
|
|
|
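/*
 * Forward a sense change on a device to the guest as a PARAM_CHANGE event
 * (asc/ascq in the reason field), if the guest negotiated the CHANGE feature
 * and the device is not a CD-ROM.
 */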
static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
|
|
|
|
{
|
|
|
|
VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
|
2013-03-21 15:15:18 +01:00
|
|
|
VirtIODevice *vdev = VIRTIO_DEVICE(s);
|
2012-07-16 14:22:52 +02:00
|
|
|
|
2015-08-17 11:48:29 +02:00
|
|
|
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
|
2012-07-16 14:22:52 +02:00
|
|
|
dev->type != TYPE_ROM) {
|
2023-05-16 21:02:21 +02:00
|
|
|
VirtIOSCSIEventInfo info = {
|
|
|
|
.event = VIRTIO_SCSI_T_PARAM_CHANGE,
|
|
|
|
.reason = sense.asc | (sense.ascq << 8),
|
|
|
|
.address = {
|
|
|
|
.id = dev->id,
|
|
|
|
.lun = dev->lun,
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
virtio_scsi_push_event(s, &info);
|
2012-07-16 14:22:52 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-26 19:29:47 +02:00
|
|
|
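/* Advertise to the SCSI layer that this HBA supports IOThread operation */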
static void virtio_scsi_pre_hotplug(HotplugHandler *hotplug_dev,
|
|
|
|
DeviceState *dev, Error **errp)
|
|
|
|
{
|
|
|
|
SCSIDevice *sd = SCSI_DEVICE(dev);
|
|
|
|
sd->hba_supports_iothread = true;
|
|
|
|
}
|
|
|
|
|
2014-09-26 11:28:33 +02:00
|
|
|
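/*
 * When dataplane is in use, move the new device's BlockBackend into the
 * IOThread's AioContext, then report a TRANSPORT_RESET/RESCAN event and a
 * unit attention if the guest negotiated hotplug support.
 */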
static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
|
|
|
|
Error **errp)
|
2012-06-20 08:47:11 +02:00
|
|
|
{
|
2014-09-26 11:28:33 +02:00
|
|
|
VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
|
2014-10-19 06:47:42 +02:00
|
|
|
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
|
|
|
|
SCSIDevice *sd = SCSI_DEVICE(dev);
|
2019-05-02 11:10:59 +02:00
|
|
|
int ret;
|
2014-10-19 06:47:42 +02:00
|
|
|
|
2016-04-06 12:16:24 +02:00
|
|
|
if (s->ctx && !s->dataplane_fenced) {
|
2014-10-19 06:47:42 +02:00
|
|
|
if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
|
|
|
|
return;
|
2019-01-22 16:53:23 +01:00
|
|
|
}
|
2019-05-02 11:10:59 +02:00
|
|
|
ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
|
|
|
|
if (ret < 0) {
|
|
|
|
return;
|
|
|
|
}
|
2018-10-16 15:33:40 +02:00
|
|
|
}
|
2012-06-20 08:47:11 +02:00
|
|
|
|
2015-08-17 11:48:29 +02:00
|
|
|
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
|
2023-05-16 21:02:21 +02:00
|
|
|
VirtIOSCSIEventInfo info = {
|
|
|
|
.event = VIRTIO_SCSI_T_TRANSPORT_RESET,
|
|
|
|
.reason = VIRTIO_SCSI_EVT_RESET_RESCAN,
|
|
|
|
.address = {
|
|
|
|
.id = sd->id,
|
|
|
|
.lun = sd->lun,
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
virtio_scsi_push_event(s, &info);
|
2022-10-06 21:49:46 +02:00
|
|
|
scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
|
2012-06-20 08:47:11 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-26 11:28:33 +02:00
|
|
|
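/*
 * Unplug the device first, move its BlockBackend back to the main
 * AioContext, then report a TRANSPORT_RESET/REMOVED event if the guest
 * negotiated hotplug support.
 */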
static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
|
|
|
|
Error **errp)
|
2012-06-20 08:47:11 +02:00
|
|
|
{
|
2014-09-26 11:28:33 +02:00
|
|
|
VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
|
2014-10-19 06:47:42 +02:00
|
|
|
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
|
|
|
|
SCSIDevice *sd = SCSI_DEVICE(dev);
|
2023-05-16 21:02:21 +02:00
|
|
|
VirtIOSCSIEventInfo info = {
|
|
|
|
.event = VIRTIO_SCSI_T_TRANSPORT_RESET,
|
|
|
|
.reason = VIRTIO_SCSI_EVT_RESET_REMOVED,
|
|
|
|
.address = {
|
|
|
|
.id = sd->id,
|
|
|
|
.lun = sd->lun,
|
|
|
|
},
|
|
|
|
};
|
2014-10-19 06:47:42 +02:00
|
|
|
|
virtio-scsi: fixed virtio_scsi_ctx_check failed when detaching scsi disk
commit a6f230c moved the blockbackend back to the main AioContext on unplug. It sets the AioContext of
the SCSIDevice to the main AioContext, but s->ctx is still the iothread AioContext (if the scsi controller
is configured with an iothread). So if there are in-flight requests during unplug, a failing assertion
happens. The backtrace is below:
(gdb) bt
#0 0x0000ffff86aacbd0 in raise () from /lib64/libc.so.6
#1 0x0000ffff86aadf7c in abort () from /lib64/libc.so.6
#2 0x0000ffff86aa6124 in __assert_fail_base () from /lib64/libc.so.6
#3 0x0000ffff86aa61a4 in __assert_fail () from /lib64/libc.so.6
#4 0x0000000000529118 in virtio_scsi_ctx_check (d=<optimized out>, s=<optimized out>, s=<optimized out>) at /home/qemu-4.0.0/hw/scsi/virtio-scsi.c:246
#5 0x0000000000529ec4 in virtio_scsi_handle_cmd_req_prepare (s=0x2779ec00, req=0xffff740397d0) at /home/qemu-4.0.0/hw/scsi/virtio-scsi.c:559
#6 0x000000000052a228 in virtio_scsi_handle_cmd_vq (s=0x2779ec00, vq=0xffff7c6d7110) at /home/qemu-4.0.0/hw/scsi/virtio-scsi.c:603
#7 0x000000000052afa8 in virtio_scsi_data_plane_handle_cmd (vdev=<optimized out>, vq=0xffff7c6d7110) at /home/qemu-4.0.0/hw/scsi/virtio-scsi-dataplane.c:59
#8 0x000000000054d94c in virtio_queue_host_notifier_aio_poll (opaque=<optimized out>) at /home/qemu-4.0.0/hw/virtio/virtio.c:2452
assert(blk_get_aio_context(d->conf.blk) == s->ctx) failed.
To avoid the assertion failure, move the "if" after qdev_simple_device_unplug_cb.
In addition, to avoid another qemu crash below, add aio_disable_external before
qdev_simple_device_unplug_cb, which disables further processing of external clients
while qdev_simple_device_unplug_cb runs.
(gdb) bt
#0 scsi_req_unref (req=0xffff6802c6f0) at hw/scsi/scsi-bus.c:1283
#1 0x00000000005294a4 in virtio_scsi_handle_cmd_req_submit (req=<optimized out>,
s=<optimized out>) at /home/qemu-4.0.0/hw/scsi/virtio-scsi.c:589
#2 0x000000000052a2a8 in virtio_scsi_handle_cmd_vq (s=s@entry=0x9c90e90,
vq=vq@entry=0xffff7c05f110) at /home/qemu-4.0.0/hw/scsi/virtio-scsi.c:625
#3 0x000000000052afd8 in virtio_scsi_data_plane_handle_cmd (vdev=<optimized out>,
vq=0xffff7c05f110) at /home/qemu-4.0.0/hw/scsi/virtio-scsi-dataplane.c:60
#4 0x000000000054d97c in virtio_queue_host_notifier_aio_poll (opaque=<optimized out>)
at /home/qemu-4.0.0/hw/virtio/virtio.c:2447
#5 0x00000000009b204c in run_poll_handlers_once (ctx=ctx@entry=0x6efea40,
timeout=timeout@entry=0xffff7d7f7308) at util/aio-posix.c:521
#6 0x00000000009b2b64 in run_poll_handlers (ctx=ctx@entry=0x6efea40,
max_ns=max_ns@entry=4000, timeout=timeout@entry=0xffff7d7f7308) at util/aio-posix.c:559
#7 0x00000000009b2ca0 in try_poll_mode (ctx=ctx@entry=0x6efea40, timeout=0xffff7d7f7308,
timeout@entry=0xffff7d7f7348) at util/aio-posix.c:594
#8 0x00000000009b31b8 in aio_poll (ctx=0x6efea40, blocking=blocking@entry=true)
at util/aio-posix.c:636
#9 0x00000000006973cc in iothread_run (opaque=0x6ebd800) at iothread.c:75
#10 0x00000000009b592c in qemu_thread_start (args=0x6efef60) at util/qemu-thread-posix.c:502
#11 0x0000ffff8057f8bc in start_thread () from /lib64/libpthread.so.0
#12 0x0000ffff804e5f8c in thread_start () from /lib64/libc.so.6
(gdb) p bus
$1 = (SCSIBus *) 0x0
Signed-off-by: Zhengui li <lizhengui@huawei.com>
Message-Id: <1563696502-7972-1-git-send-email-lizhengui@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1563829520-17525-1-git-send-email-pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2019-07-22 23:05:20 +02:00
|
|
|
qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
|
|
|
|
|
2019-01-22 16:53:21 +01:00
|
|
|
if (s->ctx) {
|
2019-05-02 11:10:59 +02:00
|
|
|
/* If other users keep the BlockBackend in the iothread, that's ok */
|
|
|
|
blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
|
2019-01-22 16:53:21 +01:00
|
|
|
}
|
2023-05-16 21:02:21 +02:00
|
|
|
|
|
|
|
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
|
|
|
|
virtio_scsi_push_event(s, &info);
|
|
|
|
scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
|
|
|
|
}
|
2012-06-20 08:47:11 +02:00
|
|
|
}
|
|
|
|
|
2023-05-16 21:02:36 +02:00
|
|
|
/* Suspend virtqueue ioeventfd processing during drain */
|
|
|
|
static void virtio_scsi_drained_begin(SCSIBus *bus)
|
|
|
|
{
|
|
|
|
VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
|
|
|
|
VirtIODevice *vdev = VIRTIO_DEVICE(s);
|
|
|
|
uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED +
|
|
|
|
s->parent_obj.conf.num_queues;
|
|
|
|
|
2023-06-11 21:39:24 +02:00
|
|
|
/*
|
|
|
|
* Drain is called when stopping dataplane but the host notifier has
|
|
|
|
* already been detached. Detaching multiple times is a no-op if nothing
|
|
|
|
* else is monitoring the same file descriptor, but avoid it just in
|
|
|
|
* case.
|
|
|
|
*
|
|
|
|
* Also, don't detach if dataplane has not even been started yet because
|
|
|
|
* the host notifier isn't attached.
|
|
|
|
*/
|
|
|
|
if (s->dataplane_stopping || !s->dataplane_started) {
|
2023-05-16 21:02:36 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (uint32_t i = 0; i < total_queues; i++) {
|
|
|
|
VirtQueue *vq = virtio_get_queue(vdev, i);
|
|
|
|
virtio_queue_aio_detach_host_notifier(vq, s->ctx);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Resume virtqueue ioeventfd processing after drain */
|
|
|
|
static void virtio_scsi_drained_end(SCSIBus *bus)
|
|
|
|
{
|
|
|
|
VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
|
2024-02-02 16:31:56 +01:00
|
|
|
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
|
2023-05-16 21:02:36 +02:00
|
|
|
VirtIODevice *vdev = VIRTIO_DEVICE(s);
|
|
|
|
uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED +
|
|
|
|
s->parent_obj.conf.num_queues;
|
|
|
|
|
2023-06-11 21:39:24 +02:00
|
|
|
/*
|
|
|
|
* Drain is called when stopping dataplane. Keep the host notifier detached
|
|
|
|
* so it's not left dangling after dataplane is stopped.
|
|
|
|
*
|
|
|
|
* Also, don't attach if dataplane has not even been started yet. We're not
|
|
|
|
* ready.
|
|
|
|
*/
|
|
|
|
if (s->dataplane_stopping || !s->dataplane_started) {
|
2023-05-16 21:02:36 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (uint32_t i = 0; i < total_queues; i++) {
|
|
|
|
VirtQueue *vq = virtio_get_queue(vdev, i);
|
2024-02-02 16:31:56 +01:00
|
|
|
if (vq == vs->event_vq) {
|
|
|
|
virtio_queue_aio_attach_host_notifier_no_poll(vq, s->ctx);
|
|
|
|
} else {
|
|
|
|
virtio_queue_aio_attach_host_notifier(vq, s->ctx);
|
|
|
|
}
|
2023-05-16 21:02:36 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-11-14 16:58:41 +01:00
|
|
|
static struct SCSIBusInfo virtio_scsi_scsi_info = {
|
|
|
|
.tcq = true,
|
|
|
|
.max_channel = VIRTIO_SCSI_MAX_CHANNEL,
|
|
|
|
.max_target = VIRTIO_SCSI_MAX_TARGET,
|
|
|
|
.max_lun = VIRTIO_SCSI_MAX_LUN,
|
|
|
|
|
|
|
|
.complete = virtio_scsi_command_complete,
|
2021-02-24 19:14:50 +01:00
|
|
|
.fail = virtio_scsi_command_failed,
|
2011-11-14 16:58:41 +01:00
|
|
|
.cancel = virtio_scsi_request_cancelled,
|
2012-07-16 14:22:52 +02:00
|
|
|
.change = virtio_scsi_change,
|
2014-07-16 11:04:37 +02:00
|
|
|
.parse_cdb = virtio_scsi_parse_cdb,
|
2011-11-14 16:58:41 +01:00
|
|
|
.get_sg_list = virtio_scsi_get_sg_list,
|
2011-12-02 15:23:15 +01:00
|
|
|
.save_request = virtio_scsi_save_request,
|
|
|
|
.load_request = virtio_scsi_load_request,
|
2023-05-16 21:02:36 +02:00
|
|
|
.drained_begin = virtio_scsi_drained_begin,
|
|
|
|
.drained_end = virtio_scsi_drained_end,
|
2011-11-14 16:58:41 +01:00
|
|
|
};
|
|
|
|
|
2017-04-21 14:27:07 +02:00
|
|
|
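/*
 * Shared realize step: validate num_queues and virtqueue_size, then create
 * the control, event and command virtqueues with the given handlers.
 */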
void virtio_scsi_common_realize(DeviceState *dev,
|
2016-07-13 07:09:48 +02:00
|
|
|
VirtIOHandleOutput ctrl,
|
|
|
|
VirtIOHandleOutput evt,
|
2017-04-21 14:27:07 +02:00
|
|
|
VirtIOHandleOutput cmd,
|
|
|
|
Error **errp)
|
2011-02-11 09:40:59 +01:00
|
|
|
{
|
2013-07-30 03:19:55 +02:00
|
|
|
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
|
|
|
|
VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev);
|
2012-04-06 10:39:46 +02:00
|
|
|
int i;
|
2011-02-11 09:40:59 +01:00
|
|
|
|
2022-04-01 15:23:18 +02:00
|
|
|
virtio_init(vdev, VIRTIO_ID_SCSI, sizeof(VirtIOSCSIConfig));
|
2013-03-21 15:15:14 +01:00
|
|
|
|
2020-08-18 16:33:46 +02:00
|
|
|
if (s->conf.num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) {
|
|
|
|
s->conf.num_queues = 1;
|
|
|
|
}
|
2014-10-31 04:04:31 +01:00
|
|
|
if (s->conf.num_queues == 0 ||
|
2020-08-18 16:33:45 +02:00
|
|
|
s->conf.num_queues > VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED) {
|
2014-10-31 04:04:31 +01:00
|
|
|
error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
|
2014-08-26 08:30:30 +02:00
|
|
|
"must be a positive integer less than %d.",
|
2020-08-18 16:33:45 +02:00
|
|
|
s->conf.num_queues,
|
|
|
|
VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED);
|
2014-10-30 12:50:26 +01:00
|
|
|
virtio_cleanup(vdev);
|
2014-08-26 08:30:30 +02:00
|
|
|
return;
|
|
|
|
}
|
2019-12-20 15:09:04 +01:00
|
|
|
if (s->conf.virtqueue_size <= 2) {
|
|
|
|
error_setg(errp, "invalid virtqueue_size property (= %" PRIu32 "), "
|
|
|
|
"must be > 2", s->conf.virtqueue_size);
|
|
|
|
return;
|
|
|
|
}
|
2014-12-04 14:12:45 +01:00
|
|
|
s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues);
|
2015-03-11 14:31:29 +01:00
|
|
|
s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
|
|
|
|
s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
|
2013-03-21 15:15:14 +01:00
|
|
|
|
2017-08-10 18:52:55 +02:00
|
|
|
s->ctrl_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, ctrl);
|
|
|
|
s->event_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, evt);
|
2013-03-21 15:15:11 +01:00
|
|
|
for (i = 0; i < s->conf.num_queues; i++) {
|
2017-08-10 18:52:55 +02:00
|
|
|
s->cmd_vqs[i] = virtio_add_queue(vdev, s->conf.virtqueue_size, cmd);
|
2014-09-23 09:49:30 +02:00
|
|
|
}
|
2013-03-29 02:08:15 +01:00
|
|
|
}
|
|
|
|
|
2013-07-30 03:19:55 +02:00
|
|
|
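/*
 * Realize the full device: set up the deferred TMF machinery, the common
 * virtqueues, the SCSI bus with this device as hotplug handler, and
 * dataplane if configured.
 */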
static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
|
2013-03-29 02:08:15 +01:00
|
|
|
{
|
2013-07-30 03:19:55 +02:00
|
|
|
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
|
2013-07-30 05:41:42 +02:00
|
|
|
VirtIOSCSI *s = VIRTIO_SCSI(dev);
|
2013-07-21 12:16:34 +02:00
|
|
|
Error *err = NULL;
|
2013-03-29 02:08:15 +01:00
|
|
|
|
2023-02-21 22:22:18 +01:00
|
|
|
QTAILQ_INIT(&s->tmf_bh_list);
|
2023-12-05 19:19:58 +01:00
|
|
|
qemu_mutex_init(&s->tmf_bh_lock);
|
2023-02-21 22:22:18 +01:00
|
|
|
|
2017-04-21 14:27:07 +02:00
|
|
|
virtio_scsi_common_realize(dev,
|
|
|
|
virtio_scsi_handle_ctrl,
|
2014-06-19 10:12:00 +02:00
|
|
|
virtio_scsi_handle_event,
|
2017-04-21 14:27:07 +02:00
|
|
|
virtio_scsi_handle_cmd,
|
|
|
|
&err);
|
2013-07-30 03:19:55 +02:00
|
|
|
if (err != NULL) {
|
|
|
|
error_propagate(errp, err);
|
|
|
|
return;
|
2013-03-29 02:08:15 +01:00
|
|
|
}
|
|
|
|
|
2021-09-23 14:11:48 +02:00
|
|
|
scsi_bus_init_named(&s->bus, sizeof(s->bus), dev,
|
|
|
|
&virtio_scsi_scsi_info, vdev->bus_name);
|
2014-09-26 11:28:33 +02:00
|
|
|
/* override the default SCSI bus hotplug handler with virtio-scsi's own */
|
qdev: Drop qbus_set_hotplug_handler() parameter @errp
qbus_set_hotplug_handler() is a simple wrapper around
object_property_set_link().
object_property_set_link() fails when the property doesn't exist, is
not settable, or its .check() method fails. These are all programming
errors here, so passing &error_abort to qbus_set_hotplug_handler() is
appropriate.
Most of its callers do. Exceptions:
* pcie_cap_slot_init(), shpc_init(), spapr_phb_realize() pass NULL,
i.e. they ignore errors.
* spapr_machine_init() passes &error_fatal.
* s390_pcihost_realize(), virtio_serial_device_realize(),
s390_pcihost_plug() pass the error to their callers. The latter two
keep going after the error, which looks wrong.
Drop the @errp parameter, and instead pass &error_abort to
object_property_set_link().
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Daniel P. Berrangé" <berrange@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <20200630090351.1247703-15-armbru@redhat.com>
2020-06-30 11:03:39 +02:00
|
|
|
qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev));
|
2013-04-30 16:08:51 +02:00
|
|
|
|
2016-10-21 22:48:10 +02:00
|
|
|
virtio_scsi_dataplane_setup(s, errp);
|
2013-03-21 15:15:14 +01:00
|
|
|
}
|
|
|
|
|
2019-07-17 11:46:50 +02:00
|
|
|
void virtio_scsi_common_unrealize(DeviceState *dev)
|
2013-03-29 02:08:15 +01:00
|
|
|
{
|
2013-07-30 03:50:44 +02:00
|
|
|
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
|
|
|
|
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
|
2020-01-17 08:55:46 +01:00
|
|
|
int i;
|
2013-03-29 02:08:15 +01:00
|
|
|
|
2020-01-17 08:55:47 +01:00
|
|
|
virtio_delete_queue(vs->ctrl_vq);
|
|
|
|
virtio_delete_queue(vs->event_vq);
|
2020-01-17 08:55:46 +01:00
|
|
|
for (i = 0; i < vs->conf.num_queues; i++) {
|
2020-01-17 08:55:47 +01:00
|
|
|
virtio_delete_queue(vs->cmd_vqs[i]);
|
2020-01-17 08:55:46 +01:00
|
|
|
}
|
2013-03-29 02:08:15 +01:00
|
|
|
g_free(vs->cmd_vqs);
|
2013-04-24 10:21:22 +02:00
|
|
|
virtio_cleanup(vdev);
|
2013-03-29 02:08:15 +01:00
|
|
|
}
|
|
|
|
|
qdev: Unrealize must not fail
Devices may have component devices and buses.
Device realization may fail. Realization is recursive: a device's
realize() method realizes its components, and device_set_realized()
realizes its buses (which should in turn realize the devices on that
bus, except bus_set_realized() doesn't implement that, yet).
When realization of a component or bus fails, we need to roll back:
unrealize everything we realized so far. If any of these unrealizes
failed, the device would be left in an inconsistent state. Must not
happen.
device_set_realized() lets it happen: it ignores errors in the roll
back code starting at label child_realize_fail.
Since realization is recursive, unrealization must be recursive, too.
But how could a partly failed unrealize be rolled back? We'd have to
re-realize, which can fail. This design is fundamentally broken.
device_set_realized() does not roll back at all. Instead, it keeps
unrealizing, ignoring further errors.
It can screw up even for a device with no buses: if the lone
dc->unrealize() fails, it still unregisters vmstate, and calls
listeners' unrealize() callback.
bus_set_realized() does not roll back either. Instead, it stops
unrealizing.
Fortunately, no unrealize method can fail, as we'll see below.
To fix the design error, drop parameter @errp from all the unrealize
methods.
Any unrealize method that uses @errp now needs an update. This leads
us to unrealize() methods that can fail. Merely passing it to another
unrealize method cannot cause failure, though. Here are the ones that
do other things with @errp:
* virtio_serial_device_unrealize()
Fails when qbus_set_hotplug_handler() fails, but still does all the
other work. On failure, the device would stay realized with its
resources completely gone. Oops. Can't happen, because
qbus_set_hotplug_handler() can't actually fail here. Pass
&error_abort to qbus_set_hotplug_handler() instead.
* hw/ppc/spapr_drc.c's unrealize()
Fails when object_property_del() fails, but all the other work is
already done. On failure, the device would stay realized with its
vmstate registration gone. Oops. Can't happen, because
object_property_del() can't actually fail here. Pass &error_abort
to object_property_del() instead.
* spapr_phb_unrealize()
Fails and bails out when remove_drcs() fails, but other work is
already done. On failure, the device would stay realized with some
of its resources gone. Oops. remove_drcs() fails only when
chassis_from_bus()'s object_property_get_uint() fails, and it can't
here. Pass &error_abort to remove_drcs() instead.
Therefore, no unrealize method can fail before this patch.
device_set_realized()'s recursive unrealization via bus uses
object_property_set_bool(). Can't drop @errp there, so pass
&error_abort.
We similarly unrealize with object_property_set_bool() elsewhere,
always ignoring errors. Pass &error_abort instead.
Several unrealize methods no longer handle errors from other unrealize
methods: virtio_9p_device_unrealize(),
virtio_input_device_unrealize(), scsi_qdev_unrealize(), ...
Much of the deleted error handling looks wrong anyway.
One unrealize method no longer ignores such errors:
usb_ehci_pci_exit().
Several realize methods no longer ignore errors when rolling back:
v9fs_device_realize_common(), pci_qdev_unrealize(),
spapr_phb_realize(), usb_qdev_realize(), vfio_ccw_realize(),
virtio_device_realize().
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200505152926.18877-17-armbru@redhat.com>
2020-05-05 17:29:24 +02:00
|
|
|
static void virtio_scsi_device_unrealize(DeviceState *dev)
|
2013-03-21 15:15:14 +01:00
|
|
|
{
|
2017-05-18 12:28:08 +02:00
|
|
|
VirtIOSCSI *s = VIRTIO_SCSI(dev);
|
|
|
|
|
2023-02-21 22:22:18 +01:00
|
|
|
virtio_scsi_reset_tmf_bh(s);
|
|
|
|
|
2020-06-30 11:03:39 +02:00
|
|
|
qbus_set_hotplug_handler(BUS(&s->bus), NULL);
|
2019-07-17 11:46:50 +02:00
|
|
|
virtio_scsi_common_unrealize(dev);
|
2023-12-05 19:19:58 +01:00
|
|
|
qemu_mutex_destroy(&s->tmf_bh_lock);
|
2013-03-21 15:15:14 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static Property virtio_scsi_properties[] = {
|
2020-08-18 16:33:46 +02:00
|
|
|
DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues,
|
|
|
|
VIRTIO_SCSI_AUTO_NUM_QUEUES),
|
2017-08-10 18:52:55 +02:00
|
|
|
DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI,
|
2020-02-14 08:46:48 +01:00
|
|
|
parent_obj.conf.virtqueue_size, 256),
|
2019-12-20 15:09:04 +01:00
|
|
|
DEFINE_PROP_BOOL("seg_max_adjust", VirtIOSCSI,
|
|
|
|
parent_obj.conf.seg_max_adjust, true),
|
2015-06-10 17:04:32 +02:00
|
|
|
DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors,
|
|
|
|
0xFFFF),
|
|
|
|
DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSI, parent_obj.conf.cmd_per_lun,
|
|
|
|
128),
|
|
|
|
DEFINE_PROP_BIT("hotplug", VirtIOSCSI, host_features,
|
|
|
|
VIRTIO_SCSI_F_HOTPLUG, true),
|
|
|
|
DEFINE_PROP_BIT("param_change", VirtIOSCSI, host_features,
|
|
|
|
VIRTIO_SCSI_F_CHANGE, true),
|
2017-07-14 04:14:56 +02:00
|
|
|
DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread,
|
|
|
|
TYPE_IOTHREAD, IOThread *),
|
2013-03-21 15:15:14 +01:00
|
|
|
DEFINE_PROP_END_OF_LIST(),
|
|
|
|
};
|
|
|
|
|
2016-10-06 14:55:46 +02:00
|
|
|
static const VMStateDescription vmstate_virtio_scsi = {
|
|
|
|
.name = "virtio-scsi",
|
|
|
|
.minimum_version_id = 1,
|
|
|
|
.version_id = 1,
|
2023-12-21 04:16:32 +01:00
|
|
|
.fields = (const VMStateField[]) {
|
2016-10-06 14:55:46 +02:00
|
|
|
VMSTATE_VIRTIO_DEVICE,
|
|
|
|
VMSTATE_END_OF_LIST()
|
|
|
|
},
|
|
|
|
};
|
2016-07-14 19:22:46 +02:00
|
|
|
|
2013-03-29 02:08:15 +01:00
|
|
|
static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
|
|
|
|
{
|
|
|
|
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
|
2013-07-29 16:17:45 +02:00
|
|
|
DeviceClass *dc = DEVICE_CLASS(klass);
|
2013-03-29 02:08:15 +01:00
|
|
|
|
|
|
|
vdc->get_config = virtio_scsi_get_config;
|
2013-07-29 16:17:45 +02:00
|
|
|
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
|
2013-03-29 02:08:15 +01:00
|
|
|
}
|
|
|
|
|
2013-03-21 15:15:14 +01:00
|
|
|
static void virtio_scsi_class_init(ObjectClass *klass, void *data)
|
|
|
|
{
|
|
|
|
DeviceClass *dc = DEVICE_CLASS(klass);
|
|
|
|
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
|
2014-09-26 11:28:33 +02:00
|
|
|
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
|
2013-07-30 03:19:55 +02:00
|
|
|
|
2020-01-10 16:30:32 +01:00
|
|
|
device_class_set_props(dc, virtio_scsi_properties);
|
2016-07-14 19:22:46 +02:00
|
|
|
dc->vmsd = &vmstate_virtio_scsi;
|
2013-07-29 16:17:45 +02:00
|
|
|
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
|
2013-07-30 03:19:55 +02:00
|
|
|
vdc->realize = virtio_scsi_device_realize;
|
2013-07-30 03:50:44 +02:00
|
|
|
vdc->unrealize = virtio_scsi_device_unrealize;
|
2013-03-21 15:15:14 +01:00
|
|
|
vdc->set_config = virtio_scsi_set_config;
|
|
|
|
vdc->get_features = virtio_scsi_get_features;
|
|
|
|
vdc->reset = virtio_scsi_reset;
|
2016-10-21 22:48:10 +02:00
|
|
|
vdc->start_ioeventfd = virtio_scsi_dataplane_start;
|
|
|
|
vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
|
2019-04-26 19:29:47 +02:00
|
|
|
hc->pre_plug = virtio_scsi_pre_hotplug;
|
2014-09-26 11:28:33 +02:00
|
|
|
hc->plug = virtio_scsi_hotplug;
|
|
|
|
hc->unplug = virtio_scsi_hotunplug;
|
2013-03-21 15:15:14 +01:00
|
|
|
}
|
|
|
|
|
2013-03-29 02:08:15 +01:00
|
|
|
static const TypeInfo virtio_scsi_common_info = {
|
|
|
|
.name = TYPE_VIRTIO_SCSI_COMMON,
|
|
|
|
.parent = TYPE_VIRTIO_DEVICE,
|
|
|
|
.instance_size = sizeof(VirtIOSCSICommon),
|
2013-08-19 17:53:15 +02:00
|
|
|
.abstract = true,
|
2013-03-29 02:08:15 +01:00
|
|
|
.class_init = virtio_scsi_common_class_init,
|
|
|
|
};
|
|
|
|
|
2013-03-21 15:15:14 +01:00
|
|
|
static const TypeInfo virtio_scsi_info = {
|
|
|
|
.name = TYPE_VIRTIO_SCSI,
|
2013-03-29 02:08:15 +01:00
|
|
|
.parent = TYPE_VIRTIO_SCSI_COMMON,
|
2013-03-21 15:15:14 +01:00
|
|
|
.instance_size = sizeof(VirtIOSCSI),
|
|
|
|
.class_init = virtio_scsi_class_init,
|
2014-09-26 11:28:33 +02:00
|
|
|
.interfaces = (InterfaceInfo[]) {
|
|
|
|
{ TYPE_HOTPLUG_HANDLER },
|
|
|
|
{ }
|
|
|
|
}
|
2013-03-21 15:15:14 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
static void virtio_register_types(void)
|
|
|
|
{
|
2013-03-29 02:08:15 +01:00
|
|
|
type_register_static(&virtio_scsi_common_info);
|
2013-03-21 15:15:14 +01:00
|
|
|
type_register_static(&virtio_scsi_info);
|
|
|
|
}
|
|
|
|
|
|
|
|
type_init(virtio_register_types)
|