Merge remote-tracking branch 'bonzini/virtio-scsi' into staging

* bonzini/virtio-scsi:
  scsi-block: always use scsi_generic_ops for cache != none
  scsi: fix searching for an empty id
  scsi: fix wrong return for target INQUIRY
  virtio-scsi: add migration support
  virtio-scsi: process control queue requests
  virtio-scsi: add basic SCSI bus operation
  virtio-scsi: Add basic request processing infrastructure
  virtio-scsi: Add virtio-scsi stub device
  scsi-disk: add migration support
  scsi-generic: add migration support
  scsi: add SCSIDevice vmstate definitions
  scsi-disk: enable scatter/gather functionality
  scsi: add scatter/gather functionality
  scsi: pass residual amount to command_complete
  ahci: use new DMA helpers
  dma-helpers: add accounting wrappers
  dma-helpers: add dma_buf_read and dma_buf_write
  dma-helpers: make QEMUSGList target independent
commit 9ef1300b1b
Merged by Anthony Liguori, 2012-02-24 09:33:03 -06:00
22 changed files with 1135 additions and 116 deletions


@ -200,6 +200,7 @@ obj-y = arch_init.o cpus.o monitor.o machine.o gdbstub.o balloon.o ioport.o
# need to fix this properly
obj-$(CONFIG_NO_PCI) += pci-stub.o
obj-$(CONFIG_VIRTIO) += virtio.o virtio-blk.o virtio-balloon.o virtio-net.o virtio-serial-bus.o
obj-$(CONFIG_VIRTIO_SCSI) += virtio-scsi.o
obj-y += vhost_net.o
obj-$(CONFIG_VHOST_NET) += vhost.o
obj-$(CONFIG_REALLY_VIRTFS) += 9pfs/virtio-9p-device.o


@ -1,5 +1,6 @@
CONFIG_PCI=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_SCSI=y
CONFIG_VIRTIO=y
CONFIG_USB_UHCI=y
CONFIG_USB_OHCI=y


@ -1 +1,2 @@
CONFIG_VIRTIO=y
CONFIG_VIRTIO_SCSI=y


@ -204,3 +204,40 @@ BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
{
return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque, true);
}
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_dev)
{
uint64_t resid;
int sg_cur_index;
resid = sg->size;
sg_cur_index = 0;
len = MIN(len, resid);
while (len > 0) {
ScatterGatherEntry entry = sg->sg[sg_cur_index++];
int32_t xfer = MIN(len, entry.len);
cpu_physical_memory_rw(entry.base, ptr, xfer, !to_dev);
ptr += xfer;
len -= xfer;
resid -= xfer;
}
return resid;
}
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
return dma_buf_rw(ptr, len, sg, 0);
}
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
return dma_buf_rw(ptr, len, sg, 1);
}
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
QEMUSGList *sg, enum BlockAcctType type)
{
bdrv_acct_start(bs, cookie, sg->size, type);
}

dma.h

@ -17,6 +17,13 @@
typedef struct ScatterGatherEntry ScatterGatherEntry;
+struct QEMUSGList {
+    ScatterGatherEntry *sg;
+    int nsg;
+    int nalloc;
+    size_t size;
+};
#if defined(TARGET_PHYS_ADDR_BITS)
typedef target_phys_addr_t dma_addr_t;
@ -32,13 +39,6 @@ struct ScatterGatherEntry {
dma_addr_t len;
};
-struct QEMUSGList {
-    ScatterGatherEntry *sg;
-    int nsg;
-    int nalloc;
-    dma_addr_t size;
-};
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
@ -58,4 +58,10 @@ BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
QEMUSGList *sg, uint64_t sector,
BlockDriverCompletionFunc *cb, void *opaque);
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
QEMUSGList *sg, enum BlockAcctType type);
#endif
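
For orientation, here is a minimal sketch of how a device model might use the helpers declared above. The function and its caller are hypothetical and only assume the dma.h declarations from this series: dma_buf_read/dma_buf_write copy between a linear bounce buffer and the guest memory described by a QEMUSGList and return the residual byte count of the list.

/* Hypothetical illustration only: a bounce-buffer transfer built on the
 * helpers above.  "to_guest" means data flows from the device's linear
 * buffer into the guest memory described by the QEMUSGList. */
#include <stdbool.h>
#include <stdint.h>
#include "dma.h"

static uint64_t copy_bounce_buffer(uint8_t *bounce, int32_t len,
                                   QEMUSGList *sg, bool to_guest)
{
    uint64_t resid;

    if (to_guest) {
        /* e.g. completing a READ: device buffer -> guest memory */
        resid = dma_buf_read(bounce, len, sg);
    } else {
        /* e.g. starting a WRITE: guest memory -> device buffer */
        resid = dma_buf_write(bounce, len, sg);
    }
    /* Bytes of the scatter/gather list not covered by "len". */
    return resid;
}

dma_acct_start() plays the same role as bdrv_acct_start() but takes the byte count from sg->size, which is how the AHCI conversion below uses it.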


@ -390,7 +390,8 @@ static void esp_do_dma(ESPState *s)
esp_dma_done(s);
}
-static void esp_command_complete(SCSIRequest *req, uint32_t status)
+static void esp_command_complete(SCSIRequest *req, uint32_t status,
+                                 size_t resid)
{
ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);


@ -428,55 +428,6 @@ static void ahci_reg_init(AHCIState *s)
}
}
static uint32_t read_from_sglist(uint8_t *buffer, uint32_t len,
QEMUSGList *sglist)
{
uint32_t i = 0;
uint32_t total = 0, once;
ScatterGatherEntry *cur_prd;
uint32_t sgcount;
cur_prd = sglist->sg;
sgcount = sglist->nsg;
for (i = 0; len && sgcount; i++) {
once = MIN(cur_prd->len, len);
cpu_physical_memory_read(cur_prd->base, buffer, once);
cur_prd++;
sgcount--;
len -= once;
buffer += once;
total += once;
}
return total;
}
static uint32_t write_to_sglist(uint8_t *buffer, uint32_t len,
QEMUSGList *sglist)
{
uint32_t i = 0;
uint32_t total = 0, once;
ScatterGatherEntry *cur_prd;
uint32_t sgcount;
DPRINTF(-1, "total: 0x%x bytes\n", len);
cur_prd = sglist->sg;
sgcount = sglist->nsg;
for (i = 0; len && sgcount; i++) {
once = MIN(cur_prd->len, len);
DPRINTF(-1, "write 0x%x bytes to 0x%lx\n", once, (long)cur_prd->base);
cpu_physical_memory_write(cur_prd->base, buffer, once);
cur_prd++;
sgcount--;
len -= once;
buffer += once;
total += once;
}
return total;
}
static void check_cmd(AHCIState *s, int port)
{
AHCIPortRegs *pr = &s->dev[port].port_regs;
@ -802,9 +753,8 @@ static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis,
DPRINTF(port, "tag %d aio read %"PRId64"\n",
ncq_tfs->tag, ncq_tfs->lba);
-        bdrv_acct_start(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct,
-                        (ncq_tfs->sector_count-1) * BDRV_SECTOR_SIZE,
-                        BDRV_ACCT_READ);
+        dma_acct_start(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct,
+                       &ncq_tfs->sglist, BDRV_ACCT_READ);
ncq_tfs->aiocb = dma_bdrv_read(ncq_tfs->drive->port.ifs[0].bs,
&ncq_tfs->sglist, ncq_tfs->lba,
ncq_cb, ncq_tfs);
@ -816,9 +766,8 @@ static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis,
DPRINTF(port, "tag %d aio write %"PRId64"\n",
ncq_tfs->tag, ncq_tfs->lba);
-        bdrv_acct_start(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct,
-                        (ncq_tfs->sector_count-1) * BDRV_SECTOR_SIZE,
-                        BDRV_ACCT_WRITE);
+        dma_acct_start(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct,
+                       &ncq_tfs->sglist, BDRV_ACCT_WRITE);
ncq_tfs->aiocb = dma_bdrv_write(ncq_tfs->drive->port.ifs[0].bs,
&ncq_tfs->sglist, ncq_tfs->lba,
ncq_cb, ncq_tfs);
@ -1023,12 +972,12 @@ static int ahci_start_transfer(IDEDMA *dma)
is_write ? "writ" : "read", size, is_atapi ? "atapi" : "ata",
has_sglist ? "" : "o");
-    if (is_write && has_sglist && (s->data_ptr < s->data_end)) {
-        read_from_sglist(s->data_ptr, size, &s->sg);
-    }
-    if (!is_write && has_sglist && (s->data_ptr < s->data_end)) {
-        write_to_sglist(s->data_ptr, size, &s->sg);
+    if (has_sglist && size) {
+        if (is_write) {
+            dma_buf_write(s->data_ptr, size, &s->sg);
+        } else {
+            dma_buf_read(s->data_ptr, size, &s->sg);
+        }
}
/* update number of transferred bytes */
@ -1067,14 +1016,9 @@ static int ahci_dma_prepare_buf(IDEDMA *dma, int is_write)
{
AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
IDEState *s = &ad->port.ifs[0];
-    int i;
ahci_populate_sglist(ad, &s->sg);
-    s->io_buffer_size = 0;
-    for (i = 0; i < s->sg.nsg; i++) {
-        s->io_buffer_size += s->sg.sg[i].len;
-    }
+    s->io_buffer_size = s->sg.size;
DPRINTF(ad->port_no, "len=%#x\n", s->io_buffer_size);
return s->io_buffer_size != 0;
@ -1092,9 +1036,9 @@ static int ahci_dma_rw_buf(IDEDMA *dma, int is_write)
}
if (is_write) {
-        write_to_sglist(p, l, &s->sg);
+        dma_buf_read(p, l, &s->sg);
} else {
-        read_from_sglist(p, l, &s->sg);
+        dma_buf_write(p, l, &s->sg);
}
/* update number of transferred bytes */


@ -699,7 +699,7 @@ static int lsi_queue_req(LSIState *s, SCSIRequest *req, uint32_t len)
}
/* Callback to indicate that the SCSI layer has completed a command. */
-static void lsi_command_complete(SCSIRequest *req, uint32_t status)
+static void lsi_command_complete(SCSIRequest *req, uint32_t status, size_t resid)
{
LSIState *s = DO_UPCAST(LSIState, dev.qdev, req->bus->qbus.parent);
int out;


@ -75,6 +75,7 @@
#define PCI_DEVICE_ID_VIRTIO_BLOCK 0x1001
#define PCI_DEVICE_ID_VIRTIO_BALLOON 0x1002
#define PCI_DEVICE_ID_VIRTIO_CONSOLE 0x1003
#define PCI_DEVICE_ID_VIRTIO_SCSI 0x1004
#define FMT_PCIBUS PRIx64


@ -169,6 +169,18 @@ static int s390_virtio_serial_init(VirtIOS390Device *dev)
return r;
}
static int s390_virtio_scsi_init(VirtIOS390Device *dev)
{
VirtIODevice *vdev;
vdev = virtio_scsi_init((DeviceState *)dev, &dev->scsi);
if (!vdev) {
return -1;
}
return s390_virtio_device_init(dev, vdev);
}
static uint64_t s390_virtio_device_vq_token(VirtIOS390Device *dev, int vq)
{
ram_addr_t token_off;
@ -433,6 +445,26 @@ static TypeInfo virtio_s390_device_info = {
.abstract = true,
};
static Property s390_virtio_scsi_properties[] = {
DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOS390Device, host_features, scsi),
DEFINE_PROP_END_OF_LIST(),
};
static void s390_virtio_scsi_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOS390DeviceClass *k = VIRTIO_S390_DEVICE_CLASS(klass);
k->init = s390_virtio_scsi_init;
dc->props = s390_virtio_scsi_properties;
}
static TypeInfo s390_virtio_scsi = {
.name = "virtio-scsi-s390",
.parent = TYPE_VIRTIO_S390_DEVICE,
.instance_size = sizeof(VirtIOS390Device),
.class_init = s390_virtio_scsi_class_init,
};
/***************** S390 Virtio Bus Bridge Device *******************/
/* Only required to have the virtio bus as child in the system bus */
@ -465,6 +497,7 @@ static void s390_virtio_register_types(void)
type_register_static(&s390_virtio_serial);
type_register_static(&s390_virtio_blk);
type_register_static(&s390_virtio_net);
type_register_static(&s390_virtio_scsi);
type_register_static(&s390_virtio_bridge_info);
}


@ -19,6 +19,7 @@
#include "virtio-net.h"
#include "virtio-serial.h"
#include "virtio-scsi.h"
#define VIRTIO_DEV_OFFS_TYPE 0 /* 8 bits */
#define VIRTIO_DEV_OFFS_NUM_VQ 1 /* 8 bits */
@ -67,6 +68,7 @@ struct VirtIOS390Device {
uint32_t host_features;
virtio_serial_conf serial;
virtio_net_conf net;
VirtIOSCSIConf scsi;
};
typedef struct VirtIOS390Bus {


@ -5,6 +5,7 @@
#include "qdev.h"
#include "blockdev.h"
#include "trace.h"
#include "dma.h"
static char *scsibus_get_fw_dev_path(DeviceState *dev);
static int scsi_req_parse(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf);
@ -86,6 +87,7 @@ static void scsi_dma_restart_bh(void *opaque)
scsi_req_continue(req);
break;
case SCSI_XFER_NONE:
assert(!req->sg);
scsi_req_dequeue(req);
scsi_req_enqueue(req);
break;
@ -130,6 +132,10 @@ static int scsi_qdev_init(DeviceState *qdev)
error_report("bad scsi device id: %d", dev->id);
goto err;
}
if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
error_report("bad scsi device lun: %d", dev->lun);
goto err;
}
if (dev->id == -1) {
int id = -1;
@ -138,8 +144,8 @@ static int scsi_qdev_init(DeviceState *qdev)
}
do {
d = scsi_device_find(bus, dev->channel, ++id, dev->lun);
-        } while (d && d->lun == dev->lun && id <= bus->info->max_target);
-        if (id > bus->info->max_target) {
+        } while (d && d->lun == dev->lun && id < bus->info->max_target);
+        if (d && d->lun == dev->lun) {
error_report("no free target");
goto err;
}
@ -149,14 +155,15 @@ static int scsi_qdev_init(DeviceState *qdev)
do {
d = scsi_device_find(bus, dev->channel, dev->id, ++lun);
} while (d && d->lun == lun && lun < bus->info->max_lun);
-        if (lun > bus->info->max_lun) {
+        if (d && d->lun == lun) {
error_report("no free lun");
goto err;
}
dev->lun = lun;
} else {
d = scsi_device_find(bus, dev->channel, dev->id, dev->lun);
-        if (dev->lun == d->lun && dev != d) {
+        assert(d);
+        if (d->lun == dev->lun && dev != d) {
qdev_free(&d->qdev);
}
}
@ -215,7 +222,7 @@ int scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
int res = 0, unit;
loc_push_none(&loc);
-    for (unit = 0; unit < bus->info->max_target; unit++) {
+    for (unit = 0; unit <= bus->info->max_target; unit++) {
dinfo = drive_get(IF_SCSI, bus->busnr, unit);
if (dinfo == NULL) {
continue;
@ -378,7 +385,7 @@ static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
/* PAGE CODE == 0 */
if (r->req.cmd.xfer < 5) {
-        return -1;
+        return false;
}
r->len = MIN(r->req.cmd.xfer, 36);
@ -533,6 +540,8 @@ SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
}
req->cmd = cmd;
req->resid = req->cmd.xfer;
switch (buf[0]) {
case INQUIRY:
trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
@ -643,15 +652,25 @@ void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
req->sense_len = 18;
}
static void scsi_req_enqueue_internal(SCSIRequest *req)
{
assert(!req->enqueued);
scsi_req_ref(req);
if (req->bus->info->get_sg_list) {
req->sg = req->bus->info->get_sg_list(req);
} else {
req->sg = NULL;
}
req->enqueued = true;
QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
}
int32_t scsi_req_enqueue(SCSIRequest *req)
{
int32_t rc;
-    assert(!req->enqueued);
-    scsi_req_ref(req);
-    req->enqueued = true;
-    QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
+    assert(!req->retry);
+    scsi_req_enqueue_internal(req);
scsi_req_ref(req);
rc = req->ops->send_command(req, req->cmd.buf);
scsi_req_unref(req);
@ -1273,12 +1292,32 @@ void scsi_req_continue(SCSIRequest *req)
Once it completes, calling scsi_req_continue will restart I/O. */
void scsi_req_data(SCSIRequest *req, int len)
{
uint8_t *buf;
if (req->io_canceled) {
trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
-    } else {
-        trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
-        req->bus->info->transfer_data(req, len);
+        return;
}
trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
assert(req->cmd.mode != SCSI_XFER_NONE);
if (!req->sg) {
req->resid -= len;
req->bus->info->transfer_data(req, len);
return;
}
/* If the device calls scsi_req_data and the HBA specified a
* scatter/gather list, the transfer has to happen in a single
* step. */
assert(!req->dma_started);
req->dma_started = true;
buf = scsi_req_get_buf(req);
if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
req->resid = dma_buf_read(buf, len, req->sg);
} else {
req->resid = dma_buf_write(buf, len, req->sg);
}
scsi_req_continue(req);
}
void scsi_req_print(SCSIRequest *req)
@ -1337,7 +1376,7 @@ void scsi_req_complete(SCSIRequest *req, int status)
scsi_req_ref(req);
scsi_req_dequeue(req);
-    req->bus->info->complete(req, req->status);
+    req->bus->info->complete(req, req->status, req->resid);
scsi_req_unref(req);
}
@ -1413,6 +1452,102 @@ SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
return target_dev;
}
/* SCSI request list. For simplicity, pv points to the whole device */
static void put_scsi_requests(QEMUFile *f, void *pv, size_t size)
{
SCSIDevice *s = pv;
SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
SCSIRequest *req;
QTAILQ_FOREACH(req, &s->requests, next) {
assert(!req->io_canceled);
assert(req->status == -1);
assert(req->retry);
assert(req->enqueued);
qemu_put_sbyte(f, 1);
qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
qemu_put_be32s(f, &req->tag);
qemu_put_be32s(f, &req->lun);
if (bus->info->save_request) {
bus->info->save_request(f, req);
}
if (req->ops->save_request) {
req->ops->save_request(f, req);
}
}
qemu_put_sbyte(f, 0);
}
static int get_scsi_requests(QEMUFile *f, void *pv, size_t size)
{
SCSIDevice *s = pv;
SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
while (qemu_get_sbyte(f)) {
uint8_t buf[SCSI_CMD_BUF_SIZE];
uint32_t tag;
uint32_t lun;
SCSIRequest *req;
qemu_get_buffer(f, buf, sizeof(buf));
qemu_get_be32s(f, &tag);
qemu_get_be32s(f, &lun);
req = scsi_req_new(s, tag, lun, buf, NULL);
if (bus->info->load_request) {
req->hba_private = bus->info->load_request(f, req);
}
if (req->ops->load_request) {
req->ops->load_request(f, req);
}
/* Just restart it later. */
req->retry = true;
scsi_req_enqueue_internal(req);
/* At this point, the request will be kept alive by the reference
* added by scsi_req_enqueue_internal, so we can release our reference.
* The HBA of course will add its own reference in the load_request
* callback if it needs to hold on the SCSIRequest.
*/
scsi_req_unref(req);
}
return 0;
}
const VMStateInfo vmstate_info_scsi_requests = {
.name = "scsi-requests",
.get = get_scsi_requests,
.put = put_scsi_requests,
};
const VMStateDescription vmstate_scsi_device = {
.name = "SCSIDevice",
.version_id = 1,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT8(unit_attention.key, SCSIDevice),
VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
VMSTATE_BOOL(sense_is_ua, SCSIDevice),
VMSTATE_UINT8_ARRAY(sense, SCSIDevice, SCSI_SENSE_BUF_SIZE),
VMSTATE_UINT32(sense_len, SCSIDevice),
{
.name = "requests",
.version_id = 0,
.field_exists = NULL,
.size = 0, /* ouch */
.info = &vmstate_info_scsi_requests,
.flags = VMS_SINGLE,
.offset = 0,
},
VMSTATE_END_OF_LIST()
}
};
static void scsi_device_class_init(ObjectClass *klass, void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);


@ -38,6 +38,7 @@ do { fprintf(stderr, "scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
#include "sysemu.h"
#include "blockdev.h"
#include "block_int.h"
#include "dma.h"
#ifdef __linux
#include <scsi/sg.h>
@ -110,12 +111,12 @@ static void scsi_cancel_io(SCSIRequest *req)
r->req.aiocb = NULL;
}
-static uint32_t scsi_init_iovec(SCSIDiskReq *r)
+static uint32_t scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
if (!r->iov.iov_base) {
-        r->buflen = SCSI_DMA_BUF_SIZE;
+        r->buflen = size;
r->iov.iov_base = qemu_blockalign(s->qdev.conf.bs, r->buflen);
}
r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
@ -123,6 +124,56 @@ static uint32_t scsi_init_iovec(SCSIDiskReq *r)
return r->qiov.size / 512;
}
static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
qemu_put_be64s(f, &r->sector);
qemu_put_be32s(f, &r->sector_count);
qemu_put_be32s(f, &r->buflen);
if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
}
}
static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
qemu_get_be64s(f, &r->sector);
qemu_get_be32s(f, &r->sector_count);
qemu_get_be32s(f, &r->buflen);
if (r->buflen) {
scsi_init_iovec(r, r->buflen);
if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
}
}
qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}
static void scsi_dma_complete(void *opaque, int ret)
{
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
bdrv_acct_done(s->qdev.conf.bs, &r->acct);
if (ret) {
if (scsi_handle_rw_error(r, -ret)) {
goto done;
}
}
r->sector += r->sector_count;
r->sector_count = 0;
scsi_req_complete(&r->req, GOOD);
done:
scsi_req_unref(&r->req);
}
static void scsi_read_complete(void * opaque, int ret)
{
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
@ -213,10 +264,17 @@ static void scsi_read_data(SCSIRequest *req)
return;
}
-    n = scsi_init_iovec(r);
-    bdrv_acct_start(s->qdev.conf.bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
-    r->req.aiocb = bdrv_aio_readv(s->qdev.conf.bs, r->sector, &r->qiov, n,
-                                  scsi_read_complete, r);
+    if (r->req.sg) {
+        dma_acct_start(s->qdev.conf.bs, &r->acct, r->req.sg, BDRV_ACCT_READ);
+        r->req.resid -= r->req.sg->size;
+        r->req.aiocb = dma_bdrv_read(s->qdev.conf.bs, r->req.sg, r->sector,
+                                     scsi_dma_complete, r);
+    } else {
+        n = scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
+        bdrv_acct_start(s->qdev.conf.bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
+        r->req.aiocb = bdrv_aio_readv(s->qdev.conf.bs, r->sector, &r->qiov, n,
+                                      scsi_read_complete, r);
+    }
}
/*
@ -287,7 +345,7 @@ static void scsi_write_complete(void * opaque, int ret)
if (r->sector_count == 0) {
scsi_req_complete(&r->req, GOOD);
} else {
-        scsi_init_iovec(r);
+        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
DPRINTF("Write complete tag=0x%x more=%d\n", r->req.tag, r->qiov.size);
scsi_req_data(&r->req, r->qiov.size);
}
@ -315,18 +373,26 @@ static void scsi_write_data(SCSIRequest *req)
return;
}
-    n = r->qiov.size / 512;
-    if (n) {
-        if (s->tray_open) {
-            scsi_write_complete(r, -ENOMEDIUM);
-            return;
-        }
+    if (!r->req.sg && !r->qiov.size) {
+        /* Called for the first time. Ask the driver to send us more data. */
+        scsi_write_complete(r, 0);
+        return;
+    }
+    if (s->tray_open) {
+        scsi_write_complete(r, -ENOMEDIUM);
+        return;
+    }
+    if (r->req.sg) {
+        dma_acct_start(s->qdev.conf.bs, &r->acct, r->req.sg, BDRV_ACCT_WRITE);
+        r->req.resid -= r->req.sg->size;
+        r->req.aiocb = dma_bdrv_write(s->qdev.conf.bs, r->req.sg, r->sector,
+                                      scsi_dma_complete, r);
+    } else {
+        n = r->qiov.size / 512;
bdrv_acct_start(s->qdev.conf.bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_WRITE);
r->req.aiocb = bdrv_aio_writev(s->qdev.conf.bs, r->sector, &r->qiov, n,
scsi_write_complete, r);
-    } else {
-        /* Called for the first time. Ask the driver to send us more data. */
-        scsi_write_complete(r, 0);
}
}
@ -1584,6 +1650,8 @@ static const SCSIReqOps scsi_disk_reqops = {
.write_data = scsi_write_data,
.cancel_io = scsi_cancel_io,
.get_buf = scsi_get_buf,
.load_request = scsi_disk_load_request,
.save_request = scsi_disk_save_request,
};
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
@ -1686,6 +1754,15 @@ static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
case WRITE_VERIFY_10:
case WRITE_VERIFY_12:
case WRITE_VERIFY_16:
/* If we are not using O_DIRECT, we might read stale data from the
* host cache if writes were made using other commands than these
* ones (such as WRITE SAME or EXTENDED COPY, etc.). So, without
* O_DIRECT everything must go through SG_IO.
*/
if (!(s->qdev.conf.bs->open_flags & BDRV_O_NOCACHE)) {
break;
}
/* MMC writing cannot be done via pread/pwrite, because it sometimes
* involves writing beyond the maximum LBA or to negative LBA (lead-in).
* And once you do these writes, reading from the block device is
@ -1696,10 +1773,11 @@ static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
* seen, but performance usually isn't paramount on optical media. So,
* just make scsi-block operate the same as scsi-generic for them.
*/
-        if (s->qdev.type != TYPE_ROM) {
-            return scsi_req_alloc(&scsi_disk_reqops, &s->qdev, tag, lun,
-                                  hba_private);
-        }
+        if (s->qdev.type == TYPE_ROM) {
+            break;
+        }
+        return scsi_req_alloc(&scsi_disk_reqops, &s->qdev, tag, lun,
+                              hba_private);
}
return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
@ -1718,6 +1796,22 @@ static Property scsi_hd_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_scsi_disk_state = {
.name = "scsi-disk",
.version_id = 1,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
VMSTATE_BOOL(media_changed, SCSIDiskState),
VMSTATE_BOOL(media_event, SCSIDiskState),
VMSTATE_BOOL(eject_request, SCSIDiskState),
VMSTATE_BOOL(tray_open, SCSIDiskState),
VMSTATE_BOOL(tray_locked, SCSIDiskState),
VMSTATE_END_OF_LIST()
}
};
static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@ -1731,6 +1825,7 @@ static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
dc->desc = "virtual SCSI disk";
dc->reset = scsi_disk_reset;
dc->props = scsi_hd_properties;
dc->vmsd = &vmstate_scsi_disk_state;
}
static TypeInfo scsi_hd_info = {
@ -1758,6 +1853,7 @@ static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
dc->desc = "virtual SCSI CD-ROM";
dc->reset = scsi_disk_reset;
dc->props = scsi_cd_properties;
dc->vmsd = &vmstate_scsi_disk_state;
}
static TypeInfo scsi_cd_info = {
@ -1785,6 +1881,7 @@ static void scsi_block_class_initfn(ObjectClass *klass, void *data)
dc->desc = "SCSI block device passthrough";
dc->reset = scsi_disk_reset;
dc->props = scsi_block_properties;
dc->vmsd = &vmstate_scsi_disk_state;
}
static TypeInfo scsi_block_info = {
@ -1814,6 +1911,7 @@ static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
dc->reset = scsi_disk_reset;
dc->props = scsi_disk_properties;
dc->vmsd = &vmstate_scsi_disk_state;
}
static TypeInfo scsi_disk_info = {


@ -59,6 +59,28 @@ typedef struct SCSIGenericReq {
sg_io_hdr_t io_header;
} SCSIGenericReq;
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
qemu_put_sbe32s(f, &r->buflen);
if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
assert(!r->req.sg);
qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
}
}
static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
qemu_get_sbe32s(f, &r->buflen);
if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
assert(!r->req.sg);
qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
}
}
static void scsi_free_request(SCSIRequest *req)
{
SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
@ -446,6 +468,8 @@ const SCSIReqOps scsi_generic_req_ops = {
.write_data = scsi_write_data,
.cancel_io = scsi_cancel_io,
.get_buf = scsi_get_buf,
.load_request = scsi_generic_load_request,
.save_request = scsi_generic_save_request,
};
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
@ -474,6 +498,7 @@ static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
dc->desc = "pass through generic scsi device (/dev/sg*)";
dc->reset = scsi_generic_reset;
dc->props = scsi_generic_properties;
dc->vmsd = &vmstate_scsi_device;
}
static TypeInfo scsi_generic_info = {


@ -46,8 +46,11 @@ struct SCSIRequest {
uint32_t tag;
uint32_t lun;
uint32_t status;
size_t resid;
SCSICommand cmd;
BlockDriverAIOCB *aiocb;
QEMUSGList *sg;
bool dma_started;
uint8_t sense[SCSI_SENSE_BUF_SIZE];
uint32_t sense_len;
bool enqueued;
@ -93,6 +96,16 @@ struct SCSIDevice
uint64_t max_lba;
};
extern const VMStateDescription vmstate_scsi_device;
#define VMSTATE_SCSI_DEVICE(_field, _state) { \
.name = (stringify(_field)), \
.size = sizeof(SCSIDevice), \
.vmsd = &vmstate_scsi_device, \
.flags = VMS_STRUCT, \
.offset = vmstate_offset_value(_state, _field, SCSIDevice), \
}
/* cdrom.c */
int cdrom_read_toc(int nb_sectors, uint8_t *buf, int msf, int start_track);
int cdrom_read_toc_raw(int nb_sectors, uint8_t *buf, int msf, int session_num);
@ -106,14 +119,21 @@ struct SCSIReqOps {
void (*write_data)(SCSIRequest *req);
void (*cancel_io)(SCSIRequest *req);
uint8_t *(*get_buf)(SCSIRequest *req);
void (*save_request)(QEMUFile *f, SCSIRequest *req);
void (*load_request)(QEMUFile *f, SCSIRequest *req);
};
struct SCSIBusInfo {
int tcq;
int max_channel, max_target, max_lun;
void (*transfer_data)(SCSIRequest *req, uint32_t arg);
-    void (*complete)(SCSIRequest *req, uint32_t arg);
+    void (*complete)(SCSIRequest *req, uint32_t arg, size_t resid);
void (*cancel)(SCSIRequest *req);
QEMUSGList *(*get_sg_list)(SCSIRequest *req);
void (*save_request)(QEMUFile *f, SCSIRequest *req);
void *(*load_request)(QEMUFile *f, SCSIRequest *req);
};
struct SCSIBus {

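As a reading aid, the following is a condensed sketch of what the extended SCSIBusInfo contract looks like from an HBA's point of view. All my_hba_* names and the MyHBARequest type are hypothetical; hw/virtio-scsi.c later in this series is the real user of these hooks.

/* Hypothetical HBA glue illustrating only the new hooks: the complete
 * callback now also receives the residual byte count, and get_sg_list
 * lets the core DMA straight into guest buffers instead of bouncing
 * through scsi_req_data(). */
#include "scsi.h"
#include "dma.h"

typedef struct MyHBARequest {
    QEMUSGList qsgl;   /* per-request scatter/gather list, filled elsewhere */
} MyHBARequest;

static void my_hba_command_complete(SCSIRequest *req, uint32_t status,
                                    size_t resid)
{
    /* report status and the residual count back to the guest-visible request */
}

static QEMUSGList *my_hba_get_sg_list(SCSIRequest *req)
{
    MyHBARequest *r = req->hba_private;
    return &r->qsgl;
}

static struct SCSIBusInfo my_hba_scsi_info = {
    .tcq = true,
    .max_target = 7,
    .max_lun = 7,
    .complete = my_hba_command_complete,
    .get_sg_list = my_hba_get_sg_list,
    /* .save_request/.load_request are optional; they let the HBA attach its
     * own per-request data to the new SCSIDevice vmstate stream. */
};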

@ -494,7 +494,7 @@ static void vscsi_transfer_data(SCSIRequest *sreq, uint32_t len)
}
/* Callback to indicate that the SCSI layer has completed a transfer. */
-static void vscsi_command_complete(SCSIRequest *sreq, uint32_t status)
+static void vscsi_command_complete(SCSIRequest *sreq, uint32_t status, size_t resid)
{
VSCSIState *s = DO_UPCAST(VSCSIState, vdev.qdev, sreq->bus->qbus.parent);
vscsi_req *req = sreq->hba_private;


@ -223,7 +223,7 @@ static void usb_msd_transfer_data(SCSIRequest *req, uint32_t len)
}
}
-static void usb_msd_command_complete(SCSIRequest *req, uint32_t status)
+static void usb_msd_command_complete(SCSIRequest *req, uint32_t status, size_t resid)
{
MSDState *s = DO_UPCAST(MSDState, dev.qdev, req->bus->qbus.parent);
USBPacket *p = s->packet;


@ -21,6 +21,7 @@
#include "virtio-blk.h"
#include "virtio-net.h"
#include "virtio-serial.h"
#include "virtio-scsi.h"
#include "pci.h"
#include "qemu-error.h"
#include "msix.h"
@ -930,12 +931,67 @@ static TypeInfo virtio_balloon_info = {
.class_init = virtio_balloon_class_init,
};
static int virtio_scsi_init_pci(PCIDevice *pci_dev)
{
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
VirtIODevice *vdev;
vdev = virtio_scsi_init(&pci_dev->qdev, &proxy->scsi);
if (!vdev) {
return -EINVAL;
}
vdev->nvectors = proxy->nvectors;
virtio_init_pci(proxy, vdev);
/* make the actual value visible */
proxy->nvectors = vdev->nvectors;
return 0;
}
static int virtio_scsi_exit_pci(PCIDevice *pci_dev)
{
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
virtio_scsi_exit(proxy->vdev);
return virtio_exit_pci(pci_dev);
}
static Property virtio_scsi_properties[] = {
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOPCIProxy, host_features, scsi),
DEFINE_PROP_END_OF_LIST(),
};
static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->init = virtio_scsi_init_pci;
k->exit = virtio_scsi_exit_pci;
k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
k->revision = 0x00;
k->class_id = PCI_CLASS_STORAGE_SCSI;
dc->reset = virtio_pci_reset;
dc->props = virtio_scsi_properties;
}
static TypeInfo virtio_scsi_info = {
.name = "virtio-scsi-pci",
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(VirtIOPCIProxy),
.class_init = virtio_scsi_class_init,
};
static void virtio_pci_register_types(void)
{
type_register_static(&virtio_blk_info);
type_register_static(&virtio_net_info);
type_register_static(&virtio_serial_info);
type_register_static(&virtio_balloon_info);
type_register_static(&virtio_scsi_info);
}
type_init(virtio_pci_register_types)


@ -17,6 +17,7 @@
#include "virtio-net.h"
#include "virtio-serial.h"
#include "virtio-scsi.h"
/* Performance improves when virtqueue kick processing is decoupled from the
* vcpu thread using ioeventfd for some devices. */
@ -40,6 +41,7 @@ typedef struct {
#endif
virtio_serial_conf serial;
virtio_net_conf net;
VirtIOSCSIConf scsi;
bool ioeventfd_disabled;
bool ioeventfd_started;
} VirtIOPCIProxy;

hw/virtio-scsi.c (new file)

@ -0,0 +1,617 @@
/*
* Virtio SCSI HBA
*
* Copyright IBM, Corp. 2010
* Copyright Red Hat, Inc. 2011
*
* Authors:
* Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
* Paolo Bonzini <pbonzini@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "virtio-scsi.h"
#include <hw/scsi.h>
#include <hw/scsi-defs.h>
#define VIRTIO_SCSI_VQ_SIZE 128
#define VIRTIO_SCSI_CDB_SIZE 32
#define VIRTIO_SCSI_SENSE_SIZE 96
#define VIRTIO_SCSI_MAX_CHANNEL 0
#define VIRTIO_SCSI_MAX_TARGET 255
#define VIRTIO_SCSI_MAX_LUN 16383
/* Response codes */
#define VIRTIO_SCSI_S_OK 0
#define VIRTIO_SCSI_S_OVERRUN 1
#define VIRTIO_SCSI_S_ABORTED 2
#define VIRTIO_SCSI_S_BAD_TARGET 3
#define VIRTIO_SCSI_S_RESET 4
#define VIRTIO_SCSI_S_BUSY 5
#define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6
#define VIRTIO_SCSI_S_TARGET_FAILURE 7
#define VIRTIO_SCSI_S_NEXUS_FAILURE 8
#define VIRTIO_SCSI_S_FAILURE 9
#define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10
#define VIRTIO_SCSI_S_FUNCTION_REJECTED 11
#define VIRTIO_SCSI_S_INCORRECT_LUN 12
/* Controlq type codes. */
#define VIRTIO_SCSI_T_TMF 0
#define VIRTIO_SCSI_T_AN_QUERY 1
#define VIRTIO_SCSI_T_AN_SUBSCRIBE 2
/* Valid TMF subtypes. */
#define VIRTIO_SCSI_T_TMF_ABORT_TASK 0
#define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1
#define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2
#define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3
#define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4
#define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5
#define VIRTIO_SCSI_T_TMF_QUERY_TASK 6
#define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7
/* Events. */
#define VIRTIO_SCSI_T_EVENTS_MISSED 0x80000000
#define VIRTIO_SCSI_T_NO_EVENT 0
#define VIRTIO_SCSI_T_TRANSPORT_RESET 1
#define VIRTIO_SCSI_T_ASYNC_NOTIFY 2
/* SCSI command request, followed by data-out */
typedef struct {
uint8_t lun[8]; /* Logical Unit Number */
uint64_t tag; /* Command identifier */
uint8_t task_attr; /* Task attribute */
uint8_t prio;
uint8_t crn;
uint8_t cdb[];
} QEMU_PACKED VirtIOSCSICmdReq;
/* Response, followed by sense data and data-in */
typedef struct {
uint32_t sense_len; /* Sense data length */
uint32_t resid; /* Residual bytes in data buffer */
uint16_t status_qualifier; /* Status qualifier */
uint8_t status; /* Command completion status */
uint8_t response; /* Response values */
uint8_t sense[];
} QEMU_PACKED VirtIOSCSICmdResp;
/* Task Management Request */
typedef struct {
uint32_t type;
uint32_t subtype;
uint8_t lun[8];
uint64_t tag;
} QEMU_PACKED VirtIOSCSICtrlTMFReq;
typedef struct {
uint8_t response;
} QEMU_PACKED VirtIOSCSICtrlTMFResp;
/* Asynchronous notification query/subscription */
typedef struct {
uint32_t type;
uint8_t lun[8];
uint32_t event_requested;
} QEMU_PACKED VirtIOSCSICtrlANReq;
typedef struct {
uint32_t event_actual;
uint8_t response;
} QEMU_PACKED VirtIOSCSICtrlANResp;
typedef struct {
uint32_t event;
uint8_t lun[8];
uint32_t reason;
} QEMU_PACKED VirtIOSCSIEvent;
typedef struct {
uint32_t num_queues;
uint32_t seg_max;
uint32_t max_sectors;
uint32_t cmd_per_lun;
uint32_t event_info_size;
uint32_t sense_size;
uint32_t cdb_size;
uint16_t max_channel;
uint16_t max_target;
uint32_t max_lun;
} QEMU_PACKED VirtIOSCSIConfig;
typedef struct {
VirtIODevice vdev;
DeviceState *qdev;
VirtIOSCSIConf *conf;
SCSIBus bus;
VirtQueue *ctrl_vq;
VirtQueue *event_vq;
VirtQueue *cmd_vq;
uint32_t sense_size;
uint32_t cdb_size;
int resetting;
} VirtIOSCSI;
typedef struct VirtIOSCSIReq {
VirtIOSCSI *dev;
VirtQueue *vq;
VirtQueueElement elem;
QEMUSGList qsgl;
SCSIRequest *sreq;
union {
char *buf;
VirtIOSCSICmdReq *cmd;
VirtIOSCSICtrlTMFReq *tmf;
VirtIOSCSICtrlANReq *an;
} req;
union {
char *buf;
VirtIOSCSICmdResp *cmd;
VirtIOSCSICtrlTMFResp *tmf;
VirtIOSCSICtrlANResp *an;
VirtIOSCSIEvent *event;
} resp;
} VirtIOSCSIReq;
static inline int virtio_scsi_get_lun(uint8_t *lun)
{
return ((lun[2] << 8) | lun[3]) & 0x3FFF;
}
static inline SCSIDevice *virtio_scsi_device_find(VirtIOSCSI *s, uint8_t *lun)
{
if (lun[0] != 1) {
return NULL;
}
if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) {
return NULL;
}
return scsi_device_find(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
}
static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
{
VirtIOSCSI *s = req->dev;
VirtQueue *vq = req->vq;
virtqueue_push(vq, &req->elem, req->qsgl.size + req->elem.in_sg[0].iov_len);
qemu_sglist_destroy(&req->qsgl);
if (req->sreq) {
req->sreq->hba_private = NULL;
scsi_req_unref(req->sreq);
}
g_free(req);
virtio_notify(&s->vdev, vq);
}
static void virtio_scsi_bad_req(void)
{
error_report("wrong size for virtio-scsi headers");
exit(1);
}
static void qemu_sgl_init_external(QEMUSGList *qsgl, struct iovec *sg,
target_phys_addr_t *addr, int num)
{
memset(qsgl, 0, sizeof(*qsgl));
while (num--) {
qemu_sglist_add(qsgl, *(addr++), (sg++)->iov_len);
}
}
static void virtio_scsi_parse_req(VirtIOSCSI *s, VirtQueue *vq,
VirtIOSCSIReq *req)
{
assert(req->elem.out_num && req->elem.in_num);
req->vq = vq;
req->dev = s;
req->sreq = NULL;
req->req.buf = req->elem.out_sg[0].iov_base;
req->resp.buf = req->elem.in_sg[0].iov_base;
if (req->elem.out_num > 1) {
qemu_sgl_init_external(&req->qsgl, &req->elem.out_sg[1],
&req->elem.out_addr[1],
req->elem.out_num - 1);
} else {
qemu_sgl_init_external(&req->qsgl, &req->elem.in_sg[1],
&req->elem.in_addr[1],
req->elem.in_num - 1);
}
}
static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
{
VirtIOSCSIReq *req;
req = g_malloc(sizeof(*req));
if (!virtqueue_pop(vq, &req->elem)) {
g_free(req);
return NULL;
}
virtio_scsi_parse_req(s, vq, req);
return req;
}
static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
VirtIOSCSIReq *req = sreq->hba_private;
qemu_put_buffer(f, (unsigned char *)&req->elem, sizeof(req->elem));
}
static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
SCSIBus *bus = sreq->bus;
VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
VirtIOSCSIReq *req;
req = g_malloc(sizeof(*req));
qemu_get_buffer(f, (unsigned char *)&req->elem, sizeof(req->elem));
virtio_scsi_parse_req(s, s->cmd_vq, req);
scsi_req_ref(sreq);
req->sreq = sreq;
if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
int req_mode =
(req->elem.in_num > 1 ? SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV);
assert(req->sreq->cmd.mode == req_mode);
}
return req;
}
static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
SCSIDevice *d = virtio_scsi_device_find(s, req->req.tmf->lun);
SCSIRequest *r, *next;
DeviceState *qdev;
int target;
/* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
req->resp.tmf->response = VIRTIO_SCSI_S_OK;
switch (req->req.tmf->subtype) {
case VIRTIO_SCSI_T_TMF_ABORT_TASK:
case VIRTIO_SCSI_T_TMF_QUERY_TASK:
if (!d) {
goto fail;
}
if (d->lun != virtio_scsi_get_lun(req->req.tmf->lun)) {
goto incorrect_lun;
}
QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
if (r->tag == req->req.tmf->tag) {
break;
}
}
if (r && r->hba_private) {
if (req->req.tmf->subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
/* "If the specified command is present in the task set, then
* return a service response set to FUNCTION SUCCEEDED".
*/
req->resp.tmf->response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
} else {
scsi_req_cancel(r);
}
}
break;
case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
if (!d) {
goto fail;
}
if (d->lun != virtio_scsi_get_lun(req->req.tmf->lun)) {
goto incorrect_lun;
}
s->resetting++;
qdev_reset_all(&d->qdev);
s->resetting--;
break;
case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
if (!d) {
goto fail;
}
if (d->lun != virtio_scsi_get_lun(req->req.tmf->lun)) {
goto incorrect_lun;
}
QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
if (r->hba_private) {
if (req->req.tmf->subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
/* "If there is any command present in the task set, then
* return a service response set to FUNCTION SUCCEEDED".
*/
req->resp.tmf->response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
break;
} else {
scsi_req_cancel(r);
}
}
}
break;
case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
target = req->req.tmf->lun[1];
s->resetting++;
QTAILQ_FOREACH(qdev, &s->bus.qbus.children, sibling) {
d = DO_UPCAST(SCSIDevice, qdev, qdev);
if (d->channel == 0 && d->id == target) {
qdev_reset_all(&d->qdev);
}
}
s->resetting--;
break;
case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
default:
req->resp.tmf->response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
break;
}
return;
incorrect_lun:
req->resp.tmf->response = VIRTIO_SCSI_S_INCORRECT_LUN;
return;
fail:
req->resp.tmf->response = VIRTIO_SCSI_S_BAD_TARGET;
}
static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
VirtIOSCSIReq *req;
while ((req = virtio_scsi_pop_req(s, vq))) {
int out_size, in_size;
if (req->elem.out_num < 1 || req->elem.in_num < 1) {
virtio_scsi_bad_req();
continue;
}
out_size = req->elem.out_sg[0].iov_len;
in_size = req->elem.in_sg[0].iov_len;
if (req->req.tmf->type == VIRTIO_SCSI_T_TMF) {
if (out_size < sizeof(VirtIOSCSICtrlTMFReq) ||
in_size < sizeof(VirtIOSCSICtrlTMFResp)) {
virtio_scsi_bad_req();
}
virtio_scsi_do_tmf(s, req);
} else if (req->req.tmf->type == VIRTIO_SCSI_T_AN_QUERY ||
req->req.tmf->type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
if (out_size < sizeof(VirtIOSCSICtrlANReq) ||
in_size < sizeof(VirtIOSCSICtrlANResp)) {
virtio_scsi_bad_req();
}
req->resp.an->event_actual = 0;
req->resp.an->response = VIRTIO_SCSI_S_OK;
}
virtio_scsi_complete_req(req);
}
}
static void virtio_scsi_command_complete(SCSIRequest *r, uint32_t status,
size_t resid)
{
VirtIOSCSIReq *req = r->hba_private;
req->resp.cmd->response = VIRTIO_SCSI_S_OK;
req->resp.cmd->status = status;
if (req->resp.cmd->status == GOOD) {
req->resp.cmd->resid = resid;
} else {
req->resp.cmd->resid = 0;
req->resp.cmd->sense_len =
scsi_req_get_sense(r, req->resp.cmd->sense, VIRTIO_SCSI_SENSE_SIZE);
}
virtio_scsi_complete_req(req);
}
static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
{
VirtIOSCSIReq *req = r->hba_private;
return &req->qsgl;
}
static void virtio_scsi_request_cancelled(SCSIRequest *r)
{
VirtIOSCSIReq *req = r->hba_private;
if (!req) {
return;
}
if (req->dev->resetting) {
req->resp.cmd->response = VIRTIO_SCSI_S_RESET;
} else {
req->resp.cmd->response = VIRTIO_SCSI_S_ABORTED;
}
virtio_scsi_complete_req(req);
}
static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
req->resp.cmd->response = VIRTIO_SCSI_S_FAILURE;
virtio_scsi_complete_req(req);
}
static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
VirtIOSCSIReq *req;
int n;
while ((req = virtio_scsi_pop_req(s, vq))) {
SCSIDevice *d;
int out_size, in_size;
if (req->elem.out_num < 1 || req->elem.in_num < 1) {
virtio_scsi_bad_req();
}
out_size = req->elem.out_sg[0].iov_len;
in_size = req->elem.in_sg[0].iov_len;
if (out_size < sizeof(VirtIOSCSICmdReq) + s->cdb_size ||
in_size < sizeof(VirtIOSCSICmdResp) + s->sense_size) {
virtio_scsi_bad_req();
}
if (req->elem.out_num > 1 && req->elem.in_num > 1) {
virtio_scsi_fail_cmd_req(req);
continue;
}
d = virtio_scsi_device_find(s, req->req.cmd->lun);
if (!d) {
req->resp.cmd->response = VIRTIO_SCSI_S_BAD_TARGET;
virtio_scsi_complete_req(req);
continue;
}
req->sreq = scsi_req_new(d, req->req.cmd->tag,
virtio_scsi_get_lun(req->req.cmd->lun),
req->req.cmd->cdb, req);
if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
int req_mode =
(req->elem.in_num > 1 ? SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV);
if (req->sreq->cmd.mode != req_mode ||
req->sreq->cmd.xfer > req->qsgl.size) {
req->resp.cmd->response = VIRTIO_SCSI_S_OVERRUN;
virtio_scsi_complete_req(req);
continue;
}
}
n = scsi_req_enqueue(req->sreq);
if (n) {
scsi_req_continue(req->sreq);
}
}
}
static void virtio_scsi_get_config(VirtIODevice *vdev,
uint8_t *config)
{
VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
stl_raw(&scsiconf->num_queues, s->conf->num_queues);
stl_raw(&scsiconf->seg_max, 128 - 2);
stl_raw(&scsiconf->max_sectors, s->conf->max_sectors);
stl_raw(&scsiconf->cmd_per_lun, s->conf->cmd_per_lun);
stl_raw(&scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
stl_raw(&scsiconf->sense_size, s->sense_size);
stl_raw(&scsiconf->cdb_size, s->cdb_size);
stl_raw(&scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
stl_raw(&scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
stl_raw(&scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
}
static void virtio_scsi_set_config(VirtIODevice *vdev,
const uint8_t *config)
{
VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
if ((uint32_t) ldl_raw(&scsiconf->sense_size) >= 65536 ||
(uint32_t) ldl_raw(&scsiconf->cdb_size) >= 256) {
error_report("bad data written to virtio-scsi configuration space");
exit(1);
}
s->sense_size = ldl_raw(&scsiconf->sense_size);
s->cdb_size = ldl_raw(&scsiconf->cdb_size);
}
static uint32_t virtio_scsi_get_features(VirtIODevice *vdev,
uint32_t requested_features)
{
return requested_features;
}
static void virtio_scsi_reset(VirtIODevice *vdev)
{
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
s->sense_size = VIRTIO_SCSI_SENSE_SIZE;
s->cdb_size = VIRTIO_SCSI_CDB_SIZE;
}
/* The device does not have anything to save beyond the virtio data.
* Request data is saved with callbacks from SCSI devices.
*/
static void virtio_scsi_save(QEMUFile *f, void *opaque)
{
VirtIOSCSI *s = opaque;
virtio_save(&s->vdev, f);
}
static int virtio_scsi_load(QEMUFile *f, void *opaque, int version_id)
{
VirtIOSCSI *s = opaque;
virtio_load(&s->vdev, f);
return 0;
}
static struct SCSIBusInfo virtio_scsi_scsi_info = {
.tcq = true,
.max_channel = VIRTIO_SCSI_MAX_CHANNEL,
.max_target = VIRTIO_SCSI_MAX_TARGET,
.max_lun = VIRTIO_SCSI_MAX_LUN,
.complete = virtio_scsi_command_complete,
.cancel = virtio_scsi_request_cancelled,
.get_sg_list = virtio_scsi_get_sg_list,
.save_request = virtio_scsi_save_request,
.load_request = virtio_scsi_load_request,
};
VirtIODevice *virtio_scsi_init(DeviceState *dev, VirtIOSCSIConf *proxyconf)
{
VirtIOSCSI *s;
static int virtio_scsi_id;
s = (VirtIOSCSI *)virtio_common_init("virtio-scsi", VIRTIO_ID_SCSI,
sizeof(VirtIOSCSIConfig),
sizeof(VirtIOSCSI));
s->qdev = dev;
s->conf = proxyconf;
/* TODO set up vdev function pointers */
s->vdev.get_config = virtio_scsi_get_config;
s->vdev.set_config = virtio_scsi_set_config;
s->vdev.get_features = virtio_scsi_get_features;
s->vdev.reset = virtio_scsi_reset;
s->ctrl_vq = virtio_add_queue(&s->vdev, VIRTIO_SCSI_VQ_SIZE,
virtio_scsi_handle_ctrl);
s->event_vq = virtio_add_queue(&s->vdev, VIRTIO_SCSI_VQ_SIZE,
NULL);
s->cmd_vq = virtio_add_queue(&s->vdev, VIRTIO_SCSI_VQ_SIZE,
virtio_scsi_handle_cmd);
scsi_bus_new(&s->bus, dev, &virtio_scsi_scsi_info);
if (!dev->hotplugged) {
scsi_bus_legacy_handle_cmdline(&s->bus);
}
register_savevm(dev, "virtio-scsi", virtio_scsi_id++, 1,
virtio_scsi_save, virtio_scsi_load, s);
return &s->vdev;
}
void virtio_scsi_exit(VirtIODevice *vdev)
{
virtio_cleanup(vdev);
}
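
For reference, a small self-contained sketch of the LUN addressing that virtio_scsi_get_lun() and virtio_scsi_device_find() above expect from the guest. The encoder is hypothetical (not part of the device) and mirrors the SAM flat-space format: lun[0] = 1, lun[1] = target, lun[2..3] = 0x4000 | unit.

/* Hypothetical encoder for the 8-byte LUN field carried by the command and
 * TMF requests above, shown only to document the addressing the device
 * accepts. */
#include <stdint.h>
#include <string.h>

static void virtio_scsi_encode_lun(uint8_t lun[8], uint8_t target, uint16_t unit)
{
    memset(lun, 0, 8);
    lun[0] = 1;                           /* fixed prefix checked by the device */
    lun[1] = target;                      /* target id on the single channel 0 */
    lun[2] = 0x40 | ((unit >> 8) & 0x3f); /* flat-space addressing, high bits */
    lun[3] = unit & 0xff;                 /* low bits */
}

With this layout, ((lun[2] << 8) | lun[3]) & 0x3FFF recovers the unit number, which is why VIRTIO_SCSI_MAX_LUN is 16383.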

hw/virtio-scsi.h (new file)

@ -0,0 +1,36 @@
/*
* Virtio SCSI HBA
*
* Copyright IBM, Corp. 2010
*
* Authors:
* Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#ifndef _QEMU_VIRTIO_SCSI_H
#define _QEMU_VIRTIO_SCSI_H
#include "virtio.h"
#include "net.h"
#include "pci.h"
/* The ID for virtio_scsi */
#define VIRTIO_ID_SCSI 8
struct VirtIOSCSIConf {
uint32_t num_queues;
uint32_t max_sectors;
uint32_t cmd_per_lun;
};
#define DEFINE_VIRTIO_SCSI_PROPERTIES(_state, _features_field, _conf_field) \
DEFINE_VIRTIO_COMMON_FEATURES(_state, _features_field), \
DEFINE_PROP_UINT32("num_queues", _state, _conf_field.num_queues, 1), \
DEFINE_PROP_UINT32("max_sectors", _state, _conf_field.max_sectors, 0xFFFF), \
DEFINE_PROP_UINT32("cmd_per_lun", _state, _conf_field.cmd_per_lun, 128)
#endif /* _QEMU_VIRTIO_SCSI_H */


@ -199,6 +199,8 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
typedef struct virtio_serial_conf virtio_serial_conf;
VirtIODevice *virtio_serial_init(DeviceState *dev, virtio_serial_conf *serial);
VirtIODevice *virtio_balloon_init(DeviceState *dev);
typedef struct VirtIOSCSIConf VirtIOSCSIConf;
VirtIODevice *virtio_scsi_init(DeviceState *dev, VirtIOSCSIConf *conf);
#ifdef CONFIG_LINUX
VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf);
#endif
@ -208,6 +210,7 @@ void virtio_net_exit(VirtIODevice *vdev);
void virtio_blk_exit(VirtIODevice *vdev);
void virtio_serial_exit(VirtIODevice *vdev);
void virtio_balloon_exit(VirtIODevice *vdev);
void virtio_scsi_exit(VirtIODevice *vdev);
#define DEFINE_VIRTIO_COMMON_FEATURES(_state, _field) \
DEFINE_PROP_BIT("indirect_desc", _state, _field, \