block: Make the block accounting functions operate on BlockAcctStats

This is the next step in decoupling the block accounting functions from
BlockDriverState.
In a future commit the BlockAcctStats structure will be moved from
BlockDriverState to the device model structures.

Note that bdrv_get_stats was introduced so device models can retrieve the
BlockAcctStats structure of a BlockDriverState without being aware of its
layout.
This function should go away once BlockAcctStats is embedded in the device
model structures.

CC: Kevin Wolf <kwolf@redhat.com>
CC: Stefan Hajnoczi <stefanha@redhat.com>
CC: Keith Busch <keith.busch@intel.com>
CC: Anthony Liguori <aliguori@amazon.com>
CC: "Michael S. Tsirkin" <mst@redhat.com>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Eric Blake <eblake@redhat.com>
CC: Peter Maydell <peter.maydell@linaro.org>
CC: Michael Tokarev <mjt@tls.msk.ru>
CC: John Snow <jsnow@redhat.com>
CC: Markus Armbruster <armbru@redhat.com>
CC: Alexander Graf <agraf@suse.de>
CC: Max Reitz <mreitz@redhat.com>

Signed-off-by: Benoît Canet <benoit.canet@nodalink.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Benoît Canet 2014-09-05 15:46:18 +02:00 committed by Kevin Wolf
parent 28298fd3d9
commit 5366d0c8bc
13 changed files with 108 additions and 79 deletions
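
For context, a minimal sketch (not part of the patch) of the calling convention this series moves towards: a device model fetches the BlockAcctStats pointer with bdrv_get_stats() and hands it to the block_acct_* helpers. Only bdrv_get_stats(), block_acct_start() and block_acct_done() come from the diff below; the wrapper function and its parameters are hypothetical.

/* Hypothetical device-model write path illustrating the new API.
 * Only bdrv_get_stats(), block_acct_start() and block_acct_done() are
 * introduced by this commit; everything else here is illustrative. */
static void example_device_write(BlockDriverState *bs, BlockAcctCookie *cookie,
                                 QEMUIOVector *qiov)
{
    /* Account against the BlockAcctStats structure, not the BDS itself. */
    block_acct_start(bdrv_get_stats(bs), cookie, qiov->size, BLOCK_ACCT_WRITE);

    /* ... issue the actual write, e.g. with bdrv_aio_writev() ... */

    /* Once the request completes (normally in the AIO callback), fold
     * bytes, op count and latency into the stats. */
    block_acct_done(bdrv_get_stats(bs), cookie);
}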

block.c

@@ -3363,7 +3363,7 @@ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
     bdrv_set_dirty(bs, sector_num, nb_sectors);
 
-    bdrv_acct_highest_sector(bs, sector_num, nb_sectors);
+    block_acct_highest_sector(&bs->stats, sector_num, nb_sectors);
 
     if (bs->growable && ret >= 0) {
         bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
@@ -6087,3 +6087,14 @@ void bdrv_refresh_filename(BlockDriverState *bs)
         QDECREF(json);
     }
 }
+
+/* This accessor function purpose is to allow the device models to access the
+ * BlockAcctStats structure embedded inside a BlockDriverState without being
+ * aware of the BlockDriverState structure layout.
+ * It will go away when the BlockAcctStats structure will be moved inside
+ * the device models.
+ */
+BlockAcctStats *bdrv_get_stats(BlockDriverState *bs)
+{
+    return &bs->stats;
+}

block/accounting.c

@@ -25,9 +25,8 @@
 #include "block/accounting.h"
 #include "block/block_int.h"
 
-void
-bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
-                enum BlockAcctType type)
+void block_acct_start(BlockAcctStats *stats, BlockAcctCookie *cookie,
+                      int64_t bytes, enum BlockAcctType type)
 {
     assert(type < BLOCK_MAX_IOTYPE);
@@ -36,22 +35,20 @@ bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
     cookie->type = type;
 }
 
-void
-bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
+void block_acct_done(BlockAcctStats *stats, BlockAcctCookie *cookie)
 {
     assert(cookie->type < BLOCK_MAX_IOTYPE);
 
-    bs->stats.nr_bytes[cookie->type] += cookie->bytes;
-    bs->stats.nr_ops[cookie->type]++;
-    bs->stats.total_time_ns[cookie->type] += get_clock() -
-        cookie->start_time_ns;
+    stats->nr_bytes[cookie->type] += cookie->bytes;
+    stats->nr_ops[cookie->type]++;
+    stats->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
 }
 
-void bdrv_acct_highest_sector(BlockDriverState *bs, int64_t sector_num,
-                              unsigned int nb_sectors)
+void block_acct_highest_sector(BlockAcctStats *stats, int64_t sector_num,
+                               unsigned int nb_sectors)
 {
-    if (bs->stats.wr_highest_sector < sector_num + nb_sectors - 1) {
-        bs->stats.wr_highest_sector = sector_num + nb_sectors - 1;
+    if (stats->wr_highest_sector < sector_num + nb_sectors - 1) {
+        stats->wr_highest_sector = sector_num + nb_sectors - 1;
     }
 }

dma-helpers.c

@@ -277,5 +277,5 @@ uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
 void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                     QEMUSGList *sg, enum BlockAcctType type)
 {
-    bdrv_acct_start(bs, cookie, sg->size, type);
+    block_acct_start(bdrv_get_stats(bs), cookie, sg->size, type);
 }

hw/block/nvme.c

@@ -197,7 +197,7 @@ static void nvme_rw_cb(void *opaque, int ret)
     NvmeCtrl *n = sq->ctrl;
     NvmeCQueue *cq = n->cq[sq->cqid];
 
-    bdrv_acct_done(n->conf.bs, &req->acct);
+    block_acct_done(bdrv_get_stats(n->conf.bs), &req->acct);
     if (!ret) {
         req->status = NVME_SUCCESS;
     } else {

hw/block/virtio-blk.c

@@ -74,7 +74,7 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
         s->rq = req;
     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
         virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
-        bdrv_acct_done(s->bs, &req->acct);
+        block_acct_done(bdrv_get_stats(s->bs), &req->acct);
         virtio_blk_free_request(req);
     }
@@ -96,7 +96,7 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
     }
 
     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
-    bdrv_acct_done(req->dev->bs, &req->acct);
+    block_acct_done(bdrv_get_stats(req->dev->bs), &req->acct);
     virtio_blk_free_request(req);
 }
@@ -111,7 +111,7 @@ static void virtio_blk_flush_complete(void *opaque, int ret)
     }
 
     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
-    bdrv_acct_done(req->dev->bs, &req->acct);
+    block_acct_done(bdrv_get_stats(req->dev->bs), &req->acct);
     virtio_blk_free_request(req);
 }
@@ -279,7 +279,8 @@ void virtio_submit_multiwrite(BlockDriverState *bs, MultiReqBuffer *mrb)
 static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
 {
-    bdrv_acct_start(req->dev->bs, &req->acct, 0, BLOCK_ACCT_FLUSH);
+    block_acct_start(bdrv_get_stats(req->dev->bs), &req->acct, 0,
+                     BLOCK_ACCT_FLUSH);
 
     /*
      * Make sure all outstanding writes are posted to the backing device.
@@ -322,7 +323,8 @@ static void virtio_blk_handle_write(VirtIOBlockReq *req, MultiReqBuffer *mrb)
         return;
     }
 
-    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BLOCK_ACCT_WRITE);
+    block_acct_start(bdrv_get_stats(req->dev->bs), &req->acct, req->qiov.size,
+                     BLOCK_ACCT_WRITE);
 
     if (mrb->num_writes == 32) {
         virtio_submit_multiwrite(req->dev->bs, mrb);
@@ -353,7 +355,8 @@ static void virtio_blk_handle_read(VirtIOBlockReq *req)
         return;
     }
 
-    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BLOCK_ACCT_READ);
+    block_acct_start(bdrv_get_stats(req->dev->bs), &req->acct, req->qiov.size,
+                     BLOCK_ACCT_READ);
     bdrv_aio_readv(req->dev->bs, sector, &req->qiov,
                    req->qiov.size / BDRV_SECTOR_SIZE,
                    virtio_blk_rw_complete, req);

hw/block/xen_disk.c

@@ -493,7 +493,7 @@ static void qemu_aio_complete(void *opaque, int ret)
             break;
         }
     case BLKIF_OP_READ:
-        bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
+        block_acct_done(bdrv_get_stats(ioreq->blkdev->bs), &ioreq->acct);
         break;
     case BLKIF_OP_DISCARD:
    default:
@@ -518,8 +518,8 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
     switch (ioreq->req.operation) {
     case BLKIF_OP_READ:
-        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size,
-                        BLOCK_ACCT_READ);
+        block_acct_start(bdrv_get_stats(blkdev->bs), &ioreq->acct,
+                         ioreq->v.size, BLOCK_ACCT_READ);
         ioreq->aio_inflight++;
         bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
@@ -531,8 +531,8 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
             break;
         }
-        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size,
-                        BLOCK_ACCT_WRITE);
+        block_acct_start(bdrv_get_stats(blkdev->bs), &ioreq->acct,
+                         ioreq->v.size, BLOCK_ACCT_WRITE);
         ioreq->aio_inflight++;
         bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                         &ioreq->v, ioreq->v.size / BLOCK_SIZE,

hw/ide/ahci.c

@@ -809,7 +809,8 @@ static void ncq_cb(void *opaque, int ret)
     DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n",
             ncq_tfs->tag);
 
-    bdrv_acct_done(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct);
+    block_acct_done(bdrv_get_stats(ncq_tfs->drive->port.ifs[0].bs),
+                    &ncq_tfs->acct);
     qemu_sglist_destroy(&ncq_tfs->sglist);
     ncq_tfs->used = 0;
 }

hw/ide/atapi.c

@@ -110,14 +110,16 @@ static int cd_read_sector(IDEState *s, int lba, uint8_t *buf, int sector_size)
     switch(sector_size) {
     case 2048:
-        bdrv_acct_start(s->bs, &s->acct, 4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
+        block_acct_start(bdrv_get_stats(s->bs), &s->acct,
+                         4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
         ret = bdrv_read(s->bs, (int64_t)lba << 2, buf, 4);
-        bdrv_acct_done(s->bs, &s->acct);
+        block_acct_done(bdrv_get_stats(s->bs), &s->acct);
         break;
     case 2352:
-        bdrv_acct_start(s->bs, &s->acct, 4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
+        block_acct_start(bdrv_get_stats(s->bs), &s->acct,
+                         4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
         ret = bdrv_read(s->bs, (int64_t)lba << 2, buf + 16, 4);
-        bdrv_acct_done(s->bs, &s->acct);
+        block_acct_done(bdrv_get_stats(s->bs), &s->acct);
         if (ret < 0)
             return ret;
         cd_data_to_raw(buf, lba);
@@ -253,7 +255,8 @@ static void ide_atapi_cmd_reply(IDEState *s, int size, int max_size)
     s->io_buffer_index = 0;
 
     if (s->atapi_dma) {
-        bdrv_acct_start(s->bs, &s->acct, size, BLOCK_ACCT_READ);
+        block_acct_start(bdrv_get_stats(s->bs), &s->acct, size,
+                         BLOCK_ACCT_READ);
         s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
         ide_start_dma(s, ide_atapi_cmd_read_dma_cb);
     } else {
@@ -354,7 +357,7 @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
     return;
 
 eot:
-    bdrv_acct_done(s->bs, &s->acct);
+    block_acct_done(bdrv_get_stats(s->bs), &s->acct);
     ide_set_inactive(s, false);
 }
@@ -369,7 +372,8 @@ static void ide_atapi_cmd_read_dma(IDEState *s, int lba, int nb_sectors,
     s->io_buffer_size = 0;
     s->cd_sector_size = sector_size;
 
-    bdrv_acct_start(s->bs, &s->acct, s->packet_transfer_size, BLOCK_ACCT_READ);
+    block_acct_start(bdrv_get_stats(s->bs), &s->acct, s->packet_transfer_size,
+                     BLOCK_ACCT_READ);
 
     /* XXX: check if BUSY_STAT should be set */
     s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;

hw/ide/core.c

@@ -568,7 +568,7 @@ static void ide_sector_read_cb(void *opaque, int ret)
     s->pio_aiocb = NULL;
     s->status &= ~BUSY_STAT;
 
-    bdrv_acct_done(s->bs, &s->acct);
+    block_acct_done(bdrv_get_stats(s->bs), &s->acct);
     if (ret != 0) {
         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
                                 IDE_RETRY_READ)) {
@@ -624,7 +624,8 @@ void ide_sector_read(IDEState *s)
     s->iov.iov_len = n * BDRV_SECTOR_SIZE;
     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
 
-    bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
+    block_acct_start(bdrv_get_stats(s->bs), &s->acct,
+                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
     s->pio_aiocb = bdrv_aio_readv(s->bs, sector_num, &s->qiov, n,
                                   ide_sector_read_cb, s);
 }
@@ -756,7 +757,7 @@ void ide_dma_cb(void *opaque, int ret)
 eot:
     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
-        bdrv_acct_done(s->bs, &s->acct);
+        block_acct_done(bdrv_get_stats(s->bs), &s->acct);
     }
     ide_set_inactive(s, stay_active);
 }
@@ -770,12 +771,12 @@ static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
     switch (dma_cmd) {
     case IDE_DMA_READ:
-        bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
-                        BLOCK_ACCT_READ);
+        block_acct_start(bdrv_get_stats(s->bs), &s->acct,
+                         s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
         break;
     case IDE_DMA_WRITE:
-        bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
-                        BLOCK_ACCT_WRITE);
+        block_acct_start(bdrv_get_stats(s->bs), &s->acct,
+                         s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
         break;
     default:
         break;
@@ -802,7 +803,7 @@ static void ide_sector_write_cb(void *opaque, int ret)
     IDEState *s = opaque;
     int n;
 
-    bdrv_acct_done(s->bs, &s->acct);
+    block_acct_done(bdrv_get_stats(s->bs), &s->acct);
 
     s->pio_aiocb = NULL;
     s->status &= ~BUSY_STAT;
@@ -869,7 +870,8 @@ void ide_sector_write(IDEState *s)
     s->iov.iov_len = n * BDRV_SECTOR_SIZE;
     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
 
-    bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
+    block_acct_start(bdrv_get_stats(s->bs), &s->acct,
+                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
     s->pio_aiocb = bdrv_aio_writev(s->bs, sector_num, &s->qiov, n,
                                    ide_sector_write_cb, s);
 }
@@ -888,7 +890,7 @@ static void ide_flush_cb(void *opaque, int ret)
     }
 
     if (s->bs) {
-        bdrv_acct_done(s->bs, &s->acct);
+        block_acct_done(bdrv_get_stats(s->bs), &s->acct);
     }
     s->status = READY_STAT | SEEK_STAT;
     ide_cmd_done(s);
@@ -903,7 +905,7 @@ void ide_flush_cache(IDEState *s)
     }
 
     s->status |= BUSY_STAT;
-    bdrv_acct_start(s->bs, &s->acct, 0, BLOCK_ACCT_FLUSH);
+    block_acct_start(bdrv_get_stats(s->bs), &s->acct, 0, BLOCK_ACCT_FLUSH);
     s->pio_aiocb = bdrv_aio_flush(s->bs, ide_flush_cb, s);
 }

hw/ide/macio.c

@@ -171,7 +171,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
 done:
     MACIO_DPRINTF("done DMA\n");
-    bdrv_acct_done(s->bs, &s->acct);
+    block_acct_done(bdrv_get_stats(s->bs), &s->acct);
     io->dma_end(opaque);
 }
@@ -352,7 +352,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
 done:
     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
-        bdrv_acct_done(s->bs, &s->acct);
+        block_acct_done(bdrv_get_stats(s->bs), &s->acct);
     }
     io->dma_end(io);
 }
@@ -370,8 +370,8 @@ static void pmac_ide_transfer(DBDMA_io *io)
         /* Handle non-block ATAPI DMA transfers */
         if (s->lba == -1) {
             s->io_buffer_size = MIN(io->len, s->packet_transfer_size);
-            bdrv_acct_start(s->bs, &s->acct, s->io_buffer_size,
-                            BLOCK_ACCT_READ);
+            block_acct_start(bdrv_get_stats(s->bs), &s->acct, s->io_buffer_size,
+                             BLOCK_ACCT_READ);
             MACIO_DPRINTF("non-block ATAPI DMA transfer size: %d\n",
                           s->io_buffer_size);
@@ -382,22 +382,25 @@ static void pmac_ide_transfer(DBDMA_io *io)
             m->dma_active = false;
             MACIO_DPRINTF("end of non-block ATAPI DMA transfer\n");
-            bdrv_acct_done(s->bs, &s->acct);
+            block_acct_done(bdrv_get_stats(s->bs), &s->acct);
             io->dma_end(io);
             return;
         }
 
-        bdrv_acct_start(s->bs, &s->acct, io->len, BLOCK_ACCT_READ);
+        block_acct_start(bdrv_get_stats(s->bs), &s->acct, io->len,
+                         BLOCK_ACCT_READ);
         pmac_ide_atapi_transfer_cb(io, 0);
         return;
     }
 
     switch (s->dma_cmd) {
     case IDE_DMA_READ:
-        bdrv_acct_start(s->bs, &s->acct, io->len, BLOCK_ACCT_READ);
+        block_acct_start(bdrv_get_stats(s->bs), &s->acct, io->len,
+                         BLOCK_ACCT_READ);
         break;
     case IDE_DMA_WRITE:
-        bdrv_acct_start(s->bs, &s->acct, io->len, BLOCK_ACCT_WRITE);
+        block_acct_start(bdrv_get_stats(s->bs), &s->acct, io->len,
+                         BLOCK_ACCT_WRITE);
         break;
     default:
         break;

hw/scsi/scsi-disk.c

@@ -183,7 +183,7 @@ static void scsi_aio_complete(void *opaque, int ret)
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
-    bdrv_acct_done(s->qdev.conf.bs, &r->acct);
+    block_acct_done(bdrv_get_stats(s->qdev.conf.bs), &r->acct);
     if (r->req.io_canceled) {
         goto done;
     }
@@ -237,7 +237,8 @@ static void scsi_write_do_fua(SCSIDiskReq *r)
     }
 
     if (scsi_is_cmd_fua(&r->req.cmd)) {
-        bdrv_acct_start(s->qdev.conf.bs, &r->acct, 0, BLOCK_ACCT_FLUSH);
+        block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct, 0,
+                         BLOCK_ACCT_FLUSH);
         r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_aio_complete, r);
         return;
     }
@@ -257,7 +258,7 @@ static void scsi_dma_complete_noio(void *opaque, int ret)
     if (r->req.aiocb != NULL) {
         r->req.aiocb = NULL;
-        bdrv_acct_done(s->qdev.conf.bs, &r->acct);
+        block_acct_done(bdrv_get_stats(s->qdev.conf.bs), &r->acct);
     }
     if (r->req.io_canceled) {
         goto done;
@@ -300,7 +301,7 @@ static void scsi_read_complete(void * opaque, int ret)
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
-    bdrv_acct_done(s->qdev.conf.bs, &r->acct);
+    block_acct_done(bdrv_get_stats(s->qdev.conf.bs), &r->acct);
     if (r->req.io_canceled) {
         goto done;
     }
@@ -333,7 +334,7 @@ static void scsi_do_read(void *opaque, int ret)
     if (r->req.aiocb != NULL) {
         r->req.aiocb = NULL;
-        bdrv_acct_done(s->qdev.conf.bs, &r->acct);
+        block_acct_done(bdrv_get_stats(s->qdev.conf.bs), &r->acct);
     }
     if (r->req.io_canceled) {
         goto done;
@@ -355,8 +356,8 @@ static void scsi_do_read(void *opaque, int ret)
                                      scsi_dma_complete, r);
     } else {
         n = scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
-        bdrv_acct_start(s->qdev.conf.bs, &r->acct, n * BDRV_SECTOR_SIZE,
-                        BLOCK_ACCT_READ);
+        block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct,
+                         n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
         r->req.aiocb = bdrv_aio_readv(s->qdev.conf.bs, r->sector, &r->qiov, n,
                                       scsi_read_complete, r);
     }
@@ -400,7 +401,8 @@ static void scsi_read_data(SCSIRequest *req)
     first = !r->started;
     r->started = true;
     if (first && scsi_is_cmd_fua(&r->req.cmd)) {
-        bdrv_acct_start(s->qdev.conf.bs, &r->acct, 0, BLOCK_ACCT_FLUSH);
+        block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct, 0,
+                         BLOCK_ACCT_FLUSH);
         r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_do_read, r);
     } else {
         scsi_do_read(r, 0);
@@ -454,7 +456,7 @@ static void scsi_write_complete(void * opaque, int ret)
     if (r->req.aiocb != NULL) {
         r->req.aiocb = NULL;
-        bdrv_acct_done(s->qdev.conf.bs, &r->acct);
+        block_acct_done(bdrv_get_stats(s->qdev.conf.bs), &r->acct);
     }
     if (r->req.io_canceled) {
         goto done;
@@ -529,8 +531,8 @@ static void scsi_write_data(SCSIRequest *req)
                                      scsi_dma_complete, r);
     } else {
         n = r->qiov.size / 512;
-        bdrv_acct_start(s->qdev.conf.bs, &r->acct, n * BDRV_SECTOR_SIZE,
-                        BLOCK_ACCT_WRITE);
+        block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct,
+                         n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
         r->req.aiocb = bdrv_aio_writev(s->qdev.conf.bs, r->sector, &r->qiov, n,
                                        scsi_write_complete, r);
     }
@@ -1498,7 +1500,8 @@ static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
     if (!bdrv_enable_write_cache(s->qdev.conf.bs)) {
         /* The request is used as the AIO opaque value, so add a ref. */
         scsi_req_ref(&r->req);
-        bdrv_acct_start(s->qdev.conf.bs, &r->acct, 0, BLOCK_ACCT_FLUSH);
+        block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct, 0,
+                         BLOCK_ACCT_FLUSH);
         r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_aio_complete, r);
         return;
     }
@@ -1649,7 +1652,7 @@ static void scsi_write_same_complete(void *opaque, int ret)
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
-    bdrv_acct_done(s->qdev.conf.bs, &r->acct);
+    block_acct_done(bdrv_get_stats(s->qdev.conf.bs), &r->acct);
     if (r->req.io_canceled) {
         goto done;
     }
@@ -1664,8 +1667,8 @@ static void scsi_write_same_complete(void *opaque, int ret)
         data->sector += data->iov.iov_len / 512;
         data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
         if (data->iov.iov_len) {
-            bdrv_acct_start(s->qdev.conf.bs, &r->acct, data->iov.iov_len,
-                            BLOCK_ACCT_WRITE);
+            block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct,
+                             data->iov.iov_len, BLOCK_ACCT_WRITE);
             r->req.aiocb = bdrv_aio_writev(s->qdev.conf.bs, data->sector,
                                            &data->qiov, data->iov.iov_len / 512,
                                            scsi_write_same_complete, data);
@@ -1711,8 +1714,9 @@ static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
         /* The request is used as the AIO opaque value, so add a ref. */
         scsi_req_ref(&r->req);
-        bdrv_acct_start(s->qdev.conf.bs, &r->acct,
-                        nb_sectors * s->qdev.blocksize, BLOCK_ACCT_WRITE);
+        block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct,
+                         nb_sectors * s->qdev.blocksize,
+                         BLOCK_ACCT_WRITE);
         r->req.aiocb = bdrv_aio_write_zeroes(s->qdev.conf.bs,
                                              r->req.cmd.lba * (s->qdev.blocksize / 512),
                                              nb_sectors * (s->qdev.blocksize / 512),
@@ -1733,8 +1737,8 @@ static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
     }
 
     scsi_req_ref(&r->req);
-    bdrv_acct_start(s->qdev.conf.bs, &r->acct, data->iov.iov_len,
-                    BLOCK_ACCT_WRITE);
+    block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct,
+                     data->iov.iov_len, BLOCK_ACCT_WRITE);
     r->req.aiocb = bdrv_aio_writev(s->qdev.conf.bs, data->sector,
                                    &data->qiov, data->iov.iov_len / 512,
                                    scsi_write_same_complete, data);
@@ -1998,7 +2002,8 @@ static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
     case SYNCHRONIZE_CACHE:
         /* The request is used as the AIO opaque value, so add a ref. */
         scsi_req_ref(&r->req);
-        bdrv_acct_start(s->qdev.conf.bs, &r->acct, 0, BLOCK_ACCT_FLUSH);
+        block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct, 0,
+                         BLOCK_ACCT_FLUSH);
         r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_aio_complete, r);
         return 0;
     case SEEK_10:

include/block/accounting.h

@@ -48,10 +48,10 @@ typedef struct BlockAcctCookie {
     enum BlockAcctType type;
 } BlockAcctCookie;
 
-void bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
-                     int64_t bytes, enum BlockAcctType type);
-void bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie);
-void bdrv_acct_highest_sector(BlockDriverState *bs, int64_t sector_num,
-                              unsigned int nb_sectors);
+void block_acct_start(BlockAcctStats *stats, BlockAcctCookie *cookie,
+                      int64_t bytes, enum BlockAcctType type);
+void block_acct_done(BlockAcctStats *stats, BlockAcctCookie *cookie);
+void block_acct_highest_sector(BlockAcctStats *stats, int64_t sector_num,
+                               unsigned int nb_sectors);
 
 #endif

include/block/block.h

@@ -5,6 +5,7 @@
 #include "qemu-common.h"
 #include "qemu/option.h"
 #include "block/coroutine.h"
+#include "block/accounting.h"
 #include "qapi/qmp/qobject.h"
 #include "qapi-types.h"
@@ -574,4 +575,6 @@ void bdrv_io_plug(BlockDriverState *bs);
 void bdrv_io_unplug(BlockDriverState *bs);
 void bdrv_flush_io_queue(BlockDriverState *bs);
 
+BlockAcctStats *bdrv_get_stats(BlockDriverState *bs);
+
 #endif