Block layer patches

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.22 (GNU/Linux)
 
 iQIcBAABAgAGBQJZUQcUAAoJEH8JsnLIjy/WIeMQAJQCJcUdV7g5Uiz11ila1zA1
 rypcDIoEG6DriadqhK+3Ex4fVAR21u5kJLD7wXjfnh9ip+ssUhYO2rttvjsGuyNQ
 nSx77itq2Mdb9Eyrus8ff1L6vSK7nGiUJEQxd2qhgGs05coPDtG8pko+/ppGAp2E
 mcxcRUBC3gkMKNDr9vIfwI9GLtpeAramIOuPbV/gXVi0RFZSKEBBRaZbxNtHNibG
 i+ESoX5a+jitns6AmZv0WaE29Ye50HNLi2DZ2Im39/QrQDyOsFS0bsSO4VaDeBEW
 2HjFvD83DaStxwukglE7nEz7ktZ/RxIyhf4cazNWcrWDJWm082jSXnWgurv2MxFd
 pNT4+pX0f7POKIKBdYm5a4TPrm9EpR0FHTfgf0hjygkKtSnaBiYOOtmctGDDXGOw
 aT1pxU/7OoRZ7SPGtaiUVgTImpypG0S5a2NL5vi0Rh40y1Jb82dry0qnDFRNoE1n
 wP6vd5te5wWEnVUwobsvgqcwCEdvBWALb/sxUZPdKw01AM9n4u/m8T4tX8tboSRa
 S2v8n0VaMzuPkZEKaHBHU4kh81fiQe3B7chGOvNTGetrMnX7d1kuv6RA3XcCopnW
 tqVRHQYzIg31gokIk+sY7nJEHcI2Mum9edXU9f0AQ7L9Vtt+MBNDW4DgxBGc9et/
 DBTuBcNdHRJxELGsgTie
 =9I7S
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging

Block layer patches

# gpg: Signature made Mon 26 Jun 2017 14:07:32 BST
# gpg:                using RSA key 0x7F09B272C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74  56FE 7F09 B272 C88F 2FD6

* remotes/kevin/tags/for-upstream: (60 commits)
  qemu-img: don't shadow opts variable in img_dd()
  block: Do not strcmp() with NULL uri->scheme
  blkverify: Catch bs->exact_filename overflow
  blkdebug: Catch bs->exact_filename overflow
  fix: avoid an infinite loop or a dangling pointer problem in img_commit
  block: change variable names in BlockDriverState
  block: Remove bdrv_aio_readv/writev/flush()
  qed: Use bdrv_co_* for coroutine_fns
  qed: Add coroutine_fn to I/O path functions
  qed: Use a coroutine for need_check_timer
  qed: Simplify request handling
  qed: Use CoQueue for serialising allocations
  qed: Implement .bdrv_co_readv/writev
  qed: Remove recursion in qed_aio_next_io()
  qed: Remove ret argument from qed_aio_next_io()
  qed: Add return value to qed_aio_read/write_data()
  qed: Add return value to qed_aio_write_inplace/alloc()
  qed: Add return value to qed_aio_write_cow()
  qed: Add return value to qed_aio_write_main()
  qed: Add return value to qed_aio_write_l2_update()
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 054914f646 by Peter Maydell <peter.maydell@linaro.org>, 2017-06-26 15:38:29 +01:00
45 changed files with 1496 additions and 1344 deletions
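
The "Catch bs->exact_filename overflow" fixes listed above (and shown in the blkdebug and blkverify hunks below) share one pattern: check snprintf()'s return value and refuse to report a truncated filename. A minimal standalone sketch of that pattern; the function name and buffer size here are illustrative, not QEMU's:

#include <stdio.h>

/* If snprintf() reports it needed more space than the buffer offers,
 * the result is truncated and therefore unusable as a filename, so
 * clear it instead of returning garbage. */
static void build_filename(char *dest, size_t dest_size,
                           const char *config, const char *inner)
{
    int ret = snprintf(dest, dest_size, "blkdebug:%s:%s",
                       config ? config : "", inner);
    if (ret < 0 || (size_t)ret >= dest_size) {
        /* An overflow makes the filename unusable, so do not report any */
        dest[0] = '\0';
    }
}

int main(void)
{
    char buf[32];

    build_filename(buf, sizeof(buf), "dbg.cfg", "image.qcow2");
    printf("'%s'\n", buf);    /* fits: printed as-is */

    build_filename(buf, sizeof(buf), "dbg.cfg",
                   "a/path/long/enough/to/overflow/the/buffer.qcow2");
    printf("'%s'\n", buf);    /* would overflow: cleared to "" */
    return 0;
}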

block/Makefile.objs

@@ -1,6 +1,6 @@
block-obj-y += raw-format.o qcow.o vdi.o vmdk.o cloop.o bochs.o vpc.o vvfat.o dmg.o
block-obj-y += qcow2.o qcow2-refcount.o qcow2-cluster.o qcow2-snapshot.o qcow2-cache.o
block-obj-y += qed.o qed-gencb.o qed-l2-cache.o qed-table.o qed-cluster.o
block-obj-y += qed.o qed-l2-cache.o qed-table.o qed-cluster.o
block-obj-y += qed-check.o
block-obj-y += vhdx.o vhdx-endian.o vhdx-log.o
block-obj-y += quorum.o

block/blkdebug.c

@@ -575,7 +575,7 @@ static int blkdebug_co_flush(BlockDriverState *bs)
}
static int coroutine_fn blkdebug_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count,
int64_t offset, int bytes,
BdrvRequestFlags flags)
{
uint32_t align = MAX(bs->bl.request_alignment,
@@ -586,29 +586,29 @@ static int coroutine_fn blkdebug_co_pwrite_zeroes(BlockDriverState *bs,
* preferred alignment (so that we test the fallback to writes on
* unaligned portions), and check that the block layer never hands
* us anything unaligned that crosses an alignment boundary. */
if (count < align) {
if (bytes < align) {
assert(QEMU_IS_ALIGNED(offset, align) ||
QEMU_IS_ALIGNED(offset + count, align) ||
QEMU_IS_ALIGNED(offset + bytes, align) ||
DIV_ROUND_UP(offset, align) ==
DIV_ROUND_UP(offset + count, align));
DIV_ROUND_UP(offset + bytes, align));
return -ENOTSUP;
}
assert(QEMU_IS_ALIGNED(offset, align));
assert(QEMU_IS_ALIGNED(count, align));
assert(QEMU_IS_ALIGNED(bytes, align));
if (bs->bl.max_pwrite_zeroes) {
assert(count <= bs->bl.max_pwrite_zeroes);
assert(bytes <= bs->bl.max_pwrite_zeroes);
}
err = rule_check(bs, offset, count);
err = rule_check(bs, offset, bytes);
if (err) {
return err;
}
return bdrv_co_pwrite_zeroes(bs->file, offset, count, flags);
return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
}
static int coroutine_fn blkdebug_co_pdiscard(BlockDriverState *bs,
int64_t offset, int count)
int64_t offset, int bytes)
{
uint32_t align = bs->bl.pdiscard_alignment;
int err;
@@ -616,29 +616,29 @@ static int coroutine_fn blkdebug_co_pdiscard(BlockDriverState *bs,
/* Only pass through requests that are larger than requested
* minimum alignment, and ensure that unaligned requests do not
* cross optimum discard boundaries. */
if (count < bs->bl.request_alignment) {
if (bytes < bs->bl.request_alignment) {
assert(QEMU_IS_ALIGNED(offset, align) ||
QEMU_IS_ALIGNED(offset + count, align) ||
QEMU_IS_ALIGNED(offset + bytes, align) ||
DIV_ROUND_UP(offset, align) ==
DIV_ROUND_UP(offset + count, align));
DIV_ROUND_UP(offset + bytes, align));
return -ENOTSUP;
}
assert(QEMU_IS_ALIGNED(offset, bs->bl.request_alignment));
assert(QEMU_IS_ALIGNED(count, bs->bl.request_alignment));
if (align && count >= align) {
assert(QEMU_IS_ALIGNED(bytes, bs->bl.request_alignment));
if (align && bytes >= align) {
assert(QEMU_IS_ALIGNED(offset, align));
assert(QEMU_IS_ALIGNED(count, align));
assert(QEMU_IS_ALIGNED(bytes, align));
}
if (bs->bl.max_pdiscard) {
assert(count <= bs->bl.max_pdiscard);
assert(bytes <= bs->bl.max_pdiscard);
}
err = rule_check(bs, offset, count);
err = rule_check(bs, offset, bytes);
if (err) {
return err;
}
return bdrv_co_pdiscard(bs->file->bs, offset, count);
return bdrv_co_pdiscard(bs->file->bs, offset, bytes);
}
static void blkdebug_close(BlockDriverState *bs)
@@ -839,9 +839,13 @@ static void blkdebug_refresh_filename(BlockDriverState *bs, QDict *options)
}
if (!force_json && bs->file->bs->exact_filename[0]) {
snprintf(bs->exact_filename, sizeof(bs->exact_filename),
"blkdebug:%s:%s", s->config_file ?: "",
bs->file->bs->exact_filename);
int ret = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
"blkdebug:%s:%s", s->config_file ?: "",
bs->file->bs->exact_filename);
if (ret >= sizeof(bs->exact_filename)) {
/* An overflow makes the filename unusable, so do not report any */
bs->exact_filename[0] = 0;
}
}
opts = qdict_new();

block/blkreplay.c

@@ -96,10 +96,10 @@ static int coroutine_fn blkreplay_co_pwritev(BlockDriverState *bs,
}
static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count, BdrvRequestFlags flags)
int64_t offset, int bytes, BdrvRequestFlags flags)
{
uint64_t reqid = blkreplay_next_id();
int ret = bdrv_co_pwrite_zeroes(bs->file, offset, count, flags);
int ret = bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
block_request_create(reqid, bs, qemu_coroutine_self());
qemu_coroutine_yield();
@@ -107,10 +107,10 @@ static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs,
}
static int coroutine_fn blkreplay_co_pdiscard(BlockDriverState *bs,
int64_t offset, int count)
int64_t offset, int bytes)
{
uint64_t reqid = blkreplay_next_id();
int ret = bdrv_co_pdiscard(bs->file->bs, offset, count);
int ret = bdrv_co_pdiscard(bs->file->bs, offset, bytes);
block_request_create(reqid, bs, qemu_coroutine_self());
qemu_coroutine_yield();

block/blkverify.c

@@ -301,10 +301,14 @@ static void blkverify_refresh_filename(BlockDriverState *bs, QDict *options)
if (bs->file->bs->exact_filename[0]
&& s->test_file->bs->exact_filename[0])
{
snprintf(bs->exact_filename, sizeof(bs->exact_filename),
"blkverify:%s:%s",
bs->file->bs->exact_filename,
s->test_file->bs->exact_filename);
int ret = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
"blkverify:%s:%s",
bs->file->bs->exact_filename,
s->test_file->bs->exact_filename);
if (ret >= sizeof(bs->exact_filename)) {
/* An overflow makes the filename unusable, so do not report any */
bs->exact_filename[0] = 0;
}
}
}

block/block-backend.c

@@ -1099,9 +1099,9 @@ int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
}
int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int count, BdrvRequestFlags flags)
int bytes, BdrvRequestFlags flags)
{
return blk_prw(blk, offset, NULL, count, blk_write_entry,
return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
flags | BDRV_REQ_ZERO_WRITE);
}
@@ -1311,10 +1311,10 @@ static void blk_aio_pdiscard_entry(void *opaque)
}
BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
int64_t offset, int count,
int64_t offset, int bytes,
BlockCompletionFunc *cb, void *opaque)
{
return blk_aio_prwv(blk, offset, count, NULL, blk_aio_pdiscard_entry, 0,
return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
cb, opaque);
}
@@ -1374,14 +1374,14 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
return blk_aio_prwv(blk, req, 0, &qiov, blk_aio_ioctl_entry, 0, cb, opaque);
}
int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count)
int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
int ret = blk_check_byte_request(blk, offset, count);
int ret = blk_check_byte_request(blk, offset, bytes);
if (ret < 0) {
return ret;
}
return bdrv_co_pdiscard(blk_bs(blk), offset, count);
return bdrv_co_pdiscard(blk_bs(blk), offset, bytes);
}
int blk_co_flush(BlockBackend *blk)
@@ -1760,9 +1760,9 @@ void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
}
int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int count, BdrvRequestFlags flags)
int bytes, BdrvRequestFlags flags)
{
return blk_co_pwritev(blk, offset, count, NULL,
return blk_co_pwritev(blk, offset, bytes, NULL,
flags | BDRV_REQ_ZERO_WRITE);
}
@@ -1789,9 +1789,9 @@ static void blk_pdiscard_entry(void *opaque)
rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, rwco->qiov->size);
}
int blk_pdiscard(BlockBackend *blk, int64_t offset, int count)
int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
return blk_prw(blk, offset, NULL, count, blk_pdiscard_entry, 0);
return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
}
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,

block/commit.c

@@ -119,6 +119,13 @@ static void commit_complete(BlockJob *job, void *opaque)
}
g_free(s->backing_file_str);
blk_unref(s->top);
/* If there is more than one reference to the job (e.g. if called from
* block_job_finish_sync()), block_job_completed() won't free it and
* therefore the blockers on the intermediate nodes remain. This would
* cause bdrv_set_backing_hd() to fail. */
block_job_remove_all_bdrv(job);
block_job_completed(&s->common, ret);
g_free(data);

block/file-posix.c

@@ -1485,7 +1485,7 @@ static int aio_worker(void *arg)
static int paio_submit_co(BlockDriverState *bs, int fd,
int64_t offset, QEMUIOVector *qiov,
int count, int type)
int bytes, int type)
{
RawPosixAIOData *acb = g_new(RawPosixAIOData, 1);
ThreadPool *pool;
@@ -1494,22 +1494,22 @@ static int paio_submit_co(BlockDriverState *bs, int fd,
acb->aio_type = type;
acb->aio_fildes = fd;
acb->aio_nbytes = count;
acb->aio_nbytes = bytes;
acb->aio_offset = offset;
if (qiov) {
acb->aio_iov = qiov->iov;
acb->aio_niov = qiov->niov;
assert(qiov->size == count);
assert(qiov->size == bytes);
}
trace_paio_submit_co(offset, count, type);
trace_paio_submit_co(offset, bytes, type);
pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
return thread_pool_submit_co(pool, aio_worker, acb);
}
static BlockAIOCB *paio_submit(BlockDriverState *bs, int fd,
int64_t offset, QEMUIOVector *qiov, int count,
int64_t offset, QEMUIOVector *qiov, int bytes,
BlockCompletionFunc *cb, void *opaque, int type)
{
RawPosixAIOData *acb = g_new(RawPosixAIOData, 1);
@@ -1519,7 +1519,7 @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, int fd,
acb->aio_type = type;
acb->aio_fildes = fd;
acb->aio_nbytes = count;
acb->aio_nbytes = bytes;
acb->aio_offset = offset;
if (qiov) {
@@ -1528,7 +1528,7 @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, int fd,
assert(qiov->size == acb->aio_nbytes);
}
trace_paio_submit(acb, opaque, offset, count, type);
trace_paio_submit(acb, opaque, offset, bytes, type);
pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
}
@@ -2109,26 +2109,26 @@ static int64_t coroutine_fn raw_co_get_block_status(BlockDriverState *bs,
}
static coroutine_fn BlockAIOCB *raw_aio_pdiscard(BlockDriverState *bs,
int64_t offset, int count,
int64_t offset, int bytes,
BlockCompletionFunc *cb, void *opaque)
{
BDRVRawState *s = bs->opaque;
return paio_submit(bs, s->fd, offset, NULL, count,
return paio_submit(bs, s->fd, offset, NULL, bytes,
cb, opaque, QEMU_AIO_DISCARD);
}
static int coroutine_fn raw_co_pwrite_zeroes(
BlockDriverState *bs, int64_t offset,
int count, BdrvRequestFlags flags)
int bytes, BdrvRequestFlags flags)
{
BDRVRawState *s = bs->opaque;
if (!(flags & BDRV_REQ_MAY_UNMAP)) {
return paio_submit_co(bs, s->fd, offset, NULL, count,
return paio_submit_co(bs, s->fd, offset, NULL, bytes,
QEMU_AIO_WRITE_ZEROES);
} else if (s->discard_zeroes) {
return paio_submit_co(bs, s->fd, offset, NULL, count,
return paio_submit_co(bs, s->fd, offset, NULL, bytes,
QEMU_AIO_DISCARD);
}
return -ENOTSUP;
@@ -2560,7 +2560,7 @@ static int fd_open(BlockDriverState *bs)
}
static coroutine_fn BlockAIOCB *hdev_aio_pdiscard(BlockDriverState *bs,
int64_t offset, int count,
int64_t offset, int bytes,
BlockCompletionFunc *cb, void *opaque)
{
BDRVRawState *s = bs->opaque;
@@ -2568,12 +2568,12 @@ static coroutine_fn BlockAIOCB *hdev_aio_pdiscard(BlockDriverState *bs,
if (fd_open(bs) < 0) {
return NULL;
}
return paio_submit(bs, s->fd, offset, NULL, count,
return paio_submit(bs, s->fd, offset, NULL, bytes,
cb, opaque, QEMU_AIO_DISCARD|QEMU_AIO_BLKDEV);
}
static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count, BdrvRequestFlags flags)
int64_t offset, int bytes, BdrvRequestFlags flags)
{
BDRVRawState *s = bs->opaque;
int rc;
@@ -2583,10 +2583,10 @@ static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs,
return rc;
}
if (!(flags & BDRV_REQ_MAY_UNMAP)) {
return paio_submit_co(bs, s->fd, offset, NULL, count,
return paio_submit_co(bs, s->fd, offset, NULL, bytes,
QEMU_AIO_WRITE_ZEROES|QEMU_AIO_BLKDEV);
} else if (s->discard_zeroes) {
return paio_submit_co(bs, s->fd, offset, NULL, count,
return paio_submit_co(bs, s->fd, offset, NULL, bytes,
QEMU_AIO_DISCARD|QEMU_AIO_BLKDEV);
}
return -ENOTSUP;

block/io.c

@@ -34,16 +34,8 @@
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
int64_t offset,
QEMUIOVector *qiov,
BdrvRequestFlags flags,
BlockCompletionFunc *cb,
void *opaque,
bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count, BdrvRequestFlags flags);
int64_t offset, int bytes, BdrvRequestFlags flags);
void bdrv_parent_drained_begin(BlockDriverState *bs)
{
@@ -674,12 +666,12 @@ int bdrv_write(BdrvChild *child, int64_t sector_num,
}
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
int count, BdrvRequestFlags flags)
int bytes, BdrvRequestFlags flags)
{
QEMUIOVector qiov;
struct iovec iov = {
.iov_base = NULL,
.iov_len = count,
.iov_len = bytes,
};
qemu_iovec_init_external(&qiov, &iov, 1);
@@ -1220,7 +1212,7 @@ int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count, BdrvRequestFlags flags)
int64_t offset, int bytes, BdrvRequestFlags flags)
{
BlockDriver *drv = bs->drv;
QEMUIOVector qiov;
@@ -1238,12 +1230,12 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
assert(alignment % bs->bl.request_alignment == 0);
head = offset % alignment;
tail = (offset + count) % alignment;
tail = (offset + bytes) % alignment;
max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
assert(max_write_zeroes >= bs->bl.request_alignment);
while (count > 0 && !ret) {
int num = count;
while (bytes > 0 && !ret) {
int num = bytes;
/* Align request. Block drivers can expect the "bulk" of the request
* to be aligned, and that unaligned requests do not cross cluster
@@ -1253,7 +1245,7 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
/* Make a small request up to the first aligned sector. For
* convenience, limit this request to max_transfer even if
* we don't need to fall back to writes. */
num = MIN(MIN(count, max_transfer), alignment - head);
num = MIN(MIN(bytes, max_transfer), alignment - head);
head = (head + num) % alignment;
assert(num < max_write_zeroes);
} else if (tail && num > alignment) {
@@ -1314,7 +1306,7 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
}
offset += num;
count -= num;
bytes -= num;
}
fail:
@@ -1666,15 +1658,15 @@ int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
}
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
int count, BdrvRequestFlags flags)
int bytes, BdrvRequestFlags flags)
{
trace_bdrv_co_pwrite_zeroes(child->bs, offset, count, flags);
trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
flags &= ~BDRV_REQ_MAY_UNMAP;
}
return bdrv_co_pwritev(child, offset, count, NULL,
return bdrv_co_pwritev(child, offset, bytes, NULL,
BDRV_REQ_ZERO_WRITE | flags);
}
@@ -1980,17 +1972,24 @@ bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
bool is_read)
{
BlockDriver *drv = bs->drv;
int ret = -ENOTSUP;
bdrv_inc_in_flight(bs);
if (!drv) {
return -ENOMEDIUM;
ret = -ENOMEDIUM;
} else if (drv->bdrv_load_vmstate) {
return is_read ? drv->bdrv_load_vmstate(bs, qiov, pos)
: drv->bdrv_save_vmstate(bs, qiov, pos);
if (is_read) {
ret = drv->bdrv_load_vmstate(bs, qiov, pos);
} else {
ret = drv->bdrv_save_vmstate(bs, qiov, pos);
}
} else if (bs->file) {
return bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
}
return -ENOTSUP;
bdrv_dec_in_flight(bs);
return ret;
}
static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
@@ -2016,9 +2015,7 @@ bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
bdrv_coroutine_enter(bs, co);
while (data.ret == -EINPROGRESS) {
aio_poll(bdrv_get_aio_context(bs), true);
}
BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
return data.ret;
}
}
@@ -2075,28 +2072,6 @@ int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
/**************************************************************/
/* async I/Os */
BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
QEMUIOVector *qiov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque)
{
trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque);
assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
0, cb, opaque, false);
}
BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
QEMUIOVector *qiov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque)
{
trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque);
assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
0, cb, opaque, true);
}
void bdrv_aio_cancel(BlockAIOCB *acb)
{
qemu_aio_ref(acb);
@@ -2128,147 +2103,6 @@ void bdrv_aio_cancel_async(BlockAIOCB *acb)
}
}
/**************************************************************/
/* async block device emulation */
typedef struct BlockRequest {
union {
/* Used during read, write, trim */
struct {
int64_t offset;
int bytes;
int flags;
QEMUIOVector *qiov;
};
/* Used during ioctl */
struct {
int req;
void *buf;
};
};
BlockCompletionFunc *cb;
void *opaque;
int error;
} BlockRequest;
typedef struct BlockAIOCBCoroutine {
BlockAIOCB common;
BdrvChild *child;
BlockRequest req;
bool is_write;
bool need_bh;
bool *done;
} BlockAIOCBCoroutine;
static const AIOCBInfo bdrv_em_co_aiocb_info = {
.aiocb_size = sizeof(BlockAIOCBCoroutine),
};
static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
if (!acb->need_bh) {
bdrv_dec_in_flight(acb->common.bs);
acb->common.cb(acb->common.opaque, acb->req.error);
qemu_aio_unref(acb);
}
}
static void bdrv_co_em_bh(void *opaque)
{
BlockAIOCBCoroutine *acb = opaque;
assert(!acb->need_bh);
bdrv_co_complete(acb);
}
static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
{
acb->need_bh = false;
if (acb->req.error != -EINPROGRESS) {
BlockDriverState *bs = acb->common.bs;
aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
}
}
/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
BlockAIOCBCoroutine *acb = opaque;
if (!acb->is_write) {
acb->req.error = bdrv_co_preadv(acb->child, acb->req.offset,
acb->req.qiov->size, acb->req.qiov, acb->req.flags);
} else {
acb->req.error = bdrv_co_pwritev(acb->child, acb->req.offset,
acb->req.qiov->size, acb->req.qiov, acb->req.flags);
}
bdrv_co_complete(acb);
}
static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
int64_t offset,
QEMUIOVector *qiov,
BdrvRequestFlags flags,
BlockCompletionFunc *cb,
void *opaque,
bool is_write)
{
Coroutine *co;
BlockAIOCBCoroutine *acb;
/* Matched by bdrv_co_complete's bdrv_dec_in_flight. */
bdrv_inc_in_flight(child->bs);
acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque);
acb->child = child;
acb->need_bh = true;
acb->req.error = -EINPROGRESS;
acb->req.offset = offset;
acb->req.qiov = qiov;
acb->req.flags = flags;
acb->is_write = is_write;
co = qemu_coroutine_create(bdrv_co_do_rw, acb);
bdrv_coroutine_enter(child->bs, co);
bdrv_co_maybe_schedule_bh(acb);
return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
BlockAIOCBCoroutine *acb = opaque;
BlockDriverState *bs = acb->common.bs;
acb->req.error = bdrv_co_flush(bs);
bdrv_co_complete(acb);
}
BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
BlockCompletionFunc *cb, void *opaque)
{
trace_bdrv_aio_flush(bs, opaque);
Coroutine *co;
BlockAIOCBCoroutine *acb;
/* Matched by bdrv_co_complete's bdrv_dec_in_flight. */
bdrv_inc_in_flight(bs);
acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
acb->need_bh = true;
acb->req.error = -EINPROGRESS;
co = qemu_coroutine_create(bdrv_aio_flush_co_entry, acb);
bdrv_coroutine_enter(bs, co);
bdrv_co_maybe_schedule_bh(acb);
return &acb->common;
}
/**************************************************************/
/* Coroutine block device emulation */
@@ -2414,18 +2248,18 @@ int bdrv_flush(BlockDriverState *bs)
typedef struct DiscardCo {
BlockDriverState *bs;
int64_t offset;
int count;
int bytes;
int ret;
} DiscardCo;
static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
DiscardCo *rwco = opaque;
rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->count);
rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->bytes);
}
int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
int count)
int bytes)
{
BdrvTrackedRequest req;
int max_pdiscard, ret;
@@ -2435,7 +2269,7 @@ int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
return -ENOMEDIUM;
}
ret = bdrv_check_byte_request(bs, offset, count);
ret = bdrv_check_byte_request(bs, offset, bytes);
if (ret < 0) {
return ret;
} else if (bs->read_only) {
@@ -2460,10 +2294,10 @@ int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
assert(align % bs->bl.request_alignment == 0);
head = offset % align;
tail = (offset + count) % align;
tail = (offset + bytes) % align;
bdrv_inc_in_flight(bs);
tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD);
tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
if (ret < 0) {
@@ -2474,13 +2308,13 @@ int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
align);
assert(max_pdiscard >= bs->bl.request_alignment);
while (count > 0) {
while (bytes > 0) {
int ret;
int num = count;
int num = bytes;
if (head) {
/* Make small requests to get to alignment boundaries. */
num = MIN(count, align - head);
num = MIN(bytes, align - head);
if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
num %= bs->bl.request_alignment;
}
@@ -2524,7 +2358,7 @@ int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
}
offset += num;
count -= num;
bytes -= num;
}
ret = 0;
out:
@@ -2536,13 +2370,13 @@ out:
return ret;
}
int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
{
Coroutine *co;
DiscardCo rwco = {
.bs = bs,
.offset = offset,
.count = count,
.bytes = bytes,
.ret = NOT_DONE,
};

block/iscsi.c

@@ -1116,14 +1116,14 @@ iscsi_getlength(BlockDriverState *bs)
}
static int
coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
{
IscsiLun *iscsilun = bs->opaque;
struct IscsiTask iTask;
struct unmap_list list;
int r = 0;
if (!is_byte_request_lun_aligned(offset, count, iscsilun)) {
if (!is_byte_request_lun_aligned(offset, bytes, iscsilun)) {
return -ENOTSUP;
}
@@ -1133,7 +1133,7 @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
}
list.lba = offset / iscsilun->block_size;
list.num = count / iscsilun->block_size;
list.num = bytes / iscsilun->block_size;
iscsi_co_init_iscsitask(iscsilun, &iTask);
qemu_mutex_lock(&iscsilun->mutex);
@@ -1174,7 +1174,7 @@ retry:
}
iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
count >> BDRV_SECTOR_BITS);
bytes >> BDRV_SECTOR_BITS);
out_unlock:
qemu_mutex_unlock(&iscsilun->mutex);
@@ -1183,7 +1183,7 @@ out_unlock:
static int
coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int count, BdrvRequestFlags flags)
int bytes, BdrvRequestFlags flags)
{
IscsiLun *iscsilun = bs->opaque;
struct IscsiTask iTask;
@@ -1192,7 +1192,7 @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
bool use_16_for_ws = iscsilun->use_16_for_rw;
int r = 0;
if (!is_byte_request_lun_aligned(offset, count, iscsilun)) {
if (!is_byte_request_lun_aligned(offset, bytes, iscsilun)) {
return -ENOTSUP;
}
@@ -1215,7 +1215,7 @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
}
lba = offset / iscsilun->block_size;
nb_blocks = count / iscsilun->block_size;
nb_blocks = bytes / iscsilun->block_size;
if (iscsilun->zeroblock == NULL) {
iscsilun->zeroblock = g_try_malloc0(iscsilun->block_size);
@@ -1273,17 +1273,17 @@ retry:
if (iTask.status != SCSI_STATUS_GOOD) {
iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
count >> BDRV_SECTOR_BITS);
bytes >> BDRV_SECTOR_BITS);
r = iTask.err_code;
goto out_unlock;
}
if (flags & BDRV_REQ_MAY_UNMAP) {
iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
count >> BDRV_SECTOR_BITS);
bytes >> BDRV_SECTOR_BITS);
} else {
iscsi_allocmap_set_allocated(iscsilun, offset >> BDRV_SECTOR_BITS,
count >> BDRV_SECTOR_BITS);
bytes >> BDRV_SECTOR_BITS);
}
out_unlock:

block/mirror.c

@@ -1063,15 +1063,15 @@ static int64_t coroutine_fn bdrv_mirror_top_get_block_status(
}
static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count, BdrvRequestFlags flags)
int64_t offset, int bytes, BdrvRequestFlags flags)
{
return bdrv_co_pwrite_zeroes(bs->backing, offset, count, flags);
return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
}
static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
int64_t offset, int count)
int64_t offset, int bytes)
{
return bdrv_co_pdiscard(bs->backing->bs, offset, count);
return bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
}
static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)

block/nbd-client.c

@@ -259,14 +259,14 @@ int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
}
int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int count, BdrvRequestFlags flags)
int bytes, BdrvRequestFlags flags)
{
ssize_t ret;
NBDClientSession *client = nbd_get_client_session(bs);
NBDRequest request = {
.type = NBD_CMD_WRITE_ZEROES,
.from = offset,
.len = count,
.len = bytes,
};
NBDReply reply;
@@ -316,13 +316,13 @@ int nbd_client_co_flush(BlockDriverState *bs)
return -reply.error;
}
int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
{
NBDClientSession *client = nbd_get_client_session(bs);
NBDRequest request = {
.type = NBD_CMD_TRIM,
.from = offset,
.len = count,
.len = bytes,
};
NBDReply reply;
ssize_t ret;

block/nbd-client.h

@@ -42,12 +42,12 @@ int nbd_client_init(BlockDriverState *bs,
Error **errp);
void nbd_client_close(BlockDriverState *bs);
int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int count);
int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes);
int nbd_client_co_flush(BlockDriverState *bs);
int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov, int flags);
int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int count, BdrvRequestFlags flags);
int bytes, BdrvRequestFlags flags);
int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov, int flags);

block/nbd.c

@@ -64,11 +64,11 @@ static int nbd_parse_uri(const char *filename, QDict *options)
}
/* transport */
if (!strcmp(uri->scheme, "nbd")) {
if (!g_strcmp0(uri->scheme, "nbd")) {
is_unix = false;
} else if (!strcmp(uri->scheme, "nbd+tcp")) {
} else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
is_unix = false;
} else if (!strcmp(uri->scheme, "nbd+unix")) {
} else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
is_unix = true;
} else {
ret = -EINVAL;
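
The scheme checks above move from strcmp() to g_strcmp0() because uri->scheme may legitimately be NULL ("block: Do not strcmp() with NULL uri->scheme" in the list above). A tiny standalone check of the glib semantics this relies on:

#include <glib.h>
#include <stdio.h>

int main(void)
{
    const char *scheme = NULL;   /* what a schemeless URI produces */

    /* strcmp(scheme, "nbd") would dereference NULL; g_strcmp0()
     * treats NULL as sorting before any non-NULL string and simply
     * reports a mismatch. */
    printf("%d\n", g_strcmp0(scheme, "nbd") == 0);   /* 0: no match, no crash */
    printf("%d\n", g_strcmp0("nbd", "nbd") == 0);    /* 1: match */
    return 0;
}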

block/nfs.c

@@ -82,7 +82,7 @@ static int nfs_parse_uri(const char *filename, QDict *options, Error **errp)
error_setg(errp, "Invalid URI specified");
goto out;
}
if (strcmp(uri->scheme, "nfs") != 0) {
if (g_strcmp0(uri->scheme, "nfs") != 0) {
error_setg(errp, "URI scheme must be 'nfs'");
goto out;
}

block/qcow2-cluster.c

@@ -403,30 +403,21 @@ int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
return 0;
}
static int coroutine_fn do_perform_cow(BlockDriverState *bs,
uint64_t src_cluster_offset,
uint64_t cluster_offset,
int offset_in_cluster,
int bytes)
static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
uint64_t src_cluster_offset,
unsigned offset_in_cluster,
QEMUIOVector *qiov)
{
BDRVQcow2State *s = bs->opaque;
QEMUIOVector qiov;
struct iovec iov;
int ret;
iov.iov_len = bytes;
iov.iov_base = qemu_try_blockalign(bs, iov.iov_len);
if (iov.iov_base == NULL) {
return -ENOMEM;
if (qiov->size == 0) {
return 0;
}
qemu_iovec_init_external(&qiov, &iov, 1);
BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
if (!bs->drv) {
ret = -ENOMEDIUM;
goto out;
return -ENOMEDIUM;
}
/* Call .bdrv_co_readv() directly instead of using the public block-layer
@@ -434,43 +425,60 @@ static int coroutine_fn do_perform_cow(BlockDriverState *bs,
* which can lead to deadlock when block layer copy-on-read is enabled.
*/
ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
bytes, &qiov, 0);
qiov->size, qiov, 0);
if (ret < 0) {
goto out;
return ret;
}
if (bs->encrypted) {
Error *err = NULL;
return 0;
}
static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
uint64_t src_cluster_offset,
unsigned offset_in_cluster,
uint8_t *buffer,
unsigned bytes)
{
if (bytes && bs->encrypted) {
BDRVQcow2State *s = bs->opaque;
int64_t sector = (src_cluster_offset + offset_in_cluster)
>> BDRV_SECTOR_BITS;
assert(s->cipher);
assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
assert((bytes & ~BDRV_SECTOR_MASK) == 0);
if (qcow2_encrypt_sectors(s, sector, iov.iov_base, iov.iov_base,
bytes >> BDRV_SECTOR_BITS, true, &err) < 0) {
ret = -EIO;
error_free(err);
goto out;
if (qcow2_encrypt_sectors(s, sector, buffer, buffer,
bytes >> BDRV_SECTOR_BITS, true, NULL) < 0) {
return false;
}
}
return true;
}
static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
uint64_t cluster_offset,
unsigned offset_in_cluster,
QEMUIOVector *qiov)
{
int ret;
if (qiov->size == 0) {
return 0;
}
ret = qcow2_pre_write_overlap_check(bs, 0,
cluster_offset + offset_in_cluster, bytes);
cluster_offset + offset_in_cluster, qiov->size);
if (ret < 0) {
goto out;
return ret;
}
BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
bytes, &qiov, 0);
qiov->size, qiov, 0);
if (ret < 0) {
goto out;
return ret;
}
ret = 0;
out:
qemu_vfree(iov.iov_base);
return ret;
return 0;
}
@@ -548,7 +556,7 @@ int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
/* find the cluster offset for the given disk offset */
l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
l2_index = offset_to_l2_index(s, offset);
*cluster_offset = be64_to_cpu(l2_table[l2_index]);
nb_clusters = size_to_clusters(s, bytes_needed);
@@ -685,7 +693,7 @@ static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
/* find the cluster offset for the given disk offset */
l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
l2_index = offset_to_l2_index(s, offset);
*new_l2_table = l2_table;
*new_l2_index = l2_index;
@@ -753,31 +761,133 @@ uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
return cluster_offset;
}
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
BDRVQcow2State *s = bs->opaque;
Qcow2COWRegion *start = &m->cow_start;
Qcow2COWRegion *end = &m->cow_end;
unsigned buffer_size;
unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
bool merge_reads;
uint8_t *start_buffer, *end_buffer;
QEMUIOVector qiov;
int ret;
if (r->nb_bytes == 0) {
assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
assert(start->offset + start->nb_bytes <= end->offset);
assert(!m->data_qiov || m->data_qiov->size == data_bytes);
if (start->nb_bytes == 0 && end->nb_bytes == 0) {
return 0;
}
qemu_co_mutex_unlock(&s->lock);
ret = do_perform_cow(bs, m->offset, m->alloc_offset, r->offset, r->nb_bytes);
qemu_co_mutex_lock(&s->lock);
if (ret < 0) {
return ret;
/* If we have to read both the start and end COW regions and the
* middle region is not too large then perform just one read
* operation */
merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
if (merge_reads) {
buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
} else {
/* If we have to do two reads, add some padding in the middle
* if necessary to make sure that the end region is optimally
* aligned. */
size_t align = bdrv_opt_mem_align(bs);
assert(align > 0 && align <= UINT_MAX);
assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
UINT_MAX - end->nb_bytes);
buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
}
/* Reserve a buffer large enough to store all the data that we're
* going to read */
start_buffer = qemu_try_blockalign(bs, buffer_size);
if (start_buffer == NULL) {
return -ENOMEM;
}
/* The part of the buffer where the end region is located */
end_buffer = start_buffer + buffer_size - end->nb_bytes;
qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));
qemu_co_mutex_unlock(&s->lock);
/* First we read the existing data from both COW regions. We
* either read the whole region in one go, or the start and end
* regions separately. */
if (merge_reads) {
qemu_iovec_add(&qiov, start_buffer, buffer_size);
ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
} else {
qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
if (ret < 0) {
goto fail;
}
qemu_iovec_reset(&qiov);
qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
}
if (ret < 0) {
goto fail;
}
/* Encrypt the data if necessary before writing it */
if (bs->encrypted) {
if (!do_perform_cow_encrypt(bs, m->offset, start->offset,
start_buffer, start->nb_bytes) ||
!do_perform_cow_encrypt(bs, m->offset, end->offset,
end_buffer, end->nb_bytes)) {
ret = -EIO;
goto fail;
}
}
/* And now we can write everything. If we have the guest data we
* can write everything in one single operation */
if (m->data_qiov) {
qemu_iovec_reset(&qiov);
if (start->nb_bytes) {
qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
}
qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
if (end->nb_bytes) {
qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
}
/* NOTE: we have a write_aio blkdebug event here followed by
* a cow_write one in do_perform_cow_write(), but there's only
* one single I/O operation */
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
} else {
/* If there's no guest data then write both COW regions separately */
qemu_iovec_reset(&qiov);
qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
if (ret < 0) {
goto fail;
}
qemu_iovec_reset(&qiov);
qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
}
fail:
qemu_co_mutex_lock(&s->lock);
/*
* Before we update the L2 table to actually point to the new cluster, we
* need to be sure that the refcounts have been increased and COW was
* handled.
*/
qcow2_cache_depends_on_flush(s->l2_table_cache);
if (ret == 0) {
qcow2_cache_depends_on_flush(s->l2_table_cache);
}
return 0;
qemu_vfree(start_buffer);
qemu_iovec_destroy(&qiov);
return ret;
}
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
@@ -797,12 +907,7 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
}
/* copy content of unmodified sectors */
ret = perform_cow(bs, m, &m->cow_start);
if (ret < 0) {
goto err;
}
ret = perform_cow(bs, m, &m->cow_end);
ret = perform_cow(bs, m);
if (ret < 0) {
goto err;
}
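
For reference, the buffer sizing in the new perform_cow() above can be replayed in isolation. A small sketch with made-up request sizes, where align stands in for the value of bdrv_opt_mem_align(bs) and the macro is redefined locally:

#include <stdio.h>

#define QEMU_ALIGN_UP(n, m) (((n) + (m) - 1) / (m) * (m))

/* Mirrors perform_cow()'s sizing: one merged read when both COW
 * regions exist and the guest data between them is small, otherwise
 * two reads with the start region padded up to the memory alignment. */
static size_t cow_buffer_size(size_t start_nb, size_t data_bytes,
                              size_t end_nb, size_t align)
{
    int merge_reads = start_nb && end_nb && data_bytes <= 16384;
    if (merge_reads) {
        return start_nb + data_bytes + end_nb;
    }
    return QEMU_ALIGN_UP(start_nb, align) + end_nb;
}

int main(void)
{
    /* Small middle: one read of start + data + end = 11776 bytes. */
    printf("%zu\n", cow_buffer_size(1536, 8192, 2048, 4096));
    /* Large middle: two reads, start padded to 4096, total 6144. */
    printf("%zu\n", cow_buffer_size(1536, 65536, 2048, 4096));
    return 0;
}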

block/qcow2.c

@@ -356,7 +356,7 @@ static int validate_table_offset(BlockDriverState *bs, uint64_t offset,
}
/* Tables must be cluster aligned */
if (offset & (s->cluster_size - 1)) {
if (offset_into_cluster(s, offset) != 0) {
return -EINVAL;
}
@@ -1575,6 +1575,44 @@ fail:
return ret;
}
/* Check if it's possible to merge a write request with the writing of
* the data from the COW regions */
static bool merge_cow(uint64_t offset, unsigned bytes,
QEMUIOVector *hd_qiov, QCowL2Meta *l2meta)
{
QCowL2Meta *m;
for (m = l2meta; m != NULL; m = m->next) {
/* If both COW regions are empty then there's nothing to merge */
if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) {
continue;
}
/* The data (middle) region must be immediately after the
* start region */
if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) {
continue;
}
/* The end region must be immediately after the data (middle)
* region */
if (m->offset + m->cow_end.offset != offset + bytes) {
continue;
}
/* Make sure that adding both COW regions to the QEMUIOVector
* does not exceed IOV_MAX */
if (hd_qiov->niov > IOV_MAX - 2) {
continue;
}
m->data_qiov = hd_qiov;
return true;
}
return false;
}
static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov,
int flags)
@@ -1657,16 +1695,22 @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
goto fail;
}
qemu_co_mutex_unlock(&s->lock);
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
trace_qcow2_writev_data(qemu_coroutine_self(),
cluster_offset + offset_in_cluster);
ret = bdrv_co_pwritev(bs->file,
cluster_offset + offset_in_cluster,
cur_bytes, &hd_qiov, 0);
qemu_co_mutex_lock(&s->lock);
if (ret < 0) {
goto fail;
/* If we need to do COW, check if it's possible to merge the
* writing of the guest data together with that of the COW regions.
* If it's not possible (or not necessary) then write the
* guest data now. */
if (!merge_cow(offset, cur_bytes, &hd_qiov, l2meta)) {
qemu_co_mutex_unlock(&s->lock);
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
trace_qcow2_writev_data(qemu_coroutine_self(),
cluster_offset + offset_in_cluster);
ret = bdrv_co_pwritev(bs->file,
cluster_offset + offset_in_cluster,
cur_bytes, &hd_qiov, 0);
qemu_co_mutex_lock(&s->lock);
if (ret < 0) {
goto fail;
}
}
while (l2meta != NULL) {
@@ -2464,16 +2508,16 @@ static bool is_zero_sectors(BlockDriverState *bs, int64_t start,
}
static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count, BdrvRequestFlags flags)
int64_t offset, int bytes, BdrvRequestFlags flags)
{
int ret;
BDRVQcow2State *s = bs->opaque;
uint32_t head = offset % s->cluster_size;
uint32_t tail = (offset + count) % s->cluster_size;
uint32_t tail = (offset + bytes) % s->cluster_size;
trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, count);
if (offset + count == bs->total_sectors * BDRV_SECTOR_SIZE) {
trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes);
if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) {
tail = 0;
}
@@ -2482,12 +2526,12 @@ static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
uint64_t off;
unsigned int nr;
assert(head + count <= s->cluster_size);
assert(head + bytes <= s->cluster_size);
/* check whether remainder of cluster already reads as zero */
if (!(is_zero_sectors(bs, cl_start,
DIV_ROUND_UP(head, BDRV_SECTOR_SIZE)) &&
is_zero_sectors(bs, (offset + count) >> BDRV_SECTOR_BITS,
is_zero_sectors(bs, (offset + bytes) >> BDRV_SECTOR_BITS,
DIV_ROUND_UP(-tail & (s->cluster_size - 1),
BDRV_SECTOR_SIZE)))) {
return -ENOTSUP;
@@ -2496,7 +2540,7 @@ static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
qemu_co_mutex_lock(&s->lock);
/* We can have new write after previous check */
offset = cl_start << BDRV_SECTOR_BITS;
count = s->cluster_size;
bytes = s->cluster_size;
nr = s->cluster_size;
ret = qcow2_get_cluster_offset(bs, offset, &nr, &off);
if (ret != QCOW2_CLUSTER_UNALLOCATED &&
@@ -2509,33 +2553,33 @@ static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
qemu_co_mutex_lock(&s->lock);
}
trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, count);
trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes);
/* Whatever is left can use real zero clusters */
ret = qcow2_cluster_zeroize(bs, offset, count, flags);
ret = qcow2_cluster_zeroize(bs, offset, bytes, flags);
qemu_co_mutex_unlock(&s->lock);
return ret;
}
static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,
int64_t offset, int count)
int64_t offset, int bytes)
{
int ret;
BDRVQcow2State *s = bs->opaque;
if (!QEMU_IS_ALIGNED(offset | count, s->cluster_size)) {
assert(count < s->cluster_size);
if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) {
assert(bytes < s->cluster_size);
/* Ignore partial clusters, except for the special case of the
* complete partial cluster at the end of an unaligned file */
if (!QEMU_IS_ALIGNED(offset, s->cluster_size) ||
offset + count != bs->total_sectors * BDRV_SECTOR_SIZE) {
offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) {
return -ENOTSUP;
}
}
qemu_co_mutex_lock(&s->lock);
ret = qcow2_cluster_discard(bs, offset, count, QCOW2_DISCARD_REQUEST,
ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST,
false);
qemu_co_mutex_unlock(&s->lock);
return ret;
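
One idiom worth spelling out: QEMU_IS_ALIGNED(offset | bytes, s->cluster_size) above tests both values in a single expression, which is only valid because cluster sizes are powers of two. A quick standalone check, with the macro redefined locally for the sketch:

#include <stdio.h>
#include <stdint.h>

#define QEMU_IS_ALIGNED(n, m) (((n) % (m)) == 0)

int main(void)
{
    uint64_t cluster_size = 65536;   /* qcow2 cluster sizes are powers of two */

    /* Both operands aligned: the OR keeps all low bits zero. */
    printf("%d\n", QEMU_IS_ALIGNED(UINT64_C(65536) | UINT64_C(131072),
                                   cluster_size));   /* 1 */
    /* One unaligned operand sets a low bit and poisons the OR. */
    printf("%d\n", QEMU_IS_ALIGNED(UINT64_C(65536) | UINT64_C(131073),
                                   cluster_size));   /* 0 */
    return 0;
}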

block/qcow2.h

@@ -301,10 +301,10 @@ typedef struct Qcow2COWRegion {
* Offset of the COW region in bytes from the start of the first cluster
* touched by the request.
*/
uint64_t offset;
unsigned offset;
/** Number of bytes to copy */
int nb_bytes;
unsigned nb_bytes;
} Qcow2COWRegion;
/**
@@ -343,6 +343,13 @@ typedef struct QCowL2Meta
*/
Qcow2COWRegion cow_end;
/**
* The I/O vector with the data from the actual guest write request.
* If non-NULL, this is meant to be merged together with the data
* from @cow_start and @cow_end into one single write operation.
*/
QEMUIOVector *data_qiov;
/** Pointer to next L2Meta of the same write request */
struct QCowL2Meta *next;

block/qed-cluster.c

@@ -61,37 +61,64 @@ static unsigned int qed_count_contiguous_clusters(BDRVQEDState *s,
return i - index;
}
typedef struct {
BDRVQEDState *s;
uint64_t pos;
size_t len;
QEDRequest *request;
/* User callback */
QEDFindClusterFunc *cb;
void *opaque;
} QEDFindClusterCB;
static void qed_find_cluster_cb(void *opaque, int ret)
/**
* Find the offset of a data cluster
*
* @s: QED state
* @request: L2 cache entry
* @pos: Byte position in device
* @len: Number of bytes (may be shortened on return)
* @img_offset: Contains offset in the image file on success
*
* This function translates a position in the block device to an offset in the
* image file. The translated offset or unallocated range in the image file is
* reported back in *img_offset and *len.
*
* If the L2 table exists, request->l2_table points to the L2 table cache entry
* and the caller must free the reference when they are finished. The cache
* entry is exposed in this way to avoid callers having to read the L2 table
* again later during request processing. If request->l2_table is non-NULL it
* will be unreferenced before taking on the new cache entry.
*
* On success QED_CLUSTER_FOUND is returned and img_offset/len are a contiguous
* range in the image file.
*
* On failure QED_CLUSTER_L2 or QED_CLUSTER_L1 is returned for missing L2 or L1
* table offset, respectively. len is number of contiguous unallocated bytes.
*/
int coroutine_fn qed_find_cluster(BDRVQEDState *s, QEDRequest *request,
uint64_t pos, size_t *len,
uint64_t *img_offset)
{
QEDFindClusterCB *find_cluster_cb = opaque;
BDRVQEDState *s = find_cluster_cb->s;
QEDRequest *request = find_cluster_cb->request;
uint64_t l2_offset;
uint64_t offset = 0;
size_t len = 0;
unsigned int index;
unsigned int n;
int ret;
/* Limit length to L2 boundary. Requests are broken up at the L2 boundary
* so that a request acts on one L2 table at a time.
*/
*len = MIN(*len, (((pos >> s->l1_shift) + 1) << s->l1_shift) - pos);
l2_offset = s->l1_table->offsets[qed_l1_index(s, pos)];
if (qed_offset_is_unalloc_cluster(l2_offset)) {
*img_offset = 0;
return QED_CLUSTER_L1;
}
if (!qed_check_table_offset(s, l2_offset)) {
*img_offset = *len = 0;
return -EINVAL;
}
ret = qed_read_l2_table(s, request, l2_offset);
qed_acquire(s);
if (ret) {
goto out;
}
index = qed_l2_index(s, find_cluster_cb->pos);
n = qed_bytes_to_clusters(s,
qed_offset_into_cluster(s, find_cluster_cb->pos) +
find_cluster_cb->len);
index = qed_l2_index(s, pos);
n = qed_bytes_to_clusters(s, qed_offset_into_cluster(s, pos) + *len);
n = qed_count_contiguous_clusters(s, request->l2_table->table,
index, n, &offset);
@@ -105,64 +132,11 @@ static void qed_find_cluster_cb(void *opaque, int ret)
ret = -EINVAL;
}
len = MIN(find_cluster_cb->len, n * s->header.cluster_size -
qed_offset_into_cluster(s, find_cluster_cb->pos));
*len = MIN(*len,
n * s->header.cluster_size - qed_offset_into_cluster(s, pos));
out:
find_cluster_cb->cb(find_cluster_cb->opaque, ret, offset, len);
*img_offset = offset;
qed_release(s);
g_free(find_cluster_cb);
}
/**
* Find the offset of a data cluster
*
* @s: QED state
* @request: L2 cache entry
* @pos: Byte position in device
* @len: Number of bytes
* @cb: Completion function
* @opaque: User data for completion function
*
* This function translates a position in the block device to an offset in the
* image file. It invokes the cb completion callback to report back the
* translated offset or unallocated range in the image file.
*
* If the L2 table exists, request->l2_table points to the L2 table cache entry
* and the caller must free the reference when they are finished. The cache
* entry is exposed in this way to avoid callers having to read the L2 table
* again later during request processing. If request->l2_table is non-NULL it
* will be unreferenced before taking on the new cache entry.
*/
void qed_find_cluster(BDRVQEDState *s, QEDRequest *request, uint64_t pos,
size_t len, QEDFindClusterFunc *cb, void *opaque)
{
QEDFindClusterCB *find_cluster_cb;
uint64_t l2_offset;
/* Limit length to L2 boundary. Requests are broken up at the L2 boundary
* so that a request acts on one L2 table at a time.
*/
len = MIN(len, (((pos >> s->l1_shift) + 1) << s->l1_shift) - pos);
l2_offset = s->l1_table->offsets[qed_l1_index(s, pos)];
if (qed_offset_is_unalloc_cluster(l2_offset)) {
cb(opaque, QED_CLUSTER_L1, 0, len);
return;
}
if (!qed_check_table_offset(s, l2_offset)) {
cb(opaque, -EINVAL, 0, 0);
return;
}
find_cluster_cb = g_malloc(sizeof(*find_cluster_cb));
find_cluster_cb->s = s;
find_cluster_cb->pos = pos;
find_cluster_cb->len = len;
find_cluster_cb->cb = cb;
find_cluster_cb->opaque = opaque;
find_cluster_cb->request = request;
qed_read_l2_table(s, request, l2_offset,
qed_find_cluster_cb, find_cluster_cb);
return ret;
}

block/qed-gencb.c (deleted)

@@ -1,33 +0,0 @@
/*
* QEMU Enhanced Disk Format
*
* Copyright IBM, Corp. 2010
*
* Authors:
* Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "qed.h"
void *gencb_alloc(size_t len, BlockCompletionFunc *cb, void *opaque)
{
GenericCB *gencb = g_malloc(len);
gencb->cb = cb;
gencb->opaque = opaque;
return gencb;
}
void gencb_complete(void *opaque, int ret)
{
GenericCB *gencb = opaque;
BlockCompletionFunc *cb = gencb->cb;
void *user_opaque = gencb->opaque;
g_free(gencb);
cb(user_opaque, ret);
}

block/qed-table.c

@@ -18,99 +18,38 @@
#include "qed.h"
#include "qemu/bswap.h"
typedef struct {
GenericCB gencb;
BDRVQEDState *s;
QEDTable *table;
struct iovec iov;
QEMUIOVector qiov;
} QEDReadTableCB;
static void qed_read_table_cb(void *opaque, int ret)
static int qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table)
{
QEDReadTableCB *read_table_cb = opaque;
QEDTable *table = read_table_cb->table;
BDRVQEDState *s = read_table_cb->s;
int noffsets = read_table_cb->qiov.size / sizeof(uint64_t);
int i;
QEMUIOVector qiov;
int noffsets;
int i, ret;
/* Handle I/O error */
if (ret) {
struct iovec iov = {
.iov_base = table->offsets,
.iov_len = s->header.cluster_size * s->header.table_size,
};
qemu_iovec_init_external(&qiov, &iov, 1);
trace_qed_read_table(s, offset, table);
ret = bdrv_preadv(s->bs->file, offset, &qiov);
if (ret < 0) {
goto out;
}
/* Byteswap offsets */
qed_acquire(s);
noffsets = qiov.size / sizeof(uint64_t);
for (i = 0; i < noffsets; i++) {
table->offsets[i] = le64_to_cpu(table->offsets[i]);
}
qed_release(s);
ret = 0;
out:
/* Completion */
trace_qed_read_table_cb(s, read_table_cb->table, ret);
gencb_complete(&read_table_cb->gencb, ret);
}
static void qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
BlockCompletionFunc *cb, void *opaque)
{
QEDReadTableCB *read_table_cb = gencb_alloc(sizeof(*read_table_cb),
cb, opaque);
QEMUIOVector *qiov = &read_table_cb->qiov;
trace_qed_read_table(s, offset, table);
read_table_cb->s = s;
read_table_cb->table = table;
read_table_cb->iov.iov_base = table->offsets,
read_table_cb->iov.iov_len = s->header.cluster_size * s->header.table_size,
qemu_iovec_init_external(qiov, &read_table_cb->iov, 1);
bdrv_aio_readv(s->bs->file, offset / BDRV_SECTOR_SIZE, qiov,
qiov->size / BDRV_SECTOR_SIZE,
qed_read_table_cb, read_table_cb);
}
typedef struct {
GenericCB gencb;
BDRVQEDState *s;
QEDTable *orig_table;
QEDTable *table;
bool flush; /* flush after write? */
struct iovec iov;
QEMUIOVector qiov;
} QEDWriteTableCB;
static void qed_write_table_cb(void *opaque, int ret)
{
QEDWriteTableCB *write_table_cb = opaque;
BDRVQEDState *s = write_table_cb->s;
trace_qed_write_table_cb(s,
write_table_cb->orig_table,
write_table_cb->flush,
ret);
if (ret) {
goto out;
}
if (write_table_cb->flush) {
/* We still need to flush first */
write_table_cb->flush = false;
qed_acquire(s);
bdrv_aio_flush(write_table_cb->s->bs, qed_write_table_cb,
write_table_cb);
qed_release(s);
return;
}
out:
qemu_vfree(write_table_cb->table);
gencb_complete(&write_table_cb->gencb, ret);
trace_qed_read_table_cb(s, table, ret);
return ret;
}
/**
@@ -122,17 +61,17 @@ out:
* @index: Index of first element
* @n: Number of elements
* @flush: Whether or not to sync to disk
* @cb: Completion function
* @opaque: Argument for completion function
*/
static void qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
unsigned int index, unsigned int n, bool flush,
BlockCompletionFunc *cb, void *opaque)
static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
unsigned int index, unsigned int n, bool flush)
{
QEDWriteTableCB *write_table_cb;
unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
unsigned int start, end, i;
QEDTable *new_table;
struct iovec iov;
QEMUIOVector qiov;
size_t len_bytes;
int ret;
trace_qed_write_table(s, offset, table, index, n);
@@ -142,157 +81,115 @@ static void qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
len_bytes = (end - start) * sizeof(uint64_t);
write_table_cb = gencb_alloc(sizeof(*write_table_cb), cb, opaque);
write_table_cb->s = s;
write_table_cb->orig_table = table;
write_table_cb->flush = flush;
write_table_cb->table = qemu_blockalign(s->bs, len_bytes);
write_table_cb->iov.iov_base = write_table_cb->table->offsets;
-    write_table_cb->iov.iov_len = len_bytes;
-    qemu_iovec_init_external(&write_table_cb->qiov, &write_table_cb->iov, 1);
+    new_table = qemu_blockalign(s->bs, len_bytes);
+    iov = (struct iovec) {
+        .iov_base = new_table->offsets,
+        .iov_len = len_bytes,
+    };
+    qemu_iovec_init_external(&qiov, &iov, 1);
 
     /* Byteswap table */
     for (i = start; i < end; i++) {
         uint64_t le_offset = cpu_to_le64(table->offsets[i]);
-        write_table_cb->table->offsets[i - start] = le_offset;
+        new_table->offsets[i - start] = le_offset;
     }
 
     /* Adjust for offset into table */
     offset += start * sizeof(uint64_t);
 
-    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
-                    &write_table_cb->qiov,
-                    write_table_cb->qiov.size / BDRV_SECTOR_SIZE,
-                    qed_write_table_cb, write_table_cb);
-}
-
-/**
- * Propagate return value from async callback
- */
-static void qed_sync_cb(void *opaque, int ret)
-{
-    *(int *)opaque = ret;
-}
+    ret = bdrv_pwritev(s->bs->file, offset, &qiov);
+    trace_qed_write_table_cb(s, table, flush, ret);
+    if (ret < 0) {
+        goto out;
+    }
+
+    if (flush) {
+        qed_acquire(s);
+        ret = bdrv_flush(s->bs);
+        qed_release(s);
+        if (ret < 0) {
+            goto out;
+        }
+    }
+
+    ret = 0;
+out:
+    qemu_vfree(new_table);
+    return ret;
+}
 
 int qed_read_l1_table_sync(BDRVQEDState *s)
 {
-    int ret = -EINPROGRESS;
-
-    qed_read_table(s, s->header.l1_table_offset,
-                   s->l1_table, qed_sync_cb, &ret);
-    BDRV_POLL_WHILE(s->bs, ret == -EINPROGRESS);
-
-    return ret;
+    return qed_read_table(s, s->header.l1_table_offset, s->l1_table);
 }
 
-void qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n,
-                        BlockCompletionFunc *cb, void *opaque)
+int qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n)
 {
     BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
-    qed_write_table(s, s->header.l1_table_offset,
-                    s->l1_table, index, n, false, cb, opaque);
+    return qed_write_table(s, s->header.l1_table_offset,
+                           s->l1_table, index, n, false);
 }
 
 int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
                             unsigned int n)
 {
-    int ret = -EINPROGRESS;
-
-    qed_write_l1_table(s, index, n, qed_sync_cb, &ret);
-    BDRV_POLL_WHILE(s->bs, ret == -EINPROGRESS);
-
-    return ret;
+    return qed_write_l1_table(s, index, n);
 }
 
-typedef struct {
-    GenericCB gencb;
-    BDRVQEDState *s;
-    uint64_t l2_offset;
-    QEDRequest *request;
-} QEDReadL2TableCB;
-
-static void qed_read_l2_table_cb(void *opaque, int ret)
-{
-    QEDReadL2TableCB *read_l2_table_cb = opaque;
-    QEDRequest *request = read_l2_table_cb->request;
-    BDRVQEDState *s = read_l2_table_cb->s;
-    CachedL2Table *l2_table = request->l2_table;
-    uint64_t l2_offset = read_l2_table_cb->l2_offset;
-
-    qed_acquire(s);
-    if (ret) {
-        /* can't trust loaded L2 table anymore */
-        qed_unref_l2_cache_entry(l2_table);
-        request->l2_table = NULL;
-    } else {
-        l2_table->offset = l2_offset;
-
-        qed_commit_l2_cache_entry(&s->l2_cache, l2_table);
-
-        /* This is guaranteed to succeed because we just committed the entry
-         * to the cache.
-         */
-        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
-        assert(request->l2_table != NULL);
-    }
-    qed_release(s);
-
-    gencb_complete(&read_l2_table_cb->gencb, ret);
-}
-
-void qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset,
-                       BlockCompletionFunc *cb, void *opaque)
+int qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
 {
-    QEDReadL2TableCB *read_l2_table_cb;
+    int ret;
 
     qed_unref_l2_cache_entry(request->l2_table);
 
     /* Check for cached L2 entry */
     request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
     if (request->l2_table) {
-        cb(opaque, 0);
-        return;
+        return 0;
     }
 
     request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
     request->l2_table->table = qed_alloc_table(s);
 
-    read_l2_table_cb = gencb_alloc(sizeof(*read_l2_table_cb), cb, opaque);
-    read_l2_table_cb->s = s;
-    read_l2_table_cb->l2_offset = offset;
-    read_l2_table_cb->request = request;
-
     BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
-    qed_read_table(s, offset, request->l2_table->table,
-                   qed_read_l2_table_cb, read_l2_table_cb);
+    ret = qed_read_table(s, offset, request->l2_table->table);
+
+    qed_acquire(s);
+    if (ret) {
+        /* can't trust loaded L2 table anymore */
+        qed_unref_l2_cache_entry(request->l2_table);
+        request->l2_table = NULL;
+    } else {
+        request->l2_table->offset = offset;
+
+        qed_commit_l2_cache_entry(&s->l2_cache, request->l2_table);
+
+        /* This is guaranteed to succeed because we just committed the entry
+         * to the cache.
+         */
+        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
+        assert(request->l2_table != NULL);
+    }
+    qed_release(s);
+
+    return ret;
 }
 
 int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
 {
-    int ret = -EINPROGRESS;
-
-    qed_read_l2_table(s, request, offset, qed_sync_cb, &ret);
-    BDRV_POLL_WHILE(s->bs, ret == -EINPROGRESS);
-
-    return ret;
+    return qed_read_l2_table(s, request, offset);
 }
 
-void qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
-                        unsigned int index, unsigned int n, bool flush,
-                        BlockCompletionFunc *cb, void *opaque)
+int qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
+                       unsigned int index, unsigned int n, bool flush)
 {
     BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
-    qed_write_table(s, request->l2_table->offset,
-                    request->l2_table->table, index, n, flush, cb, opaque);
+    return qed_write_table(s, request->l2_table->offset,
+                           request->l2_table->table, index, n, flush);
 }
 
 int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                             unsigned int index, unsigned int n, bool flush)
 {
-    int ret = -EINPROGRESS;
-
-    qed_write_l2_table(s, request, index, n, flush, qed_sync_cb, &ret);
-    BDRV_POLL_WHILE(s->bs, ret == -EINPROGRESS);
-
-    return ret;
+    return qed_write_l2_table(s, request, index, n, flush);
 }
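
A note on the shape of this conversion: every function in this series that used to take a BlockCompletionFunc *cb / void *opaque pair and be waited on through qed_sync_cb() plus BDRV_POLL_WHILE() becomes a plain -errno-returning call, because its callers already run in coroutine context. A schematic before/after sketch (hypothetical my_op* names, error handling trimmed; not QEMU API):

    /* Before: completion is delivered through a callback, so synchronous
     * callers need a trampoline and an event-loop poll. */
    static void my_sync_cb(void *opaque, int ret)
    {
        *(int *)opaque = ret;              /* hand the status to the waiter */
    }

    static int my_op_sync(BDRVQEDState *s)
    {
        int ret = -EINPROGRESS;

        my_op_async(s, my_sync_cb, &ret);  /* hypothetical async variant */
        BDRV_POLL_WHILE(s->bs, ret == -EINPROGRESS);
        return ret;
    }

    /* After: the caller is a coroutine, so the helper may just issue the I/O
     * (bdrv_pwritev() yields the coroutine internally) and return a status. */
    static int my_op(BDRVQEDState *s, uint64_t offset, QEMUIOVector *qiov)
    {
        int ret = bdrv_pwritev(s->bs->file, offset, qiov);
        return ret < 0 ? ret : 0;
    }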

block/qed.c: file diff suppressed because it is too large

block/qed.h

@@ -129,8 +129,7 @@ enum {
 };
 
 typedef struct QEDAIOCB {
-    BlockAIOCB common;
-    int bh_ret;                     /* final return status for completion bh */
+    BlockDriverState *bs;
     QSIMPLEQ_ENTRY(QEDAIOCB) next;  /* next request */
     int flags;                      /* QED_AIOCB_* bits ORed together */
     uint64_t end_pos;               /* request end on block device, in bytes */
@@ -163,7 +162,8 @@ typedef struct {
     uint32_t l2_mask;
 
     /* Allocating write request queue */
-    QSIMPLEQ_HEAD(, QEDAIOCB) allocating_write_reqs;
+    QEDAIOCB *allocating_acb;
+    CoQueue allocating_write_reqs;
     bool allocating_write_reqs_plugged;
 
     /* Periodic flush and clear need check flag */
@@ -177,41 +177,9 @@ enum {
     QED_CLUSTER_L1,            /* cluster missing in L1 */
 };
 
-/**
- * qed_find_cluster() completion callback
- *
- * @opaque:     User data for completion callback
- * @ret:        QED_CLUSTER_FOUND   Success
- *              QED_CLUSTER_L2      Data cluster unallocated in L2
- *              QED_CLUSTER_L1      L2 unallocated in L1
- *              -errno              POSIX error occurred
- * @offset:     Data cluster offset
- * @len:        Contiguous bytes starting from cluster offset
- *
- * This function is invoked when qed_find_cluster() completes.
- *
- * On success ret is QED_CLUSTER_FOUND and offset/len are a contiguous range
- * in the image file.
- *
- * On failure ret is QED_CLUSTER_L2 or QED_CLUSTER_L1 for missing L2 or L1
- * table offset, respectively. len is number of contiguous unallocated bytes.
- */
-typedef void QEDFindClusterFunc(void *opaque, int ret, uint64_t offset, size_t len);
-
 void qed_acquire(BDRVQEDState *s);
 void qed_release(BDRVQEDState *s);
 
-/**
- * Generic callback for chaining async callbacks
- */
-typedef struct {
-    BlockCompletionFunc *cb;
-    void *opaque;
-} GenericCB;
-
-void *gencb_alloc(size_t len, BlockCompletionFunc *cb, void *opaque);
-void gencb_complete(void *opaque, int ret);
-
 /**
  * Header functions
  */
@@ -231,25 +199,23 @@ void qed_commit_l2_cache_entry(L2TableCache *l2_cache, CachedL2Table *l2_table);
  * Table I/O functions
  */
 int qed_read_l1_table_sync(BDRVQEDState *s);
-void qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n,
-                        BlockCompletionFunc *cb, void *opaque);
+int qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n);
 int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
                             unsigned int n);
 int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                            uint64_t offset);
-void qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset,
-                       BlockCompletionFunc *cb, void *opaque);
-void qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
-                        unsigned int index, unsigned int n, bool flush,
-                        BlockCompletionFunc *cb, void *opaque);
+int qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset);
+int qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
+                       unsigned int index, unsigned int n, bool flush);
 int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                             unsigned int index, unsigned int n, bool flush);
 
 /**
  * Cluster functions
  */
-void qed_find_cluster(BDRVQEDState *s, QEDRequest *request, uint64_t pos,
-                      size_t len, QEDFindClusterFunc *cb, void *opaque);
+int coroutine_fn qed_find_cluster(BDRVQEDState *s, QEDRequest *request,
+                                  uint64_t pos, size_t *len,
+                                  uint64_t *img_offset);
 
 /**
  * Consistency check
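
The cluster-function change at the end is the same conversion applied to lookups: qed_find_cluster() now returns the QED_CLUSTER_* status directly and reports the contiguous length and image offset through pointers, which is what makes the long QEDFindClusterFunc contract above unnecessary. A hypothetical caller, sketched from the declaration (not actual qed.c code):

    static int coroutine_fn my_map_range(BDRVQEDState *s, QEDRequest *request,
                                         uint64_t pos, size_t len)
    {
        uint64_t img_offset;
        int ret = qed_find_cluster(s, request, pos, &len, &img_offset);

        if (ret == QED_CLUSTER_FOUND) {
            /* img_offset/len describe a contiguous allocated range */
            return 0;
        } else if (ret == QED_CLUSTER_L2 || ret == QED_CLUSTER_L1) {
            /* len unallocated bytes: read from the backing file or zero-fill */
            return 0;
        }
        return ret;                        /* -errno from the lookup */
    }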

block/raw-format.c

@@ -264,7 +264,7 @@ static int64_t coroutine_fn raw_co_get_block_status(BlockDriverState *bs,
}
static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count,
int64_t offset, int bytes,
BdrvRequestFlags flags)
{
BDRVRawState *s = bs->opaque;
@@ -272,18 +272,18 @@ static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs,
return -EINVAL;
}
offset += s->offset;
return bdrv_co_pwrite_zeroes(bs->file, offset, count, flags);
return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
}
static int coroutine_fn raw_co_pdiscard(BlockDriverState *bs,
int64_t offset, int count)
int64_t offset, int bytes)
{
BDRVRawState *s = bs->opaque;
if (offset > UINT64_MAX - s->offset) {
return -EINVAL;
}
offset += s->offset;
return bdrv_co_pdiscard(bs->file->bs, offset, count);
return bdrv_co_pdiscard(bs->file->bs, offset, bytes);
}
static int64_t raw_getlength(BlockDriverState *bs)

block/rbd.c

@@ -1065,11 +1065,11 @@ static int qemu_rbd_snap_list(BlockDriverState *bs,
#ifdef LIBRBD_SUPPORTS_DISCARD
static BlockAIOCB *qemu_rbd_aio_pdiscard(BlockDriverState *bs,
int64_t offset,
int count,
int bytes,
BlockCompletionFunc *cb,
void *opaque)
{
return rbd_start_aio(bs, offset, NULL, count, cb, opaque,
return rbd_start_aio(bs, offset, NULL, bytes, cb, opaque,
RBD_AIO_DISCARD);
}
#endif

block/sheepdog.c

@@ -1046,11 +1046,11 @@ static void sd_parse_uri(SheepdogConfig *cfg, const char *filename,
}
/* transport */
if (!strcmp(uri->scheme, "sheepdog")) {
if (!g_strcmp0(uri->scheme, "sheepdog")) {
is_unix = false;
} else if (!strcmp(uri->scheme, "sheepdog+tcp")) {
} else if (!g_strcmp0(uri->scheme, "sheepdog+tcp")) {
is_unix = false;
} else if (!strcmp(uri->scheme, "sheepdog+unix")) {
} else if (!g_strcmp0(uri->scheme, "sheepdog+unix")) {
is_unix = true;
} else {
error_setg(&err, "URI scheme must be 'sheepdog', 'sheepdog+tcp',"
@@ -2935,7 +2935,7 @@ static int sd_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
static coroutine_fn int sd_co_pdiscard(BlockDriverState *bs, int64_t offset,
int count)
int bytes)
{
SheepdogAIOCB acb;
BDRVSheepdogState *s = bs->opaque;
@@ -2953,11 +2953,11 @@ static coroutine_fn int sd_co_pdiscard(BlockDriverState *bs, int64_t offset,
iov.iov_len = sizeof(zero);
discard_iov.iov = &iov;
discard_iov.niov = 1;
if (!QEMU_IS_ALIGNED(offset | count, BDRV_SECTOR_SIZE)) {
if (!QEMU_IS_ALIGNED(offset | bytes, BDRV_SECTOR_SIZE)) {
return -ENOTSUP;
}
sd_aio_setup(&acb, s, &discard_iov, offset >> BDRV_SECTOR_BITS,
count >> BDRV_SECTOR_BITS, AIOCB_DISCARD_OBJ);
bytes >> BDRV_SECTOR_BITS, AIOCB_DISCARD_OBJ);
sd_co_rw_vector(&acb);
sd_aio_complete(&acb);
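
A detail worth calling out in the sheepdog (and ssh) hunks: uri->scheme can be NULL when the parsed URI has no scheme at all, and strcmp() would dereference that NULL. glib's g_strcmp0() treats NULL as smaller than any non-NULL string, so the comparison simply fails to match instead of crashing. A standalone illustration (plain glib, not QEMU code):

    /* build: gcc demo.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        const char *scheme = NULL;  /* like uri->scheme for a scheme-less URI */

        /* strcmp(scheme, "sheepdog") would crash here; g_strcmp0() is
         * NULL-safe and just reports a mismatch (non-zero). */
        printf("NULL vs \"sheepdog\" -> %d\n", g_strcmp0(scheme, "sheepdog"));
        printf("equal strings      -> %d\n", g_strcmp0("sheepdog", "sheepdog"));
        return 0;
    }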

block/ssh.c

@@ -204,7 +204,7 @@ static int parse_uri(const char *filename, QDict *options, Error **errp)
return -EINVAL;
}
if (strcmp(uri->scheme, "ssh") != 0) {
if (g_strcmp0(uri->scheme, "ssh") != 0) {
error_setg(errp, "URI scheme must be 'ssh'");
goto err;
}

block/throttle-groups.c

@@ -49,7 +49,7 @@
* Again, all this is handled internally and is mostly transparent to
* the outside. The 'throttle_timers' field however has an additional
* constraint because it may be temporarily invalid (see for example
* bdrv_set_aio_context()). Therefore in this file a thread will
* blk_set_aio_context()). Therefore in this file a thread will
* access some other BlockBackend's timers only after verifying that
* that BlockBackend has throttled requests in the queue.
*/

block/trace-events

@@ -9,9 +9,6 @@ blk_co_preadv(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags
blk_co_pwritev(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags %x"
# block/io.c
bdrv_aio_flush(void *bs, void *opaque) "bs %p opaque %p"
bdrv_aio_readv(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
bdrv_aio_writev(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
bdrv_co_readv(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
bdrv_co_writev(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
bdrv_co_pwrite_zeroes(void *bs, int64_t offset, int count, int flags) "bs %p offset %"PRId64" count %d flags %#x"

blockjob.c

@@ -139,7 +139,7 @@ static void block_job_resume(BlockJob *job)
block_job_enter(job);
}
static void block_job_ref(BlockJob *job)
void block_job_ref(BlockJob *job)
{
++job->refcnt;
}
@@ -148,7 +148,7 @@ static void block_job_attached_aio_context(AioContext *new_context,
void *opaque);
static void block_job_detach_aio_context(void *opaque);
static void block_job_unref(BlockJob *job)
void block_job_unref(BlockJob *job)
{
if (--job->refcnt == 0) {
BlockDriverState *bs = blk_bs(job->blk);

hw/block/nvme.c

@@ -21,7 +21,7 @@
  * cmb_size_mb=<cmb_size_mb[optional]>
  *
  * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
- * offset 0 in BAR2 and supports SQS only for now.
+ * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
  */
 
 #include "qemu/osdep.h"
@@ -93,8 +93,8 @@ static void nvme_isr_notify(NvmeCtrl *n, NvmeCQueue *cq)
     }
 }
 
-static uint16_t nvme_map_prp(QEMUSGList *qsg, uint64_t prp1, uint64_t prp2,
-                             uint32_t len, NvmeCtrl *n)
+static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
+                             uint64_t prp2, uint32_t len, NvmeCtrl *n)
 {
     hwaddr trans_len = n->page_size - (prp1 % n->page_size);
     trans_len = MIN(len, trans_len);
@@ -102,10 +102,15 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, uint64_t prp1, uint64_t prp2,
 
     if (!prp1) {
         return NVME_INVALID_FIELD | NVME_DNR;
+    } else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
+               prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
+        qsg->nsg = 0;
+        qemu_iovec_init(iov, num_prps);
+        qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], trans_len);
+    } else {
+        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
+        qemu_sglist_add(qsg, prp1, trans_len);
     }
 
-    pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
-    qemu_sglist_add(qsg, prp1, trans_len);
-
     len -= trans_len;
     if (len) {
         if (!prp2) {
@@ -118,7 +123,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, uint64_t prp1, uint64_t prp2,
             nents = (len + n->page_size - 1) >> n->page_bits;
             prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
-            pci_dma_read(&n->parent_obj, prp2, (void *)prp_list, prp_trans);
+            nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
             while (len != 0) {
                 uint64_t prp_ent = le64_to_cpu(prp_list[i]);
 
@@ -130,7 +135,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, uint64_t prp1, uint64_t prp2,
                     i = 0;
                     nents = (len + n->page_size - 1) >> n->page_bits;
                     prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
-                    pci_dma_read(&n->parent_obj, prp_ent, (void *)prp_list,
-                                 prp_trans);
+                    nvme_addr_read(n, prp_ent, (void *)prp_list,
+                                   prp_trans);
                     prp_ent = le64_to_cpu(prp_list[i]);
                 }
 
@@ -140,7 +145,11 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, uint64_t prp1, uint64_t prp2,
                 }
 
                 trans_len = MIN(len, n->page_size);
-                qemu_sglist_add(qsg, prp_ent, trans_len);
+                if (qsg->nsg){
+                    qemu_sglist_add(qsg, prp_ent, trans_len);
+                } else {
+                    qemu_iovec_add(iov, (void *)&n->cmbuf[prp_ent - n->ctrl_mem.addr], trans_len);
+                }
                 len -= trans_len;
                 i++;
             }
@@ -148,7 +157,11 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, uint64_t prp1, uint64_t prp2,
             if (prp2 & (n->page_size - 1)) {
                 goto unmap;
             }
-            qemu_sglist_add(qsg, prp2, len);
+            if (qsg->nsg) {
+                qemu_sglist_add(qsg, prp2, len);
+            } else {
+                qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr], trans_len);
+            }
         }
     }
     return NVME_SUCCESS;
@@ -162,16 +175,24 @@ static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                                   uint64_t prp1, uint64_t prp2)
 {
     QEMUSGList qsg;
+    QEMUIOVector iov;
+    uint16_t status = NVME_SUCCESS;
 
-    if (nvme_map_prp(&qsg, prp1, prp2, len, n)) {
+    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
         return NVME_INVALID_FIELD | NVME_DNR;
     }
-    if (dma_buf_read(ptr, len, &qsg)) {
+    if (qsg.nsg > 0) {
+        if (dma_buf_read(ptr, len, &qsg)) {
+            status = NVME_INVALID_FIELD | NVME_DNR;
+        }
         qemu_sglist_destroy(&qsg);
-        return NVME_INVALID_FIELD | NVME_DNR;
+    } else {
+        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
+            status = NVME_INVALID_FIELD | NVME_DNR;
+        }
+        qemu_iovec_destroy(&iov);
     }
-    qemu_sglist_destroy(&qsg);
-    return NVME_SUCCESS;
+    return status;
 }
 
 static void nvme_post_cqes(void *opaque)
@@ -285,20 +306,27 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
         return NVME_LBA_RANGE | NVME_DNR;
     }
 
-    if (nvme_map_prp(&req->qsg, prp1, prp2, data_size, n)) {
+    if (nvme_map_prp(&req->qsg, &req->iov, prp1, prp2, data_size, n)) {
         block_acct_invalid(blk_get_stats(n->conf.blk), acct);
         return NVME_INVALID_FIELD | NVME_DNR;
     }
 
-    assert((nlb << data_shift) == req->qsg.size);
-
-    req->has_sg = true;
     dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
-    req->aiocb = is_write ?
-        dma_blk_write(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
-                      nvme_rw_cb, req) :
-        dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
-                     nvme_rw_cb, req);
+    if (req->qsg.nsg > 0) {
+        req->has_sg = true;
+        req->aiocb = is_write ?
+            dma_blk_write(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
+                          nvme_rw_cb, req) :
+            dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
+                         nvme_rw_cb, req);
+    } else {
+        req->has_sg = false;
+        req->aiocb = is_write ?
+            blk_aio_pwritev(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
+                            req) :
+            blk_aio_preadv(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
+                           req);
+    }
 
     return NVME_NO_COMPLETE;
 }
@@ -987,11 +1015,14 @@ static int nvme_init(PCIDevice *pci_dev)
     NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
     NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
     NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 0);
-    NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 0);
-    NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 0);
+    NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
+    NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
     NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
     NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->cmb_size_mb);
 
     n->cmbloc = n->bar.cmbloc;
     n->cmbsz = n->bar.cmbsz;
 
+    n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
     memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                           "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));

hw/block/nvme.h

@@ -712,6 +712,7 @@ typedef struct NvmeRequest {
NvmeCqe cqe;
BlockAcctCookie acct;
QEMUSGList qsg;
QEMUIOVector iov;
QTAILQ_ENTRY(NvmeRequest)entry;
} NvmeRequest;

include/block/block.h

@@ -276,7 +276,7 @@ int bdrv_read(BdrvChild *child, int64_t sector_num,
int bdrv_write(BdrvChild *child, int64_t sector_num,
const uint8_t *buf, int nb_sectors);
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
int count, BdrvRequestFlags flags);
int bytes, BdrvRequestFlags flags);
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes);
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
@@ -295,7 +295,7 @@ int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
* because it may allocate memory for the entire region.
*/
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
int count, BdrvRequestFlags flags);
int bytes, BdrvRequestFlags flags);
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
const char *backing_file);
int bdrv_get_backing_file_depth(BlockDriverState *bs);
@@ -353,14 +353,6 @@ BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
const char *node_name, Error **errp);
/* async block I/O */
BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
QEMUIOVector *iov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
QEMUIOVector *iov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
BlockCompletionFunc *cb, void *opaque);
void bdrv_aio_cancel(BlockAIOCB *acb);
void bdrv_aio_cancel_async(BlockAIOCB *acb);
@@ -419,8 +411,8 @@ void bdrv_drain_all(void);
} \
waited_; })
int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count);
int bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset, int count);
int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int bytes);
int bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes);
int bdrv_has_zero_init_1(BlockDriverState *bs);
int bdrv_has_zero_init(BlockDriverState *bs);
bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs);

include/block/block_int.h

@@ -142,7 +142,7 @@ struct BlockDriver {
BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *(*bdrv_aio_pdiscard)(BlockDriverState *bs,
int64_t offset, int count,
int64_t offset, int bytes,
BlockCompletionFunc *cb, void *opaque);
int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
@@ -163,9 +163,9 @@ struct BlockDriver {
* will be called instead.
*/
int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs,
int64_t offset, int count, BdrvRequestFlags flags);
int64_t offset, int bytes, BdrvRequestFlags flags);
int coroutine_fn (*bdrv_co_pdiscard)(BlockDriverState *bs,
int64_t offset, int count);
int64_t offset, int bytes);
/*
* Building block for bdrv_block_status[_above]. The driver should

include/block/blockjob.h

@@ -320,6 +320,24 @@ void block_job_iostatus_reset(BlockJob *job);
*/
BlockJobTxn *block_job_txn_new(void);
/**
* block_job_ref:
*
* Add a reference to the BlockJob's refcnt. The reference is dropped again
* with block_job_unref(), and the job is freed once the last reference is
* released.
*/
void block_job_ref(BlockJob *job);
/**
* block_job_unref:
*
* Release a reference that was previously acquired with block_job_ref
* or block_job_create. If it's the last reference to the object, it will be
* freed.
*/
void block_job_unref(BlockJob *job);
/**
* block_job_txn_unref:
*

include/sysemu/block-backend.h

@@ -130,7 +130,7 @@ BlockBackend *blk_by_dev(void *dev);
BlockBackend *blk_by_qdev_id(const char *id, Error **errp);
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque);
int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
int count);
int bytes);
int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags);
@@ -138,13 +138,13 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags);
int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int count, BdrvRequestFlags flags);
int bytes, BdrvRequestFlags flags);
BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int count, BdrvRequestFlags flags,
int bytes, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque);
int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags);
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count);
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int bytes);
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int bytes,
BdrvRequestFlags flags);
int64_t blk_getlength(BlockBackend *blk);
void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr);
@@ -157,7 +157,7 @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int count,
BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int bytes,
BlockCompletionFunc *cb, void *opaque);
void blk_aio_cancel(BlockAIOCB *acb);
void blk_aio_cancel_async(BlockAIOCB *acb);
@@ -165,7 +165,7 @@ int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
BlockCompletionFunc *cb, void *opaque);
int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count);
int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes);
int blk_co_flush(BlockBackend *blk);
int blk_flush(BlockBackend *blk);
int blk_commit_all(void);
@@ -220,11 +220,11 @@ int blk_get_open_flags_from_root_state(BlockBackend *blk);
void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
BlockCompletionFunc *cb, void *opaque);
int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int count, BdrvRequestFlags flags);
int bytes, BdrvRequestFlags flags);
int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
int count);
int bytes);
int blk_truncate(BlockBackend *blk, int64_t offset, Error **errp);
int blk_pdiscard(BlockBackend *blk, int64_t offset, int count);
int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes);
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
int64_t pos, int size);
int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size);

migration/savevm.c

@@ -2107,6 +2107,8 @@ int save_snapshot(const char *name, Error **errp)
}
vm_stop(RUN_STATE_SAVE_VM);
bdrv_drain_all_begin();
aio_context_acquire(aio_context);
memset(sn, 0, sizeof(*sn));
@@ -2144,6 +2146,14 @@ int save_snapshot(const char *name, Error **errp)
goto the_end;
}
/* The bdrv_all_create_snapshot() call that follows acquires the AioContext
* for itself. BDRV_POLL_WHILE() does not support nested locking because
* it only releases the lock once. Therefore synchronous I/O will deadlock
* unless we release the AioContext before bdrv_all_create_snapshot().
*/
aio_context_release(aio_context);
aio_context = NULL;
ret = bdrv_all_create_snapshot(sn, bs, vm_state_size, &bs);
if (ret < 0) {
error_setg(errp, "Error while creating snapshot on '%s'",
@@ -2154,7 +2164,12 @@ int save_snapshot(const char *name, Error **errp)
ret = 0;
the_end:
aio_context_release(aio_context);
if (aio_context) {
aio_context_release(aio_context);
}
bdrv_drain_all_end();
if (saved_vm_running) {
vm_start();
}
@@ -2263,20 +2278,21 @@ int load_snapshot(const char *name, Error **errp)
}
/* Flush all IO requests so they don't interfere with the new state. */
bdrv_drain_all();
bdrv_drain_all_begin();
ret = bdrv_all_goto_snapshot(name, &bs);
if (ret < 0) {
error_setg(errp, "Error %d while activating snapshot '%s' on '%s'",
ret, name, bdrv_get_device_name(bs));
return ret;
goto err_drain;
}
/* restore the VM state */
f = qemu_fopen_bdrv(bs_vm_state, 0);
if (!f) {
error_setg(errp, "Could not open VM state file");
return -EINVAL;
ret = -EINVAL;
goto err_drain;
}
qemu_system_reset(SHUTDOWN_CAUSE_NONE);
@@ -2284,15 +2300,21 @@ int load_snapshot(const char *name, Error **errp)
aio_context_acquire(aio_context);
ret = qemu_loadvm_state(f);
migration_incoming_state_destroy();
aio_context_release(aio_context);
migration_incoming_state_destroy();
bdrv_drain_all_end();
if (ret < 0) {
error_setg(errp, "Error %d while loading VM state", ret);
return ret;
}
return 0;
err_drain:
bdrv_drain_all_end();
return ret;
}
void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev)
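
The comment added to save_snapshot() carries the key constraint: BDRV_POLL_WHILE() releases the AioContext lock exactly once, so calling a synchronous, polling block function while the lock is still held a second time deadlocks. The resulting discipline, sketched as a schematic helper (bdrv_flush() standing in for bdrv_all_create_snapshot(); not the actual savevm code):

    static int locked_setup_then_poll(BlockDriverState *bs)
    {
        AioContext *ctx = bdrv_get_aio_context(bs);

        aio_context_acquire(ctx);
        /* ... state that genuinely needs the AioContext lock ... */
        aio_context_release(ctx);  /* drop it BEFORE anything that polls */

        /* Acquires and releases the AioContext by itself internally; holding
         * it here as well would nest the lock and hang synchronous I/O. */
        return bdrv_flush(bs);
    }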

qemu-img.c

@@ -887,22 +887,28 @@ static void common_block_job_cb(void *opaque, int ret)
static void run_block_job(BlockJob *job, Error **errp)
{
AioContext *aio_context = blk_get_aio_context(job->blk);
int ret = 0;
/* FIXME In error cases, the job simply goes away and we access a dangling
* pointer below. */
aio_context_acquire(aio_context);
block_job_ref(job);
do {
aio_poll(aio_context, true);
qemu_progress_print(job->len ?
((float)job->offset / job->len * 100.f) : 0.0f, 0);
} while (!job->ready);
} while (!job->ready && !job->completed);
block_job_complete_sync(job, errp);
if (!job->completed) {
ret = block_job_complete_sync(job, errp);
} else {
ret = job->ret;
}
block_job_unref(job);
aio_context_release(aio_context);
/* A block job may finish instantaneously without publishing any progress,
* so just signal completion here */
qemu_progress_print(100.f, 0);
/* publish completion progress only on success */
if (!ret) {
qemu_progress_print(100.f, 0);
}
}
static int img_commit(int argc, char **argv)
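
The run_block_job() rework above fixes the dangling-pointer FIXME: the job is pinned with a reference across the polling loop, and a job that completes (or fails) before ever becoming ready no longer leaves the loop spinning on freed memory. Distilled into a schematic helper (not the actual qemu-img code):

    static int wait_for_job(BlockJob *job, AioContext *ctx, Error **errp)
    {
        int ret;

        block_job_ref(job);               /* keep 'job' valid while polling */
        while (!job->ready && !job->completed) {
            aio_poll(ctx, true);
        }
        ret = job->completed ? job->ret   /* it finished on its own */
                             : block_job_complete_sync(job, errp);
        block_job_unref(job);             /* may free the job right here */
        return ret;
    }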
@@ -4249,15 +4255,12 @@ static int img_dd(int argc, char **argv)
case 'U':
force_share = true;
break;
case OPTION_OBJECT: {
QemuOpts *opts;
opts = qemu_opts_parse_noisily(&qemu_object_opts,
optarg, true);
if (!opts) {
case OPTION_OBJECT:
if (!qemu_opts_parse_noisily(&qemu_object_opts, optarg, true)) {
ret = -1;
goto out;
}
} break;
break;
case OPTION_IMAGE_OPTS:
image_opts = true;
break;

qemu-io-cmds.c

@@ -451,13 +451,13 @@ fail:
}
static int do_pread(BlockBackend *blk, char *buf, int64_t offset,
int64_t count, int64_t *total)
int64_t bytes, int64_t *total)
{
if (count > INT_MAX) {
if (bytes > INT_MAX) {
return -ERANGE;
}
*total = blk_pread(blk, offset, (uint8_t *)buf, count);
*total = blk_pread(blk, offset, (uint8_t *)buf, bytes);
if (*total < 0) {
return *total;
}
@@ -465,13 +465,13 @@ static int do_pread(BlockBackend *blk, char *buf, int64_t offset,
}
static int do_pwrite(BlockBackend *blk, char *buf, int64_t offset,
int64_t count, int flags, int64_t *total)
int64_t bytes, int flags, int64_t *total)
{
if (count > INT_MAX) {
if (bytes > INT_MAX) {
return -ERANGE;
}
*total = blk_pwrite(blk, offset, (uint8_t *)buf, count, flags);
*total = blk_pwrite(blk, offset, (uint8_t *)buf, bytes, flags);
if (*total < 0) {
return *total;
}
@@ -481,7 +481,7 @@ static int do_pwrite(BlockBackend *blk, char *buf, int64_t offset,
typedef struct {
BlockBackend *blk;
int64_t offset;
int64_t count;
int64_t bytes;
int64_t *total;
int flags;
int ret;
@@ -492,7 +492,7 @@ static void coroutine_fn co_pwrite_zeroes_entry(void *opaque)
{
CoWriteZeroes *data = opaque;
data->ret = blk_co_pwrite_zeroes(data->blk, data->offset, data->count,
data->ret = blk_co_pwrite_zeroes(data->blk, data->offset, data->bytes,
data->flags);
data->done = true;
if (data->ret < 0) {
@@ -500,23 +500,23 @@ static void coroutine_fn co_pwrite_zeroes_entry(void *opaque)
return;
}
*data->total = data->count;
*data->total = data->bytes;
}
static int do_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int64_t count, int flags, int64_t *total)
int64_t bytes, int flags, int64_t *total)
{
Coroutine *co;
CoWriteZeroes data = {
.blk = blk,
.offset = offset,
.count = count,
.bytes = bytes,
.total = total,
.flags = flags,
.done = false,
};
if (count > INT_MAX) {
if (bytes > INT_MAX) {
return -ERANGE;
}
@@ -533,19 +533,19 @@ static int do_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
}
static int do_write_compressed(BlockBackend *blk, char *buf, int64_t offset,
int64_t count, int64_t *total)
int64_t bytes, int64_t *total)
{
int ret;
if (count >> 9 > BDRV_REQUEST_MAX_SECTORS) {
if (bytes >> 9 > BDRV_REQUEST_MAX_SECTORS) {
return -ERANGE;
}
ret = blk_pwrite_compressed(blk, offset, buf, count);
ret = blk_pwrite_compressed(blk, offset, buf, bytes);
if (ret < 0) {
return ret;
}
*total = count;
*total = bytes;
return 1;
}
@@ -1701,7 +1701,7 @@ static int discard_f(BlockBackend *blk, int argc, char **argv)
struct timeval t1, t2;
bool Cflag = false, qflag = false;
int c, ret;
int64_t offset, count;
int64_t offset, bytes;
while ((c = getopt(argc, argv, "Cq")) != -1) {
switch (c) {
@@ -1727,11 +1727,11 @@ static int discard_f(BlockBackend *blk, int argc, char **argv)
}
optind++;
count = cvtnum(argv[optind]);
if (count < 0) {
print_cvtnum_err(count, argv[optind]);
bytes = cvtnum(argv[optind]);
if (bytes < 0) {
print_cvtnum_err(bytes, argv[optind]);
return 0;
} else if (count >> BDRV_SECTOR_BITS > BDRV_REQUEST_MAX_SECTORS) {
} else if (bytes >> BDRV_SECTOR_BITS > BDRV_REQUEST_MAX_SECTORS) {
printf("length cannot exceed %"PRIu64", given %s\n",
(uint64_t)BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS,
argv[optind]);
@@ -1739,7 +1739,7 @@ static int discard_f(BlockBackend *blk, int argc, char **argv)
}
gettimeofday(&t1, NULL);
ret = blk_pdiscard(blk, offset, count);
ret = blk_pdiscard(blk, offset, bytes);
gettimeofday(&t2, NULL);
if (ret < 0) {
@@ -1750,7 +1750,7 @@ static int discard_f(BlockBackend *blk, int argc, char **argv)
/* Finally, report back -- -C gives a parsable format */
if (!qflag) {
t2 = tsub(t2, t1);
print_report("discard", &t2, offset, count, count, 1, Cflag);
print_report("discard", &t2, offset, bytes, bytes, 1, Cflag);
}
out:
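
do_co_pwrite_zeroes() above is also a compact template for driving a coroutine_fn from synchronous command code: pack the arguments into a struct, create a coroutine, enter it in the BlockBackend's AioContext, and poll until the entry function sets a done flag. A condensed sketch using the same QEMU primitives as the hunk (assumed signatures; not a drop-in replacement):

    typedef struct {
        BlockBackend *blk;
        int64_t offset, bytes;
        int ret;
        bool done;
    } ZeroJob;

    static void coroutine_fn zero_entry(void *opaque)
    {
        ZeroJob *job = opaque;

        job->ret = blk_co_pwrite_zeroes(job->blk, job->offset, job->bytes, 0);
        job->done = true;                    /* release the polling loop */
    }

    static int pwrite_zeroes_sync(BlockBackend *blk, int64_t offset,
                                  int64_t bytes)
    {
        ZeroJob job = { .blk = blk, .offset = offset, .bytes = bytes,
                        .ret = -EINPROGRESS, .done = false };
        Coroutine *co = qemu_coroutine_create(zero_entry, &job);

        bdrv_coroutine_enter(blk_bs(blk), co);
        while (!job.done) {
            aio_poll(blk_get_aio_context(blk), true);
        }
        return job.ret;
    }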

qemu-options.hx

@@ -610,6 +610,166 @@ DEF("blockdev", HAS_ARG, QEMU_OPTION_blockdev,
" [,read-only=on|off][,detect-zeroes=on|off|unmap]\n"
" [,driver specific parameters...]\n"
" configure a block backend\n", QEMU_ARCH_ALL)
STEXI
@item -blockdev @var{option}[,@var{option}[,@var{option}[,...]]]
@findex -blockdev
Define a new block driver node. Some of the options apply to all block drivers,
other options are only accepted for a specific block driver. See below for a
list of generic options and options for the most common block drivers.
Options that expect a reference to another node (e.g. @code{file}) can be
given in two ways. Either you specify the node name of an already existing node
(file=@var{node-name}), or you define a new node inline, adding options
for the referenced node after a dot (file.filename=@var{path},file.aio=native).
A block driver node created with @option{-blockdev} can be used for a guest
device by specifying its node name for the @code{drive} property in a
@option{-device} argument that defines a block device.
@table @option
@item Valid options for any block driver node:
@table @code
@item driver
Specifies the block driver to use for the given node.
@item node-name
This defines the name of the block driver node by which it will be referenced
later. The name must be unique, i.e. it must not match the name of a different
block driver node, or (if you use @option{-drive} as well) the ID of a drive.
If no node name is specified, it is automatically generated. The generated node
name is not intended to be predictable and changes between QEMU invocations.
For the top level, an explicit node name must be specified.
@item read-only
Open the node read-only. Guest write attempts will fail.
@item cache.direct
The host page cache can be avoided with @option{cache.direct=on}. This will
attempt to do disk IO directly to the guest's memory. QEMU may still perform an
internal copy of the data.
@item cache.no-flush
In case you don't care about data integrity over host failures, you can use
@option{cache.no-flush=on}. This option tells QEMU that it never needs to write
any data to the disk but can instead keep things in cache. If anything goes
wrong, like your host losing power, the disk storage getting disconnected
accidentally, etc. your image will most probably be rendered unusable.
@item discard=@var{discard}
@var{discard} is one of "ignore" (or "off") or "unmap" (or "on") and controls
whether @code{discard} (also known as @code{trim} or @code{unmap}) requests are
ignored or passed to the filesystem. Some machine types may not support
discard requests.
@item detect-zeroes=@var{detect-zeroes}
@var{detect-zeroes} is "off", "on" or "unmap" and enables the automatic
conversion of plain zero writes by the OS to driver specific optimized
zero write commands. You may even choose "unmap" if @var{discard} is set
to "unmap" to allow a zero write to be converted to an @code{unmap} operation.
@end table
@item Driver-specific options for @code{file}
This is the protocol-level block driver for accessing regular files.
@table @code
@item filename
The path to the image file in the local filesystem
@item aio
Specifies the AIO backend (threads/native, default: threads)
@end table
Example:
@example
-blockdev driver=file,node-name=disk,filename=disk.img
@end example
@item Driver-specific options for @code{raw}
This is the image format block driver for raw images. It is usually
stacked on top of a protocol level block driver such as @code{file}.
@table @code
@item file
Reference to or definition of the data source block driver node
(e.g. a @code{file} driver node)
@end table
Example 1:
@example
-blockdev driver=file,node-name=disk_file,filename=disk.img
-blockdev driver=raw,node-name=disk,file=disk_file
@end example
Example 2:
@example
-blockdev driver=raw,node-name=disk,file.driver=file,file.filename=disk.img
@end example
@item Driver-specific options for @code{qcow2}
This is the image format block driver for qcow2 images. It is usually
stacked on top of a protocol level block driver such as @code{file}.
@table @code
@item file
Reference to or definition of the data source block driver node
(e.g. a @code{file} driver node)
@item backing
Reference to or definition of the backing file block device (default is taken
from the image file). It is allowed to pass an empty string here in order to
disable the default backing file.
@item lazy-refcounts
Whether to enable the lazy refcounts feature (on/off; default is taken from the
image file)
@item cache-size
The maximum total size of the L2 table and refcount block caches in bytes
(default: 1048576 bytes or 8 clusters, whichever is larger)
@item l2-cache-size
The maximum size of the L2 table cache in bytes
(default: 4/5 of the total cache size)
@item refcount-cache-size
The maximum size of the refcount block cache in bytes
(default: 1/5 of the total cache size)
@item cache-clean-interval
Clean unused entries in the L2 and refcount caches. The interval is in seconds.
The default value is 0 and it disables this feature.
@item pass-discard-request
Whether discard requests to the qcow2 device should be forwarded to the data
source (on/off; default: on if discard=unmap is specified, off otherwise)
@item pass-discard-snapshot
Whether discard requests for the data source should be issued when a snapshot
operation (e.g. deleting a snapshot) frees clusters in the qcow2 file (on/off;
default: on)
@item pass-discard-other
Whether discard requests for the data source should be issued on other
occasions where a cluster gets freed (on/off; default: off)
@item overlap-check
Which overlap checks to perform for writes to the image
(none/constant/cached/all; default: cached). For details or finer
granularity control refer to the QAPI documentation of @code{blockdev-add}.
@end table
Example 1:
@example
-blockdev driver=file,node-name=my_file,filename=/tmp/disk.qcow2
-blockdev driver=qcow2,node-name=hda,file=my_file,overlap-check=none,cache-size=16777216
@end example
Example 2:
@example
-blockdev driver=qcow2,node-name=disk,file.driver=http,file.filename=http://example.com/image.qcow2
@end example
@item Driver-specific options for other drivers
Please refer to the QAPI documentation of the @code{blockdev-add} QMP command.
@end table
ETEXI
DEF("drive", HAS_ARG, QEMU_OPTION_drive,
"-drive [file=file][,if=type][,bus=n][,unit=m][,media=d][,index=i]\n"
@@ -630,7 +790,12 @@ STEXI
@item -drive @var{option}[,@var{option}[,@var{option}[,...]]]
@findex -drive
Define a new drive. Valid options are:
Define a new drive. This includes creating a block driver node (the backend) as
well as a guest device, and is mostly a shortcut for defining the corresponding
@option{-blockdev} and @option{-device} options.
@option{-drive} accepts all options that are accepted by @option{-blockdev}. In
addition, it knows the following options:
@table @option
@item file=@var{file}
@@ -657,11 +822,31 @@ These options have the same definition as they have in @option{-hdachs}.
@var{snapshot} is "on" or "off" and controls snapshot mode for the given drive
(see @option{-snapshot}).
@item cache=@var{cache}
@var{cache} is "none", "writeback", "unsafe", "directsync" or "writethrough" and controls how the host cache is used to access block data.
@var{cache} is "none", "writeback", "unsafe", "directsync" or "writethrough"
and controls how the host cache is used to access block data. This is a
shortcut that sets the @option{cache.direct} and @option{cache.no-flush}
options (as in @option{-blockdev}), and additionally @option{cache.writeback},
which provides a default for the @option{write-cache} option of block guest
devices (as in @option{-device}). The modes correspond to the following
settings:
@c Our texi2pod.pl script doesn't support @multitable, so fall back to using
@c plain ASCII art (well, UTF-8 art really). This looks okay both in the manpage
@c and the HTML output.
@example
@             │ cache.writeback   cache.direct   cache.no-flush
─────────────┼─────────────────────────────────────────────────
writeback    │ on                off            off
none         │ on                on             off
writethrough │ off               off            off
directsync   │ off               on             off
unsafe       │ on                off            on
@end example
The default mode is @option{cache=writeback}.
@item aio=@var{aio}
@var{aio} is "threads", or "native" and selects between pthread based disk I/O and native Linux AIO.
@item discard=@var{discard}
@var{discard} is one of "ignore" (or "off") or "unmap" (or "on") and controls whether @dfn{discard} (also known as @dfn{trim} or @dfn{unmap}) requests are ignored or passed to the filesystem. Some machine types may not support discard requests.
@item format=@var{format}
Specify which disk @var{format} will be used rather than detecting
the format. Can be used to specify format=raw to avoid interpreting
@@ -676,16 +861,9 @@ Specify which @var{action} to take on write and read errors. Valid actions are:
"report" (report the error to the guest), "enospc" (pause QEMU only if the
host disk is full; report the error to the guest otherwise).
The default setting is @option{werror=enospc} and @option{rerror=report}.
@item readonly
Open drive @option{file} as read-only. Guest write attempts will fail.
@item copy-on-read=@var{copy-on-read}
@var{copy-on-read} is "on" or "off" and enables whether to copy read backing
file sectors into the image file.
@item detect-zeroes=@var{detect-zeroes}
@var{detect-zeroes} is "off", "on" or "unmap" and enables the automatic
conversion of plain zero writes by the OS to driver specific optimized
zero write commands. You may even choose "unmap" if @var{discard} is set
to "unmap" to allow a zero write to be converted to an UNMAP operation.
@item bps=@var{b},bps_rd=@var{r},bps_wr=@var{w}
Specify bandwidth throttling limits in bytes per second, either for all request
types or for reads or writes only. Small values can lead to timeouts or hangs
@@ -712,34 +890,19 @@ prevent guests from circumventing throttling limits by using many small disks
instead of a single larger disk.
@end table
By default, the @option{cache=writeback} mode is used. It will report data
By default, the @option{cache.writeback=on} mode is used. It will report data
writes as completed as soon as the data is present in the host page cache.
This is safe as long as your guest OS makes sure to correctly flush disk caches
where needed. If your guest OS does not handle volatile disk write caches
correctly and your host crashes or loses power, then the guest may experience
data corruption.
For such guests, you should consider using @option{cache=writethrough}. This
For such guests, you should consider using @option{cache.writeback=off}. This
means that the host page cache will be used to read and write data, but write
notification will be sent to the guest only after QEMU has made sure to flush
each write to the disk. Be aware that this has a major impact on performance.
The host page cache can be avoided entirely with @option{cache=none}. This will
attempt to do disk IO directly to the guest's memory. QEMU may still perform
an internal copy of the data. Note that this is considered a writeback mode and
the guest OS must handle the disk write cache correctly in order to avoid data
corruption on host crashes.
The host page cache can be avoided while only sending write notifications to
the guest when the data has been flushed to the disk using
@option{cache=directsync}.
In case you don't care about data integrity over host failures, use
@option{cache=unsafe}. This option tells QEMU that it never needs to write any
data to the disk but can instead keep things in cache. If anything goes wrong,
like your host losing power, the disk storage getting disconnected accidentally,
etc. your image will most probably be rendered unusable. When using
the @option{-snapshot} option, unsafe caching is always used.
When using the @option{-snapshot} option, unsafe caching is always used.
Copy-on-read avoids accessing the same backing file sectors repeatedly and is
useful when the backing file is over a slow network. By default copy-on-read

tests/qemu-iotests/068

@@ -45,28 +45,41 @@ _supported_os Linux
 IMGOPTS="compat=1.1"
 IMG_SIZE=128K
 
-echo
-echo "=== Saving and reloading a VM state to/from a qcow2 image ==="
-echo
-
-_make_test_img $IMG_SIZE
-
 case "$QEMU_DEFAULT_MACHINE" in
   s390-ccw-virtio)
       platform_parm="-no-shutdown"
+      hba=virtio-scsi-ccw
       ;;
   *)
       platform_parm=""
+      hba=virtio-scsi-pci
       ;;
 esac
 
-# Give qemu some time to boot before saving the VM state
-bash -c 'sleep 1; echo -e "savevm 0\nquit"' |\
-    $QEMU $platform_parm -nographic -monitor stdio -serial none -hda "$TEST_IMG" |\
-    _filter_qemu | _filter_hmp
-
-# Now try to continue from that VM state (this should just work)
-echo quit |\
-    $QEMU $platform_parm -nographic -monitor stdio -serial none -hda "$TEST_IMG" -loadvm 0 |\
-    _filter_qemu | _filter_hmp
+_qemu()
+{
+    $QEMU $platform_parm -nographic -monitor stdio -serial none \
+          -drive if=none,id=drive0,file="$TEST_IMG",format="$IMGFMT" \
+          -device $hba,id=hba0 \
+          -device scsi-hd,drive=drive0 \
+          "$@" |\
+        _filter_qemu | _filter_hmp
+}
+
+for extra_args in \
+    "" \
+    "-object iothread,id=iothread0 -set device.hba0.iothread=iothread0"; do
+    echo
+    echo "=== Saving and reloading a VM state to/from a qcow2 image ($extra_args) ==="
+    echo
+
+    _make_test_img $IMG_SIZE
+
+    # Give qemu some time to boot before saving the VM state
+    bash -c 'sleep 1; echo -e "savevm 0\nquit"' | _qemu $extra_args
+
+    # Now try to continue from that VM state (this should just work)
+    echo quit | _qemu $extra_args -loadvm 0
+done
# success, all done
echo "*** done"

tests/qemu-iotests/068.out

@@ -1,6 +1,15 @@
QA output created by 068
=== Saving and reloading a VM state to/from a qcow2 image ===
=== Saving and reloading a VM state to/from a qcow2 image () ===
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=131072
QEMU X.Y.Z monitor - type 'help' for more information
(qemu) savevm 0
(qemu) quit
QEMU X.Y.Z monitor - type 'help' for more information
(qemu) quit
=== Saving and reloading a VM state to/from a qcow2 image (-object iothread,id=iothread0 -set device.hba0.iothread=iothread0) ===
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=131072
QEMU X.Y.Z monitor - type 'help' for more information

tests/qemu-iotests/185 (new executable file, 206 lines)

@@ -0,0 +1,206 @@
#!/bin/bash
#
# Test exiting qemu while jobs are still running
#
# Copyright (C) 2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# creator
owner=kwolf@redhat.com
seq=`basename $0`
echo "QA output created by $seq"
here=`pwd`
status=1 # failure is the default!
MIG_SOCKET="${TEST_DIR}/migrate"
_cleanup()
{
rm -f "${TEST_IMG}.mid"
rm -f "${TEST_IMG}.copy"
_cleanup_test_img
_cleanup_qemu
}
trap "_cleanup; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
. ./common.qemu
_supported_fmt qcow2
_supported_proto file
_supported_os Linux
size=64M
TEST_IMG="${TEST_IMG}.base" _make_test_img $size
echo
echo === Starting VM ===
echo
qemu_comm_method="qmp"
_launch_qemu \
-drive file="${TEST_IMG}.base",cache=$CACHEMODE,driver=$IMGFMT,id=disk
h=$QEMU_HANDLE
_send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return'
echo
echo === Creating backing chain ===
echo
_send_qemu_cmd $h \
"{ 'execute': 'blockdev-snapshot-sync',
'arguments': { 'device': 'disk',
'snapshot-file': '$TEST_IMG.mid',
'format': '$IMGFMT',
'mode': 'absolute-paths' } }" \
"return"
_send_qemu_cmd $h \
"{ 'execute': 'human-monitor-command',
'arguments': { 'command-line':
'qemu-io disk \"write 0 4M\"' } }" \
"return"
_send_qemu_cmd $h \
"{ 'execute': 'blockdev-snapshot-sync',
'arguments': { 'device': 'disk',
'snapshot-file': '$TEST_IMG',
'format': '$IMGFMT',
'mode': 'absolute-paths' } }" \
"return"
echo
echo === Start commit job and exit qemu ===
echo
# Note that the reference output intentionally includes the 'offset' field in
# BLOCK_JOB_CANCELLED events for all of the following block jobs. They are
# predictable and any change in the offsets would hint at a bug in the job
# throttling code.
#
# In order to achieve these predictable offsets, all of the following tests
# use speed=65536. Each job will perform exactly one iteration before it has
# to sleep at least for a second, which is plenty of time for the 'quit' QMP
# command to be received (after receiving the command, the rest runs
# synchronously, so jobs can arbitrarily continue or complete).
#
# The buffer size for commit and streaming is 512k (waiting for 8 seconds after
# the first request), for active commit and mirror it's large enough to cover
# the full 4M, and for backup it's the qcow2 cluster size, which we know is
# 64k. As all of these are at least as large as the speed, we are sure that the
# offset doesn't advance after the first iteration before qemu exits.
_send_qemu_cmd $h \
"{ 'execute': 'block-commit',
'arguments': { 'device': 'disk',
'base':'$TEST_IMG.base',
'top': '$TEST_IMG.mid',
'speed': 65536 } }" \
"return"
_send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
wait=1 _cleanup_qemu
echo
echo === Start active commit job and exit qemu ===
echo
_launch_qemu \
-drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk
h=$QEMU_HANDLE
_send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return'
_send_qemu_cmd $h \
"{ 'execute': 'block-commit',
'arguments': { 'device': 'disk',
'base':'$TEST_IMG.base',
'speed': 65536 } }" \
"return"
_send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
wait=1 _cleanup_qemu
echo
echo === Start mirror job and exit qemu ===
echo
_launch_qemu \
-drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk
h=$QEMU_HANDLE
_send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return'
_send_qemu_cmd $h \
"{ 'execute': 'drive-mirror',
'arguments': { 'device': 'disk',
'target': '$TEST_IMG.copy',
'format': '$IMGFMT',
'sync': 'full',
'speed': 65536 } }" \
"return"
_send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
wait=1 _cleanup_qemu
echo
echo === Start backup job and exit qemu ===
echo
_launch_qemu \
-drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk
h=$QEMU_HANDLE
_send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return'
_send_qemu_cmd $h \
"{ 'execute': 'drive-backup',
'arguments': { 'device': 'disk',
'target': '$TEST_IMG.copy',
'format': '$IMGFMT',
'sync': 'full',
'speed': 65536 } }" \
"return"
_send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
wait=1 _cleanup_qemu
echo
echo === Start streaming job and exit qemu ===
echo
_launch_qemu \
-drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk
h=$QEMU_HANDLE
_send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return'
_send_qemu_cmd $h \
"{ 'execute': 'block-stream',
'arguments': { 'device': 'disk',
'speed': 65536 } }" \
"return"
_send_qemu_cmd $h "{ 'execute': 'quit' }" "return"
wait=1 _cleanup_qemu
_check_test_img
# success, all done
echo "*** done"
rm -f $seq.full
status=0

tests/qemu-iotests/185.out (new file)

@@ -0,0 +1,59 @@
QA output created by 185
Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=67108864
=== Starting VM ===
{"return": {}}
=== Creating backing chain ===
Formatting 'TEST_DIR/t.qcow2.mid', fmt=qcow2 size=67108864 backing_file=TEST_DIR/t.qcow2.base backing_fmt=qcow2 encryption=off cluster_size=65536 lazy_refcounts=off refcount_bits=16
{"return": {}}
wrote 4194304/4194304 bytes at offset 0
4 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
{"return": ""}
Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 size=67108864 backing_file=TEST_DIR/t.qcow2.mid backing_fmt=qcow2 encryption=off cluster_size=65536 lazy_refcounts=off refcount_bits=16
{"return": {}}
=== Start commit job and exit qemu ===
{"return": {}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "disk", "len": 67108864, "offset": 524288, "speed": 65536, "type": "commit"}}
=== Start active commit job and exit qemu ===
{"return": {}}
{"return": {}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "disk", "len": 4194304, "offset": 4194304, "speed": 65536, "type": "commit"}}
=== Start mirror job and exit qemu ===
{"return": {}}
Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 encryption=off cluster_size=65536 lazy_refcounts=off refcount_bits=16
{"return": {}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "disk", "len": 4194304, "offset": 4194304, "speed": 65536, "type": "mirror"}}
=== Start backup job and exit qemu ===
{"return": {}}
Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 encryption=off cluster_size=65536 lazy_refcounts=off refcount_bits=16
{"return": {}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "disk", "len": 67108864, "offset": 65536, "speed": 65536, "type": "backup"}}
=== Start streaming job and exit qemu ===
{"return": {}}
{"return": {}}
{"return": {}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "disk", "len": 67108864, "offset": 524288, "speed": 65536, "type": "stream"}}
No errors were found on the image.
*** done

tests/qemu-iotests/common.qemu

@@ -222,5 +222,8 @@ function _cleanup_qemu()
rm -f "${QEMU_FIFO_IN}_${i}" "${QEMU_FIFO_OUT}_${i}"
eval "exec ${QEMU_IN[$i]}<&-" # close file descriptors
eval "exec ${QEMU_OUT[$i]}<&-"
unset QEMU_IN[$i]
unset QEMU_OUT[$i]
done
}

tests/qemu-iotests/group

@@ -175,3 +175,4 @@
181 rw auto migration
182 rw auto quick
183 rw auto migration
185 rw auto