Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2020-09-15' into staging

Block patches:
- Several qcow2 fixes and refactorings
- Let qemu-img convert try to stay at cluster boundaries
- Stable child names for quorum (with x-blockdev-change)
- Explicitly drop vhdx 4k sector support, as it was never actually
  working
- rbd: Mark @namespace a strong runtime option
- iotests.py improvements
- Drop unused runtime_opts objects
- Skip a test case in 030 when run through make check-block

# gpg: Signature made Tue 15 Sep 2020 11:27:05 BST
# gpg:                using RSA key 91BEB60A30DB3E8857D11829F407DB0061D5CF40
# gpg:                issuer "mreitz@redhat.com"
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>" [full]
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1  1829 F407 DB00 61D5 CF40

* remotes/maxreitz/tags/pull-block-2020-09-15: (22 commits)
  block/rbd: add 'namespace' to qemu_rbd_strong_runtime_opts[]
  qcow2: Convert qcow2_alloc_cluster_offset() into qcow2_alloc_host_offset()
  qcow2: Make preallocate_co() resize the image to the correct size
  block/qcow: remove runtime opts
  block/rbd: remove runtime_opts
  qcow2: Return the original error code in qcow2_co_pwrite_zeroes()
  qcow2: Make qcow2_free_any_clusters() free only one cluster
  qcow2: Handle QCowL2Meta on error in preallocate_co()
  block/vhdx: Support vhdx image only with 512 bytes logical sector size
  iotests: Skip test_stream_parallel in test 030 when doing "make check"
  qemu-img: Explicit number replaced by a constant
  qcow2: Rewrite the documentation of qcow2_alloc_cluster_offset()
  qcow2: Don't check nb_clusters when removing l2meta from the list
  qcow2: Fix removal of list members from BDRVQcow2State.cluster_allocs
  qcow2: Use macros for the L1, refcount and bitmap table entry sizes
  qemu-img: avoid unaligned read requests during convert
  block/quorum.c: stable children names
  qemu-iotests: Simplify FilePath __init__
  qemu-iotests: Merge FilePaths and FilePath
  qemu-iotests: Support varargs syntax in FilePaths
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 9b14671aec (Peter Maydell, 2020-09-15 11:48:40 +01:00)
24 changed files with 395 additions and 282 deletions

@ -105,15 +105,6 @@ static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename)
return 0;
}
static QemuOptsList qcow_runtime_opts = {
.name = "qcow",
.head = QTAILQ_HEAD_INITIALIZER(qcow_runtime_opts.head),
.desc = {
BLOCK_CRYPTO_OPT_DEF_QCOW_KEY_SECRET("encrypt."),
{ /* end of list */ }
},
};
static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{


@ -42,6 +42,9 @@
#define BME_MIN_GRANULARITY_BITS 9
#define BME_MAX_NAME_SIZE 1023
/* Size of bitmap table entries */
#define BME_TABLE_ENTRY_SIZE (sizeof(uint64_t))
QEMU_BUILD_BUG_ON(BME_MAX_NAME_SIZE != BDRV_BITMAP_MAX_NAME_SIZE);
#if BME_MAX_TABLE_SIZE * 8ULL > INT_MAX
@ -232,7 +235,7 @@ static int bitmap_table_load(BlockDriverState *bs, Qcow2BitmapTable *tb,
assert(tb->size <= BME_MAX_TABLE_SIZE);
ret = bdrv_pread(bs->file, tb->offset,
table, tb->size * sizeof(uint64_t));
table, tb->size * BME_TABLE_ENTRY_SIZE);
if (ret < 0) {
goto fail;
}
@ -265,7 +268,7 @@ static int free_bitmap_clusters(BlockDriverState *bs, Qcow2BitmapTable *tb)
}
clear_bitmap_table(bs, bitmap_table, tb->size);
qcow2_free_clusters(bs, tb->offset, tb->size * sizeof(uint64_t),
qcow2_free_clusters(bs, tb->offset, tb->size * BME_TABLE_ENTRY_SIZE,
QCOW2_DISCARD_OTHER);
g_free(bitmap_table);
@ -690,7 +693,7 @@ int qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
ret = qcow2_inc_refcounts_imrt(bs, res,
refcount_table, refcount_table_size,
bm->table.offset,
bm->table.size * sizeof(uint64_t));
bm->table.size * BME_TABLE_ENTRY_SIZE);
if (ret < 0) {
goto out;
}
@ -1797,7 +1800,7 @@ uint64_t qcow2_get_persistent_dirty_bitmap_size(BlockDriverState *in_bs,
/* Assume the entire bitmap is allocated */
bitmaps_size += bmclusters * cluster_size;
/* Also reserve space for the bitmap table entries */
bitmaps_size += ROUND_UP(bmclusters * sizeof(uint64_t),
bitmaps_size += ROUND_UP(bmclusters * BME_TABLE_ENTRY_SIZE,
cluster_size);
/* And space for contribution to bitmap directory size */
bitmap_dir_size += calc_dir_entry_size(strlen(name), 0);


@ -47,8 +47,8 @@ int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
new_l1_size * sizeof(uint64_t),
(s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
new_l1_size * L1E_SIZE,
(s->l1_size - new_l1_size) * L1E_SIZE, 0);
if (ret < 0) {
goto fail;
}
@ -76,7 +76,7 @@ fail:
* l1_table in memory to avoid possible image corruption.
*/
memset(s->l1_table + new_l1_size, 0,
(s->l1_size - new_l1_size) * sizeof(uint64_t));
(s->l1_size - new_l1_size) * L1E_SIZE);
return ret;
}
@ -96,7 +96,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
/* Do a sanity check on min_size before trying to calculate new_l1_size
* (this prevents overflows during the while loop for the calculation of
* new_l1_size) */
if (min_size > INT_MAX / sizeof(uint64_t)) {
if (min_size > INT_MAX / L1E_SIZE) {
return -EFBIG;
}
@ -114,7 +114,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
}
QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
if (new_l1_size > QCOW_MAX_L1_SIZE / L1E_SIZE) {
return -EFBIG;
}
@ -123,7 +123,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
s->l1_size, new_l1_size);
#endif
new_l1_size2 = sizeof(uint64_t) * new_l1_size;
new_l1_size2 = L1E_SIZE * new_l1_size;
new_l1_table = qemu_try_blockalign(bs->file->bs, new_l1_size2);
if (new_l1_table == NULL) {
return -ENOMEM;
@ -131,7 +131,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
memset(new_l1_table, 0, new_l1_size2);
if (s->l1_size) {
memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
memcpy(new_l1_table, s->l1_table, s->l1_size * L1E_SIZE);
}
/* write new table (align to cluster) */
@ -180,7 +180,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
s->l1_table = new_l1_table;
old_l1_size = s->l1_size;
s->l1_size = new_l1_size;
qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * L1E_SIZE,
QCOW2_DISCARD_OTHER);
return 0;
fail:
@ -225,9 +225,9 @@ int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
BDRVQcow2State *s = bs->opaque;
int l1_start_index;
int i, ret;
int bufsize = MAX(sizeof(uint64_t),
int bufsize = MAX(L1E_SIZE,
MIN(bs->file->bs->bl.request_alignment, s->cluster_size));
int nentries = bufsize / sizeof(uint64_t);
int nentries = bufsize / L1E_SIZE;
g_autofree uint64_t *buf = g_try_new0(uint64_t, nentries);
if (buf == NULL) {
@ -1096,7 +1096,7 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
*/
if (!m->keep_old_clusters && j != 0) {
for (i = 0; i < j; i++) {
qcow2_free_any_clusters(bs, old_cluster[i], 1, QCOW2_DISCARD_NEVER);
qcow2_free_any_cluster(bs, old_cluster[i], QCOW2_DISCARD_NEVER);
}
}
@ -1710,34 +1710,39 @@ static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
out:
qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
if (ret < 0 && *m && (*m)->nb_clusters > 0) {
QLIST_REMOVE(*m, next_in_flight);
}
return ret;
}
/*
* alloc_cluster_offset
* For a given area on the virtual disk defined by @offset and @bytes,
* find the corresponding area on the qcow2 image, allocating new
* clusters (or subclusters) if necessary. The result can span a
* combination of allocated and previously unallocated clusters.
*
* For a given offset on the virtual disk, find the cluster offset in qcow2
* file. If the offset is not found, allocate a new cluster.
* Note that offset may not be cluster aligned. In this case, the returned
* *host_offset points to exact byte referenced by offset and therefore
* isn't cluster aligned as well.
*
* If the cluster was already allocated, m->nb_clusters is set to 0 and
* other fields in m are meaningless.
* On return, @host_offset is set to the beginning of the requested
* area. This area is guaranteed to be contiguous on the qcow2 file
* but it can be smaller than initially requested. In this case @bytes
* is updated with the actual size.
*
* If the cluster is newly allocated, m->nb_clusters is set to the number of
* contiguous clusters that have been allocated. In this case, the other
* fields of m are valid and contain information about the first allocated
* cluster.
* If any clusters or subclusters were allocated then @m contains a
* list with the information of all the affected regions. Note that
* this can happen regardless of whether this function succeeds or
* not. The caller is responsible for updating the L2 metadata of the
* allocated clusters (on success) or freeing them (on failure), and
* for clearing the contents of @m afterwards in both cases.
*
* If the request conflicts with another write request in flight, the coroutine
* is queued and will be reentered when the dependency has completed.
*
* Return 0 on success and -errno in error cases
*/
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes, uint64_t *host_offset,
QCowL2Meta **m)
int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes, uint64_t *host_offset,
QCowL2Meta **m)
{
BDRVQcow2State *s = bs->opaque;
uint64_t start, remaining;
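
To make the contract described in the rewritten comment above concrete, here is a minimal caller sketch in the style of qcow2_co_pwritev_part() as updated later in this merge. It is an illustration only, not standalone code: write_one_chunk() is a hypothetical name, and qcow2_handle_l2meta() is static to block/qcow2.c, so this would not compile outside that file.

static coroutine_fn int write_one_chunk(BlockDriverState *bs, uint64_t offset,
                                        unsigned int bytes)
{
    uint64_t host_offset;
    QCowL2Meta *l2meta = NULL;
    int ret;

    ret = qcow2_alloc_host_offset(bs, offset, &bytes, &host_offset, &l2meta);
    if (ret < 0) {
        goto fail;   /* @l2meta may already describe allocated clusters */
    }

    /* @host_offset has the same offset into its cluster as @offset */
    ret = qcow2_pre_write_overlap_check(bs, 0, host_offset, bytes, true);
    if (ret < 0) {
        goto fail;
    }

    /* ... write @bytes of guest data at @host_offset in s->data_file ... */

    /* Link the newly allocated clusters into their L2 tables */
    return qcow2_handle_l2meta(bs, &l2meta, true);

fail:
    /* Drop the in-flight metadata without linking anything: this wakes
     * dependent requests and frees the QCowL2Meta list */
    qcow2_handle_l2meta(bs, &l2meta, false);
    return ret;
}
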
@ -1758,7 +1763,7 @@ again:
while (true) {
if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) {
*host_offset = start_of_cluster(s, cluster_offset);
*host_offset = cluster_offset;
}
assert(remaining >= cur_bytes);
@ -1841,6 +1846,8 @@ again:
*bytes -= remaining;
assert(*bytes > 0);
assert(*host_offset != INV_OFFSET);
assert(offset_into_cluster(s, *host_offset) ==
offset_into_cluster(s, offset));
return 0;
}
@ -1912,7 +1919,7 @@ static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap);
}
/* Then decrease the refcount */
qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
qcow2_free_any_cluster(bs, old_l2_entry, type);
}
qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
@ -2004,7 +2011,7 @@ static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
if (unmap) {
qcow2_free_any_clusters(bs, old_l2_entry, 1, QCOW2_DISCARD_REQUEST);
qcow2_free_any_cluster(bs, old_l2_entry, QCOW2_DISCARD_REQUEST);
}
set_l2_entry(s, l2_slice, l2_index + i, new_l2_entry);
if (has_subclusters(s)) {
@ -2410,7 +2417,7 @@ int qcow2_expand_zero_clusters(BlockDriverState *bs,
Error *local_err = NULL;
ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset,
s->snapshots[i].l1_size, sizeof(uint64_t),
s->snapshots[i].l1_size, L1E_SIZE,
QCOW_MAX_L1_SIZE, "Snapshot L1 table",
&local_err);
if (ret < 0) {
@ -2418,7 +2425,7 @@ int qcow2_expand_zero_clusters(BlockDriverState *bs,
goto fail;
}
l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t);
l1_size2 = s->snapshots[i].l1_size * L1E_SIZE;
new_l1_table = g_try_realloc(l1_table, l1_size2);
if (!new_l1_table) {


@ -105,8 +105,8 @@ int qcow2_refcount_init(BlockDriverState *bs)
s->get_refcount = get_refcount_funcs[s->refcount_order];
s->set_refcount = set_refcount_funcs[s->refcount_order];
assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
assert(s->refcount_table_size <= INT_MAX / REFTABLE_ENTRY_SIZE);
refcount_table_size2 = s->refcount_table_size * REFTABLE_ENTRY_SIZE;
s->refcount_table = g_try_malloc(refcount_table_size2);
if (s->refcount_table_size > 0) {
@ -434,8 +434,8 @@ static int alloc_refcount_block(BlockDriverState *bs,
if (refcount_table_index < s->refcount_table_size) {
uint64_t data64 = cpu_to_be64(new_block);
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
ret = bdrv_pwrite_sync(bs->file,
s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset +
refcount_table_index * REFTABLE_ENTRY_SIZE,
&data64, sizeof(data64));
if (ret < 0) {
goto fail;
@ -562,8 +562,8 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t start_offset,
DIV_ROUND_UP(total_refblock_count, 2);
}
/* The qcow2 file can only store the reftable size in number of clusters */
table_size = ROUND_UP(table_size, s->cluster_size / sizeof(uint64_t));
table_clusters = (table_size * sizeof(uint64_t)) / s->cluster_size;
table_size = ROUND_UP(table_size, s->cluster_size / REFTABLE_ENTRY_SIZE);
table_clusters = (table_size * REFTABLE_ENTRY_SIZE) / s->cluster_size;
if (table_size > QCOW_MAX_REFTABLE_SIZE) {
return -EFBIG;
@ -581,13 +581,13 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t start_offset,
if (table_size > s->max_refcount_table_index) {
/* We're actually growing the reftable */
memcpy(new_table, s->refcount_table,
(s->max_refcount_table_index + 1) * sizeof(uint64_t));
(s->max_refcount_table_index + 1) * REFTABLE_ENTRY_SIZE);
} else {
/* Improbable case: We're shrinking the reftable. However, the caller
* has assured us that there is only empty space beyond @start_offset,
* so we can simply drop all of the refblocks that won't fit into the
* new reftable. */
memcpy(new_table, s->refcount_table, table_size * sizeof(uint64_t));
memcpy(new_table, s->refcount_table, table_size * REFTABLE_ENTRY_SIZE);
}
if (new_refblock_offset) {
@ -682,7 +682,7 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t start_offset,
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
table_size * sizeof(uint64_t));
table_size * REFTABLE_ENTRY_SIZE);
if (ret < 0) {
goto fail;
}
@ -717,7 +717,8 @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t start_offset,
update_max_refcount_table_index(s);
/* Free old table. */
qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
qcow2_free_clusters(bs, old_table_offset,
old_table_size * REFTABLE_ENTRY_SIZE,
QCOW2_DISCARD_OTHER);
return end_offset;
@ -1156,8 +1157,8 @@ void qcow2_free_clusters(BlockDriverState *bs,
* Free a cluster using its L2 entry (handles clusters of all types, e.g.
* normal cluster, compressed cluster, etc.)
*/
void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
int nb_clusters, enum qcow2_discard_type type)
void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
enum qcow2_discard_type type)
{
BDRVQcow2State *s = bs->opaque;
QCow2ClusterType ctype = qcow2_get_cluster_type(bs, l2_entry);
@ -1168,7 +1169,7 @@ void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
ctype == QCOW2_CLUSTER_ZERO_ALLOC))
{
bdrv_pdiscard(s->data_file, l2_entry & L2E_OFFSET_MASK,
nb_clusters << s->cluster_bits);
s->cluster_size);
}
return;
}
@ -1191,7 +1192,7 @@ void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
l2_entry & L2E_OFFSET_MASK);
} else {
qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
nb_clusters << s->cluster_bits, type);
s->cluster_size, type);
}
break;
case QCOW2_CLUSTER_ZERO_PLAIN:
@ -1253,7 +1254,7 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
l2_slice = NULL;
l1_table = NULL;
l1_size2 = l1_size * sizeof(uint64_t);
l1_size2 = l1_size * L1E_SIZE;
slice_size2 = s->l2_slice_size * l2_entry_size(s);
n_slices = s->cluster_size / slice_size2;
@ -1784,7 +1785,7 @@ static int check_refcounts_l1(BlockDriverState *bs,
uint64_t *l1_table = NULL, l2_offset, l1_size2;
int i, ret;
l1_size2 = l1_size * sizeof(uint64_t);
l1_size2 = l1_size * L1E_SIZE;
/* Mark L1 table as used */
ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, refcount_table_size,
@ -2146,7 +2147,7 @@ static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
res->corruptions++;
continue;
}
if (sn->l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
if (sn->l1_size > QCOW_MAX_L1_SIZE / L1E_SIZE) {
fprintf(stderr, "ERROR snapshot %s (%s) l1_size=%#" PRIx32 ": "
"L1 table is too large; snapshot table entry corrupted\n",
sn->id_str, sn->name, sn->l1_size);
@ -2169,7 +2170,8 @@ static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
/* refcount data */
ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, nb_clusters,
s->refcount_table_offset,
s->refcount_table_size * sizeof(uint64_t));
s->refcount_table_size *
REFTABLE_ENTRY_SIZE);
if (ret < 0) {
return ret;
}
@ -2390,11 +2392,11 @@ write_refblocks:
uint32_t old_reftable_size = reftable_size;
uint64_t *new_on_disk_reftable;
reftable_size = ROUND_UP((refblock_index + 1) * sizeof(uint64_t),
s->cluster_size) / sizeof(uint64_t);
reftable_size = ROUND_UP((refblock_index + 1) * REFTABLE_ENTRY_SIZE,
s->cluster_size) / REFTABLE_ENTRY_SIZE;
new_on_disk_reftable = g_try_realloc(on_disk_reftable,
reftable_size *
sizeof(uint64_t));
REFTABLE_ENTRY_SIZE);
if (!new_on_disk_reftable) {
res->check_errors++;
ret = -ENOMEM;
@ -2403,7 +2405,7 @@ write_refblocks:
on_disk_reftable = new_on_disk_reftable;
memset(on_disk_reftable + old_reftable_size, 0,
(reftable_size - old_reftable_size) * sizeof(uint64_t));
(reftable_size - old_reftable_size) * REFTABLE_ENTRY_SIZE);
/* The offset we have for the reftable is now no longer valid;
* this will leak that range, but we can easily fix that by running
@ -2420,7 +2422,7 @@ write_refblocks:
reftable_offset < 0)
{
uint64_t reftable_clusters = size_to_clusters(s, reftable_size *
sizeof(uint64_t));
REFTABLE_ENTRY_SIZE);
reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
refcount_table, nb_clusters,
&first_free_cluster);
@ -2460,8 +2462,8 @@ write_refblocks:
uint64_t post_refblock_start, reftable_clusters;
post_refblock_start = ROUND_UP(*nb_clusters, s->refcount_block_size);
reftable_clusters = size_to_clusters(s,
reftable_size * sizeof(uint64_t));
reftable_clusters =
size_to_clusters(s, reftable_size * REFTABLE_ENTRY_SIZE);
/* Not pretty but simple */
if (first_free_cluster < post_refblock_start) {
first_free_cluster = post_refblock_start;
@ -2485,16 +2487,16 @@ write_refblocks:
}
ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset,
reftable_size * sizeof(uint64_t),
reftable_size * REFTABLE_ENTRY_SIZE,
false);
if (ret < 0) {
fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
goto fail;
}
assert(reftable_size < INT_MAX / sizeof(uint64_t));
assert(reftable_size < INT_MAX / REFTABLE_ENTRY_SIZE);
ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable,
reftable_size * sizeof(uint64_t));
reftable_size * REFTABLE_ENTRY_SIZE);
if (ret < 0) {
fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
goto fail;
@ -2503,7 +2505,7 @@ write_refblocks:
/* Enter new reftable into the image header */
reftable_offset_and_clusters.reftable_offset = cpu_to_be64(reftable_offset);
reftable_offset_and_clusters.reftable_clusters =
cpu_to_be32(size_to_clusters(s, reftable_size * sizeof(uint64_t)));
cpu_to_be32(size_to_clusters(s, reftable_size * REFTABLE_ENTRY_SIZE));
ret = bdrv_pwrite_sync(bs->file,
offsetof(QCowHeader, refcount_table_offset),
&reftable_offset_and_clusters,
@ -2693,14 +2695,14 @@ int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
offset = start_of_cluster(s, offset);
if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) {
if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) {
if (overlaps_with(s->l1_table_offset, s->l1_size * L1E_SIZE)) {
return QCOW2_OL_ACTIVE_L1;
}
}
if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) {
if (overlaps_with(s->refcount_table_offset,
s->refcount_table_size * sizeof(uint64_t))) {
s->refcount_table_size * REFTABLE_ENTRY_SIZE)) {
return QCOW2_OL_REFCOUNT_TABLE;
}
}
@ -2715,7 +2717,7 @@ int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
for (i = 0; i < s->nb_snapshots; i++) {
if (s->snapshots[i].l1_size &&
overlaps_with(s->snapshots[i].l1_table_offset,
s->snapshots[i].l1_size * sizeof(uint64_t))) {
s->snapshots[i].l1_size * L1E_SIZE)) {
return QCOW2_OL_INACTIVE_L1;
}
}
@ -2749,11 +2751,11 @@ int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
for (i = 0; i < s->nb_snapshots; i++) {
uint64_t l1_ofs = s->snapshots[i].l1_table_offset;
uint32_t l1_sz = s->snapshots[i].l1_size;
uint64_t l1_sz2 = l1_sz * sizeof(uint64_t);
uint64_t l1_sz2 = l1_sz * L1E_SIZE;
uint64_t *l1;
int ret;
ret = qcow2_validate_table(bs, l1_ofs, l1_sz, sizeof(uint64_t),
ret = qcow2_validate_table(bs, l1_ofs, l1_sz, L1E_SIZE,
QCOW_MAX_L1_SIZE, "", NULL);
if (ret < 0) {
return ret;
@ -2877,8 +2879,8 @@ static int alloc_refblock(BlockDriverState *bs, uint64_t **reftable,
uint64_t new_reftable_size;
new_reftable_size = ROUND_UP(reftable_index + 1,
s->cluster_size / sizeof(uint64_t));
if (new_reftable_size > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
s->cluster_size / REFTABLE_ENTRY_SIZE);
if (new_reftable_size > QCOW_MAX_REFTABLE_SIZE / REFTABLE_ENTRY_SIZE) {
error_setg(errp,
"This operation would make the refcount table grow "
"beyond the maximum size supported by QEMU, aborting");
@ -2886,14 +2888,14 @@ static int alloc_refblock(BlockDriverState *bs, uint64_t **reftable,
}
new_reftable = g_try_realloc(*reftable, new_reftable_size *
sizeof(uint64_t));
REFTABLE_ENTRY_SIZE);
if (!new_reftable) {
error_setg(errp, "Failed to increase reftable buffer size");
return -ENOMEM;
}
memset(new_reftable + *reftable_size, 0,
(new_reftable_size - *reftable_size) * sizeof(uint64_t));
(new_reftable_size - *reftable_size) * REFTABLE_ENTRY_SIZE);
*reftable = new_reftable;
*reftable_size = new_reftable_size;
@ -3164,13 +3166,14 @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
if (new_allocation) {
if (new_reftable_offset) {
qcow2_free_clusters(bs, new_reftable_offset,
allocated_reftable_size * sizeof(uint64_t),
QCOW2_DISCARD_NEVER);
qcow2_free_clusters(
bs, new_reftable_offset,
allocated_reftable_size * REFTABLE_ENTRY_SIZE,
QCOW2_DISCARD_NEVER);
}
new_reftable_offset = qcow2_alloc_clusters(bs, new_reftable_size *
sizeof(uint64_t));
REFTABLE_ENTRY_SIZE);
if (new_reftable_offset < 0) {
error_setg_errno(errp, -new_reftable_offset,
"Failed to allocate the new reftable");
@ -3196,7 +3199,7 @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
/* Write the new reftable */
ret = qcow2_pre_write_overlap_check(bs, 0, new_reftable_offset,
new_reftable_size * sizeof(uint64_t),
new_reftable_size * REFTABLE_ENTRY_SIZE,
false);
if (ret < 0) {
error_setg_errno(errp, -ret, "Overlap check failed");
@ -3208,7 +3211,7 @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
}
ret = bdrv_pwrite(bs->file, new_reftable_offset, new_reftable,
new_reftable_size * sizeof(uint64_t));
new_reftable_size * REFTABLE_ENTRY_SIZE);
for (i = 0; i < new_reftable_size; i++) {
be64_to_cpus(&new_reftable[i]);
@ -3285,7 +3288,7 @@ done:
if (new_reftable_offset > 0) {
qcow2_free_clusters(bs, new_reftable_offset,
new_reftable_size * sizeof(uint64_t),
new_reftable_size * REFTABLE_ENTRY_SIZE,
QCOW2_DISCARD_OTHER);
}
}
@ -3374,7 +3377,7 @@ int qcow2_shrink_reftable(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
uint64_t *reftable_tmp =
g_malloc(s->refcount_table_size * sizeof(uint64_t));
g_malloc(s->refcount_table_size * REFTABLE_ENTRY_SIZE);
int i, ret;
for (i = 0; i < s->refcount_table_size; i++) {
@ -3412,7 +3415,7 @@ int qcow2_shrink_reftable(BlockDriverState *bs)
}
ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset, reftable_tmp,
s->refcount_table_size * sizeof(uint64_t));
s->refcount_table_size * REFTABLE_ENTRY_SIZE);
/*
* If the write in the reftable failed the image may contain a partially
* overwritten reftable. In this case it would be better to clear the


@ -659,7 +659,7 @@ int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
sn->extra_data_size = sizeof(QCowSnapshotExtraData);
/* Allocate the L1 table of the snapshot and copy the current one there. */
l1_table_offset = qcow2_alloc_clusters(bs, s->l1_size * sizeof(uint64_t));
l1_table_offset = qcow2_alloc_clusters(bs, s->l1_size * L1E_SIZE);
if (l1_table_offset < 0) {
ret = l1_table_offset;
goto fail;
@ -679,13 +679,13 @@ int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
}
ret = qcow2_pre_write_overlap_check(bs, 0, sn->l1_table_offset,
s->l1_size * sizeof(uint64_t), false);
s->l1_size * L1E_SIZE, false);
if (ret < 0) {
goto fail;
}
ret = bdrv_pwrite(bs->file, sn->l1_table_offset, l1_table,
s->l1_size * sizeof(uint64_t));
s->l1_size * L1E_SIZE);
if (ret < 0) {
goto fail;
}
@ -768,7 +768,7 @@ int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id)
sn = &s->snapshots[snapshot_index];
ret = qcow2_validate_table(bs, sn->l1_table_offset, sn->l1_size,
sizeof(uint64_t), QCOW_MAX_L1_SIZE,
L1E_SIZE, QCOW_MAX_L1_SIZE,
"Snapshot L1 table", &local_err);
if (ret < 0) {
error_report_err(local_err);
@ -803,8 +803,8 @@ int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id)
goto fail;
}
cur_l1_bytes = s->l1_size * sizeof(uint64_t);
sn_l1_bytes = sn->l1_size * sizeof(uint64_t);
cur_l1_bytes = s->l1_size * L1E_SIZE;
sn_l1_bytes = sn->l1_size * L1E_SIZE;
/*
* Copy the snapshot L1 table to the current L1 table.
@ -917,7 +917,7 @@ int qcow2_snapshot_delete(BlockDriverState *bs,
sn = s->snapshots[snapshot_index];
ret = qcow2_validate_table(bs, sn.l1_table_offset, sn.l1_size,
sizeof(uint64_t), QCOW_MAX_L1_SIZE,
L1E_SIZE, QCOW_MAX_L1_SIZE,
"Snapshot L1 table", errp);
if (ret < 0) {
return ret;
@ -953,7 +953,7 @@ int qcow2_snapshot_delete(BlockDriverState *bs,
error_setg_errno(errp, -ret, "Failed to free the cluster and L1 table");
return ret;
}
qcow2_free_clusters(bs, sn.l1_table_offset, sn.l1_size * sizeof(uint64_t),
qcow2_free_clusters(bs, sn.l1_table_offset, sn.l1_size * L1E_SIZE,
QCOW2_DISCARD_SNAPSHOT);
/* must update the copied flag on the current cluster offsets */
@ -1030,12 +1030,12 @@ int qcow2_snapshot_load_tmp(BlockDriverState *bs,
/* Allocate and read in the snapshot's L1 table */
ret = qcow2_validate_table(bs, sn->l1_table_offset, sn->l1_size,
sizeof(uint64_t), QCOW_MAX_L1_SIZE,
L1E_SIZE, QCOW_MAX_L1_SIZE,
"Snapshot L1 table", errp);
if (ret < 0) {
return ret;
}
new_l1_bytes = sn->l1_size * sizeof(uint64_t);
new_l1_bytes = sn->l1_size * L1E_SIZE;
new_l1_table = qemu_try_blockalign(bs->file->bs, new_l1_bytes);
if (new_l1_table == NULL) {
return -ENOMEM;


@ -1543,7 +1543,7 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
/* read the level 1 table */
ret = qcow2_validate_table(bs, header.l1_table_offset,
header.l1_size, sizeof(uint64_t),
header.l1_size, L1E_SIZE,
QCOW_MAX_L1_SIZE, "Active L1 table", errp);
if (ret < 0) {
goto fail;
@ -1568,15 +1568,14 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
}
if (s->l1_size > 0) {
s->l1_table = qemu_try_blockalign(bs->file->bs,
s->l1_size * sizeof(uint64_t));
s->l1_table = qemu_try_blockalign(bs->file->bs, s->l1_size * L1E_SIZE);
if (s->l1_table == NULL) {
error_setg(errp, "Could not allocate L1 table");
ret = -ENOMEM;
goto fail;
}
ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
s->l1_size * sizeof(uint64_t));
s->l1_size * L1E_SIZE);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read L1 table");
goto fail;
@ -2102,7 +2101,6 @@ static coroutine_fn int qcow2_handle_l2meta(BlockDriverState *bs,
QCowL2Meta *next;
if (link_l2) {
assert(!l2meta->prealloc);
ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
if (ret) {
goto out;
@ -2112,9 +2110,7 @@ static coroutine_fn int qcow2_handle_l2meta(BlockDriverState *bs,
}
/* Take the request off the list of running requests */
if (l2meta->nb_clusters != 0) {
QLIST_REMOVE(l2meta, next_in_flight);
}
QLIST_REMOVE(l2meta, next_in_flight);
qemu_co_queue_restart_all(&l2meta->dependent_requests);
@ -2563,7 +2559,7 @@ static coroutine_fn int qcow2_co_pwritev_part(
int offset_in_cluster;
int ret;
unsigned int cur_bytes; /* number of sectors in current iteration */
uint64_t cluster_offset;
uint64_t host_offset;
QCowL2Meta *l2meta = NULL;
AioTaskPool *aio = NULL;
@ -2584,16 +2580,13 @@ static coroutine_fn int qcow2_co_pwritev_part(
qemu_co_mutex_lock(&s->lock);
ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
&cluster_offset, &l2meta);
ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
&host_offset, &l2meta);
if (ret < 0) {
goto out_locked;
}
assert(offset_into_cluster(s, cluster_offset) == 0);
ret = qcow2_pre_write_overlap_check(bs, 0,
cluster_offset + offset_in_cluster,
ret = qcow2_pre_write_overlap_check(bs, 0, host_offset,
cur_bytes, true);
if (ret < 0) {
goto out_locked;
@ -2605,7 +2598,7 @@ static coroutine_fn int qcow2_co_pwritev_part(
aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
}
ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_task_entry, 0,
cluster_offset + offset_in_cluster, offset,
host_offset, offset,
cur_bytes, qiov, qiov_offset, l2meta);
l2meta = NULL; /* l2meta is consumed by qcow2_co_pwritev_task() */
if (ret < 0) {
@ -3126,38 +3119,28 @@ static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset,
int64_t file_length;
unsigned int cur_bytes;
int ret;
QCowL2Meta *meta;
QCowL2Meta *meta = NULL, *m;
assert(offset <= new_length);
bytes = new_length - offset;
while (bytes) {
cur_bytes = MIN(bytes, QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size));
ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
&host_offset, &meta);
ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
&host_offset, &meta);
if (ret < 0) {
error_setg_errno(errp, -ret, "Allocating clusters failed");
return ret;
goto out;
}
while (meta) {
QCowL2Meta *next = meta->next;
meta->prealloc = true;
for (m = meta; m != NULL; m = m->next) {
m->prealloc = true;
}
ret = qcow2_alloc_cluster_link_l2(bs, meta);
if (ret < 0) {
error_setg_errno(errp, -ret, "Mapping clusters failed");
qcow2_free_any_clusters(bs, meta->alloc_offset,
meta->nb_clusters, QCOW2_DISCARD_NEVER);
return ret;
}
/* There are no dependent requests, but we need to remove our
* request from the list of in-flight requests */
QLIST_REMOVE(meta, next_in_flight);
g_free(meta);
meta = next;
ret = qcow2_handle_l2meta(bs, &meta, true);
if (ret < 0) {
error_setg_errno(errp, -ret, "Mapping clusters failed");
goto out;
}
/* TODO Preallocate data if requested */
@ -3174,7 +3157,8 @@ static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset,
file_length = bdrv_getlength(s->data_file->bs);
if (file_length < 0) {
error_setg_errno(errp, -file_length, "Could not get file size");
return file_length;
ret = file_length;
goto out;
}
if (host_offset + cur_bytes > file_length) {
@ -3184,11 +3168,15 @@ static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset,
ret = bdrv_co_truncate(s->data_file, host_offset + cur_bytes, false,
mode, 0, errp);
if (ret < 0) {
return ret;
goto out;
}
}
return 0;
ret = 0;
out:
qcow2_handle_l2meta(bs, &meta, false);
return ret;
}
/* qcow2_refcount_metadata_size:
@ -3213,7 +3201,7 @@ int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
* where no further refcount blocks or table clusters are required to
* reference count every cluster.
*/
int64_t blocks_per_table_cluster = cluster_size / sizeof(uint64_t);
int64_t blocks_per_table_cluster = cluster_size / REFTABLE_ENTRY_SIZE;
int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order);
int64_t table = 0; /* number of refcount table clusters */
int64_t blocks = 0; /* number of refcount block clusters */
@ -3270,8 +3258,8 @@ static int64_t qcow2_calc_prealloc_size(int64_t total_size,
/* total size of L1 tables */
nl1e = nl2e * l2e_size / cluster_size;
nl1e = ROUND_UP(nl1e, cluster_size / sizeof(uint64_t));
meta_size += nl1e * sizeof(uint64_t);
nl1e = ROUND_UP(nl1e, cluster_size / L1E_SIZE);
meta_size += nl1e * L1E_SIZE;
/* total size of refcount table and blocks */
meta_size += qcow2_refcount_metadata_size(
@ -3916,7 +3904,7 @@ static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
type != QCOW2_SUBCLUSTER_ZERO_PLAIN &&
type != QCOW2_SUBCLUSTER_ZERO_ALLOC)) {
qemu_co_mutex_unlock(&s->lock);
return -ENOTSUP;
return ret < 0 ? ret : -ENOTSUP;
}
} else {
qemu_co_mutex_lock(&s->lock);
@ -4051,10 +4039,9 @@ qcow2_co_copy_range_to(BlockDriverState *bs,
BdrvRequestFlags write_flags)
{
BDRVQcow2State *s = bs->opaque;
int offset_in_cluster;
int ret;
unsigned int cur_bytes; /* number of sectors in current iteration */
uint64_t cluster_offset;
uint64_t host_offset;
QCowL2Meta *l2meta = NULL;
assert(!bs->encrypted);
@ -4065,31 +4052,26 @@ qcow2_co_copy_range_to(BlockDriverState *bs,
l2meta = NULL;
offset_in_cluster = offset_into_cluster(s, dst_offset);
cur_bytes = MIN(bytes, INT_MAX);
/* TODO:
* If src->bs == dst->bs, we could simply copy by incrementing
* the refcnt, without copying user data.
* Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */
ret = qcow2_alloc_cluster_offset(bs, dst_offset, &cur_bytes,
&cluster_offset, &l2meta);
ret = qcow2_alloc_host_offset(bs, dst_offset, &cur_bytes,
&host_offset, &l2meta);
if (ret < 0) {
goto fail;
}
assert(offset_into_cluster(s, cluster_offset) == 0);
ret = qcow2_pre_write_overlap_check(bs, 0,
cluster_offset + offset_in_cluster, cur_bytes, true);
ret = qcow2_pre_write_overlap_check(bs, 0, host_offset, cur_bytes,
true);
if (ret < 0) {
goto fail;
}
qemu_co_mutex_unlock(&s->lock);
ret = bdrv_co_copy_range_to(src, src_offset,
s->data_file,
cluster_offset + offset_in_cluster,
ret = bdrv_co_copy_range_to(src, src_offset, s->data_file, host_offset,
cur_bytes, read_flags, write_flags);
qemu_co_mutex_lock(&s->lock);
if (ret < 0) {
@ -4460,7 +4442,7 @@ static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset,
/* write updated header.size */
offset = cpu_to_be64(offset);
ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
&offset, sizeof(uint64_t));
&offset, sizeof(offset));
if (ret < 0) {
error_setg_errno(errp, -ret, "Failed to update the image size");
goto fail;
@ -4700,8 +4682,8 @@ static int make_completely_empty(BlockDriverState *bs)
BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t));
l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t);
l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE);
l1_size2 = (uint64_t)s->l1_size * L1E_SIZE;
/* After this call, neither the in-memory nor the on-disk refcount
* information accurately describe the actual references */
@ -4747,14 +4729,14 @@ static int make_completely_empty(BlockDriverState *bs)
s->l1_table_offset = 3 * s->cluster_size;
new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t));
new_reftable = g_try_new0(uint64_t, s->cluster_size / REFTABLE_ENTRY_SIZE);
if (!new_reftable) {
ret = -ENOMEM;
goto fail_broken_refcounts;
}
s->refcount_table_offset = s->cluster_size;
s->refcount_table_size = s->cluster_size / sizeof(uint64_t);
s->refcount_table_size = s->cluster_size / REFTABLE_ENTRY_SIZE;
s->max_refcount_table_index = 0;
g_free(s->refcount_table);
@ -4826,7 +4808,7 @@ static int qcow2_make_empty(BlockDriverState *bs)
int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size);
int l1_clusters, ret = 0;
l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t));
l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE);
if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps &&
3 + l1_clusters <= s->refcount_block_size &&
@ -4957,7 +4939,7 @@ static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs,
l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL;
l2_tables = DIV_ROUND_UP(virtual_size / cluster_size,
cluster_size / l2e_size);
if (l2_tables * sizeof(uint64_t) > QCOW_MAX_L1_SIZE) {
if (l2_tables * L1E_SIZE > QCOW_MAX_L1_SIZE) {
error_setg(&local_err, "The image size is too large "
"(try using a larger cluster size)");
goto err;


@ -99,6 +99,12 @@
#define L2E_SIZE_NORMAL (sizeof(uint64_t))
#define L2E_SIZE_EXTENDED (sizeof(uint64_t) * 2)
/* Size of L1 table entries */
#define L1E_SIZE (sizeof(uint64_t))
/* Size of reftable entries */
#define REFTABLE_ENTRY_SIZE (sizeof(uint64_t))
#define MIN_CLUSTER_BITS 9
#define MAX_CLUSTER_BITS 21
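
A quick standalone illustration of what the new entry-size macros stand for (not part of the patch): each of these table entries is 8 bytes, so with the default 64 KiB cluster size a single cluster holds 8192 L1 or reftable entries.

#include <stdio.h>
#include <stdint.h>

#define L1E_SIZE            (sizeof(uint64_t))
#define REFTABLE_ENTRY_SIZE (sizeof(uint64_t))

int main(void)
{
    size_t cluster_size = 64 * 1024;

    /* Both print 8192; the macros only make explicit which table an
     * expression such as "size * sizeof(uint64_t)" was talking about. */
    printf("L1 entries per cluster:       %zu\n", cluster_size / L1E_SIZE);
    printf("reftable entries per cluster: %zu\n",
           cluster_size / REFTABLE_ENTRY_SIZE);
    return 0;
}
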
@ -855,8 +861,8 @@ int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size);
void qcow2_free_clusters(BlockDriverState *bs,
int64_t offset, int64_t size,
enum qcow2_discard_type type);
void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
int nb_clusters, enum qcow2_discard_type type);
void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
enum qcow2_discard_type type);
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
int64_t l1_table_offset, int l1_size, int addend);
@ -895,9 +901,9 @@ int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes, uint64_t *host_offset,
QCow2SubclusterType *subcluster_type);
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes, uint64_t *host_offset,
QCowL2Meta **m);
int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes, uint64_t *host_offset,
QCowL2Meta **m);
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int compressed_size,


@ -29,6 +29,8 @@
#define HASH_LENGTH 32
#define INDEXSTR_LEN 32
#define QUORUM_OPT_VOTE_THRESHOLD "vote-threshold"
#define QUORUM_OPT_BLKVERIFY "blkverify"
#define QUORUM_OPT_REWRITE "rewrite-corrupted"
@ -970,9 +972,9 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
opened = g_new0(bool, s->num_children);
for (i = 0; i < s->num_children; i++) {
char indexstr[32];
ret = snprintf(indexstr, 32, "children.%d", i);
assert(ret < 32);
char indexstr[INDEXSTR_LEN];
ret = snprintf(indexstr, INDEXSTR_LEN, "children.%d", i);
assert(ret < INDEXSTR_LEN);
s->children[i] = bdrv_open_child(NULL, options, indexstr, bs,
&child_of_bds, BDRV_CHILD_DATA, false,
@ -1024,7 +1026,7 @@ static void quorum_add_child(BlockDriverState *bs, BlockDriverState *child_bs,
{
BDRVQuorumState *s = bs->opaque;
BdrvChild *child;
char indexstr[32];
char indexstr[INDEXSTR_LEN];
int ret;
if (s->is_blkverify) {
@ -1039,8 +1041,8 @@ static void quorum_add_child(BlockDriverState *bs, BlockDriverState *child_bs,
return;
}
ret = snprintf(indexstr, 32, "children.%u", s->next_child_index);
if (ret < 0 || ret >= 32) {
ret = snprintf(indexstr, INDEXSTR_LEN, "children.%u", s->next_child_index);
if (ret < 0 || ret >= INDEXSTR_LEN) {
error_setg(errp, "cannot generate child name");
return;
}
@ -1068,6 +1070,7 @@ static void quorum_del_child(BlockDriverState *bs, BdrvChild *child,
Error **errp)
{
BDRVQuorumState *s = bs->opaque;
char indexstr[INDEXSTR_LEN];
int i;
for (i = 0; i < s->num_children; i++) {
@ -1089,6 +1092,11 @@ static void quorum_del_child(BlockDriverState *bs, BdrvChild *child,
/* We know now that num_children > threshold, so blkverify must be false */
assert(!s->is_blkverify);
snprintf(indexstr, INDEXSTR_LEN, "children.%u", s->next_child_index - 1);
if (!strncmp(child->name, indexstr, INDEXSTR_LEN)) {
s->next_child_index--;
}
bdrv_drained_begin(bs);
/* We can safely remove this child now */
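
The effect of the quorum changes above on child naming can be shown with a small standalone model (hypothetical demo code, not QEMU itself): children are named "children.<n>", and removing the child with the highest index makes that index reusable, so the names handed out via x-blockdev-change stay stable and dense.

#include <stdio.h>
#include <string.h>

#define INDEXSTR_LEN 32

static unsigned next_child_index;

static void add_child(char *name_out)
{
    snprintf(name_out, INDEXSTR_LEN, "children.%u", next_child_index++);
}

static void del_child(const char *name)
{
    char last[INDEXSTR_LEN];

    snprintf(last, INDEXSTR_LEN, "children.%u", next_child_index - 1);
    if (!strncmp(name, last, INDEXSTR_LEN)) {
        next_child_index--;   /* the removed child had the highest index */
    }
}

int main(void)
{
    char a[INDEXSTR_LEN], b[INDEXSTR_LEN], c[INDEXSTR_LEN];

    add_child(a);             /* children.0 */
    add_child(b);             /* children.1 */
    del_child(b);             /* highest index removed, so it is reused */
    add_child(c);             /* children.1 again instead of children.2 */
    printf("%s %s\n", a, c);  /* prints: children.0 children.1 */
    return 0;
}
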


@ -341,48 +341,6 @@ static void qemu_rbd_memset(RADOSCB *rcb, int64_t offs)
}
}
static QemuOptsList runtime_opts = {
.name = "rbd",
.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
.desc = {
{
.name = "pool",
.type = QEMU_OPT_STRING,
.help = "Rados pool name",
},
{
.name = "namespace",
.type = QEMU_OPT_STRING,
.help = "Rados namespace name in the pool",
},
{
.name = "image",
.type = QEMU_OPT_STRING,
.help = "Image name in the pool",
},
{
.name = "conf",
.type = QEMU_OPT_STRING,
.help = "Rados config file location",
},
{
.name = "snapshot",
.type = QEMU_OPT_STRING,
.help = "Ceph snapshot name",
},
{
/* maps to 'id' in rados_create() */
.name = "user",
.type = QEMU_OPT_STRING,
.help = "Rados id name",
},
/*
* server.* extracted manually, see qemu_rbd_mon_host()
*/
{ /* end of list */ }
},
};
/* FIXME Deprecate and remove keypairs or make it available in QMP. */
static int qemu_rbd_do_create(BlockdevCreateOptions *options,
const char *keypairs, const char *password_secret,
@ -1289,6 +1247,7 @@ static QemuOptsList qemu_rbd_create_opts = {
static const char *const qemu_rbd_strong_runtime_opts[] = {
"pool",
"namespace",
"image",
"conf",
"snapshot",


@ -816,9 +816,9 @@ static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s)
goto exit;
}
/* only 2 supported sector sizes */
if (s->logical_sector_size != 512 && s->logical_sector_size != 4096) {
ret = -EINVAL;
/* Currently we only support 512 */
if (s->logical_sector_size != 512) {
ret = -ENOTSUP;
goto exit;
}


@ -1201,10 +1201,10 @@ static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum,
*pnum = 0;
return 0;
}
is_zero = buffer_is_zero(buf, 512);
is_zero = buffer_is_zero(buf, BDRV_SECTOR_SIZE);
for(i = 1; i < n; i++) {
buf += 512;
if (is_zero != buffer_is_zero(buf, 512)) {
buf += BDRV_SECTOR_SIZE;
if (is_zero != buffer_is_zero(buf, BDRV_SECTOR_SIZE)) {
break;
}
}
@ -1666,6 +1666,7 @@ enum ImgConvertBlockStatus {
typedef struct ImgConvertState {
BlockBackend **src;
int64_t *src_sectors;
int *src_alignment;
int src_num;
int64_t total_sectors;
int64_t allocated_sectors;
@ -1732,6 +1733,7 @@ static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
if (s->sector_next_status <= sector_num) {
uint64_t offset = (sector_num - src_cur_offset) * BDRV_SECTOR_SIZE;
int64_t count;
int tail;
BlockDriverState *src_bs = blk_bs(s->src[src_cur]);
BlockDriverState *base;
@ -1772,6 +1774,16 @@ static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
n = DIV_ROUND_UP(count, BDRV_SECTOR_SIZE);
/*
* Avoid that s->sector_next_status becomes unaligned to the source
* request alignment and/or cluster size to avoid unnecessary read
* cycles.
*/
tail = (sector_num - src_cur_offset + n) % s->src_alignment[src_cur];
if (n > tail) {
n -= tail;
}
if (ret & BDRV_BLOCK_ZERO) {
s->status = post_backing_zero ? BLK_BACKING_FILE : BLK_ZERO;
} else if (ret & BDRV_BLOCK_DATA) {
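
A worked example of the tail trimming added above (standalone sketch with hypothetical numbers, not qemu-img itself): with a 64 KiB source cluster size the alignment is 128 sectors, so a 300-sector status chunk starting at an aligned position is trimmed to 256 sectors and the next block-status query begins on a cluster boundary.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int64_t src_cur_offset = 0;   /* start of this source, in sectors */
    int64_t sector_num = 0;       /* current position, in sectors */
    int alignment = 128;          /* 64 KiB cluster / 512-byte sectors */
    int n = 300;                  /* sectors covered by this status chunk */
    int tail;

    tail = (sector_num - src_cur_offset + n) % alignment;
    if (n > tail) {
        n -= tail;                /* 300 - 44 = 256, i.e. two full clusters */
    }
    printf("n = %d\n", n);        /* prints: n = 256 */
    return 0;
}
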
@ -2410,8 +2422,10 @@ static int img_convert(int argc, char **argv)
s.src = g_new0(BlockBackend *, s.src_num);
s.src_sectors = g_new(int64_t, s.src_num);
s.src_alignment = g_new(int, s.src_num);
for (bs_i = 0; bs_i < s.src_num; bs_i++) {
BlockDriverState *src_bs;
s.src[bs_i] = img_open(image_opts, argv[optind + bs_i],
fmt, src_flags, src_writethrough, s.quiet,
force_share);
@ -2426,6 +2440,13 @@ static int img_convert(int argc, char **argv)
ret = -1;
goto out;
}
src_bs = blk_bs(s.src[bs_i]);
s.src_alignment[bs_i] = DIV_ROUND_UP(src_bs->bl.request_alignment,
BDRV_SECTOR_SIZE);
if (!bdrv_get_info(src_bs, &bdi)) {
s.src_alignment[bs_i] = MAX(s.src_alignment[bs_i],
bdi.cluster_size / BDRV_SECTOR_SIZE);
}
s.total_sectors += s.src_sectors[bs_i];
}
@ -2492,8 +2513,8 @@ static int img_convert(int argc, char **argv)
}
}
qemu_opt_set_number(opts, BLOCK_OPT_SIZE, s.total_sectors * 512,
&error_abort);
qemu_opt_set_number(opts, BLOCK_OPT_SIZE,
s.total_sectors * BDRV_SECTOR_SIZE, &error_abort);
ret = add_old_style_options(out_fmt, opts, out_baseimg, NULL);
if (ret < 0) {
goto out;
@ -2708,6 +2729,7 @@ out:
g_free(s.src);
}
g_free(s.src_sectors);
g_free(s.src_alignment);
fail_getopt:
g_free(options);


@ -55,6 +55,9 @@ fi
cd tests/qemu-iotests
# QEMU_CHECK_BLOCK_AUTO is used to disable some unstable sub-tests
export QEMU_CHECK_BLOCK_AUTO=1
ret=0
for fmt in $format_list ; do
./check -makecheck -$fmt $group || ret=1


@ -21,6 +21,7 @@
import time
import os
import iotests
import unittest
from iotests import qemu_img, qemu_io
backing_img = os.path.join(iotests.test_dir, 'backing.img')
@ -228,6 +229,7 @@ class TestParallelOps(iotests.QMPTestCase):
# Test that it's possible to run several block-stream operations
# in parallel in the same snapshot chain
@unittest.skipIf(os.environ.get('QEMU_CHECK_BLOCK_AUTO'), 'disabled in CI')
def test_stream_parallel(self):
self.assert_no_active_block_jobs()


@ -43,6 +43,10 @@ get_image_size_on_host()
_supported_fmt qcow2
_supported_proto file
# Growing a file with a backing file (without preallocation=full or
# =falloc) requires zeroing the newly added area, which is impossible
# to do quickly for v2 images, and hence is unsupported.
_unsupported_imgopts 'compat=0.10'
if [ -z "$TEST_IMG_FILE" ]; then
TEST_IMG_FILE=$TEST_IMG
@ -168,24 +172,28 @@ done
$QEMU_IMG create -f raw "$TEST_IMG.base" 128k | _filter_img_create
$QEMU_IO -c 'write -q -P 1 0 128k' -f raw "$TEST_IMG.base"
for orig_size in 31k 33k; do
echo "--- Resizing image from $orig_size to 96k ---"
_make_test_img -F raw -b "$TEST_IMG.base" -o cluster_size=64k "$orig_size"
$QEMU_IMG resize -f "$IMGFMT" --preallocation=full "$TEST_IMG" 96k
# The first part of the image should contain data from the backing file
$QEMU_IO -c "read -q -P 1 0 ${orig_size}" "$TEST_IMG"
# The resized part of the image should contain zeroes
$QEMU_IO -c "read -q -P 0 ${orig_size} 63k" "$TEST_IMG"
# If the image does not have an external data file we can also verify its
# actual size. The resized image should have 7 clusters:
# header, L1 table, L2 table, refcount table, refcount block, 2 data clusters
if ! _get_data_file "$TEST_IMG" > /dev/null; then
expected_file_length=$((65536 * 7))
file_length=$(stat -c '%s' "$TEST_IMG_FILE")
if [ "$file_length" != "$expected_file_length" ]; then
echo "ERROR: file length $file_length (expected $expected_file_length)"
fi
fi
echo
for dst_size in 96k 128k; do
for prealloc in metadata full; do
echo "--- Resizing image from $orig_size to $dst_size (preallocation=$prealloc) ---"
_make_test_img -F raw -b "$TEST_IMG.base" -o cluster_size=64k "$orig_size"
$QEMU_IMG resize -f "$IMGFMT" --preallocation="$prealloc" "$TEST_IMG" "$dst_size"
# The first part of the image should contain data from the backing file
$QEMU_IO -c "read -q -P 1 0 ${orig_size}" "$TEST_IMG"
# The resized part of the image should contain zeroes
$QEMU_IO -c "read -q -P 0 ${orig_size} 63k" "$TEST_IMG"
# If the image does not have an external data file we can also verify its
# actual size. The resized image should have 7 clusters:
# header, L1 table, L2 table, refcount table, refcount block, 2 data clusters
if ! _get_data_file "$TEST_IMG" > /dev/null; then
expected_file_length=$((65536 * 7))
file_length=$(stat -c '%s' "$TEST_IMG_FILE")
if [ "$file_length" != "$expected_file_length" ]; then
echo "ERROR: file length $file_length (expected $expected_file_length)"
fi
fi
echo
done
done
done
# success, all done


@ -768,11 +768,35 @@ wrote 81920/81920 bytes at offset 2048000
80 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
Formatting 'TEST_DIR/t.IMGFMT.base', fmt=raw size=131072
--- Resizing image from 31k to 96k ---
--- Resizing image from 31k to 96k (preallocation=metadata) ---
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=31744 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
Image resized.
--- Resizing image from 33k to 96k ---
--- Resizing image from 31k to 96k (preallocation=full) ---
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=31744 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
Image resized.
--- Resizing image from 31k to 128k (preallocation=metadata) ---
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=31744 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
Image resized.
--- Resizing image from 31k to 128k (preallocation=full) ---
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=31744 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
Image resized.
--- Resizing image from 33k to 96k (preallocation=metadata) ---
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=33792 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
Image resized.
--- Resizing image from 33k to 96k (preallocation=full) ---
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=33792 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
Image resized.
--- Resizing image from 33k to 128k (preallocation=metadata) ---
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=33792 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
Image resized.
--- Resizing image from 33k to 128k (preallocation=full) ---
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=33792 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=raw
Image resized.


@ -26,8 +26,8 @@ iotests.script_initialize(supported_fmts=['qcow2', 'qed', 'raw'],
with iotests.FilePath('source.img') as source_img_path, \
iotests.FilePath('dest.img') as dest_img_path, \
iotests.FilePaths(['migration.sock', 'nbd.sock'], iotests.sock_dir) as \
[migration_sock_path, nbd_sock_path], \
iotests.FilePath('migration.sock', 'nbd.sock', base_dir=iotests.sock_dir) \
as (migration_sock_path, nbd_sock_path), \
iotests.VM('source') as source_vm, \
iotests.VM('dest') as dest_vm:


@ -26,7 +26,7 @@ iotests.script_initialize(supported_fmts=['generic'])
with iotests.FilePath('disk.img') as disk_img_path, \
iotests.FilePath('disk-snapshot.img') as disk_snapshot_img_path, \
iotests.FilePath('nbd.sock', iotests.sock_dir) as nbd_sock_path, \
iotests.FilePath('nbd.sock', base_dir=iotests.sock_dir) as nbd_sock_path, \
iotests.VM() as vm:
img_size = '10M'


@ -49,7 +49,7 @@ remainder = [("0xd5", "0x108000", "32k"), # Right-end of partial-left [1]
with iotests.FilePath('base.img') as base_img_path, \
iotests.FilePath('fleece.img') as fleece_img_path, \
iotests.FilePath('nbd.sock', iotests.sock_dir) as nbd_sock_path, \
iotests.FilePath('nbd.sock', base_dir=iotests.sock_dir) as nbd_sock_path, \
iotests.VM() as vm:
log('--- Setting up images ---')


@ -46,8 +46,11 @@ if [ "$IMGOPTSSYNTAX" = "true" ]; then
# We use json:{} filenames here, so we cannot work with additional options.
_unsupported_fmt $IMGFMT
else
# With VDI, the output is ordered differently. Just disable it.
_unsupported_fmt vdi
# - With VDI, the output is ordered differently. Just disable it.
# - VHDX has large clusters; because qemu-img convert tries to
# align the requests to the cluster size, the output is ordered
# differently, so disable it, too.
_unsupported_fmt vdi vhdx
fi


@ -275,10 +275,9 @@ def test_bitmap_sync(bsync_mode, msync_mode='bitmap', failure=None):
an incomplete backup. Testing limitations prevent
testing competing writes.
"""
with iotests.FilePaths(['img', 'bsync1', 'bsync2',
'fbackup0', 'fbackup1', 'fbackup2']) as \
(img_path, bsync1, bsync2,
fbackup0, fbackup1, fbackup2), \
with iotests.FilePath(
'img', 'bsync1', 'bsync2', 'fbackup0', 'fbackup1', 'fbackup2') as \
(img_path, bsync1, bsync2, fbackup0, fbackup1, fbackup2), \
iotests.VM() as vm:
mode = "Mode {:s}; Bitmap Sync {:s}".format(msync_mode, bsync_mode)
@ -441,8 +440,7 @@ def test_backup_api():
"""
Test malformed and prohibited invocations of the backup API.
"""
with iotests.FilePaths(['img', 'bsync1']) as \
(img_path, backup_path), \
with iotests.FilePath('img', 'bsync1') as (img_path, backup_path), \
iotests.VM() as vm:
log("\n=== API failure tests ===\n")

tests/qemu-iotests/305 (new executable file, 74 lines)

@ -0,0 +1,74 @@
#!/usr/bin/env bash
#
# Test the handling of errors in write requests with multiple allocations
#
# Copyright (C) 2020 Igalia, S.L.
# Author: Alberto Garcia <berto@igalia.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# creator
owner=berto@igalia.com
seq=`basename $0`
echo "QA output created by $seq"
status=1 # failure is the default!
_cleanup()
{
_cleanup_test_img
}
trap "_cleanup; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
_supported_fmt qcow2
_supported_proto file
_supported_os Linux
_unsupported_imgopts cluster_size refcount_bits extended_l2 compat=0.10 data_file
echo '### Create the image'
_make_test_img -o refcount_bits=64,cluster_size=1k 1M
# The reference counts of the clusters for the first 123k of this
# write request are stored in the first refcount block. The last
# cluster (guest offset 123k) is referenced in the second refcount
# block.
echo '### Fill the first refcount block and one data cluster from the second'
$QEMU_IO -c 'write 0 124k' "$TEST_IMG" | _filter_qemu_io
echo '### Discard two of the last data clusters, leave one in the middle'
$QEMU_IO -c 'discard 121k 1k' "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c 'discard 123k 1k' "$TEST_IMG" | _filter_qemu_io
echo '### Corrupt the offset of the second refcount block'
refcount_table_offset=$(peek_file_be "$TEST_IMG" 48 8)
poke_file "$TEST_IMG" $(($refcount_table_offset+14)) "\x06"
# This tries to allocate the two clusters discarded earlier (guest
# offsets 121k and 123k). Their reference counts are in the first and
# second refcount blocks respectively, but only the first one can be
# allocated correctly because the second entry of the refcount table
# is corrupted.
echo '### Try to allocate the discarded clusters again'
$QEMU_IO -c 'write 121k 3k' "$TEST_IMG" | _filter_qemu_io
# success, all done
echo "*** done"
rm -f $seq.full
status=0
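
The arithmetic behind this test, as a standalone illustration (it assumes the usual layout of a freshly created image, where the header, refcount table, first refcount block, L1 table and the single L2 table occupy host clusters 0 to 4): with cluster_size=1k and refcount_bits=64 a refcount block holds 128 entries, so guest offset 123k is the first data cluster whose refcount lives in the second refcount block.

#include <stdio.h>

int main(void)
{
    int cluster_size = 1024;
    int refcount_bits = 64;
    int entries_per_refblock = cluster_size * 8 / refcount_bits;  /* 128 */
    int metadata_clusters = 5;    /* header, reftable, refblock, L1, one L2 */
    int guest_cluster = 123;      /* guest offset 123k with 1k clusters */

    /* Data cluster k of this image sits at host cluster k + 5. */
    int host_cluster = guest_cluster + metadata_clusters;         /* 128 */

    printf("refcount entries per refblock: %d\n", entries_per_refblock);
    printf("guest 123k -> host cluster %d -> refcount block %d\n",
           host_cluster, host_cluster / entries_per_refblock);    /* block 1 */
    return 0;
}
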


@ -0,0 +1,16 @@
QA output created by 305
### Create the image
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576
### Fill the first refcount block and one data cluster from the second
wrote 126976/126976 bytes at offset 0
124 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
### Discard two of the last data clusters, leave one in the middle
discard 1024/1024 bytes at offset 123904
1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
discard 1024/1024 bytes at offset 125952
1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
### Corrupt the offset of the second refcount block
### Try to allocate the discarded clusters again
qcow2: Marking image as corrupt: Refblock offset 0x20600 unaligned (reftable index: 0x1); further corruption events will be suppressed
write failed: Input/output error
*** done


@ -313,3 +313,4 @@
302 quick
303 rw quick
304 rw quick
305 rw quick


@ -448,42 +448,45 @@ class Timeout:
def file_pattern(name):
return "{0}-{1}".format(os.getpid(), name)
class FilePaths:
class FilePath:
"""
FilePaths is an auto-generated filename that cleans itself up.
Context manager generating multiple file names. The generated files are
removed when exiting the context.
Use this context manager to generate filenames and ensure that the file
gets deleted::
Example usage:
with FilePath('a.img', 'b.img') as (img_a, img_b):
# Use img_a and img_b here...
# a.img and b.img are automatically removed here.
By default images are created in iotests.test_dir. To create sockets use
iotests.sock_dir:
with FilePath('a.sock', base_dir=iotests.sock_dir) as sock:
For convenience, calling with one argument yields a single file instead of
a tuple with one item.
with FilePaths(['test.img']) as img_path:
qemu_img('create', img_path, '1G')
# migration_sock_path is automatically deleted
"""
def __init__(self, names, base_dir=test_dir):
self.paths = []
for name in names:
self.paths.append(os.path.join(base_dir, file_pattern(name)))
def __init__(self, *names, base_dir=test_dir):
self.paths = [os.path.join(base_dir, file_pattern(name))
for name in names]
def __enter__(self):
return self.paths
if len(self.paths) == 1:
return self.paths[0]
else:
return self.paths
def __exit__(self, exc_type, exc_val, exc_tb):
try:
for path in self.paths:
for path in self.paths:
try:
os.remove(path)
except OSError:
pass
except OSError:
pass
return False
class FilePath(FilePaths):
"""
FilePath is a specialization of FilePaths that takes a single filename.
"""
def __init__(self, name, base_dir=test_dir):
super(FilePath, self).__init__([name], base_dir)
def __enter__(self):
return self.paths[0]
def file_path_remover():
for path in reversed(file_path_remover.paths):