2004-12-12 12:24:44 +01:00
|
|
|
/*
|
|
|
|
* QEMU Block driver for DMG images
|
2007-09-16 23:08:06 +02:00
|
|
|
*
|
2004-12-12 12:24:44 +01:00
|
|
|
* Copyright (c) 2004 Johannes E. Schindelin
|
2007-09-16 23:08:06 +02:00
|
|
|
*
|
2004-12-12 12:24:44 +01:00
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
|
|
* in the Software without restriction, including without limitation the rights
|
|
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
* THE SOFTWARE.
|
|
|
|
*/
|
2016-01-18 19:01:42 +01:00
|
|
|
#include "qemu/osdep.h"
|
include/qemu/osdep.h: Don't include qapi/error.h
Commit 57cb38b included qapi/error.h into qemu/osdep.h to get the
Error typedef. Since then, we've moved to include qemu/osdep.h
everywhere. Its file comment explains: "To avoid getting into
possible circular include dependencies, this file should not include
any other QEMU headers, with the exceptions of config-host.h,
compiler.h, os-posix.h and os-win32.h, all of which are doing a
similar job to this file and are under similar constraints."
qapi/error.h doesn't do a similar job, and it doesn't adhere to
similar constraints: it includes qapi-types.h. That's in excess of
100KiB of crap most .c files don't actually need.
Add the typedef to qemu/typedefs.h, and include that instead of
qapi/error.h. Include qapi/error.h in .c files that need it and don't
get it now. Include qapi-types.h in qom/object.h for uint16List.
Update scripts/clean-includes accordingly. Update it further to match
reality: replace config.h by config-target.h, add sysemu/os-posix.h,
sysemu/os-win32.h. Update the list of includes in the qemu/osdep.h
comment quoted above similarly.
This reduces the number of objects depending on qapi/error.h from "all
of them" to less than a third. Unfortunately, the number depending on
qapi-types.h shrinks only a little. More work is needed for that one.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
[Fix compilation without the spice devel packages. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2016-03-14 09:01:28 +01:00
|
|
|
#include "qapi/error.h"
|
2022-12-21 14:35:49 +01:00
|
|
|
#include "block/block-io.h"
|
2012-12-17 18:19:44 +01:00
|
|
|
#include "block/block_int.h"
|
2012-12-17 18:20:00 +01:00
|
|
|
#include "qemu/bswap.h"
|
2015-03-17 18:29:20 +01:00
|
|
|
#include "qemu/error-report.h"
|
2012-12-17 18:20:00 +01:00
|
|
|
#include "qemu/module.h"
|
2022-02-26 19:07:23 +01:00
|
|
|
#include "qemu/memalign.h"
|
2016-09-05 04:50:45 +02:00
|
|
|
#include "dmg.h"
|
|
|
|
|
2023-03-20 16:26:10 +01:00
|
|
|
/* Optional decompression hooks. These stay NULL unless the corresponding
 * dmg-bz2 / dmg-lzfse block modules are loaded; dmg_is_known_block_type()
 * checks them before accepting bzip2/lzfse chunks. */
BdrvDmgUncompressFunc *dmg_uncompress_bz2;

BdrvDmgUncompressFunc *dmg_uncompress_lzfse;
|
2018-11-05 16:08:05 +01:00
|
|
|
|
2014-03-26 13:05:58 +01:00
|
|
|
enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
     * or truncating when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    /* Derived cap on per-chunk sector counts (512-byte sectors). */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};
|
|
|
|
|
2018-11-05 16:08:06 +01:00
|
|
|
enum {
    /* DMG Block Type (the "type" field of each mish chunk entry) */
    UDZE = 0, /* Zeroes */
    UDRW, /* RAW type */
    UDIG, /* Ignore */
    /* 0x80000004: not handled by this driver (dmg_is_known_block_type()
     * rejects it); presumably ADC compression — TODO confirm against the
     * UDIF format description. */
    UDCO = 0x80000004,
    UDZO, /* zlib compressed */
    UDBZ, /* bzip2 compressed (needs the dmg-bz2 module) */
    ULFO, /* lzfse compressed (needs the dmg-lzfse module) */
    UDCM = 0x7ffffffe, /* Comments */
    UDLE = 0xffffffff /* Last Entry */
};
|
|
|
|
|
2004-12-12 12:24:44 +01:00
|
|
|
/* Format probe. DMG images carry no magic at a fixed offset that we check
 * here, so the score is based solely on the ".dmg" filename extension:
 * 2 for a match, 0 otherwise. 'buf'/'buf_size' are unused. */
static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    size_t name_len;

    if (filename == NULL) {
        return 0;
    }

    name_len = strlen(filename);
    if (name_len <= 4) {
        return 0;
    }

    return strcmp(filename + name_len - 4, ".dmg") == 0 ? 2 : 0;
}
|
|
|
|
|
2013-01-25 17:07:30 +01:00
|
|
|
/* Read a big-endian 64-bit value from the image file at 'offset' and
 * store it in host byte order in '*result'.
 * Returns 0 on success, a negative errno on read failure ('*result' is
 * left untouched in that case). */
static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t raw;
    int ret = bdrv_pread(bs->file, offset, sizeof(raw), &raw, 0);

    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(raw);
    return 0;
}
|
|
|
|
|
2013-01-25 17:07:30 +01:00
|
|
|
/* Read a big-endian 32-bit value from the image file at 'offset' and
 * store it in host byte order in '*result'.
 * Returns 0 on success, a negative errno on read failure ('*result' is
 * left untouched in that case). */
static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t raw;
    int ret = bdrv_pread(bs->file, offset, sizeof(raw), &raw, 0);

    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(raw);
    return 0;
}
|
|
|
|
|
2015-01-06 18:48:07 +01:00
|
|
|
/* Read a big-endian 64-bit value from an in-memory buffer.
 *
 * memcpy() is used instead of the previous '*(uint64_t *)&buffer[offset]'
 * cast: that cast violates strict aliasing (undefined behavior) and can
 * fault on architectures that require aligned 64-bit loads, since mish
 * block fields are not guaranteed to be 8-byte aligned within the buffer.
 * Compilers turn this memcpy into a single load where possible. */
static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    uint64_t value;

    memcpy(&value, buffer + offset, sizeof(value));
    return be64_to_cpu(value);
}
|
|
|
|
|
|
|
|
/* Read a big-endian 32-bit value from an in-memory buffer.
 *
 * memcpy() is used instead of the previous '*(uint32_t *)&buffer[offset]'
 * cast, which violates strict aliasing and may fault on alignment-strict
 * architectures because the field offsets within a mish block are not
 * 4-byte aligned in general. */
static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    uint32_t value;

    memcpy(&value, buffer + offset, sizeof(value));
    return be32_to_cpu(value);
}
|
|
|
|
|
2014-03-26 13:06:00 +01:00
|
|
|
/* Increase max chunk sizes, if necessary. This function is used to calculate
|
|
|
|
* the buffer sizes needed for compressed/uncompressed chunk I/O.
|
|
|
|
*/
|
|
|
|
static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
|
|
|
|
uint32_t *max_compressed_size,
|
|
|
|
uint32_t *max_sectors_per_chunk)
|
|
|
|
{
|
|
|
|
uint32_t compressed_size = 0;
|
|
|
|
uint32_t uncompressed_sectors = 0;
|
|
|
|
|
|
|
|
switch (s->types[chunk]) {
|
2018-11-05 16:08:06 +01:00
|
|
|
case UDZO: /* zlib compressed */
|
|
|
|
case UDBZ: /* bzip2 compressed */
|
|
|
|
case ULFO: /* lzfse compressed */
|
2014-03-26 13:06:00 +01:00
|
|
|
compressed_size = s->lengths[chunk];
|
|
|
|
uncompressed_sectors = s->sectorcounts[chunk];
|
|
|
|
break;
|
2018-11-05 16:08:06 +01:00
|
|
|
case UDRW: /* copy */
|
2017-06-22 13:04:16 +02:00
|
|
|
uncompressed_sectors = DIV_ROUND_UP(s->lengths[chunk], 512);
|
2014-03-26 13:06:00 +01:00
|
|
|
break;
|
dmg: don't skip zero chunk
The dmg file has many tables which describe: "start from sector XXX to
sector XXX, the compression method is XXX and where the compressed data
resides on".
Each sector in the expanded file should be covered by a table. The table
will describe the offset of compressed data (or raw depends on the type)
in the dmg.
For example:
[-----------The expanded file------------]
[---bzip table ---]/* zeros */[---zlib---]
^
| if we want to read this sector.
we will find bzip table which contains this sector, and get the
compressed data offset, read it from dmg, uncompress it, finally write to
expanded file.
If we skip zero chunk (table), some sector cannot find the table which
will cause search_chunk() return s->n_chunks, dmg_read_chunk() return -1
and finally causing dmg_co_preadv() return EIO.
See:
[-----------The expanded file------------]
[---bzip table ---]/* zeros */[---zlib---]
^
| if we want to read this sector.
Oops, we cannot find the table contains it...
In the original implementation, we don't have zero table. When we try to
read sector inside the zero chunk. We will get EIO, and skip reading.
After this patch, we treat zero chunk the same as ignore chunk, it will
directly write zero and avoid some sector may not find the table.
After this patch:
[-----------The expanded file------------]
[---bzip table ---][--zeros--][---zlib---]
Signed-off-by: yuchenlin <npes87184@gmail.com>
Reviewed-by: Julio Faracco <jcfaracco@gmail.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20190103114700.9686-4-npes87184@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2019-01-03 12:47:00 +01:00
|
|
|
case UDZE: /* zero */
|
|
|
|
case UDIG: /* ignore */
|
2015-01-06 18:48:15 +01:00
|
|
|
/* as the all-zeroes block may be large, it is treated specially: the
|
|
|
|
* sector is not copied from a large buffer, a simple memset is used
|
|
|
|
* instead. Therefore uncompressed_sectors does not need to be set. */
|
2014-03-26 13:06:00 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (compressed_size > *max_compressed_size) {
|
|
|
|
*max_compressed_size = compressed_size;
|
|
|
|
}
|
|
|
|
if (uncompressed_sectors > *max_sectors_per_chunk) {
|
|
|
|
*max_sectors_per_chunk = uncompressed_sectors;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-20 18:24:02 +02:00
|
|
|
/* Locate the "koly" UDIF trailer magic near the end of the image.
 *
 * Returns the byte offset of the magic on success, or a negative errno
 * (with '*errp' set) if the file is too short, unreadable, or contains
 * no trailer. */
static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
    BlockDriverState *file_bs = file->bs;
    int64_t length;
    int64_t offset = 0;
    uint8_t buffer[515];
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
     * dmg images can have odd sizes, try to look for the "koly" magic which
     * marks the begin of the UDIF trailer (512 bytes). This magic can be found
     * in the last 511 bytes of the second-last sector or the first 4 bytes of
     * the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    if (length > 511 + 512) {
        offset = length - 511 - 512;
    }
    /* read at most the final 515 bytes of the file */
    length = length < 515 ? length : 515;
    ret = bdrv_pread(file, offset, length, buffer, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    for (i = 0; i < length - 3; i++) {
        /* memcmp replaces the previous four individual byte comparisons */
        if (memcmp(buffer + i, "koly", 4) == 0) {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}
|
|
|
|
|
2015-01-06 18:48:05 +01:00
|
|
|
/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * across calls */
    uint64_t data_fork_offset;      /* byte offset of the data fork; chunk
                                     * file offsets are relative to this */
    /* exported for dmg_open */
    uint32_t max_compressed_size;   /* largest compressed chunk seen, bytes */
    uint32_t max_sectors_per_chunk; /* largest uncompressed chunk, sectors */
} DmgHeaderState;
|
|
|
|
|
2015-01-06 18:48:13 +01:00
|
|
|
static bool dmg_is_known_block_type(uint32_t entry_type)
|
|
|
|
{
|
|
|
|
switch (entry_type) {
|
dmg: don't skip zero chunk
The dmg file has many tables which describe: "start from sector XXX to
sector XXX, the compression method is XXX and where the compressed data
resides on".
Each sector in the expanded file should be covered by a table. The table
will describe the offset of compressed data (or raw depends on the type)
in the dmg.
For example:
[-----------The expanded file------------]
[---bzip table ---]/* zeros */[---zlib---]
^
| if we want to read this sector.
we will find bzip table which contains this sector, and get the
compressed data offset, read it from dmg, uncompress it, finally write to
expanded file.
If we skip zero chunk (table), some sector cannot find the table which
will cause search_chunk() return s->n_chunks, dmg_read_chunk() return -1
and finally causing dmg_co_preadv() return EIO.
See:
[-----------The expanded file------------]
[---bzip table ---]/* zeros */[---zlib---]
^
| if we want to read this sector.
Oops, we cannot find the table contains it...
In the original implementation, we don't have zero table. When we try to
read sector inside the zero chunk. We will get EIO, and skip reading.
After this patch, we treat zero chunk the same as ignore chunk, it will
directly write zero and avoid some sector may not find the table.
After this patch:
[-----------The expanded file------------]
[---bzip table ---][--zeros--][---zlib---]
Signed-off-by: yuchenlin <npes87184@gmail.com>
Reviewed-by: Julio Faracco <jcfaracco@gmail.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20190103114700.9686-4-npes87184@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2019-01-03 12:47:00 +01:00
|
|
|
case UDZE: /* zeros */
|
2018-11-05 16:08:06 +01:00
|
|
|
case UDRW: /* uncompressed */
|
dmg: don't skip zero chunk
The dmg file has many tables which describe: "start from sector XXX to
sector XXX, the compression method is XXX and where the compressed data
resides on".
Each sector in the expanded file should be covered by a table. The table
will describe the offset of compressed data (or raw depends on the type)
in the dmg.
For example:
[-----------The expanded file------------]
[---bzip table ---]/* zeros */[---zlib---]
^
| if we want to read this sector.
we will find bzip table which contains this sector, and get the
compressed data offset, read it from dmg, uncompress it, finally write to
expanded file.
If we skip zero chunk (table), some sector cannot find the table which
will cause search_chunk() return s->n_chunks, dmg_read_chunk() return -1
and finally causing dmg_co_preadv() return EIO.
See:
[-----------The expanded file------------]
[---bzip table ---]/* zeros */[---zlib---]
^
| if we want to read this sector.
Oops, we cannot find the table contains it...
In the original implementation, we don't have zero table. When we try to
read sector inside the zero chunk. We will get EIO, and skip reading.
After this patch, we treat zero chunk the same as ignore chunk, it will
directly write zero and avoid some sector may not find the table.
After this patch:
[-----------The expanded file------------]
[---bzip table ---][--zeros--][---zlib---]
Signed-off-by: yuchenlin <npes87184@gmail.com>
Reviewed-by: Julio Faracco <jcfaracco@gmail.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20190103114700.9686-4-npes87184@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2019-01-03 12:47:00 +01:00
|
|
|
case UDIG: /* ignore */
|
2018-11-05 16:08:06 +01:00
|
|
|
case UDZO: /* zlib */
|
2015-01-06 18:48:13 +01:00
|
|
|
return true;
|
2018-11-05 16:08:06 +01:00
|
|
|
case UDBZ: /* bzip2 */
|
2016-09-05 04:50:45 +02:00
|
|
|
return !!dmg_uncompress_bz2;
|
2018-11-05 16:08:06 +01:00
|
|
|
case ULFO: /* lzfse */
|
2018-11-05 16:08:05 +01:00
|
|
|
return !!dmg_uncompress_lzfse;
|
2015-01-06 18:48:13 +01:00
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-06 18:48:07 +01:00
|
|
|
/* Parse one mish block (a BLKX chunk table) from 'buffer' of 'count' bytes
 * and append its chunk entries to the arrays in 's'.
 *
 * Unknown or unusable chunk types are warned about and skipped; data that
 * does not look like a mish block at all is silently ignored. Returns 0 on
 * success (including the ignore cases), -EINVAL if a chunk exceeds the
 * DMG_SECTORCOUNTS_MAX / DMG_LENGTHS_MAX limits. */
static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small)
     * 0x6d697368 is "mish" in ASCII */
    if (type != 0x6d697368 || count < 244) {
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to begin of chunk entries (each entry is 40 bytes) */
    offset += 204;

    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
    /* types[] holds uint32_t entries, hence half the size of the
     * uint64_t arrays below */
    s->types = g_realloc(s->types, new_size / 2);
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            switch (s->types[i]) {
            case UDBZ:
                warn_report_once("dmg-bzip2 module is missing, accessing bzip2 "
                                 "compressed blocks will result in I/O errors");
                break;
            case ULFO:
                warn_report_once("dmg-lzfse module is missing, accessing lzfse "
                                 "compressed blocks will result in I/O errors");
                break;
            case UDCM:
            case UDLE:
                /* Comments and last entry can be ignored without problems */
                break;
            default:
                warn_report_once("Image contains chunks of unknown type %x, "
                                 "accessing them will result in I/O errors",
                                 s->types[i]);
                break;
            }
            /* Drop this entry: shrink the number of entries kept and redo
             * index 'i' on the next iteration, but still advance 'offset'
             * past the skipped 40-byte table entry. The loop bound
             * 's->n_chunks + chunk_count' shrinks along with chunk_count. */
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* all-zeroes sector (type UDZE and UDIG) does not need to be
         * "uncompressed" and can therefore be unbounded. */
        if (s->types[i] != UDZE && s->types[i] != UDIG
            && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    return ret;
}
|
|
|
|
|
2015-01-06 18:48:06 +01:00
|
|
|
/* Parse the binary resource fork located at byte range
 * [info_begin, info_begin + info_length) of the image and feed every
 * resource it contains to dmg_read_mish_block().
 *
 * Returns 0 on success, a negative errno on read failure or if the fork
 * header describes data outside of 'info_length'. */
static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                                  uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read offset from begin of resource fork (info_begin) to resource data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* begin of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            /* reject resources that are empty or overrun the fork */
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        /* buffer is reused (and grown as needed) across iterations and
         * freed once at 'fail' */
        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file, offset, count, buffer, 0);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    /* reached on success as well; g_free(NULL) is a no-op */
    g_free(buffer);
    return ret;
}
|
|
|
|
|
2015-01-06 18:48:09 +01:00
|
|
|
/* Parse the XML property list located at byte range
 * [info_begin, info_begin + info_length) of the image: every
 * <data>...</data> element is base64-decoded and handed to
 * dmg_read_mish_block().
 *
 * Returns 0 on success, -EINVAL on read failure, implausible length,
 * or malformed XML. */
static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                              uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had a XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    /* +1 for the NUL terminator so strstr() below cannot run past the end */
    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file, info_begin, info_length, buffer, 0);
    if (ret < 0) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
     * decode. The actual data element has 431 (0x1af) bytes which includes tabs
     * and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;  /* skip over "<data>" */
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        /* NUL-terminate the element in place so g_base64_decode() sees only
         * this element's payload; the next strstr() resumes after it */
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    /* reached on success as well; g_free(NULL) is a no-op */
    g_free(buffer);
    return ret;
}
|
|
|
|
|
2015-01-06 18:48:06 +01:00
|
|
|
/*
 * Open a DMG image: locate the UDIF "koly" trailer at the end of the file,
 * validate the fork offsets it advertises, read the chunk tables from either
 * the resource fork or the XML property list, and allocate the scratch
 * buffers used for per-chunk decompression.
 *
 * Returns 0 on success or a negative errno value; on failure all partially
 * built chunk tables and buffers are released via the fail path.
 */
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    /* the driver is read-only; fail early unless read-only is acceptable */
    bdrv_graph_rdlock_main_loop();
    ret = bdrv_apply_auto_read_only(bs, NULL, errp);
    bdrv_graph_rdunlock_main_loop();
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
    if (ret < 0) {
        return ret;
    }

    /*
     * NB: if uncompress submodules are absent,
     * ie block_module_load return value == 0, the function pointers
     * dmg_uncompress_bz2 and dmg_uncompress_lzfse will be NULL.
     */
    if (block_module_load("dmg-bz2", errp) < 0) {
        return -EINVAL;
    }
    if (block_module_load("dmg-lzfse", errp) < 0) {
        return -EINVAL;
    }

    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    /* start at 1 so the buffer allocations below are never zero-sized */
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        /* the data fork must lie before the trailer */
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    /* the resource fork must fit entirely before the trailer */
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    /* the XML plist must also fit entirely before the trailer */
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* SectorCount: size of the expanded image in 512-byte sectors */
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    /* prefer the resource fork; fall back to the XML plist */
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        /* neither fork present: not a usable DMG */
        ret = -EINVAL;
        goto fail;
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    /* n_chunks is an invalid index: no chunk is cached yet */
    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}
|
|
|
|
|
2016-06-24 00:37:17 +02:00
|
|
|
/*
 * Advertise the driver's I/O limits: all chunk bookkeeping in this driver
 * is done in 512-byte sectors, so requests must be sector-aligned (the
 * block layer then emulates sub-sector access on top).
 */
static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}
|
|
|
|
|
2020-10-30 04:35:12 +01:00
|
|
|
/*
 * Tell whether @sector_num is covered by chunk @chunk_num.
 *
 * Returns a non-zero value when the sector lies inside the chunk and 0
 * otherwise (including when @chunk_num is out of range, e.g. the
 * "no chunk cached" sentinel s->n_chunks).
 */
static inline int is_sector_in_chunk(BDRVDMGState *s,
                                     uint32_t chunk_num, uint64_t sector_num)
{
    uint64_t first, count;

    if (chunk_num >= s->n_chunks) {
        return 0;
    }
    first = s->sectors[chunk_num];
    count = s->sectorcounts[chunk_num];
    if (sector_num < first || sector_num >= first + count) {
        return 0;
    }
    return -1;
}
|
|
|
|
|
2014-03-26 13:05:59 +01:00
|
|
|
/*
 * Find the chunk covering @sector_num via binary search over the sorted,
 * non-overlapping chunk table.
 *
 * Returns the chunk index, or s->n_chunks when no chunk covers the sector
 * (callers treat that value as an error).
 *
 * Fix: the previous implementation searched the closed interval
 * [chunk1, chunk2] with chunk2 initialized to s->n_chunks, so for a sector
 * past the last chunk the probe index could converge to s->n_chunks and
 * read one element past the end of s->sectors[] / s->sectorcounts[]
 * (and dereference the NULL tables when n_chunks == 0).  Searching the
 * half-open interval [lo, hi) keeps every probe strictly in range while
 * returning the same result for all covered sectors.
 */
static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
    /* binary search over [lo, hi); every probe satisfies mid < s->n_chunks */
    uint32_t lo = 0, hi = s->n_chunks;

    while (lo < hi) {
        uint32_t mid = lo + (hi - lo) / 2;    /* overflow-safe midpoint */

        if (s->sectors[mid] > sector_num) {
            hi = mid;                         /* sector precedes this chunk */
        } else if (s->sectors[mid] + s->sectorcounts[mid] > sector_num) {
            return mid;                       /* sector inside this chunk */
        } else {
            lo = mid + 1;                     /* sector follows this chunk */
        }
    }
    return s->n_chunks; /* error: no chunk covers the sector */
}
|
|
|
|
|
2023-06-01 13:51:40 +02:00
|
|
|
/*
 * Ensure the chunk containing @sector_num is loaded: locate it, read its
 * (possibly compressed) payload from bs->file, decompress it into
 * s->uncompressed_chunk, and record it as s->current_chunk.  Zero/ignore
 * chunks are not materialized; callers detect the chunk type and synthesize
 * zeroes themselves (see dmg_co_preadv).
 *
 * Returns 0 on success and -1 on failure (note: not a -errno value, except
 * when a decompress helper fails and its negative code is passed through;
 * the caller maps any non-zero result to -EIO).
 */
static int coroutine_fn GRAPH_RDLOCK
dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);

        /* no chunk covers this sector: treat as I/O error */
        if (chunk >= s->n_chunks) {
            return -1;
        }

        /* invalidate the cache while the buffer is being overwritten */
        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case UDZO: { /* zlib compressed */
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            /* the stream must end exactly at the chunk's sector count */
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
        case UDBZ: /* bzip2 compressed */
            /* NULL when the dmg-bz2 module is absent (see dmg_open); the
             * chunk is then left unfilled */
            if (!dmg_uncompress_bz2) {
                break;
            }
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            ret = dmg_uncompress_bz2((char *)s->compressed_chunk,
                                     (unsigned int) s->lengths[chunk],
                                     (char *)s->uncompressed_chunk,
                                     (unsigned int)
                                     (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case ULFO: /* lzfse compressed */
            /* NULL when the dmg-lzfse module is absent (see dmg_open) */
            if (!dmg_uncompress_lzfse) {
                break;
            }
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            ret = dmg_uncompress_lzfse((char *)s->compressed_chunk,
                                       (unsigned int) s->lengths[chunk],
                                       (char *)s->uncompressed_chunk,
                                       (unsigned int)
                                       (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case UDRW: /* copy */
            /* raw data: read straight into the uncompressed buffer */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->uncompressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }
            break;
        case UDZE: /* zeros */
        case UDIG: /* ignore */
            /* see dmg_read, it is treated specially. No buffer needs to be
             * pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}
|
|
|
|
|
2023-06-01 13:51:40 +02:00
|
|
|
/*
 * Read @bytes bytes at @offset into @qiov, one 512-byte sector at a time.
 * Each sector is served from the cached uncompressed chunk (loaded on
 * demand by dmg_read_chunk); zero/ignore chunks are synthesized with
 * qemu_iovec_memset() instead of copying from the chunk buffer.
 *
 * Returns 0 on success or -EIO when a sector cannot be mapped or read.
 */
static int coroutine_fn GRAPH_RDLOCK
dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
              QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    /* dmg_refresh_limits() guarantees sector-aligned requests */
    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));

    /* s->current_chunk and the chunk buffers are shared state; serialize */
    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == UDZE
            || s->types[s->current_chunk] == UDIG) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        /* copy the sector out of the cached chunk */
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
|
|
|
|
|
2004-12-12 12:24:44 +01:00
|
|
|
/*
 * Release everything dmg_open() allocated: the zlib stream, the aligned
 * chunk buffers and the per-chunk metadata tables.  All releases are
 * independent of one another, so the order is arbitrary.
 */
static void dmg_close(BlockDriverState *bs)
{
    BDRVDMGState *s = bs->opaque;

    /* zlib stream state */
    inflateEnd(&s->zstream);

    /* aligned scratch buffers from qemu_try_blockalign() */
    qemu_vfree(s->uncompressed_chunk);
    qemu_vfree(s->compressed_chunk);

    /* per-chunk metadata tables */
    g_free(s->sectorcounts);
    g_free(s->sectors);
    g_free(s->lengths);
    g_free(s->offsets);
    g_free(s->types);
}
|
|
|
|
|
2009-05-10 00:03:42 +02:00
|
|
|
/* Read-only block driver for Apple DMG (UDIF) disk images. */
static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_refresh_limits = dmg_refresh_limits,
    .bdrv_child_perm     = bdrv_default_perms,
    .bdrv_co_preadv = dmg_co_preadv,
    .bdrv_close     = dmg_close,
    /* no .bdrv_co_pwritev: the format is read-only */
    .is_format      = true,
};
|
2009-05-10 00:03:42 +02:00
|
|
|
|
|
|
|
/* Register the dmg driver with the block layer at startup. */
static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);
|