2013-06-05 14:19:31 +02:00
|
|
|
/*
|
|
|
|
* Command line utility to exercise the QEMU I/O path.
|
|
|
|
*
|
qemu-io: Allow unaligned access by default
There's no reason to require the user to specify a flag just so
they can pass in unaligned numbers. Keep 'read -p' and 'write -p'
as no-ops so that I don't have to hunt down and update all users
of qemu-io, but otherwise make their behavior default as 'read' and
'write'. Also fix 'write -z', 'readv', 'writev', 'writev',
'aio_read', 'aio_write', and 'aio_write -z'. For now, 'read -b',
'write -b', and 'write -c' still require alignment (and 'multiwrite',
but that's slated to die soon).
qemu-iotest 23 is updated to match, as the only test that was
previously explicitly expecting an error on an unaligned request.
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-id: 1462677405-4752-5-git-send-email-eblake@redhat.com
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
2016-05-08 05:16:43 +02:00
|
|
|
* Copyright (C) 2009-2016 Red Hat, Inc.
|
2013-06-05 14:19:31 +02:00
|
|
|
* Copyright (c) 2003-2005 Silicon Graphics, Inc.
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
|
|
|
* See the COPYING file in the top-level directory.
|
|
|
|
*/
|
|
|
|
|
2016-01-18 19:01:42 +01:00
|
|
|
#include "qemu/osdep.h"
|
include/qemu/osdep.h: Don't include qapi/error.h
Commit 57cb38b included qapi/error.h into qemu/osdep.h to get the
Error typedef. Since then, we've moved to include qemu/osdep.h
everywhere. Its file comment explains: "To avoid getting into
possible circular include dependencies, this file should not include
any other QEMU headers, with the exceptions of config-host.h,
compiler.h, os-posix.h and os-win32.h, all of which are doing a
similar job to this file and are under similar constraints."
qapi/error.h doesn't do a similar job, and it doesn't adhere to
similar constraints: it includes qapi-types.h. That's in excess of
100KiB of crap most .c files don't actually need.
Add the typedef to qemu/typedefs.h, and include that instead of
qapi/error.h. Include qapi/error.h in .c files that need it and don't
get it now. Include qapi-types.h in qom/object.h for uint16List.
Update scripts/clean-includes accordingly. Update it further to match
reality: replace config.h by config-target.h, add sysemu/os-posix.h,
sysemu/os-win32.h. Update the list of includes in the qemu/osdep.h
comment quoted above similarly.
This reduces the number of objects depending on qapi/error.h from "all
of them" to less than a third. Unfortunately, the number depending on
qapi-types.h shrinks only a little. More work is needed for that one.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
[Fix compilation without the spice devel packages. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2016-03-14 09:01:28 +01:00
|
|
|
#include "qapi/error.h"
|
2018-11-12 15:00:42 +01:00
|
|
|
#include "qapi/qmp/qdict.h"
|
2013-06-05 14:19:39 +02:00
|
|
|
#include "qemu-io.h"
|
2015-02-05 19:58:22 +01:00
|
|
|
#include "sysemu/block-backend.h"
|
|
|
|
#include "block/block.h"
|
|
|
|
#include "block/block_int.h" /* for info_f() */
|
2013-10-09 10:46:17 +02:00
|
|
|
#include "block/qapi.h"
|
2015-03-17 18:29:20 +01:00
|
|
|
#include "qemu/error-report.h"
|
2013-08-21 17:02:47 +02:00
|
|
|
#include "qemu/main-loop.h"
|
2018-02-01 12:18:46 +01:00
|
|
|
#include "qemu/option.h"
|
2014-01-15 15:39:10 +01:00
|
|
|
#include "qemu/timer.h"
|
2016-03-20 18:16:19 +01:00
|
|
|
#include "qemu/cutils.h"
|
2013-06-05 14:19:31 +02:00
|
|
|
|
|
|
|
#define CMD_NOFILE_OK 0x01
|
|
|
|
|
2014-03-05 22:23:00 +01:00
|
|
|
bool qemuio_misalign;
|
2013-06-05 14:19:31 +02:00
|
|
|
|
2013-06-05 14:19:36 +02:00
|
|
|
static cmdinfo_t *cmdtab;
|
|
|
|
static int ncmds;
|
|
|
|
|
|
|
|
static int compare_cmdname(const void *a, const void *b)
|
|
|
|
{
|
|
|
|
return strcmp(((const cmdinfo_t *)a)->name,
|
|
|
|
((const cmdinfo_t *)b)->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
void qemuio_add_command(const cmdinfo_t *ci)
|
|
|
|
{
|
2017-03-31 15:38:49 +02:00
|
|
|
/* ci->perm assumes a file is open, but the GLOBAL and NOFILE_OK
|
|
|
|
* flags allow it not to be, so that combination is invalid.
|
|
|
|
* Catch it now rather than letting it manifest as a crash if a
|
|
|
|
* particular set of command line options are used.
|
|
|
|
*/
|
|
|
|
assert(ci->perm == 0 ||
|
|
|
|
(ci->flags & (CMD_FLAG_GLOBAL | CMD_NOFILE_OK)) == 0);
|
2014-08-19 10:31:09 +02:00
|
|
|
cmdtab = g_renew(cmdinfo_t, cmdtab, ++ncmds);
|
2013-06-05 14:19:36 +02:00
|
|
|
cmdtab[ncmds - 1] = *ci;
|
|
|
|
qsort(cmdtab, ncmds, sizeof(*cmdtab), compare_cmdname);
|
|
|
|
}
|
|
|
|
|
2018-05-09 21:42:58 +02:00
|
|
|
void qemuio_command_usage(const cmdinfo_t *ci)
|
2013-06-05 14:19:36 +02:00
|
|
|
{
|
|
|
|
printf("%s %s -- %s\n", ci->name, ci->args, ci->oneline);
|
|
|
|
}
|
|
|
|
|
2015-02-05 19:58:22 +01:00
|
|
|
/*
 * Check whether @ct may be executed against @blk.  Returns 1 when the
 * command can run, 0 (after printing an error) when it needs an open
 * file but none is available.
 */
static int init_check_command(BlockBackend *blk, const cmdinfo_t *ct)
{
    /* Global commands never need an open image. */
    if (ct->flags & CMD_FLAG_GLOBAL) {
        return 1;
    }

    if (!blk && !(ct->flags & CMD_NOFILE_OK)) {
        fprintf(stderr, "no file open, try 'help open'\n");
        return 0;
    }

    return 1;
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
/*
 * Dispatch one parsed command line to its handler.
 *
 * Validates that the command may run, that the argument count is within
 * [argmin, argmax] (argmax == -1 means unbounded), and that the block
 * backend carries the permissions the command declared via ct->perm.
 * Returns the handler's result, or a negative errno on validation failure.
 */
static int command(BlockBackend *blk, const cmdinfo_t *ct, int argc,
                   char **argv)
{
    char *cmd = argv[0];
    int nargs = argc - 1;

    if (!init_check_command(blk, ct)) {
        return -EINVAL;
    }

    if (nargs < ct->argmin || (ct->argmax != -1 && nargs > ct->argmax)) {
        if (ct->argmax == -1) {
            fprintf(stderr,
                "bad argument count %d to %s, expected at least %d arguments\n",
                nargs, cmd, ct->argmin);
        } else if (ct->argmin == ct->argmax) {
            fprintf(stderr,
                "bad argument count %d to %s, expected %d arguments\n",
                nargs, cmd, ct->argmin);
        } else {
            fprintf(stderr,
                "bad argument count %d to %s, expected between %d and %d arguments\n",
                nargs, cmd, ct->argmin, ct->argmax);
        }
        return -EINVAL;
    }

    /* Request additional permissions if necessary for this command. The caller
     * is responsible for restoring the original permissions afterwards if this
     * is what it wants. */
    if (ct->perm && blk_is_available(blk)) {
        uint64_t orig_perm, orig_shared_perm;

        blk_get_perm(blk, &orig_perm, &orig_shared_perm);

        if (ct->perm & ~orig_perm) {
            Error *local_err = NULL;
            int ret = blk_set_perm(blk, orig_perm | ct->perm,
                                   orig_shared_perm, &local_err);

            if (ret < 0) {
                error_report_err(local_err);
                return ret;
            }
        }
    }

    /* Each handler re-runs getopt(); reset its state portably first. */
    qemu_reset_optind();
    return ct->cfunc(blk, argc, argv);
}
|
|
|
|
|
|
|
|
static const cmdinfo_t *find_command(const char *cmd)
|
|
|
|
{
|
|
|
|
cmdinfo_t *ct;
|
|
|
|
|
|
|
|
for (ct = cmdtab; ct < &cmdtab[ncmds]; ct++) {
|
|
|
|
if (strcmp(ct->name, cmd) == 0 ||
|
|
|
|
(ct->altname && strcmp(ct->altname, cmd) == 0))
|
|
|
|
{
|
|
|
|
return (const cmdinfo_t *)ct;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2013-11-14 11:54:18 +01:00
|
|
|
/* Invoke fn() for commands with a matching prefix */
|
|
|
|
void qemuio_complete_command(const char *input,
|
|
|
|
void (*fn)(const char *cmd, void *opaque),
|
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
cmdinfo_t *ct;
|
|
|
|
size_t input_len = strlen(input);
|
|
|
|
|
|
|
|
for (ct = cmdtab; ct < &cmdtab[ncmds]; ct++) {
|
|
|
|
if (strncmp(input, ct->name, input_len) == 0) {
|
|
|
|
fn(ct->name, opaque);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-05 14:19:36 +02:00
|
|
|
static char **breakline(char *input, int *count)
|
|
|
|
{
|
|
|
|
int c = 0;
|
|
|
|
char *p;
|
block: Use g_new() & friends where that makes obvious sense
g_new(T, n) is neater than g_malloc(sizeof(T) * n). It's also safer,
for two reasons. One, it catches multiplication overflowing size_t.
Two, it returns T * rather than void *, which lets the compiler catch
more type errors.
Patch created with Coccinelle, with two manual changes on top:
* Add const to bdrv_iterate_format() to keep the types straight
* Convert the allocation in bdrv_drop_intermediate(), which Coccinelle
inexplicably misses
Coccinelle semantic patch:
@@
type T;
@@
-g_malloc(sizeof(T))
+g_new(T, 1)
@@
type T;
@@
-g_try_malloc(sizeof(T))
+g_try_new(T, 1)
@@
type T;
@@
-g_malloc0(sizeof(T))
+g_new0(T, 1)
@@
type T;
@@
-g_try_malloc0(sizeof(T))
+g_try_new0(T, 1)
@@
type T;
expression n;
@@
-g_malloc(sizeof(T) * (n))
+g_new(T, n)
@@
type T;
expression n;
@@
-g_try_malloc(sizeof(T) * (n))
+g_try_new(T, n)
@@
type T;
expression n;
@@
-g_malloc0(sizeof(T) * (n))
+g_new0(T, n)
@@
type T;
expression n;
@@
-g_try_malloc0(sizeof(T) * (n))
+g_try_new0(T, n)
@@
type T;
expression p, n;
@@
-g_realloc(p, sizeof(T) * (n))
+g_renew(T, p, n)
@@
type T;
expression p, n;
@@
-g_try_realloc(p, sizeof(T) * (n))
+g_try_renew(T, p, n)
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2014-08-19 10:31:08 +02:00
|
|
|
char **rval = g_new0(char *, 1);
|
2013-06-05 14:19:36 +02:00
|
|
|
|
|
|
|
while (rval && (p = qemu_strsep(&input, " ")) != NULL) {
|
|
|
|
if (!*p) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
c++;
|
2014-08-19 10:31:10 +02:00
|
|
|
rval = g_renew(char *, rval, (c + 1));
|
2013-06-05 14:19:36 +02:00
|
|
|
rval[c - 1] = p;
|
|
|
|
rval[c] = NULL;
|
|
|
|
}
|
|
|
|
*count = c;
|
|
|
|
return rval;
|
|
|
|
}
|
|
|
|
|
2013-06-05 14:19:31 +02:00
|
|
|
static int64_t cvtnum(const char *s)
|
|
|
|
{
|
2017-02-21 21:14:06 +01:00
|
|
|
int err;
|
2017-02-21 21:14:07 +01:00
|
|
|
uint64_t value;
|
2015-11-06 00:53:03 +01:00
|
|
|
|
2017-02-21 21:14:06 +01:00
|
|
|
err = qemu_strtosz(s, NULL, &value);
|
|
|
|
if (err < 0) {
|
|
|
|
return err;
|
|
|
|
}
|
2017-02-21 21:14:07 +01:00
|
|
|
if (value > INT64_MAX) {
|
|
|
|
return -ERANGE;
|
|
|
|
}
|
2017-02-21 21:14:06 +01:00
|
|
|
return value;
|
2013-06-05 14:19:31 +02:00
|
|
|
}
|
|
|
|
|
2015-11-06 00:53:04 +01:00
|
|
|
/* Explain a negative cvtnum() result @rc for the offending argument @arg. */
static void print_cvtnum_err(int64_t rc, const char *arg)
{
    if (rc == -EINVAL) {
        printf("Parsing error: non-numeric argument,"
               " or extraneous/unrecognized suffix -- %s\n", arg);
    } else if (rc == -ERANGE) {
        printf("Parsing error: argument too large -- %s\n", arg);
    } else {
        printf("Parsing error: %s\n", arg);
    }
}
|
|
|
|
|
2013-06-05 14:19:38 +02:00
|
|
|
#define EXABYTES(x)     ((long long)(x) << 60)
#define PETABYTES(x)    ((long long)(x) << 50)
#define TERABYTES(x)    ((long long)(x) << 40)
#define GIGABYTES(x)    ((long long)(x) << 30)
#define MEGABYTES(x)    ((long long)(x) << 20)
#define KILOBYTES(x)    ((long long)(x) << 10)

#define TO_EXABYTES(x)  ((x) / EXABYTES(1))
#define TO_PETABYTES(x) ((x) / PETABYTES(1))
#define TO_TERABYTES(x) ((x) / TERABYTES(1))
#define TO_GIGABYTES(x) ((x) / GIGABYTES(1))
#define TO_MEGABYTES(x) ((x) / MEGABYTES(1))
#define TO_KILOBYTES(x) ((x) / KILOBYTES(1))

/*
 * Render @value (a byte count) into @str as a human-readable quantity
 * with a binary-unit suffix, e.g. "2 KiB" or "1.500 MiB".  A trailing
 * ".000" fraction is trimmed before the suffix is appended.
 */
static void cvtstr(double value, char *str, size_t size)
{
    static const struct {
        double scale;
        const char *suffix;
    } units[] = {
        { EXABYTES(1),  " EiB" },
        { PETABYTES(1), " PiB" },
        { TERABYTES(1), " TiB" },
        { GIGABYTES(1), " GiB" },
        { MEGABYTES(1), " MiB" },
        { KILOBYTES(1), " KiB" },
    };
    const char *suffix = NULL;
    char *trim;
    size_t i;

    for (i = 0; i < sizeof(units) / sizeof(units[0]); i++) {
        if (value >= units[i].scale) {
            suffix = units[i].suffix;
            /* Reserve room for the 4-character unit suffix. */
            snprintf(str, size - 4, "%.3f", value / units[i].scale);
            break;
        }
    }
    if (!suffix) {
        suffix = " bytes";
        snprintf(str, size - 6, "%f", value);
    }

    trim = strstr(str, ".000");
    if (trim) {
        strcpy(trim, suffix);   /* drop the all-zero fraction */
    } else {
        strcat(str, suffix);
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2019-05-29 18:16:32 +02:00
|
|
|
static struct timespec tsub(struct timespec t1, struct timespec t2)
|
2013-06-05 14:19:38 +02:00
|
|
|
{
|
2019-05-29 18:16:32 +02:00
|
|
|
t1.tv_nsec -= t2.tv_nsec;
|
|
|
|
if (t1.tv_nsec < 0) {
|
|
|
|
t1.tv_nsec += NANOSECONDS_PER_SECOND;
|
2013-06-05 14:19:38 +02:00
|
|
|
t1.tv_sec--;
|
|
|
|
}
|
|
|
|
t1.tv_sec -= t2.tv_sec;
|
|
|
|
return t1;
|
|
|
|
}
|
|
|
|
|
2019-05-29 18:16:32 +02:00
|
|
|
/* Divide @value by the duration @tv expressed in seconds (rate helper). */
static double tdiv(double value, struct timespec tv)
{
    return value / (tv.tv_sec + tv.tv_nsec / 1e9);
}
|
|
|
|
|
|
|
|
#define HOURS(sec)      ((sec) / (60 * 60))
#define MINUTES(sec)    (((sec) % (60 * 60)) / 60)
#define SECONDS(sec)    ((sec) % 60)

enum {
    DEFAULT_TIME        = 0x0,
    TERSE_FIXED_TIME    = 0x1,
    VERBOSE_FIXED_TIME  = 0x2,
};

/*
 * Format the duration *tv into @ts.  TERSE_FIXED_TIME yields "M:SS.ss"
 * unless hours are needed, in which case it falls back to the verbose
 * "H:MM:SS.ss" form; with no flags, sub-second durations print as
 * "SS.ss sec".
 */
static void timestr(struct timespec *tv, char *ts, size_t size, int format)
{
    double frac_sec = tv->tv_nsec / 1e9;

    if (format & TERSE_FIXED_TIME) {
        if (!HOURS(tv->tv_sec)) {
            snprintf(ts, size, "%u:%05.2f",
                     (unsigned int) MINUTES(tv->tv_sec),
                     SECONDS(tv->tv_sec) + frac_sec);
            return;
        }
        format |= VERBOSE_FIXED_TIME; /* fallback if hours needed */
    }

    if ((format & VERBOSE_FIXED_TIME) || tv->tv_sec) {
        snprintf(ts, size, "%u:%02u:%05.2f",
                 (unsigned int) HOURS(tv->tv_sec),
                 (unsigned int) MINUTES(tv->tv_sec),
                 SECONDS(tv->tv_sec) + frac_sec);
    } else {
        snprintf(ts, size, "%05.2f sec", frac_sec);
    }
}
|
|
|
|
|
2013-06-05 14:19:31 +02:00
|
|
|
/*
 * Parse the pattern argument to various sub-commands.
 *
 * Because the pattern is used as an argument to memset it must evaluate
 * to an unsigned integer that fits into a single byte.
 */
static int parse_pattern(const char *arg)
{
    char *endptr = NULL;
    long pattern;

    pattern = strtol(arg, &endptr, 0);
    /* Reject negatives, values above 0xff, trailing garbage, and inputs
     * with no digits at all (for an empty string strtol() returns 0 and
     * leaves endptr == arg, which the *endptr check alone would accept). */
    if (pattern < 0 || pattern > UCHAR_MAX || endptr == arg ||
        *endptr != '\0') {
        printf("%s is not a valid pattern byte\n", arg);
        return -1;
    }

    return pattern;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Memory allocation helpers.
|
|
|
|
*
|
|
|
|
* Make sure memory is aligned by default, or purposefully misaligned if
|
|
|
|
* that is specified on the command line.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define MISALIGN_OFFSET 16
|
2015-02-05 19:58:22 +01:00
|
|
|
static void *qemu_io_alloc(BlockBackend *blk, size_t len, int pattern)
|
2013-06-05 14:19:31 +02:00
|
|
|
{
|
|
|
|
void *buf;
|
|
|
|
|
|
|
|
if (qemuio_misalign) {
|
|
|
|
len += MISALIGN_OFFSET;
|
|
|
|
}
|
2015-02-05 19:58:22 +01:00
|
|
|
buf = blk_blockalign(blk, len);
|
2013-06-05 14:19:31 +02:00
|
|
|
memset(buf, pattern, len);
|
|
|
|
if (qemuio_misalign) {
|
|
|
|
buf += MISALIGN_OFFSET;
|
|
|
|
}
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void qemu_io_free(void *p)
|
|
|
|
{
|
|
|
|
if (qemuio_misalign) {
|
|
|
|
p -= MISALIGN_OFFSET;
|
|
|
|
}
|
|
|
|
qemu_vfree(p);
|
|
|
|
}
|
|
|
|
|
2019-08-20 18:46:16 +02:00
|
|
|
/*
|
|
|
|
* qemu_io_alloc_from_file()
|
|
|
|
*
|
|
|
|
* Allocates the buffer and populates it with the content of the given file
|
|
|
|
* up to @len bytes. If the file length is less than @len, then the buffer
|
|
|
|
* is populated with the file content cyclically.
|
|
|
|
*
|
|
|
|
* @blk - the block backend where the buffer content is going to be written to
|
|
|
|
* @len - the buffer length
|
|
|
|
* @file_name - the file to read the content from
|
|
|
|
*
|
|
|
|
* Returns: the buffer pointer on success
|
|
|
|
* NULL on error
|
|
|
|
*/
|
|
|
|
static void *qemu_io_alloc_from_file(BlockBackend *blk, size_t len,
|
|
|
|
const char *file_name)
|
|
|
|
{
|
|
|
|
char *buf, *buf_origin;
|
|
|
|
FILE *f = fopen(file_name, "r");
|
|
|
|
int pattern_len;
|
|
|
|
|
|
|
|
if (!f) {
|
|
|
|
perror(file_name);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qemuio_misalign) {
|
|
|
|
len += MISALIGN_OFFSET;
|
|
|
|
}
|
|
|
|
|
|
|
|
buf_origin = buf = blk_blockalign(blk, len);
|
|
|
|
|
|
|
|
if (qemuio_misalign) {
|
|
|
|
buf_origin += MISALIGN_OFFSET;
|
|
|
|
buf += MISALIGN_OFFSET;
|
|
|
|
len -= MISALIGN_OFFSET;
|
|
|
|
}
|
|
|
|
|
|
|
|
pattern_len = fread(buf_origin, 1, len, f);
|
|
|
|
|
|
|
|
if (ferror(f)) {
|
|
|
|
perror(file_name);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (pattern_len == 0) {
|
|
|
|
fprintf(stderr, "%s: file is empty\n", file_name);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
fclose(f);
|
2019-09-10 09:03:06 +02:00
|
|
|
f = NULL;
|
2019-08-20 18:46:16 +02:00
|
|
|
|
|
|
|
if (len > pattern_len) {
|
|
|
|
len -= pattern_len;
|
|
|
|
buf += pattern_len;
|
|
|
|
|
|
|
|
while (len > 0) {
|
|
|
|
size_t len_to_copy = MIN(pattern_len, len);
|
|
|
|
|
|
|
|
memcpy(buf, buf_origin, len_to_copy);
|
|
|
|
|
|
|
|
len -= len_to_copy;
|
|
|
|
buf += len_to_copy;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return buf_origin;
|
|
|
|
|
|
|
|
error:
|
|
|
|
qemu_io_free(buf_origin);
|
2019-09-10 09:03:06 +02:00
|
|
|
if (f) {
|
|
|
|
fclose(f);
|
|
|
|
}
|
2019-08-20 18:46:16 +02:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-11-06 00:53:02 +01:00
|
|
|
/*
 * Hex-dump @len bytes of @buffer to stdout, 16 per row, each row
 * prefixed with its absolute @offset and followed by a printable-ASCII
 * column (non-alphanumerics shown as '.').
 */
static void dump_buffer(const void *buffer, int64_t offset, int64_t len)
{
    const uint8_t *p = buffer;
    uint64_t i;
    int col;

    for (i = 0; i < len; i += 16) {
        const uint8_t *row_start = p;

        printf("%08" PRIx64 ": ", offset + i);
        for (col = 0; col < 16 && i + col < len; col++, p++) {
            printf("%02x ", *p);
        }
        printf(" ");
        for (col = 0; col < 16 && i + col < len; col++, row_start++) {
            printf("%c", isalnum(*row_start) ? *row_start : '.');
        }
        printf("\n");
    }
}
|
|
|
|
|
2019-05-29 18:16:32 +02:00
|
|
|
static void print_report(const char *op, struct timespec *t, int64_t offset,
|
2016-05-08 05:16:42 +02:00
|
|
|
int64_t count, int64_t total, int cnt, bool Cflag)
|
2013-06-05 14:19:31 +02:00
|
|
|
{
|
|
|
|
char s1[64], s2[64], ts[64];
|
|
|
|
|
|
|
|
timestr(t, ts, sizeof(ts), Cflag ? VERBOSE_FIXED_TIME : 0);
|
|
|
|
if (!Cflag) {
|
|
|
|
cvtstr((double)total, s1, sizeof(s1));
|
|
|
|
cvtstr(tdiv((double)total, *t), s2, sizeof(s2));
|
2015-11-06 00:53:02 +01:00
|
|
|
printf("%s %"PRId64"/%"PRId64" bytes at offset %" PRId64 "\n",
|
2013-06-05 14:19:31 +02:00
|
|
|
op, total, count, offset);
|
|
|
|
printf("%s, %d ops; %s (%s/sec and %.4f ops/sec)\n",
|
|
|
|
s1, cnt, ts, s2, tdiv((double)cnt, *t));
|
|
|
|
} else {/* bytes,ops,time,bytes/sec,ops/sec */
|
2015-11-06 00:53:02 +01:00
|
|
|
printf("%"PRId64",%d,%s,%.3f,%.3f\n",
|
2013-06-05 14:19:31 +02:00
|
|
|
total, cnt, ts,
|
|
|
|
tdiv((double)total, *t),
|
|
|
|
tdiv((double)cnt, *t));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Parse multiple length statements for vectored I/O, and construct an I/O
|
|
|
|
* vector matching it.
|
|
|
|
*/
|
|
|
|
static void *
|
2015-02-05 19:58:22 +01:00
|
|
|
create_iovec(BlockBackend *blk, QEMUIOVector *qiov, char **argv, int nr_iov,
|
2013-06-05 14:19:31 +02:00
|
|
|
int pattern)
|
|
|
|
{
|
|
|
|
size_t *sizes = g_new0(size_t, nr_iov);
|
|
|
|
size_t count = 0;
|
|
|
|
void *buf = NULL;
|
|
|
|
void *p;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < nr_iov; i++) {
|
|
|
|
char *arg = argv[i];
|
|
|
|
int64_t len;
|
|
|
|
|
|
|
|
len = cvtnum(arg);
|
|
|
|
if (len < 0) {
|
2015-11-06 00:53:04 +01:00
|
|
|
print_cvtnum_err(len, arg);
|
2013-06-05 14:19:31 +02:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2017-01-31 17:09:54 +01:00
|
|
|
if (len > BDRV_REQUEST_MAX_BYTES) {
|
|
|
|
printf("Argument '%s' exceeds maximum size %" PRIu64 "\n", arg,
|
|
|
|
(uint64_t)BDRV_REQUEST_MAX_BYTES);
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (count > BDRV_REQUEST_MAX_BYTES - len) {
|
|
|
|
printf("The total number of bytes exceed the maximum size %" PRIu64
|
|
|
|
"\n", (uint64_t)BDRV_REQUEST_MAX_BYTES);
|
2013-06-05 14:19:31 +02:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
sizes[i] = len;
|
|
|
|
count += len;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemu_iovec_init(qiov, nr_iov);
|
|
|
|
|
2015-02-05 19:58:22 +01:00
|
|
|
buf = p = qemu_io_alloc(blk, count, pattern);
|
2013-06-05 14:19:31 +02:00
|
|
|
|
|
|
|
for (i = 0; i < nr_iov; i++) {
|
|
|
|
qemu_iovec_add(qiov, p, sizes[i]);
|
|
|
|
p += sizes[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
fail:
|
|
|
|
g_free(sizes);
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
2015-11-06 00:53:02 +01:00
|
|
|
/*
 * Synchronous read wrapper: reads @bytes at @offset into @buf, storing
 * the byte count (or negative errno) in *total.  Returns 1 on success,
 * the negative error otherwise.
 */
static int do_pread(BlockBackend *blk, char *buf, int64_t offset,
                    int64_t bytes, int64_t *total)
{
    if (bytes > INT_MAX) {
        return -ERANGE;
    }

    *total = blk_pread(blk, offset, (uint8_t *)buf, bytes);
    return *total < 0 ? *total : 1;
}
|
|
|
|
|
2015-11-06 00:53:02 +01:00
|
|
|
/*
 * Synchronous write wrapper: writes @bytes from @buf at @offset with the
 * given request @flags, storing the byte count (or negative errno) in
 * *total.  Returns 1 on success, the negative error otherwise.
 */
static int do_pwrite(BlockBackend *blk, char *buf, int64_t offset,
                     int64_t bytes, int flags, int64_t *total)
{
    if (bytes > INT_MAX) {
        return -ERANGE;
    }

    *total = blk_pwrite(blk, offset, (uint8_t *)buf, bytes, flags);
    return *total < 0 ? *total : 1;
}
|
|
|
|
|
|
|
|
typedef struct {
|
2015-02-05 19:58:22 +01:00
|
|
|
BlockBackend *blk;
|
2013-06-05 14:19:31 +02:00
|
|
|
int64_t offset;
|
2017-06-09 12:18:08 +02:00
|
|
|
int64_t bytes;
|
2015-11-06 00:53:02 +01:00
|
|
|
int64_t *total;
|
2016-05-08 05:16:44 +02:00
|
|
|
int flags;
|
2013-06-05 14:19:31 +02:00
|
|
|
int ret;
|
|
|
|
bool done;
|
|
|
|
} CoWriteZeroes;
|
|
|
|
|
2016-05-25 00:25:20 +02:00
|
|
|
/* Coroutine body: perform the write-zeroes request described by @opaque. */
static void coroutine_fn co_pwrite_zeroes_entry(void *opaque)
{
    CoWriteZeroes *data = opaque;

    data->ret = blk_co_pwrite_zeroes(data->blk, data->offset, data->bytes,
                                     data->flags);
    data->done = true;      /* let the polling loop in the caller exit */
    /* On success report the full length; on failure propagate the errno. */
    *data->total = data->ret < 0 ? data->ret : data->bytes;
}
|
|
|
|
|
2016-05-25 00:25:20 +02:00
|
|
|
/*
 * Write zeroes via a coroutine and block until it completes.  Stores the
 * byte count (or negative errno) in *total; returns 1 on success or the
 * negative error code.
 */
static int do_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                               int64_t bytes, int flags, int64_t *total)
{
    Coroutine *co;
    CoWriteZeroes data = {
        .blk    = blk,
        .offset = offset,
        .bytes  = bytes,
        .total  = total,
        .flags  = flags,
        .done   = false,
    };

    if (bytes > INT_MAX) {
        return -ERANGE;
    }

    co = qemu_coroutine_create(co_pwrite_zeroes_entry, &data);
    bdrv_coroutine_enter(blk_bs(blk), co);
    /* Drive the AioContext until the coroutine signals completion. */
    while (!data.done) {
        aio_poll(blk_get_aio_context(blk), true);
    }
    return data.ret < 0 ? data.ret : 1;
}
|
|
|
|
|
2015-02-05 19:58:22 +01:00
|
|
|
/*
 * Compressed-write wrapper: writes @bytes from @buf at @offset, storing
 * the byte count in *total on success.  Returns 1 on success, a negative
 * errno otherwise.
 */
static int do_write_compressed(BlockBackend *blk, char *buf, int64_t offset,
                               int64_t bytes, int64_t *total)
{
    int ret;

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -ERANGE;
    }

    ret = blk_pwrite_compressed(blk, offset, buf, bytes);
    if (ret < 0) {
        return ret;
    }
    *total = bytes;
    return 1;
}
|
|
|
|
|
2015-02-05 19:58:22 +01:00
|
|
|
/*
 * Read @count bytes of VM state at @offset into @buf.  Stores the byte
 * count (or negative errno) in *total; returns 1 on success.
 */
static int do_load_vmstate(BlockBackend *blk, char *buf, int64_t offset,
                           int64_t count, int64_t *total)
{
    if (count > INT_MAX) {
        return -ERANGE;
    }

    *total = blk_load_vmstate(blk, (uint8_t *)buf, offset, count);
    return *total < 0 ? *total : 1;
}
|
|
|
|
|
2015-02-05 19:58:22 +01:00
|
|
|
/*
 * Write @count bytes of VM state from @buf at @offset.  Stores the byte
 * count (or negative errno) in *total; returns 1 on success.
 */
static int do_save_vmstate(BlockBackend *blk, char *buf, int64_t offset,
                           int64_t count, int64_t *total)
{
    if (count > INT_MAX) {
        return -ERANGE;
    }

    *total = blk_save_vmstate(blk, (uint8_t *)buf, offset, count);
    return *total < 0 ? *total : 1;
}
|
|
|
|
|
|
|
|
/* Sentinel meaning "AIO request still in flight" (INT_MAX; never a valid
 * completion code, which is 0 or a negative errno). */
#define NOT_DONE 0x7fffffff

/* AIO completion callback: publish the request's return code in *opaque. */
static void aio_rw_done(void *opaque, int ret)
{
    int *result = opaque;

    *result = ret;
}
|
|
|
|
|
2015-02-05 19:58:22 +01:00
|
|
|
/*
 * Issue an asynchronous vectored read and spin the main loop until it
 * completes.  Stores the request size in *total; returns 1 on success or
 * the negative completion code.
 */
static int do_aio_readv(BlockBackend *blk, QEMUIOVector *qiov,
                        int64_t offset, int *total)
{
    int status = NOT_DONE;

    blk_aio_preadv(blk, offset, qiov, 0, aio_rw_done, &status);
    while (status == NOT_DONE) {
        main_loop_wait(false);  /* pump events until the callback fires */
    }

    *total = qiov->size;
    return status < 0 ? status : 1;
}
|
|
|
|
|
2015-02-05 19:58:22 +01:00
|
|
|
/*
 * Issue an asynchronous vectored write with @flags and spin the main
 * loop until it completes.  Stores the request size in *total; returns 1
 * on success or the negative completion code.
 */
static int do_aio_writev(BlockBackend *blk, QEMUIOVector *qiov,
                         int64_t offset, int flags, int *total)
{
    int status = NOT_DONE;

    blk_aio_pwritev(blk, offset, qiov, flags, aio_rw_done, &status);
    while (status == NOT_DONE) {
        main_loop_wait(false);  /* pump events until the callback fires */
    }

    *total = qiov->size;
    return status < 0 ? status : 1;
}
|
|
|
|
|
|
|
|
/* Print the interactive help text for the 'read' command. */
static void read_help(void)
{
    printf(
"\n"
" reads a range of bytes from the given offset\n"
"\n"
" Example:\n"
" 'read -v 512 1k' - dumps 1 kilobyte read from 512 bytes into the file\n"
"\n"
" Reads a segment of the currently open file, optionally dumping it to the\n"
" standard output stream (with -v option) for subsequent inspection.\n"
" -b, -- read from the VM state rather than the virtual disk\n"
" -C, -- report statistics in a machine parsable format\n"
" -l, -- length for pattern verification (only with -P)\n"
" -p, -- ignored for backwards compatibility\n"
" -P, -- use a pattern to verify read data\n"
" -q, -- quiet mode, do not show I/O statistics\n"
" -s, -- start offset for pattern verification (only with -P)\n"
" -v, -- dump buffer to standard output\n"
"\n");
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int read_f(BlockBackend *blk, int argc, char **argv);
|
2013-06-05 14:19:31 +02:00
|
|
|
|
|
|
|
static const cmdinfo_t read_cmd = {
|
|
|
|
.name = "read",
|
|
|
|
.altname = "r",
|
|
|
|
.cfunc = read_f,
|
|
|
|
.argmin = 2,
|
|
|
|
.argmax = -1,
|
qemu-io: Allow unaligned access by default
There's no reason to require the user to specify a flag just so
they can pass in unaligned numbers. Keep 'read -p' and 'write -p'
as no-ops so that I don't have to hunt down and update all users
of qemu-io, but otherwise make their behavior default as 'read' and
'write'. Also fix 'write -z', 'readv', 'writev', 'writev',
'aio_read', 'aio_write', and 'aio_write -z'. For now, 'read -b',
'write -b', and 'write -c' still require alignment (and 'multiwrite',
but that's slated to die soon).
qemu-iotest 23 is updated to match, as the only test that was
previously explicitly expecting an error on an unaligned request.
Signed-off-by: Eric Blake <eblake@redhat.com>
Message-id: 1462677405-4752-5-git-send-email-eblake@redhat.com
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
2016-05-08 05:16:43 +02:00
|
|
|
.args = "[-abCqv] [-P pattern [-s off] [-l len]] off len",
|
2013-06-05 14:19:31 +02:00
|
|
|
.oneline = "reads a number of bytes at a specified offset",
|
|
|
|
.help = read_help,
|
|
|
|
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
/*
 * 'read' command: read 'len' bytes at 'off' from the open image (or, with
 * -b, from the VM state area), optionally verifying a fill pattern (-P,
 * restricted by -s/-l) and/or dumping the data (-v).
 *
 * Returns 0 on success, a negative errno on failure (errors are also
 * reported on stdout).
 */
static int read_f(BlockBackend *blk, int argc, char **argv)
{
    struct timespec t1, t2;
    bool Cflag = false, qflag = false, vflag = false;
    bool Pflag = false, sflag = false, lflag = false, bflag = false;
    int c, cnt, ret;
    char *buf;
    int64_t offset;
    int64_t count;
    /* Some compilers get confused and warn if this is not initialized.  */
    int64_t total = 0;
    int pattern = 0;
    int64_t pattern_offset = 0, pattern_count = 0;

    while ((c = getopt(argc, argv, "bCl:pP:qs:v")) != -1) {
        switch (c) {
        case 'b':
            bflag = true;
            break;
        case 'C':
            Cflag = true;
            break;
        case 'l':
            /* -l: limit pattern verification to this many bytes */
            lflag = true;
            pattern_count = cvtnum(optarg);
            if (pattern_count < 0) {
                print_cvtnum_err(pattern_count, optarg);
                return pattern_count;
            }
            break;
        case 'p':
            /* Ignored for backwards compatibility */
            break;
        case 'P':
            Pflag = true;
            pattern = parse_pattern(optarg);
            if (pattern < 0) {
                return -EINVAL;
            }
            break;
        case 'q':
            qflag = true;
            break;
        case 's':
            /* -s: start pattern verification at this offset into the read */
            sflag = true;
            pattern_offset = cvtnum(optarg);
            if (pattern_offset < 0) {
                print_cvtnum_err(pattern_offset, optarg);
                return pattern_offset;
            }
            break;
        case 'v':
            vflag = true;
            break;
        default:
            qemuio_command_usage(&read_cmd);
            return -EINVAL;
        }
    }

    /* Exactly two positional arguments remain: offset and length */
    if (optind != argc - 2) {
        qemuio_command_usage(&read_cmd);
        return -EINVAL;
    }

    offset = cvtnum(argv[optind]);
    if (offset < 0) {
        print_cvtnum_err(offset, argv[optind]);
        return offset;
    }

    optind++;
    count = cvtnum(argv[optind]);
    if (count < 0) {
        print_cvtnum_err(count, argv[optind]);
        return count;
    } else if (count > BDRV_REQUEST_MAX_BYTES) {
        printf("length cannot exceed %" PRIu64 ", given %s\n",
               (uint64_t)BDRV_REQUEST_MAX_BYTES, argv[optind]);
        return -EINVAL;
    }

    /* -s and -l only make sense together with -P */
    if (!Pflag && (lflag || sflag)) {
        qemuio_command_usage(&read_cmd);
        return -EINVAL;
    }

    /* Without -l, verify from pattern_offset through the end of the read */
    if (!lflag) {
        pattern_count = count - pattern_offset;
    }

    if ((pattern_count < 0) || (pattern_count + pattern_offset > count)) {
        printf("pattern verification range exceeds end of read data\n");
        return -EINVAL;
    }

    /* VM state reads (-b) still require sector alignment */
    if (bflag) {
        if (!QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)) {
            printf("%" PRId64 " is not a sector-aligned value for 'offset'\n",
                   offset);
            return -EINVAL;
        }
        if (!QEMU_IS_ALIGNED(count, BDRV_SECTOR_SIZE)) {
            printf("%"PRId64" is not a sector-aligned value for 'count'\n",
                   count);
            return -EINVAL;
        }
    }

    /* Pre-fill with 0xab so unwritten regions are visible in dumps */
    buf = qemu_io_alloc(blk, count, 0xab);

    clock_gettime(CLOCK_MONOTONIC, &t1);
    if (bflag) {
        ret = do_load_vmstate(blk, buf, offset, count, &total);
    } else {
        ret = do_pread(blk, buf, offset, count, &total);
    }
    clock_gettime(CLOCK_MONOTONIC, &t2);

    if (ret < 0) {
        printf("read failed: %s\n", strerror(-ret));
        goto out;
    }
    /* ret carries the request count on success; from here ret is the
     * command's own status */
    cnt = ret;

    ret = 0;

    if (Pflag) {
        void *cmp_buf = g_malloc(pattern_count);
        memset(cmp_buf, pattern, pattern_count);
        if (memcmp(buf + pattern_offset, cmp_buf, pattern_count)) {
            printf("Pattern verification failed at offset %"
                   PRId64 ", %"PRId64" bytes\n",
                   offset + pattern_offset, pattern_count);
            ret = -EINVAL;
        }
        g_free(cmp_buf);
    }

    if (qflag) {
        goto out;
    }

    if (vflag) {
        dump_buffer(buf, offset, count);
    }

    /* Finally, report back -- -C gives a parsable format */
    t2 = tsub(t2, t1);
    print_report("read", &t2, offset, count, total, cnt, Cflag);

out:
    qemu_io_free(buf);
    return ret;
}
|
|
|
|
|
|
|
|
/* Print the interactive help text for the 'readv' command. */
static void readv_help(void)
{
    printf(
"\n"
" reads a range of bytes from the given offset into multiple buffers\n"
"\n"
" Example:\n"
" 'readv -v 512 1k 1k ' - dumps 2 kilobytes read from 512 bytes into the file\n"
"\n"
" Reads a segment of the currently open file, optionally dumping it to the\n"
" standard output stream (with -v option) for subsequent inspection.\n"
" Uses multiple iovec buffers if more than one byte range is specified.\n"
" -C, -- report statistics in a machine parsable format\n"
" -P, -- use a pattern to verify read data\n"
" -v, -- dump buffer to standard output\n"
" -q, -- quiet mode, do not show I/O statistics\n"
"\n");
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int readv_f(BlockBackend *blk, int argc, char **argv);

/* Command table entry for 'readv': vectored read at a given offset. */
static const cmdinfo_t readv_cmd = {
    .name       = "readv",
    .cfunc      = readv_f,
    .argmin     = 2,
    .argmax     = -1,
    .args       = "[-Cqv] [-P pattern] off len [len..]",
    .oneline    = "reads a number of bytes at a specified offset",
    .help       = readv_help,
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
/*
 * 'readv' command: asynchronous vectored read of one iovec element per
 * trailing 'len' argument, starting at 'off'.  Supports pattern
 * verification (-P) over the whole buffer and dumping (-v).
 *
 * Returns 0 on success, a negative errno on failure (errors are also
 * reported on stdout).
 */
static int readv_f(BlockBackend *blk, int argc, char **argv)
{
    struct timespec t1, t2;
    bool Cflag = false, qflag = false, vflag = false;
    int c, cnt, ret;
    char *buf;
    int64_t offset;
    /* Some compilers get confused and warn if this is not initialized.  */
    int total = 0;
    int nr_iov;
    QEMUIOVector qiov;
    int pattern = 0;
    bool Pflag = false;

    while ((c = getopt(argc, argv, "CP:qv")) != -1) {
        switch (c) {
        case 'C':
            Cflag = true;
            break;
        case 'P':
            Pflag = true;
            pattern = parse_pattern(optarg);
            if (pattern < 0) {
                return -EINVAL;
            }
            break;
        case 'q':
            qflag = true;
            break;
        case 'v':
            vflag = true;
            break;
        default:
            qemuio_command_usage(&readv_cmd);
            return -EINVAL;
        }
    }

    /* Need at least an offset plus one length argument */
    if (optind > argc - 2) {
        qemuio_command_usage(&readv_cmd);
        return -EINVAL;
    }

    offset = cvtnum(argv[optind]);
    if (offset < 0) {
        print_cvtnum_err(offset, argv[optind]);
        return offset;
    }
    optind++;

    /* Each remaining argument becomes one iovec element (fill 0xab) */
    nr_iov = argc - optind;
    buf = create_iovec(blk, &qiov, &argv[optind], nr_iov, 0xab);
    if (buf == NULL) {
        return -EINVAL;
    }

    clock_gettime(CLOCK_MONOTONIC, &t1);
    ret = do_aio_readv(blk, &qiov, offset, &total);
    clock_gettime(CLOCK_MONOTONIC, &t2);

    if (ret < 0) {
        printf("readv failed: %s\n", strerror(-ret));
        goto out;
    }
    /* ret carries the request count on success; from here ret is the
     * command's own status */
    cnt = ret;

    ret = 0;

    if (Pflag) {
        void *cmp_buf = g_malloc(qiov.size);
        memset(cmp_buf, pattern, qiov.size);
        if (memcmp(buf, cmp_buf, qiov.size)) {
            printf("Pattern verification failed at offset %"
                   PRId64 ", %zu bytes\n", offset, qiov.size);
            ret = -EINVAL;
        }
        g_free(cmp_buf);
    }

    if (qflag) {
        goto out;
    }

    if (vflag) {
        dump_buffer(buf, offset, qiov.size);
    }

    /* Finally, report back -- -C gives a parsable format */
    t2 = tsub(t2, t1);
    print_report("read", &t2, offset, qiov.size, total, cnt, Cflag);

out:
    qemu_iovec_destroy(&qiov);
    qemu_io_free(buf);
    return ret;
}
|
|
|
|
|
|
|
|
/* Print the interactive help text for the 'write' command. */
static void write_help(void)
{
    printf(
"\n"
" writes a range of bytes from the given offset\n"
"\n"
" Example:\n"
" 'write 512 1k' - writes 1 kilobyte at 512 bytes into the open file\n"
"\n"
" Writes into a segment of the currently open file, using a buffer\n"
" filled with a set pattern (0xcdcdcdcd).\n"
" -b, -- write to the VM state rather than the virtual disk\n"
" -c, -- write compressed data with blk_write_compressed\n"
" -f, -- use Force Unit Access semantics\n"
" -n, -- with -z, don't allow slow fallback\n"
" -p, -- ignored for backwards compatibility\n"
" -P, -- use different pattern to fill file\n"
" -s, -- use a pattern file to fill the write buffer\n"
" -C, -- report statistics in a machine parsable format\n"
" -q, -- quiet mode, do not show I/O statistics\n"
" -u, -- with -z, allow unmapping\n"
" -z, -- write zeroes using blk_co_pwrite_zeroes\n"
"\n");
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int write_f(BlockBackend *blk, int argc, char **argv);

/* Command table entry for 'write' (alias 'w'); requires write permission. */
static const cmdinfo_t write_cmd = {
    .name       = "write",
    .altname    = "w",
    .cfunc      = write_f,
    .perm       = BLK_PERM_WRITE,
    .argmin     = 2,
    .argmax     = -1,
    .args       = "[-bcCfnquz] [-P pattern | -s source_file] off len",
    .oneline    = "writes a number of bytes at a specified offset",
    .help       = write_help,
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
/*
 * 'write' command: write 'len' bytes at 'off'.  Data source is a fill
 * pattern (default 0xcd, override with -P) or a pattern file (-s);
 * alternatively write zeroes (-z), compressed data (-c), or VM state (-b).
 * Request flags: -f (FUA), -u (may unmap, with -z), -n (no slow fallback,
 * with -z).
 *
 * Returns 0 on success, a negative errno on failure (errors are also
 * reported on stdout).
 */
static int write_f(BlockBackend *blk, int argc, char **argv)
{
    struct timespec t1, t2;
    bool Cflag = false, qflag = false, bflag = false;
    bool Pflag = false, zflag = false, cflag = false, sflag = false;
    int flags = 0;
    int c, cnt, ret;
    char *buf = NULL;
    int64_t offset;
    int64_t count;
    /* Some compilers get confused and warn if this is not initialized.  */
    int64_t total = 0;
    int pattern = 0xcd;
    const char *file_name = NULL;

    while ((c = getopt(argc, argv, "bcCfnpP:qs:uz")) != -1) {
        switch (c) {
        case 'b':
            bflag = true;
            break;
        case 'c':
            cflag = true;
            break;
        case 'C':
            Cflag = true;
            break;
        case 'f':
            flags |= BDRV_REQ_FUA;
            break;
        case 'n':
            flags |= BDRV_REQ_NO_FALLBACK;
            break;
        case 'p':
            /* Ignored for backwards compatibility */
            break;
        case 'P':
            Pflag = true;
            pattern = parse_pattern(optarg);
            if (pattern < 0) {
                return -EINVAL;
            }
            break;
        case 'q':
            qflag = true;
            break;
        case 's':
            /* -s: fill the write buffer from this pattern file */
            sflag = true;
            file_name = optarg;
            break;
        case 'u':
            flags |= BDRV_REQ_MAY_UNMAP;
            break;
        case 'z':
            zflag = true;
            break;
        default:
            qemuio_command_usage(&write_cmd);
            return -EINVAL;
        }
    }

    /* Exactly two positional arguments remain: offset and length */
    if (optind != argc - 2) {
        qemuio_command_usage(&write_cmd);
        return -EINVAL;
    }

    /* Reject mutually exclusive / dependent flag combinations */
    if (bflag && zflag) {
        printf("-b and -z cannot be specified at the same time\n");
        return -EINVAL;
    }

    if ((flags & BDRV_REQ_FUA) && (bflag || cflag)) {
        printf("-f and -b or -c cannot be specified at the same time\n");
        return -EINVAL;
    }

    if ((flags & BDRV_REQ_NO_FALLBACK) && !zflag) {
        printf("-n requires -z to be specified\n");
        return -EINVAL;
    }

    if ((flags & BDRV_REQ_MAY_UNMAP) && !zflag) {
        printf("-u requires -z to be specified\n");
        return -EINVAL;
    }

    if (zflag + Pflag + sflag > 1) {
        printf("Only one of -z, -P, and -s "
               "can be specified at the same time\n");
        return -EINVAL;
    }

    offset = cvtnum(argv[optind]);
    if (offset < 0) {
        print_cvtnum_err(offset, argv[optind]);
        return offset;
    }

    optind++;
    count = cvtnum(argv[optind]);
    if (count < 0) {
        print_cvtnum_err(count, argv[optind]);
        return count;
    } else if (count > BDRV_REQUEST_MAX_BYTES) {
        printf("length cannot exceed %" PRIu64 ", given %s\n",
               (uint64_t)BDRV_REQUEST_MAX_BYTES, argv[optind]);
        return -EINVAL;
    }

    /* VM state (-b) and compressed (-c) writes still require alignment */
    if (bflag || cflag) {
        if (!QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)) {
            printf("%" PRId64 " is not a sector-aligned value for 'offset'\n",
                   offset);
            return -EINVAL;
        }

        if (!QEMU_IS_ALIGNED(count, BDRV_SECTOR_SIZE)) {
            printf("%"PRId64" is not a sector-aligned value for 'count'\n",
                   count);
            return -EINVAL;
        }
    }

    /* Zero writes (-z) need no source buffer at all */
    if (!zflag) {
        if (sflag) {
            buf = qemu_io_alloc_from_file(blk, count, file_name);
            if (!buf) {
                return -EINVAL;
            }
        } else {
            buf = qemu_io_alloc(blk, count, pattern);
        }
    }

    clock_gettime(CLOCK_MONOTONIC, &t1);
    if (bflag) {
        ret = do_save_vmstate(blk, buf, offset, count, &total);
    } else if (zflag) {
        ret = do_co_pwrite_zeroes(blk, offset, count, flags, &total);
    } else if (cflag) {
        ret = do_write_compressed(blk, buf, offset, count, &total);
    } else {
        ret = do_pwrite(blk, buf, offset, count, flags, &total);
    }
    clock_gettime(CLOCK_MONOTONIC, &t2);

    if (ret < 0) {
        printf("write failed: %s\n", strerror(-ret));
        goto out;
    }
    /* ret carries the request count on success; from here ret is the
     * command's own status */
    cnt = ret;

    ret = 0;

    if (qflag) {
        goto out;
    }

    /* Finally, report back -- -C gives a parsable format */
    t2 = tsub(t2, t1);
    print_report("wrote", &t2, offset, count, total, cnt, Cflag);

out:
    if (!zflag) {
        qemu_io_free(buf);
    }
    return ret;
}
|
|
|
|
|
|
|
|
/* Print the interactive help text for the 'writev' command. */
static void
writev_help(void)
{
    printf(
"\n"
" writes a range of bytes from the given offset source from multiple buffers\n"
"\n"
" Example:\n"
" 'writev 512 1k 1k' - writes 2 kilobytes at 512 bytes into the open file\n"
"\n"
" Writes into a segment of the currently open file, using a buffer\n"
" filled with a set pattern (0xcdcdcdcd).\n"
" -P, -- use different pattern to fill file\n"
" -C, -- report statistics in a machine parsable format\n"
" -f, -- use Force Unit Access semantics\n"
" -q, -- quiet mode, do not show I/O statistics\n"
"\n");
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int writev_f(BlockBackend *blk, int argc, char **argv);

/* Command table entry for 'writev': vectored write, requires write permission. */
static const cmdinfo_t writev_cmd = {
    .name       = "writev",
    .cfunc      = writev_f,
    .perm       = BLK_PERM_WRITE,
    .argmin     = 2,
    .argmax     = -1,
    .args       = "[-Cfq] [-P pattern] off len [len..]",
    .oneline    = "writes a number of bytes at a specified offset",
    .help       = writev_help,
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
/*
 * 'writev' command: build a multi-element I/O vector from the length
 * arguments, issue a single vectored write at @off, and report timing
 * statistics unless -q was given.  Returns 0 on success or a negative
 * errno value on failure.
 */
static int writev_f(BlockBackend *blk, int argc, char **argv)
{
    struct timespec t1, t2;
    bool Cflag = false, qflag = false;
    int flags = 0;
    int c, cnt, ret;
    char *buf;
    int64_t offset;
    /* Some compilers get confused and warn if this is not initialized.  */
    int total = 0;
    int nr_iov;
    int pattern = 0xcd;
    QEMUIOVector qiov;

    while ((c = getopt(argc, argv, "CfqP:")) != -1) {
        switch (c) {
        case 'C':
            Cflag = true;
            break;
        case 'f':
            /* Force Unit Access: request write-through semantics */
            flags |= BDRV_REQ_FUA;
            break;
        case 'q':
            qflag = true;
            break;
        case 'P':
            pattern = parse_pattern(optarg);
            if (pattern < 0) {
                return -EINVAL;
            }
            break;
        default:
            qemuio_command_usage(&writev_cmd);
            return -EINVAL;
        }
    }

    /* At least an offset and one length must remain after the options. */
    if (optind > argc - 2) {
        qemuio_command_usage(&writev_cmd);
        return -EINVAL;
    }

    offset = cvtnum(argv[optind]);
    if (offset < 0) {
        print_cvtnum_err(offset, argv[optind]);
        return offset;
    }
    optind++;

    /* All remaining arguments are iovec element lengths. */
    nr_iov = argc - optind;
    buf = create_iovec(blk, &qiov, &argv[optind], nr_iov, pattern);
    if (buf == NULL) {
        return -EINVAL;
    }

    clock_gettime(CLOCK_MONOTONIC, &t1);
    ret = do_aio_writev(blk, &qiov, offset, flags, &total);
    clock_gettime(CLOCK_MONOTONIC, &t2);

    if (ret < 0) {
        printf("writev failed: %s\n", strerror(-ret));
        goto out;
    }
    cnt = ret;

    ret = 0;

    if (qflag) {
        goto out;
    }

    /* Finally, report back -- -C gives a parsable format */
    t2 = tsub(t2, t1);
    print_report("wrote", &t2, offset, qiov.size, total, cnt, Cflag);
out:
    qemu_iovec_destroy(&qiov);
    qemu_io_free(buf);
    return ret;
}
|
|
|
|
|
|
|
|
/*
 * Per-request state for the aio_read/aio_write commands; allocated by the
 * submitting function and freed by the completion callback.
 */
struct aio_ctx {
    BlockBackend *blk;      /* backend the request was submitted to */
    QEMUIOVector qiov;      /* I/O vector; for -z only .size is used */
    int64_t offset;         /* byte offset of the request */
    char *buf;              /* backing buffer for qiov (NULL with -z) */
    bool qflag;             /* -q: suppress statistics output */
    bool vflag;             /* -v: dump read data to stdout */
    bool Cflag;             /* -C: machine-parsable statistics */
    bool Pflag;             /* -P: verify/fill with a given pattern */
    bool zflag;             /* -z: write zeroes instead of a buffer */
    BlockAcctCookie acct;   /* accounting cookie for block stats */
    int pattern;            /* pattern byte used with -P */
    struct timespec t1;     /* submission time, for the latency report */
};
|
|
|
|
|
|
|
|
/*
 * Completion callback for aio_write requests: update accounting, print the
 * statistics report (unless -q), and release the per-request context.
 */
static void aio_write_done(void *opaque, int ret)
{
    struct aio_ctx *ctx = opaque;
    struct timespec t2;

    clock_gettime(CLOCK_MONOTONIC, &t2);

    if (ret < 0) {
        printf("aio_write failed: %s\n", strerror(-ret));
        block_acct_failed(blk_get_stats(ctx->blk), &ctx->acct);
        goto out;
    }

    block_acct_done(blk_get_stats(ctx->blk), &ctx->acct);

    if (ctx->qflag) {
        goto out;
    }

    /* Finally, report back -- -C gives a parsable format */
    t2 = tsub(t2, ctx->t1);
    print_report("wrote", &t2, ctx->offset, ctx->qiov.size,
                 ctx->qiov.size, 1, ctx->Cflag);
out:
    /* With -z no buffer/iovec was allocated, only qiov.size was set. */
    if (!ctx->zflag) {
        qemu_io_free(ctx->buf);
        qemu_iovec_destroy(&ctx->qiov);
    }
    g_free(ctx);
}
|
|
|
|
|
|
|
|
/*
 * Completion callback for aio_read requests: optionally verify the data
 * against a pattern (-P) and dump it (-v), update accounting, print the
 * statistics report (unless -q), and release the per-request context.
 */
static void aio_read_done(void *opaque, int ret)
{
    struct aio_ctx *ctx = opaque;
    struct timespec t2;

    clock_gettime(CLOCK_MONOTONIC, &t2);

    if (ret < 0) {
        printf("readv failed: %s\n", strerror(-ret));
        block_acct_failed(blk_get_stats(ctx->blk), &ctx->acct);
        goto out;
    }

    if (ctx->Pflag) {
        /* Compare the whole buffer against the repeated pattern byte;
         * a mismatch is reported but does not change the command result. */
        void *cmp_buf = g_malloc(ctx->qiov.size);

        memset(cmp_buf, ctx->pattern, ctx->qiov.size);
        if (memcmp(ctx->buf, cmp_buf, ctx->qiov.size)) {
            printf("Pattern verification failed at offset %"
                   PRId64 ", %zu bytes\n", ctx->offset, ctx->qiov.size);
        }
        g_free(cmp_buf);
    }

    block_acct_done(blk_get_stats(ctx->blk), &ctx->acct);

    if (ctx->qflag) {
        goto out;
    }

    if (ctx->vflag) {
        dump_buffer(ctx->buf, ctx->offset, ctx->qiov.size);
    }

    /* Finally, report back -- -C gives a parsable format */
    t2 = tsub(t2, ctx->t1);
    print_report("read", &t2, ctx->offset, ctx->qiov.size,
                 ctx->qiov.size, 1, ctx->Cflag);
out:
    qemu_io_free(ctx->buf);
    qemu_iovec_destroy(&ctx->qiov);
    g_free(ctx);
}
|
|
|
|
|
|
|
|
/* Print the interactive help text for the 'aio_read' command. */
static void aio_read_help(void)
{
    printf(
"\n"
" asynchronously reads a range of bytes from the given offset\n"
"\n"
" Example:\n"
" 'aio_read -v 512 1k 1k ' - dumps 2 kilobytes read from 512 bytes into the file\n"
"\n"
" Reads a segment of the currently open file, optionally dumping it to the\n"
" standard output stream (with -v option) for subsequent inspection.\n"
" The read is performed asynchronously and the aio_flush command must be\n"
" used to ensure all outstanding aio requests have been completed.\n"
" Note that due to its asynchronous nature, this command will be\n"
" considered successful once the request is submitted, independently\n"
" of potential I/O errors or pattern mismatches.\n"
" -C, -- report statistics in a machine parsable format\n"
" -P, -- use a pattern to verify read data\n"
" -i, -- treat request as invalid, for exercising stats\n"
" -v, -- dump buffer to standard output\n"
" -q, -- quiet mode, do not show I/O statistics\n"
"\n");
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int aio_read_f(BlockBackend *blk, int argc, char **argv);

/* Command table entry for 'aio_read': asynchronous vectored read. */
static const cmdinfo_t aio_read_cmd = {
    .name       = "aio_read",
    .cfunc      = aio_read_f,
    .argmin     = 2,
    .argmax     = -1,
    .args       = "[-Ciqv] [-P pattern] off len [len..]",
    .oneline    = "asynchronously reads a number of bytes",
    .help       = aio_read_help,
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
/*
 * 'aio_read' command: allocate a request context, parse options, build the
 * I/O vector and submit an asynchronous read.  Returns 0 once the request
 * is submitted (completion is handled by aio_read_done) or a negative
 * errno value if argument parsing fails.
 */
static int aio_read_f(BlockBackend *blk, int argc, char **argv)
{
    int nr_iov, c;
    struct aio_ctx *ctx = g_new0(struct aio_ctx, 1);

    ctx->blk = blk;
    while ((c = getopt(argc, argv, "CP:iqv")) != -1) {
        switch (c) {
        case 'C':
            ctx->Cflag = true;
            break;
        case 'P':
            ctx->Pflag = true;
            ctx->pattern = parse_pattern(optarg);
            if (ctx->pattern < 0) {
                g_free(ctx);
                return -EINVAL;
            }
            break;
        case 'i':
            /* -i only records an invalid-request statistic; no I/O is done. */
            printf("injecting invalid read request\n");
            block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_READ);
            g_free(ctx);
            return 0;
        case 'q':
            ctx->qflag = true;
            break;
        case 'v':
            ctx->vflag = true;
            break;
        default:
            g_free(ctx);
            qemuio_command_usage(&aio_read_cmd);
            return -EINVAL;
        }
    }

    /* At least an offset and one length must remain after the options. */
    if (optind > argc - 2) {
        g_free(ctx);
        qemuio_command_usage(&aio_read_cmd);
        return -EINVAL;
    }

    ctx->offset = cvtnum(argv[optind]);
    if (ctx->offset < 0) {
        int ret = ctx->offset;
        print_cvtnum_err(ret, argv[optind]);
        g_free(ctx);
        return ret;
    }
    optind++;

    /* All remaining arguments are iovec element lengths. */
    nr_iov = argc - optind;
    ctx->buf = create_iovec(blk, &ctx->qiov, &argv[optind], nr_iov, 0xab);
    if (ctx->buf == NULL) {
        block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_READ);
        g_free(ctx);
        return -EINVAL;
    }

    clock_gettime(CLOCK_MONOTONIC, &ctx->t1);
    block_acct_start(blk_get_stats(blk), &ctx->acct, ctx->qiov.size,
                     BLOCK_ACCT_READ);
    blk_aio_preadv(blk, ctx->offset, &ctx->qiov, 0, aio_read_done, ctx);
    return 0;
}
|
|
|
|
|
|
|
|
/* Print the interactive help text for the 'aio_write' command. */
static void aio_write_help(void)
{
    printf(
"\n"
" asynchronously writes a range of bytes from the given offset source\n"
" from multiple buffers\n"
"\n"
" Example:\n"
" 'aio_write 512 1k 1k' - writes 2 kilobytes at 512 bytes into the open file\n"
"\n"
" Writes into a segment of the currently open file, using a buffer\n"
" filled with a set pattern (0xcdcdcdcd).\n"
" The write is performed asynchronously and the aio_flush command must be\n"
" used to ensure all outstanding aio requests have been completed.\n"
" Note that due to its asynchronous nature, this command will be\n"
" considered successful once the request is submitted, independently\n"
" of potential I/O errors or pattern mismatches.\n"
" -P, -- use different pattern to fill file\n"
" -C, -- report statistics in a machine parsable format\n"
" -f, -- use Force Unit Access semantics\n"
" -i, -- treat request as invalid, for exercising stats\n"
" -q, -- quiet mode, do not show I/O statistics\n"
" -u, -- with -z, allow unmapping\n"
" -z, -- write zeroes using blk_aio_pwrite_zeroes\n"
"\n");
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int aio_write_f(BlockBackend *blk, int argc, char **argv);

/* Command table entry for 'aio_write': asynchronous vectored write. */
static const cmdinfo_t aio_write_cmd = {
    .name       = "aio_write",
    .cfunc      = aio_write_f,
    .perm       = BLK_PERM_WRITE,
    .argmin     = 2,
    .argmax     = -1,
    .args       = "[-Cfiquz] [-P pattern] off len [len..]",
    .oneline    = "asynchronously writes a number of bytes",
    .help       = aio_write_help,
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
/*
 * 'aio_write' command: allocate a request context, parse options and submit
 * an asynchronous write (or write-zeroes with -z).  Returns 0 once the
 * request is submitted (completion is handled by aio_write_done) or a
 * negative errno value if argument parsing fails.
 *
 * Fix: record -P in ctx->Pflag.  Previously the flag was never set here,
 * so the "-z and -P cannot be specified at the same time" check below was
 * dead code and 'aio_write -z -P <pat> off len' was silently accepted.
 */
static int aio_write_f(BlockBackend *blk, int argc, char **argv)
{
    int nr_iov, c;
    int pattern = 0xcd;
    struct aio_ctx *ctx = g_new0(struct aio_ctx, 1);
    int flags = 0;

    ctx->blk = blk;
    while ((c = getopt(argc, argv, "CfiqP:uz")) != -1) {
        switch (c) {
        case 'C':
            ctx->Cflag = true;
            break;
        case 'f':
            /* Force Unit Access: request write-through semantics */
            flags |= BDRV_REQ_FUA;
            break;
        case 'q':
            ctx->qflag = true;
            break;
        case 'u':
            flags |= BDRV_REQ_MAY_UNMAP;
            break;
        case 'P':
            ctx->Pflag = true; /* needed for the -z/-P exclusion check */
            pattern = parse_pattern(optarg);
            if (pattern < 0) {
                g_free(ctx);
                return -EINVAL;
            }
            break;
        case 'i':
            /* -i only records an invalid-request statistic; no I/O is done. */
            printf("injecting invalid write request\n");
            block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE);
            g_free(ctx);
            return 0;
        case 'z':
            ctx->zflag = true;
            break;
        default:
            g_free(ctx);
            qemuio_command_usage(&aio_write_cmd);
            return -EINVAL;
        }
    }

    /* At least an offset and one length must remain after the options. */
    if (optind > argc - 2) {
        g_free(ctx);
        qemuio_command_usage(&aio_write_cmd);
        return -EINVAL;
    }

    if (ctx->zflag && optind != argc - 2) {
        printf("-z supports only a single length parameter\n");
        g_free(ctx);
        return -EINVAL;
    }

    if ((flags & BDRV_REQ_MAY_UNMAP) && !ctx->zflag) {
        printf("-u requires -z to be specified\n");
        g_free(ctx);
        return -EINVAL;
    }

    if (ctx->zflag && ctx->Pflag) {
        printf("-z and -P cannot be specified at the same time\n");
        g_free(ctx);
        return -EINVAL;
    }

    ctx->offset = cvtnum(argv[optind]);
    if (ctx->offset < 0) {
        int ret = ctx->offset;
        print_cvtnum_err(ret, argv[optind]);
        g_free(ctx);
        return ret;
    }
    optind++;

    if (ctx->zflag) {
        /* Write zeroes: no buffer needed, only the request size. */
        int64_t count = cvtnum(argv[optind]);
        if (count < 0) {
            print_cvtnum_err(count, argv[optind]);
            g_free(ctx);
            return count;
        }

        ctx->qiov.size = count;
        blk_aio_pwrite_zeroes(blk, ctx->offset, count, flags, aio_write_done,
                              ctx);
    } else {
        /* All remaining arguments are iovec element lengths. */
        nr_iov = argc - optind;
        ctx->buf = create_iovec(blk, &ctx->qiov, &argv[optind], nr_iov,
                                pattern);
        if (ctx->buf == NULL) {
            block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE);
            g_free(ctx);
            return -EINVAL;
        }

        clock_gettime(CLOCK_MONOTONIC, &ctx->t1);
        block_acct_start(blk_get_stats(blk), &ctx->acct, ctx->qiov.size,
                         BLOCK_ACCT_WRITE);

        blk_aio_pwritev(blk, ctx->offset, &ctx->qiov, flags, aio_write_done,
                        ctx);
    }

    return 0;
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
/*
 * 'aio_flush' command: drain all outstanding aio requests, accounting the
 * wait as a flush operation.  Always succeeds.
 */
static int aio_flush_f(BlockBackend *blk, int argc, char **argv)
{
    BlockAcctCookie cookie;
    block_acct_start(blk_get_stats(blk), &cookie, 0, BLOCK_ACCT_FLUSH);
    blk_drain_all();
    block_acct_done(blk_get_stats(blk), &cookie);
    return 0;
}
|
|
|
|
|
|
|
|
/* Command table entry for 'aio_flush'. */
static const cmdinfo_t aio_flush_cmd = {
    .name       = "aio_flush",
    .cfunc      = aio_flush_f,
    .oneline    = "completes all outstanding aio requests"
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
/* 'flush' command: flush the backend; returns blk_flush()'s result. */
static int flush_f(BlockBackend *blk, int argc, char **argv)
{
    return blk_flush(blk);
}
|
|
|
|
|
|
|
|
/* Command table entry for 'flush' (alias 'f'). */
static const cmdinfo_t flush_cmd = {
    .name       = "flush",
    .altname    = "f",
    .cfunc      = flush_f,
    .oneline    = "flush all in-core file state to disk",
};
|
|
|
|
|
2020-10-21 16:58:47 +02:00
|
|
|
static int truncate_f(BlockBackend *blk, int argc, char **argv);

/* Command table entry for 'truncate' (alias 't'); needs resize permission. */
static const cmdinfo_t truncate_cmd = {
    .name       = "truncate",
    .altname    = "t",
    .cfunc      = truncate_f,
    .perm       = BLK_PERM_WRITE | BLK_PERM_RESIZE,
    .argmin     = 1,
    .argmax     = 3,
    .args       = "[-m prealloc_mode] off",
    .oneline    = "truncates the current file at the given offset",
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
/*
 * 'truncate' command: resize the image to the given offset, optionally
 * using a preallocation mode (-m).  Returns 0 on success or a negative
 * errno value on failure.
 *
 * Fix: the cvtnum error path used argv[1] while the offset is parsed from
 * argv[optind]; when '-m <mode>' is given those differ, so the error
 * message named the option instead of the bad offset argument.
 */
static int truncate_f(BlockBackend *blk, int argc, char **argv)
{
    Error *local_err = NULL;
    int64_t offset;
    int c, ret;
    PreallocMode prealloc = PREALLOC_MODE_OFF;

    while ((c = getopt(argc, argv, "m:")) != -1) {
        switch (c) {
        case 'm':
            prealloc = qapi_enum_parse(&PreallocMode_lookup, optarg,
                                       PREALLOC_MODE__MAX, NULL);
            if (prealloc == PREALLOC_MODE__MAX) {
                error_report("Invalid preallocation mode '%s'", optarg);
                return -EINVAL;
            }
            break;
        default:
            qemuio_command_usage(&truncate_cmd);
            return -EINVAL;
        }
    }

    offset = cvtnum(argv[optind]);
    if (offset < 0) {
        print_cvtnum_err(offset, argv[optind]);
        return offset;
    }

    /*
     * qemu-io is a debugging tool, so let us be strict here and pass
     * exact=true. It is better to err on the "emit more errors" side
     * than to be overly permissive.
     */
    ret = blk_truncate(blk, offset, false, prealloc, 0, &local_err);
    if (ret < 0) {
        error_report_err(local_err);
        return ret;
    }

    return 0;
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
/*
 * 'length' command: print the current image length in human-readable form.
 * Returns 0 on success or the negative errno from blk_getlength().
 */
static int length_f(BlockBackend *blk, int argc, char **argv)
{
    int64_t size;
    char s1[64];

    size = blk_getlength(blk);
    if (size < 0) {
        printf("getlength: %s\n", strerror(-size));
        return size;
    }

    cvtstr(size, s1, sizeof(s1));
    printf("%s\n", s1);
    return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Command table entry for 'length' (alias 'l'). */
static const cmdinfo_t length_cmd = {
    .name   = "length",
    .altname    = "l",
    .cfunc      = length_f,
    .oneline    = "gets the length of the current file",
};
|
|
|
|
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
/*
 * 'info' command: print driver name(s), cluster size, vmstate offset and
 * format-specific details of the current image.  Returns 0 on success or
 * a negative errno value on failure.
 *
 * Fix: the protocol-name branch printed the label "format name:" for
 * bs->drv->protocol_name, duplicating the previous line's label; it now
 * prints "protocol name:".  NOTE(review): any external baselines that
 * capture this output verbatim need the matching update.
 */
static int info_f(BlockBackend *blk, int argc, char **argv)
{
    BlockDriverState *bs = blk_bs(blk);
    BlockDriverInfo bdi;
    ImageInfoSpecific *spec_info;
    Error *local_err = NULL;
    char s1[64], s2[64];
    int ret;

    if (bs->drv && bs->drv->format_name) {
        printf("format name: %s\n", bs->drv->format_name);
    }
    if (bs->drv && bs->drv->protocol_name) {
        printf("protocol name: %s\n", bs->drv->protocol_name);
    }

    ret = bdrv_get_info(bs, &bdi);
    if (ret) {
        return ret;
    }

    cvtstr(bdi.cluster_size, s1, sizeof(s1));
    cvtstr(bdi.vm_state_offset, s2, sizeof(s2));

    printf("cluster size: %s\n", s1);
    printf("vm state offset: %s\n", s2);

    spec_info = bdrv_get_specific_info(bs, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return -EIO;
    }
    if (spec_info) {
        printf("Format specific information:\n");
        bdrv_image_info_specific_dump(spec_info);
        qapi_free_ImageInfoSpecific(spec_info);
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Command table entry for 'info' (alias 'i'). */
static const cmdinfo_t info_cmd = {
    .name       = "info",
    .altname    = "i",
    .cfunc      = info_f,
    .oneline    = "prints information about the current file",
};
|
|
|
|
|
|
|
|
/* Print the interactive help text for the 'discard' command. */
static void discard_help(void)
{
    printf(
"\n"
" discards a range of bytes from the given offset\n"
"\n"
" Example:\n"
" 'discard 512 1k' - discards 1 kilobyte from 512 bytes into the file\n"
"\n"
" Discards a segment of the currently open file.\n"
" -C, -- report statistics in a machine parsable format\n"
" -q, -- quiet mode, do not show I/O statistics\n"
"\n");
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int discard_f(BlockBackend *blk, int argc, char **argv);

/* Command table entry for 'discard' (alias 'd'); requires write permission. */
static const cmdinfo_t discard_cmd = {
    .name       = "discard",
    .altname    = "d",
    .cfunc      = discard_f,
    .perm       = BLK_PERM_WRITE,
    .argmin     = 2,
    .argmax     = -1,
    .args       = "[-Cq] off len",
    .oneline    = "discards a number of bytes at a specified offset",
    .help       = discard_help,
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
/*
 * 'discard' command: discard @len bytes at @off and report timing
 * statistics unless -q was given.  Returns 0 on success or a negative
 * errno value on failure.
 */
static int discard_f(BlockBackend *blk, int argc, char **argv)
{
    struct timespec t1, t2;
    bool Cflag = false, qflag = false;
    int c, ret;
    int64_t offset, bytes;

    while ((c = getopt(argc, argv, "Cq")) != -1) {
        switch (c) {
        case 'C':
            Cflag = true;
            break;
        case 'q':
            qflag = true;
            break;
        default:
            qemuio_command_usage(&discard_cmd);
            return -EINVAL;
        }
    }

    /* Exactly an offset and a length must remain after the options. */
    if (optind != argc - 2) {
        qemuio_command_usage(&discard_cmd);
        return -EINVAL;
    }

    offset = cvtnum(argv[optind]);
    if (offset < 0) {
        print_cvtnum_err(offset, argv[optind]);
        return offset;
    }

    optind++;
    bytes = cvtnum(argv[optind]);
    if (bytes < 0) {
        print_cvtnum_err(bytes, argv[optind]);
        return bytes;
    } else if (bytes > BDRV_REQUEST_MAX_BYTES) {
        /* Reject requests larger than a single block-layer request. */
        printf("length cannot exceed %"PRIu64", given %s\n",
               (uint64_t)BDRV_REQUEST_MAX_BYTES, argv[optind]);
        return -EINVAL;
    }

    clock_gettime(CLOCK_MONOTONIC, &t1);
    ret = blk_pdiscard(blk, offset, bytes);
    clock_gettime(CLOCK_MONOTONIC, &t2);

    if (ret < 0) {
        printf("discard failed: %s\n", strerror(-ret));
        return ret;
    }

    /* Finally, report back -- -C gives a parsable format */
    if (!qflag) {
        t2 = tsub(t2, t1);
        print_report("discard", &t2, offset, bytes, bytes, 1, Cflag);
    }

    return 0;
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int alloc_f(BlockBackend *blk, int argc, char **argv)
|
2013-06-05 14:19:31 +02:00
|
|
|
{
|
2015-02-05 19:58:22 +01:00
|
|
|
BlockDriverState *bs = blk_bs(blk);
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
int64_t offset, start, remaining, count;
|
2013-06-05 14:19:31 +02:00
|
|
|
char s1[64];
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
int ret;
|
|
|
|
int64_t num, sum_alloc;
|
2013-06-05 14:19:31 +02:00
|
|
|
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
start = offset = cvtnum(argv[1]);
|
2013-06-05 14:19:31 +02:00
|
|
|
if (offset < 0) {
|
2015-11-06 00:53:04 +01:00
|
|
|
print_cvtnum_err(offset, argv[1]);
|
2018-05-09 21:42:59 +02:00
|
|
|
return offset;
|
2013-06-05 14:19:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (argc == 3) {
|
2017-04-29 21:14:12 +02:00
|
|
|
count = cvtnum(argv[2]);
|
|
|
|
if (count < 0) {
|
|
|
|
print_cvtnum_err(count, argv[2]);
|
2018-05-09 21:42:59 +02:00
|
|
|
return count;
|
2013-06-05 14:19:31 +02:00
|
|
|
}
|
|
|
|
} else {
|
2017-04-29 21:14:12 +02:00
|
|
|
count = BDRV_SECTOR_SIZE;
|
|
|
|
}
|
2013-06-05 14:19:31 +02:00
|
|
|
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
remaining = count;
|
2013-06-05 14:19:31 +02:00
|
|
|
sum_alloc = 0;
|
|
|
|
while (remaining) {
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
ret = bdrv_is_allocated(bs, offset, remaining, &num);
|
2013-09-04 19:00:25 +02:00
|
|
|
if (ret < 0) {
|
|
|
|
printf("is_allocated failed: %s\n", strerror(-ret));
|
2018-05-09 21:42:59 +02:00
|
|
|
return ret;
|
2013-09-04 19:00:25 +02:00
|
|
|
}
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
offset += num;
|
2013-06-05 14:19:31 +02:00
|
|
|
remaining -= num;
|
|
|
|
if (ret) {
|
|
|
|
sum_alloc += num;
|
|
|
|
}
|
|
|
|
if (num == 0) {
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
count -= remaining;
|
2013-06-05 14:19:31 +02:00
|
|
|
remaining = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
cvtstr(start, s1, sizeof(s1));
|
2013-06-05 14:19:31 +02:00
|
|
|
|
2017-04-29 21:14:12 +02:00
|
|
|
printf("%"PRId64"/%"PRId64" bytes allocated at offset %s\n",
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
sum_alloc, count, s1);
|
2018-05-09 21:42:59 +02:00
|
|
|
return 0;
|
2013-06-05 14:19:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static const cmdinfo_t alloc_cmd = {
|
|
|
|
.name = "alloc",
|
|
|
|
.altname = "a",
|
|
|
|
.argmin = 1,
|
|
|
|
.argmax = 2,
|
|
|
|
.cfunc = alloc_f,
|
2017-04-29 21:14:12 +02:00
|
|
|
.args = "offset [count]",
|
|
|
|
.oneline = "checks if offset is allocated in the file",
|
2013-06-05 14:19:31 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
static int map_is_allocated(BlockDriverState *bs, int64_t offset,
|
|
|
|
int64_t bytes, int64_t *pnum)
|
2013-06-05 14:19:31 +02:00
|
|
|
{
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
int64_t num;
|
|
|
|
int num_checked;
|
2013-06-05 14:19:31 +02:00
|
|
|
int ret, firstret;
|
|
|
|
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
num_checked = MIN(bytes, BDRV_REQUEST_MAX_BYTES);
|
|
|
|
ret = bdrv_is_allocated(bs, offset, num_checked, &num);
|
2013-06-05 14:19:31 +02:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
firstret = ret;
|
|
|
|
*pnum = num;
|
|
|
|
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
while (bytes > 0 && ret == firstret) {
|
|
|
|
offset += num;
|
|
|
|
bytes -= num;
|
2013-06-05 14:19:31 +02:00
|
|
|
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
num_checked = MIN(bytes, BDRV_REQUEST_MAX_BYTES);
|
|
|
|
ret = bdrv_is_allocated(bs, offset, num_checked, &num);
|
2014-10-22 17:00:16 +02:00
|
|
|
if (ret == firstret && num) {
|
2013-06-05 14:19:31 +02:00
|
|
|
*pnum += num;
|
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return firstret;
|
|
|
|
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int map_f(BlockBackend *blk, int argc, char **argv)
|
2013-06-05 14:19:31 +02:00
|
|
|
{
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
int64_t offset, bytes;
|
2017-04-29 21:14:13 +02:00
|
|
|
char s1[64], s2[64];
|
2013-06-05 14:19:31 +02:00
|
|
|
int64_t num;
|
|
|
|
int ret;
|
|
|
|
const char *retstr;
|
|
|
|
|
|
|
|
offset = 0;
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
bytes = blk_getlength(blk);
|
|
|
|
if (bytes < 0) {
|
|
|
|
error_report("Failed to query image length: %s", strerror(-bytes));
|
2018-05-09 21:42:59 +02:00
|
|
|
return bytes;
|
2015-02-05 19:58:22 +01:00
|
|
|
}
|
|
|
|
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
while (bytes) {
|
|
|
|
ret = map_is_allocated(blk_bs(blk), offset, bytes, &num);
|
2013-06-05 14:19:31 +02:00
|
|
|
if (ret < 0) {
|
|
|
|
error_report("Failed to get allocation status: %s", strerror(-ret));
|
2018-05-09 21:42:59 +02:00
|
|
|
return ret;
|
2014-10-22 17:00:16 +02:00
|
|
|
} else if (!num) {
|
|
|
|
error_report("Unexpected end of image");
|
2018-05-09 21:42:59 +02:00
|
|
|
return -EIO;
|
2013-06-05 14:19:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
retstr = ret ? " allocated" : "not allocated";
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
cvtstr(num, s1, sizeof(s1));
|
|
|
|
cvtstr(offset, s2, sizeof(s2));
|
2017-04-29 21:14:13 +02:00
|
|
|
printf("%s (0x%" PRIx64 ") bytes %s at offset %s (0x%" PRIx64 ")\n",
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
s1, num, retstr, s2, offset);
|
2013-06-05 14:19:31 +02:00
|
|
|
|
|
|
|
offset += num;
|
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now,
the io.c layer still assert()s that all callers are sector-aligned
on input and that *pnum is sector-aligned on return to the caller,
but that can be relaxed when a later patch implements byte-based
block status. Therefore, this code adds usages like
DIV_ROUND_UP(,BDRV_SECTOR_SIZE) to callers that still want aligned
values, where the call might reasonbly give non-aligned results
in the future; on the other hand, no rounding is needed for callers
that should just continue to work with byte alignment.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But
some code, particularly bdrv_commit(), gets a lot simpler because it
no longer has to mess with sectors; also, it is now possible to pass
NULL if the caller does not care how much of the image is allocated
beyond the initial offset. Leave comments where we can further
simplify once a later patch eliminates the need for sector-aligned
requests through bdrv_is_allocated().
For ease of review, bdrv_is_allocated_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-07-07 14:44:57 +02:00
|
|
|
bytes -= num;
|
|
|
|
}
|
2018-05-09 21:42:59 +02:00
|
|
|
|
|
|
|
return 0;
|
2013-06-05 14:19:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static const cmdinfo_t map_cmd = {
|
|
|
|
.name = "map",
|
|
|
|
.argmin = 0,
|
|
|
|
.argmax = 0,
|
|
|
|
.cfunc = map_f,
|
|
|
|
.args = "",
|
|
|
|
.oneline = "prints the allocated areas of a file",
|
|
|
|
};
|
|
|
|
|
2014-12-08 17:37:28 +01:00
|
|
|
static void reopen_help(void)
|
|
|
|
{
|
|
|
|
printf(
|
|
|
|
"\n"
|
|
|
|
" Changes the open options of an already opened image\n"
|
|
|
|
"\n"
|
|
|
|
" Example:\n"
|
|
|
|
" 'reopen -o lazy-refcounts=on' - activates lazy refcount writeback on a qcow2 image\n"
|
|
|
|
"\n"
|
|
|
|
" -r, -- Reopen the image read-only\n"
|
2017-08-03 17:03:00 +02:00
|
|
|
" -w, -- Reopen the image read-write\n"
|
2014-12-08 17:37:28 +01:00
|
|
|
" -c, -- Change the cache mode to the given value\n"
|
|
|
|
" -o, -- Changes block driver options (cf. 'open' command)\n"
|
|
|
|
"\n");
|
|
|
|
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int reopen_f(BlockBackend *blk, int argc, char **argv);
|
2014-12-08 17:37:28 +01:00
|
|
|
|
|
|
|
static QemuOptsList reopen_opts = {
|
|
|
|
.name = "reopen",
|
|
|
|
.merge_lists = true,
|
|
|
|
.head = QTAILQ_HEAD_INITIALIZER(reopen_opts.head),
|
|
|
|
.desc = {
|
|
|
|
/* no elements => accept any params */
|
|
|
|
{ /* end of list */ }
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
static const cmdinfo_t reopen_cmd = {
|
|
|
|
.name = "reopen",
|
|
|
|
.argmin = 0,
|
|
|
|
.argmax = -1,
|
|
|
|
.cfunc = reopen_f,
|
2017-08-03 17:03:00 +02:00
|
|
|
.args = "[(-r|-w)] [-c cache] [-o options]",
|
2014-12-08 17:37:28 +01:00
|
|
|
.oneline = "reopens an image with new options",
|
|
|
|
.help = reopen_help,
|
|
|
|
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int reopen_f(BlockBackend *blk, int argc, char **argv)
|
2014-12-08 17:37:28 +01:00
|
|
|
{
|
|
|
|
BlockDriverState *bs = blk_bs(blk);
|
|
|
|
QemuOpts *qopts;
|
|
|
|
QDict *opts;
|
|
|
|
int c;
|
|
|
|
int flags = bs->open_flags;
|
2016-03-18 15:36:31 +01:00
|
|
|
bool writethrough = !blk_enable_write_cache(blk);
|
2017-08-03 17:03:00 +02:00
|
|
|
bool has_rw_option = false;
|
2018-11-12 15:00:42 +01:00
|
|
|
bool has_cache_option = false;
|
2014-12-08 17:37:28 +01:00
|
|
|
|
|
|
|
BlockReopenQueue *brq;
|
|
|
|
Error *local_err = NULL;
|
|
|
|
|
2017-08-03 17:03:00 +02:00
|
|
|
while ((c = getopt(argc, argv, "c:o:rw")) != -1) {
|
2014-12-08 17:37:28 +01:00
|
|
|
switch (c) {
|
|
|
|
case 'c':
|
2016-03-18 15:36:31 +01:00
|
|
|
if (bdrv_parse_cache_mode(optarg, &flags, &writethrough) < 0) {
|
2014-12-08 17:37:28 +01:00
|
|
|
error_report("Invalid cache option: %s", optarg);
|
2018-05-09 21:42:59 +02:00
|
|
|
return -EINVAL;
|
2014-12-08 17:37:28 +01:00
|
|
|
}
|
2018-11-12 15:00:42 +01:00
|
|
|
has_cache_option = true;
|
2014-12-08 17:37:28 +01:00
|
|
|
break;
|
|
|
|
case 'o':
|
|
|
|
if (!qemu_opts_parse_noisily(&reopen_opts, optarg, 0)) {
|
|
|
|
qemu_opts_reset(&reopen_opts);
|
2018-05-09 21:42:59 +02:00
|
|
|
return -EINVAL;
|
2014-12-08 17:37:28 +01:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 'r':
|
2017-08-03 17:03:00 +02:00
|
|
|
if (has_rw_option) {
|
|
|
|
error_report("Only one -r/-w option may be given");
|
2018-05-09 21:42:59 +02:00
|
|
|
return -EINVAL;
|
2017-08-03 17:03:00 +02:00
|
|
|
}
|
2014-12-08 17:37:28 +01:00
|
|
|
flags &= ~BDRV_O_RDWR;
|
2017-08-03 17:03:00 +02:00
|
|
|
has_rw_option = true;
|
|
|
|
break;
|
|
|
|
case 'w':
|
|
|
|
if (has_rw_option) {
|
|
|
|
error_report("Only one -r/-w option may be given");
|
2018-05-09 21:42:59 +02:00
|
|
|
return -EINVAL;
|
2017-08-03 17:03:00 +02:00
|
|
|
}
|
|
|
|
flags |= BDRV_O_RDWR;
|
|
|
|
has_rw_option = true;
|
2014-12-08 17:37:28 +01:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
qemu_opts_reset(&reopen_opts);
|
2018-05-09 21:42:58 +02:00
|
|
|
qemuio_command_usage(&reopen_cmd);
|
2018-05-09 21:42:59 +02:00
|
|
|
return -EINVAL;
|
2014-12-08 17:37:28 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (optind != argc) {
|
|
|
|
qemu_opts_reset(&reopen_opts);
|
2018-05-09 21:42:58 +02:00
|
|
|
qemuio_command_usage(&reopen_cmd);
|
2018-05-09 21:42:59 +02:00
|
|
|
return -EINVAL;
|
2014-12-08 17:37:28 +01:00
|
|
|
}
|
|
|
|
|
2018-09-06 11:37:01 +02:00
|
|
|
if (!writethrough != blk_enable_write_cache(blk) &&
|
2016-03-18 15:36:31 +01:00
|
|
|
blk_get_attached_dev(blk))
|
|
|
|
{
|
|
|
|
error_report("Cannot change cache.writeback: Device attached");
|
|
|
|
qemu_opts_reset(&reopen_opts);
|
2018-05-09 21:42:59 +02:00
|
|
|
return -EBUSY;
|
2016-03-18 15:36:31 +01:00
|
|
|
}
|
|
|
|
|
2017-09-22 14:50:12 +02:00
|
|
|
if (!(flags & BDRV_O_RDWR)) {
|
|
|
|
uint64_t orig_perm, orig_shared_perm;
|
|
|
|
|
|
|
|
bdrv_drain(bs);
|
|
|
|
|
|
|
|
blk_get_perm(blk, &orig_perm, &orig_shared_perm);
|
|
|
|
blk_set_perm(blk,
|
|
|
|
orig_perm & ~(BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED),
|
|
|
|
orig_shared_perm,
|
|
|
|
&error_abort);
|
|
|
|
}
|
|
|
|
|
2014-12-08 17:37:28 +01:00
|
|
|
qopts = qemu_opts_find(&reopen_opts, NULL);
|
2018-11-12 15:00:42 +01:00
|
|
|
opts = qopts ? qemu_opts_to_qdict(qopts, NULL) : qdict_new();
|
2014-12-08 17:37:28 +01:00
|
|
|
qemu_opts_reset(&reopen_opts);
|
|
|
|
|
2018-11-12 15:00:42 +01:00
|
|
|
if (qdict_haskey(opts, BDRV_OPT_READ_ONLY)) {
|
|
|
|
if (has_rw_option) {
|
|
|
|
error_report("Cannot set both -r/-w and '" BDRV_OPT_READ_ONLY "'");
|
|
|
|
qobject_unref(opts);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
qdict_put_bool(opts, BDRV_OPT_READ_ONLY, !(flags & BDRV_O_RDWR));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qdict_haskey(opts, BDRV_OPT_CACHE_DIRECT) ||
|
|
|
|
qdict_haskey(opts, BDRV_OPT_CACHE_NO_FLUSH)) {
|
|
|
|
if (has_cache_option) {
|
|
|
|
error_report("Cannot set both -c and the cache options");
|
|
|
|
qobject_unref(opts);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
qdict_put_bool(opts, BDRV_OPT_CACHE_DIRECT, flags & BDRV_O_NOCACHE);
|
|
|
|
qdict_put_bool(opts, BDRV_OPT_CACHE_NO_FLUSH, flags & BDRV_O_NO_FLUSH);
|
|
|
|
}
|
|
|
|
|
2017-12-06 20:24:44 +01:00
|
|
|
bdrv_subtree_drained_begin(bs);
|
2019-03-12 17:48:44 +01:00
|
|
|
brq = bdrv_reopen_queue(NULL, bs, opts, true);
|
2019-03-12 17:48:50 +01:00
|
|
|
bdrv_reopen_multiple(brq, &local_err);
|
2017-12-06 20:24:44 +01:00
|
|
|
bdrv_subtree_drained_end(bs);
|
|
|
|
|
2014-12-08 17:37:28 +01:00
|
|
|
if (local_err) {
|
|
|
|
error_report_err(local_err);
|
2018-05-09 21:42:59 +02:00
|
|
|
return -EINVAL;
|
2014-12-08 17:37:28 +01:00
|
|
|
}
|
2018-05-09 21:42:59 +02:00
|
|
|
|
|
|
|
blk_set_enable_write_cache(blk, !writethrough);
|
|
|
|
return 0;
|
2014-12-08 17:37:28 +01:00
|
|
|
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int break_f(BlockBackend *blk, int argc, char **argv)
|
2013-06-05 14:19:31 +02:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2015-02-05 19:58:22 +01:00
|
|
|
ret = bdrv_debug_breakpoint(blk_bs(blk), argv[1], argv[2]);
|
2013-06-05 14:19:31 +02:00
|
|
|
if (ret < 0) {
|
|
|
|
printf("Could not set breakpoint: %s\n", strerror(-ret));
|
2018-05-09 21:42:59 +02:00
|
|
|
return ret;
|
2013-06-05 14:19:31 +02:00
|
|
|
}
|
2018-05-09 21:42:59 +02:00
|
|
|
|
|
|
|
return 0;
|
2013-06-05 14:19:31 +02:00
|
|
|
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int remove_break_f(BlockBackend *blk, int argc, char **argv)
|
2013-11-20 03:01:54 +01:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2015-02-05 19:58:22 +01:00
|
|
|
ret = bdrv_debug_remove_breakpoint(blk_bs(blk), argv[1]);
|
2013-11-20 03:01:54 +01:00
|
|
|
if (ret < 0) {
|
|
|
|
printf("Could not remove breakpoint %s: %s\n", argv[1], strerror(-ret));
|
2018-05-09 21:42:59 +02:00
|
|
|
return ret;
|
2013-11-20 03:01:54 +01:00
|
|
|
}
|
2018-05-09 21:42:59 +02:00
|
|
|
|
|
|
|
return 0;
|
2013-11-20 03:01:54 +01:00
|
|
|
}
|
|
|
|
|
2013-06-05 14:19:31 +02:00
|
|
|
static const cmdinfo_t break_cmd = {
|
|
|
|
.name = "break",
|
|
|
|
.argmin = 2,
|
|
|
|
.argmax = 2,
|
|
|
|
.cfunc = break_f,
|
|
|
|
.args = "event tag",
|
|
|
|
.oneline = "sets a breakpoint on event and tags the stopped "
|
|
|
|
"request as tag",
|
|
|
|
};
|
|
|
|
|
2013-11-20 03:01:54 +01:00
|
|
|
static const cmdinfo_t remove_break_cmd = {
|
|
|
|
.name = "remove_break",
|
|
|
|
.argmin = 1,
|
|
|
|
.argmax = 1,
|
|
|
|
.cfunc = remove_break_f,
|
|
|
|
.args = "tag",
|
|
|
|
.oneline = "remove a breakpoint by tag",
|
|
|
|
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int resume_f(BlockBackend *blk, int argc, char **argv)
|
2013-06-05 14:19:31 +02:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2015-02-05 19:58:22 +01:00
|
|
|
ret = bdrv_debug_resume(blk_bs(blk), argv[1]);
|
2013-06-05 14:19:31 +02:00
|
|
|
if (ret < 0) {
|
|
|
|
printf("Could not resume request: %s\n", strerror(-ret));
|
2018-05-09 21:42:59 +02:00
|
|
|
return ret;
|
2013-06-05 14:19:31 +02:00
|
|
|
}
|
2018-05-09 21:42:59 +02:00
|
|
|
|
|
|
|
return 0;
|
2013-06-05 14:19:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static const cmdinfo_t resume_cmd = {
|
|
|
|
.name = "resume",
|
|
|
|
.argmin = 1,
|
|
|
|
.argmax = 1,
|
|
|
|
.cfunc = resume_f,
|
|
|
|
.args = "tag",
|
|
|
|
.oneline = "resumes the request tagged as tag",
|
|
|
|
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int wait_break_f(BlockBackend *blk, int argc, char **argv)
|
2013-06-05 14:19:31 +02:00
|
|
|
{
|
2015-02-05 19:58:22 +01:00
|
|
|
while (!bdrv_debug_is_suspended(blk_bs(blk), argv[1])) {
|
|
|
|
aio_poll(blk_get_aio_context(blk), true);
|
2013-06-05 14:19:31 +02:00
|
|
|
}
|
2018-05-09 21:42:59 +02:00
|
|
|
return 0;
|
2013-06-05 14:19:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static const cmdinfo_t wait_break_cmd = {
|
|
|
|
.name = "wait_break",
|
|
|
|
.argmin = 1,
|
|
|
|
.argmax = 1,
|
|
|
|
.cfunc = wait_break_f,
|
|
|
|
.args = "tag",
|
|
|
|
.oneline = "waits for the suspension of a request",
|
|
|
|
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int abort_f(BlockBackend *blk, int argc, char **argv)
|
2013-06-05 14:19:31 +02:00
|
|
|
{
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
|
|
|
|
static const cmdinfo_t abort_cmd = {
|
|
|
|
.name = "abort",
|
|
|
|
.cfunc = abort_f,
|
|
|
|
.flags = CMD_NOFILE_OK,
|
|
|
|
.oneline = "simulate a program crash using abort(3)",
|
|
|
|
};
|
|
|
|
|
2014-12-08 10:48:10 +01:00
|
|
|
static void sigraise_help(void)
|
|
|
|
{
|
|
|
|
printf(
|
|
|
|
"\n"
|
|
|
|
" raises the given signal\n"
|
|
|
|
"\n"
|
|
|
|
" Example:\n"
|
|
|
|
" 'sigraise %i' - raises SIGTERM\n"
|
|
|
|
"\n"
|
|
|
|
" Invokes raise(signal), where \"signal\" is the mandatory integer argument\n"
|
|
|
|
" given to sigraise.\n"
|
|
|
|
"\n", SIGTERM);
|
|
|
|
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int sigraise_f(BlockBackend *blk, int argc, char **argv);
|
2014-12-08 10:48:10 +01:00
|
|
|
|
|
|
|
static const cmdinfo_t sigraise_cmd = {
|
|
|
|
.name = "sigraise",
|
|
|
|
.cfunc = sigraise_f,
|
|
|
|
.argmin = 1,
|
|
|
|
.argmax = 1,
|
|
|
|
.flags = CMD_NOFILE_OK,
|
|
|
|
.args = "signal",
|
|
|
|
.oneline = "raises a signal",
|
|
|
|
.help = sigraise_help,
|
|
|
|
};
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int sigraise_f(BlockBackend *blk, int argc, char **argv)
|
2014-12-08 10:48:10 +01:00
|
|
|
{
|
2015-11-06 00:53:02 +01:00
|
|
|
int64_t sig = cvtnum(argv[1]);
|
2014-12-08 10:48:10 +01:00
|
|
|
if (sig < 0) {
|
2015-11-06 00:53:04 +01:00
|
|
|
print_cvtnum_err(sig, argv[1]);
|
2018-05-09 21:42:59 +02:00
|
|
|
return sig;
|
2015-11-06 00:53:02 +01:00
|
|
|
} else if (sig > NSIG) {
|
|
|
|
printf("signal argument '%s' is too large to be a valid signal\n",
|
|
|
|
argv[1]);
|
2018-05-09 21:42:59 +02:00
|
|
|
return -EINVAL;
|
2014-12-08 10:48:10 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Using raise() to kill this process does not necessarily flush all open
|
|
|
|
* streams. At least stdout and stderr (although the latter should be
|
|
|
|
* non-buffered anyway) should be flushed, though. */
|
|
|
|
fflush(stdout);
|
|
|
|
fflush(stderr);
|
|
|
|
|
|
|
|
raise(sig);
|
2018-05-09 21:42:59 +02:00
|
|
|
|
|
|
|
return 0;
|
2014-12-08 10:48:10 +01:00
|
|
|
}
|
|
|
|
|
2014-01-15 15:39:10 +01:00
|
|
|
static void sleep_cb(void *opaque)
|
|
|
|
{
|
|
|
|
bool *expired = opaque;
|
|
|
|
*expired = true;
|
|
|
|
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int sleep_f(BlockBackend *blk, int argc, char **argv)
|
2014-01-15 15:39:10 +01:00
|
|
|
{
|
|
|
|
char *endptr;
|
|
|
|
long ms;
|
|
|
|
struct QEMUTimer *timer;
|
|
|
|
bool expired = false;
|
|
|
|
|
|
|
|
ms = strtol(argv[1], &endptr, 0);
|
|
|
|
if (ms < 0 || *endptr != '\0') {
|
|
|
|
printf("%s is not a valid number\n", argv[1]);
|
2018-05-09 21:42:59 +02:00
|
|
|
return -EINVAL;
|
2014-01-15 15:39:10 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
timer = timer_new_ns(QEMU_CLOCK_HOST, sleep_cb, &expired);
|
|
|
|
timer_mod(timer, qemu_clock_get_ns(QEMU_CLOCK_HOST) + SCALE_MS * ms);
|
|
|
|
|
|
|
|
while (!expired) {
|
|
|
|
main_loop_wait(false);
|
|
|
|
}
|
|
|
|
|
|
|
|
timer_free(timer);
|
2018-05-09 21:42:59 +02:00
|
|
|
return 0;
|
2014-01-15 15:39:10 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static const cmdinfo_t sleep_cmd = {
|
|
|
|
.name = "sleep",
|
|
|
|
.argmin = 1,
|
|
|
|
.argmax = 1,
|
|
|
|
.cfunc = sleep_f,
|
|
|
|
.flags = CMD_NOFILE_OK,
|
|
|
|
.oneline = "waits for the given value in milliseconds",
|
|
|
|
};
|
|
|
|
|
2013-06-05 14:19:33 +02:00
|
|
|
static void help_oneline(const char *cmd, const cmdinfo_t *ct)
|
|
|
|
{
|
qemu-io-cmds: Simplify help_oneline
help_oneline is declared and starts as:
static void help_oneline(const char *cmd, const cmdinfo_t *ct)
{
if (cmd) {
printf("%s ", cmd);
} else {
printf("%s ", ct->name);
if (ct->altname) {
printf("(or %s) ", ct->altname);
}
}
However, there are only two routes to help_oneline being called:
help_f -> help_all -> help_oneline(ct->name, ct)
help_f -> help_onecmd(argv[1], ct)
In the first case, 'cmd' and 'ct->name' are the same thing,
so it's impossible for the if (cmd) to be false and then validly
print ct->name - this is upsetting gcc
( https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96739 )
In the second case, cmd is argv[1] and we know we've got argv[1]
so again (cmd) is non-NULL.
Simplify help_oneline by just printing cmd.
(Also strengthen argc check just to be pedantic)
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-Id: <20200824102914.105619-1-dgilbert@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2020-08-24 12:29:14 +02:00
|
|
|
printf("%s ", cmd);
|
2013-06-05 14:19:33 +02:00
|
|
|
|
|
|
|
if (ct->args) {
|
|
|
|
printf("%s ", ct->args);
|
|
|
|
}
|
|
|
|
printf("-- %s\n", ct->oneline);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void help_onecmd(const char *cmd, const cmdinfo_t *ct)
|
|
|
|
{
|
|
|
|
help_oneline(cmd, ct);
|
|
|
|
if (ct->help) {
|
|
|
|
ct->help();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void help_all(void)
|
|
|
|
{
|
|
|
|
const cmdinfo_t *ct;
|
|
|
|
|
|
|
|
for (ct = cmdtab; ct < &cmdtab[ncmds]; ct++) {
|
|
|
|
help_oneline(ct->name, ct);
|
|
|
|
}
|
|
|
|
printf("\nUse 'help commandname' for extended help.\n");
|
|
|
|
}
|
|
|
|
|
2018-05-09 21:42:59 +02:00
|
|
|
static int help_f(BlockBackend *blk, int argc, char **argv)
|
2013-06-05 14:19:33 +02:00
|
|
|
{
|
|
|
|
const cmdinfo_t *ct;
|
|
|
|
|
qemu-io-cmds: Simplify help_oneline
help_oneline is declared and starts as:
static void help_oneline(const char *cmd, const cmdinfo_t *ct)
{
if (cmd) {
printf("%s ", cmd);
} else {
printf("%s ", ct->name);
if (ct->altname) {
printf("(or %s) ", ct->altname);
}
}
However, there are only two routes to help_oneline being called:
help_f -> help_all -> help_oneline(ct->name, ct)
help_f -> help_onecmd(argv[1], ct)
In the first case, 'cmd' and 'ct->name' are the same thing,
so it's impossible for the if (cmd) to be false and then validly
print ct->name - this is upsetting gcc
( https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96739 )
In the second case, cmd is argv[1] and we know we've got argv[1]
so again (cmd) is non-NULL.
Simplify help_oneline by just printing cmd.
(Also strengthen argc check just to be pedantic)
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-Id: <20200824102914.105619-1-dgilbert@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2020-08-24 12:29:14 +02:00
|
|
|
if (argc < 2) {
|
2013-06-05 14:19:33 +02:00
|
|
|
help_all();
|
2018-05-09 21:42:59 +02:00
|
|
|
return 0;
|
2013-06-05 14:19:33 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
ct = find_command(argv[1]);
|
|
|
|
if (ct == NULL) {
|
|
|
|
printf("command %s not found\n", argv[1]);
|
2018-05-09 21:42:59 +02:00
|
|
|
return -EINVAL;
|
2013-06-05 14:19:33 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
help_onecmd(argv[1], ct);
|
2018-05-09 21:42:59 +02:00
|
|
|
return 0;
|
2013-06-05 14:19:33 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static const cmdinfo_t help_cmd = {
|
|
|
|
.name = "help",
|
|
|
|
.altname = "?",
|
|
|
|
.cfunc = help_f,
|
|
|
|
.argmin = 0,
|
|
|
|
.argmax = 1,
|
|
|
|
.flags = CMD_FLAG_GLOBAL,
|
|
|
|
.args = "[command]",
|
|
|
|
.oneline = "help for one or all commands",
|
|
|
|
};
|
|
|
|
|
monitor: hmp_qemu_io: acquire aio context, fix crash
Max reported the following bug:
$ ./qemu-img create -f raw src.img 1G
$ ./qemu-img create -f raw dst.img 1G
$ (echo '
{"execute":"qmp_capabilities"}
{"execute":"blockdev-mirror",
"arguments":{"job-id":"mirror",
"device":"source",
"target":"target",
"sync":"full",
"filter-node-name":"mirror-top"}}
'; sleep 3; echo '
{"execute":"human-monitor-command",
"arguments":{"command-line":
"qemu-io mirror-top \"write 0 1G\""}}') \
| x86_64-softmmu/qemu-system-x86_64 \
-qmp stdio \
-blockdev file,node-name=source,filename=src.img \
-blockdev file,node-name=target,filename=dst.img \
-object iothread,id=iothr0 \
-device virtio-blk,drive=source,iothread=iothr0
crashes:
0 raise () at /usr/lib/libc.so.6
1 abort () at /usr/lib/libc.so.6
2 error_exit
(err=<optimized out>,
msg=msg@entry=0x55fbb1634790 <__func__.27> "qemu_mutex_unlock_impl")
at ../util/qemu-thread-posix.c:37
3 qemu_mutex_unlock_impl
(mutex=mutex@entry=0x55fbb25ab6e0,
file=file@entry=0x55fbb1636957 "../util/async.c",
line=line@entry=650)
at ../util/qemu-thread-posix.c:109
4 aio_context_release (ctx=ctx@entry=0x55fbb25ab680) at ../util/async.c:650
5 bdrv_do_drained_begin
(bs=bs@entry=0x55fbb3a87000, recursive=recursive@entry=false,
parent=parent@entry=0x0,
ignore_bds_parents=ignore_bds_parents@entry=false,
poll=poll@entry=true) at ../block/io.c:441
6 bdrv_do_drained_begin
(poll=true, ignore_bds_parents=false, parent=0x0, recursive=false,
bs=0x55fbb3a87000) at ../block/io.c:448
7 blk_drain (blk=0x55fbb26c5a00) at ../block/block-backend.c:1718
8 blk_unref (blk=0x55fbb26c5a00) at ../block/block-backend.c:498
9 blk_unref (blk=0x55fbb26c5a00) at ../block/block-backend.c:491
10 hmp_qemu_io (mon=0x7fffaf3fc7d0, qdict=<optimized out>)
at ../block/monitor/block-hmp-cmds.c:628
man pthread_mutex_unlock
...
EPERM The mutex type is PTHREAD_MUTEX_ERRORCHECK or
PTHREAD_MUTEX_RECURSIVE, or the mutex is a robust mutex, and the
current thread does not own the mutex.
So, thread doesn't own the mutex. And we have iothread here.
Next, note that AIO_WAIT_WHILE() documents that ctx must be acquired
exactly once by caller. But where is it acquired in the call stack?
Seems nowhere.
qemuio_command does acquire the aio context. But we need the context acquired
around blk_unref() as well, and actually around blk_insert_bs() too.
Let's refactor qemuio_command so that it doesn't acquire aio context
but callers do that instead. This way we can cleanly acquire aio
context in hmp_qemu_io() around all three calls.
Reported-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20210423134233.51495-1-vsementsov@virtuozzo.com>
[mreitz: Fixed comment]
Signed-off-by: Max Reitz <mreitz@redhat.com>
2021-04-23 15:42:33 +02:00
|
|
|
/*
|
|
|
|
* Called with aio context of blk acquired. Or with qemu_get_aio_context()
|
|
|
|
* context acquired if blk is NULL.
|
|
|
|
*/
|
2018-05-09 21:42:59 +02:00
|
|
|
int qemuio_command(BlockBackend *blk, const char *cmd)
|
2013-06-05 14:19:32 +02:00
|
|
|
{
|
|
|
|
char *input;
|
|
|
|
const cmdinfo_t *ct;
|
|
|
|
char **v;
|
|
|
|
int c;
|
2018-05-09 21:42:59 +02:00
|
|
|
int ret = 0;
|
2013-06-05 14:19:32 +02:00
|
|
|
|
|
|
|
input = g_strdup(cmd);
|
|
|
|
v = breakline(input, &c);
|
|
|
|
if (c) {
|
|
|
|
ct = find_command(v[0]);
|
|
|
|
if (ct) {
|
2018-05-09 21:42:59 +02:00
|
|
|
ret = command(blk, ct, c, v);
|
2013-06-05 14:19:32 +02:00
|
|
|
} else {
|
|
|
|
fprintf(stderr, "command \"%s\" not found\n", v[0]);
|
2018-05-09 21:42:59 +02:00
|
|
|
ret = -EINVAL;
|
2013-06-05 14:19:32 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
g_free(input);
|
|
|
|
g_free(v);
|
2018-05-09 21:42:59 +02:00
|
|
|
|
|
|
|
return ret;
|
2013-06-05 14:19:32 +02:00
|
|
|
}
|
|
|
|
|
2013-06-05 14:19:31 +02:00
|
|
|
static void __attribute((constructor)) init_qemuio_commands(void)
|
|
|
|
{
|
|
|
|
/* initialize commands */
|
2013-06-05 14:19:36 +02:00
|
|
|
qemuio_add_command(&help_cmd);
|
|
|
|
qemuio_add_command(&read_cmd);
|
|
|
|
qemuio_add_command(&readv_cmd);
|
|
|
|
qemuio_add_command(&write_cmd);
|
|
|
|
qemuio_add_command(&writev_cmd);
|
|
|
|
qemuio_add_command(&aio_read_cmd);
|
|
|
|
qemuio_add_command(&aio_write_cmd);
|
|
|
|
qemuio_add_command(&aio_flush_cmd);
|
|
|
|
qemuio_add_command(&flush_cmd);
|
|
|
|
qemuio_add_command(&truncate_cmd);
|
|
|
|
qemuio_add_command(&length_cmd);
|
|
|
|
qemuio_add_command(&info_cmd);
|
|
|
|
qemuio_add_command(&discard_cmd);
|
|
|
|
qemuio_add_command(&alloc_cmd);
|
|
|
|
qemuio_add_command(&map_cmd);
|
2014-12-08 17:37:28 +01:00
|
|
|
qemuio_add_command(&reopen_cmd);
|
2013-06-05 14:19:36 +02:00
|
|
|
qemuio_add_command(&break_cmd);
|
2013-11-20 03:01:54 +01:00
|
|
|
qemuio_add_command(&remove_break_cmd);
|
2013-06-05 14:19:36 +02:00
|
|
|
qemuio_add_command(&resume_cmd);
|
|
|
|
qemuio_add_command(&wait_break_cmd);
|
|
|
|
qemuio_add_command(&abort_cmd);
|
2014-01-15 15:39:10 +01:00
|
|
|
qemuio_add_command(&sleep_cmd);
|
2014-12-08 10:48:10 +01:00
|
|
|
qemuio_add_command(&sigraise_cmd);
|
2013-06-05 14:19:31 +02:00
|
|
|
}
|