2010-02-18 17:48:12 +01:00
|
|
|
/*
|
|
|
|
* Block protocol for I/O error injection
|
|
|
|
*
|
blkdebug: Add pass-through write_zero and discard support
In order to test the effects of artificial geometry constraints
on operations like write zero or discard, we first need blkdebug
to manage these actions. It also allows us to inject errors on
those operations, just like we can for read/write/flush.
We can also test the contract promised by the block layer; namely,
if a device has specified limits on alignment or maximum size,
then those limits must be obeyed (for now, the blkdebug driver
merely inherits limits from whatever it is wrapping, but the next
patch will further enhance it to allow specific limit overrides).
This patch intentionally refuses to service requests smaller than
the requested alignments; this is because an upcoming patch adds
a qemu-iotest to prove that the block layer is correctly handling
fragmentation, but the test only works if there is a way to tell
the difference at artificial alignment boundaries when blkdebug is
using a larger-than-default alignment. If we let the blkdebug
layer always defer to the underlying layer, which potentially has
a smaller granularity, the iotest will be thwarted.
Tested by setting up an NBD server with export 'foo', then invoking:
$ ./qemu-io
qemu-io> open -o driver=blkdebug blkdebug::nbd://localhost:10809/foo
qemu-io> d 0 15M
qemu-io> w -z 0 15M
Pre-patch, the server never sees the discard (it was silently
eaten by the block layer); post-patch it is passed across the
wire. Likewise, pre-patch the write is always passed with
NBD_WRITE (with 15M of zeroes on the wire), while post-patch
it can utilize NBD_WRITE_ZEROES (for less traffic).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20170429191419.30051-7-eblake@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
2017-04-29 21:14:16 +02:00
|
|
|
* Copyright (C) 2016-2017 Red Hat, Inc.
|
2010-02-18 17:48:12 +01:00
|
|
|
* Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
|
|
* in the Software without restriction, including without limitation the rights
|
|
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
* THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
2016-01-18 19:01:42 +01:00
|
|
|
#include "qemu/osdep.h"
|
include/qemu/osdep.h: Don't include qapi/error.h
Commit 57cb38b included qapi/error.h into qemu/osdep.h to get the
Error typedef. Since then, we've moved to include qemu/osdep.h
everywhere. Its file comment explains: "To avoid getting into
possible circular include dependencies, this file should not include
any other QEMU headers, with the exceptions of config-host.h,
compiler.h, os-posix.h and os-win32.h, all of which are doing a
similar job to this file and are under similar constraints."
qapi/error.h doesn't do a similar job, and it doesn't adhere to
similar constraints: it includes qapi-types.h. That's in excess of
100KiB of crap most .c files don't actually need.
Add the typedef to qemu/typedefs.h, and include that instead of
qapi/error.h. Include qapi/error.h in .c files that need it and don't
get it now. Include qapi-types.h in qom/object.h for uint16List.
Update scripts/clean-includes accordingly. Update it further to match
reality: replace config.h by config-target.h, add sysemu/os-posix.h,
sysemu/os-win32.h. Update the list of includes in the qemu/osdep.h
comment quoted above similarly.
This reduces the number of objects depending on qapi/error.h from "all
of them" to less than a third. Unfortunately, the number depending on
qapi-types.h shrinks only a little. More work is needed for that one.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
[Fix compilation without the spice devel packages. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2016-03-14 09:01:28 +01:00
|
|
|
#include "qapi/error.h"
|
2016-03-20 18:16:19 +01:00
|
|
|
#include "qemu/cutils.h"
|
2012-12-17 18:20:00 +01:00
|
|
|
#include "qemu/config-file.h"
|
2012-12-17 18:19:44 +01:00
|
|
|
#include "block/block_int.h"
|
2019-11-08 13:34:53 +01:00
|
|
|
#include "block/qdict.h"
|
2012-12-17 18:20:00 +01:00
|
|
|
#include "qemu/module.h"
|
2018-02-01 12:18:46 +01:00
|
|
|
#include "qemu/option.h"
|
2019-11-08 13:34:53 +01:00
|
|
|
#include "qapi/qapi-visit-block-core.h"
|
2014-07-18 20:24:57 +02:00
|
|
|
#include "qapi/qmp/qdict.h"
|
2019-11-08 13:34:53 +01:00
|
|
|
#include "qapi/qmp/qlist.h"
|
2014-07-18 20:24:57 +02:00
|
|
|
#include "qapi/qmp/qstring.h"
|
2019-11-08 13:34:53 +01:00
|
|
|
#include "qapi/qobject-input-visitor.h"
|
2015-11-30 12:44:44 +01:00
|
|
|
#include "sysemu/qtest.h"
|
2010-02-18 17:48:12 +01:00
|
|
|
|
|
|
|
/*
 * Per-device state of the blkdebug driver.
 *
 * Holds the current rule-matching state number, any user-requested
 * geometry overrides (0 = no override), permission modifiers, and the
 * parsed rule lists.
 */
typedef struct BDRVBlkdebugState {
    int state;                  /* current state number used for rule matching */
    int new_state;              /* state to switch to at the next event */
    uint64_t align;             /* requested request alignment (bytes), 0 = default */
    uint64_t max_transfer;      /* maximum transfer size (bytes), 0 = default */
    uint64_t opt_write_zero;    /* optimum write-zeroes alignment, 0 = default */
    uint64_t max_write_zero;    /* maximum write-zeroes size, 0 = default */
    uint64_t opt_discard;       /* optimum discard alignment, 0 = default */
    uint64_t max_discard;       /* maximum discard size, 0 = default */

    uint64_t take_child_perms;      /* extra permissions taken on the child */
    uint64_t unshare_child_perms;   /* permissions not shared with others */

    /* For blkdebug_refresh_filename() */
    char *config_file;

    /* Rules parsed from config/options, indexed by event */
    QLIST_HEAD(, BlkdebugRule) rules[BLKDBG__MAX];
    /* Rules activated for the current event/state */
    QSIMPLEQ_HEAD(, BlkdebugRule) active_rules;
    /* Requests suspended by ACTION_SUSPEND rules */
    QLIST_HEAD(, BlkdebugSuspendedReq) suspended_reqs;
} BDRVBlkdebugState;
|
|
|
|
|
2010-02-19 16:24:35 +01:00
|
|
|
/* AIO control block carrying the result to return for a debug-handled request */
typedef struct BlkdebugAIOCB {
    BlockAIOCB common;
    int ret;        /* result code delivered to the caller */
} BlkdebugAIOCB;
|
|
|
|
|
2012-12-06 14:32:57 +01:00
|
|
|
/* A request parked by an ACTION_SUSPEND rule until its tag is resumed */
typedef struct BlkdebugSuspendedReq {
    Coroutine *co;      /* coroutine to re-enter on resume */
    char *tag;          /* user-visible tag identifying this suspension */
    QLIST_ENTRY(BlkdebugSuspendedReq) next;
} BlkdebugSuspendedReq;
|
|
|
|
|
2010-03-15 17:27:00 +01:00
|
|
|
/* Kinds of action a blkdebug rule can perform when it fires */
enum {
    ACTION_INJECT_ERROR,    /* fail the request with a configured errno */
    ACTION_SET_STATE,       /* switch the driver to a new state number */
    ACTION_SUSPEND,         /* park the request until explicitly resumed */
};
|
|
|
|
|
|
|
|
/*
 * One parsed rule: fires for a given event while the driver is in a given
 * state, performing one of the actions above.  Action-specific parameters
 * live in the union.
 */
typedef struct BlkdebugRule {
    BlkdebugEvent event;    /* event that triggers this rule */
    int action;             /* one of the ACTION_* values */
    int state;              /* state the rule applies in; 0 = any state */
    union {
        struct {
            uint64_t iotype_mask;   /* bitmask of BlkdebugIOType values to match */
            int error;              /* errno to inject */
            int immediately;        /* if set, fail without yielding first */
            int once;               /* if set, remove the rule after it fires */
            int64_t offset;         /* byte offset to match, -1 = any offset */
        } inject;
        struct {
            int new_state;          /* state to switch to */
        } set_state;
        struct {
            char *tag;              /* owned tag string for resume matching */
        } suspend;
    } options;
    QLIST_ENTRY(BlkdebugRule) next;             /* link in s->rules[event] */
    QSIMPLEQ_ENTRY(BlkdebugRule) active_next;   /* link in s->active_rules */
} BlkdebugRule;
|
|
|
|
|
2019-05-07 22:35:04 +02:00
|
|
|
/* iotype_mask has one bit per BlkdebugIOType, so the enum must fit in 64 bits */
QEMU_BUILD_BUG_MSG(BLKDEBUG_IO_TYPE__MAX > 64,
                   "BlkdebugIOType mask does not fit into an uint64_t");
|
|
|
|
|
2010-03-15 17:27:00 +01:00
|
|
|
/* Options accepted by an [inject-error] section / inject-error rule */
static QemuOptsList inject_error_opts = {
    .name = "inject-error",
    .head = QTAILQ_HEAD_INITIALIZER(inject_error_opts.head),
    .desc = {
        {
            /* event name; parsed against BlkdebugEvent_lookup in add_rule() */
            .name = "event",
            .type = QEMU_OPT_STRING,
        },
        {
            /* state number the rule applies in */
            .name = "state",
            .type = QEMU_OPT_NUMBER,
        },
        {
            /* I/O type to match (read/write/...); default matches all */
            .name = "iotype",
            .type = QEMU_OPT_STRING,
        },
        {
            /* errno value to inject; defaults to EIO */
            .name = "errno",
            .type = QEMU_OPT_NUMBER,
        },
        {
            /* sector to match; converted to a byte offset in add_rule() */
            .name = "sector",
            .type = QEMU_OPT_NUMBER,
        },
        {
            /* fire at most once, then remove the rule */
            .name = "once",
            .type = QEMU_OPT_BOOL,
        },
        {
            /* fail without yielding the coroutine first */
            .name = "immediately",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};
|
|
|
|
|
|
|
|
/* Options accepted by a [set-state] section / set-state rule */
static QemuOptsList set_state_opts = {
    .name = "set-state",
    .head = QTAILQ_HEAD_INITIALIZER(set_state_opts.head),
    .desc = {
        {
            /* event name that triggers the state change */
            .name = "event",
            .type = QEMU_OPT_STRING,
        },
        {
            /* state number the rule applies in */
            .name = "state",
            .type = QEMU_OPT_NUMBER,
        },
        {
            /* state number to switch to when the rule fires */
            .name = "new_state",
            .type = QEMU_OPT_NUMBER,
        },
        { /* end of list */ }
    },
};
|
|
|
|
|
|
|
|
/* All option groups recognized in a blkdebug config file (NULL-terminated) */
static QemuOptsList *config_groups[] = {
    &inject_error_opts,
    &set_state_opts,
    NULL
};
|
|
|
|
|
|
|
|
/* Context passed through qemu_opts_foreach() into add_rule() */
struct add_rule_data {
    BDRVBlkdebugState *s;   /* device state the parsed rules are added to */
    int action;             /* ACTION_* implied by the option group being parsed */
};
|
|
|
|
|
2015-03-13 13:35:14 +01:00
|
|
|
/*
 * qemu_opts_foreach() callback: turn one parsed option group into a
 * BlkdebugRule and insert it at the head of s->rules[event].
 *
 * @opaque: struct add_rule_data providing the device state and action kind
 * @opts:   the parsed options of a single rule
 * @errp:   set on failure
 *
 * Returns 0 on success, -1 on error (missing/unknown event or bad iotype).
 */
static int add_rule(void *opaque, QemuOpts *opts, Error **errp)
{
    struct add_rule_data *d = opaque;
    BDRVBlkdebugState *s = d->s;
    const char *event_name;     /* was "const char* event_name": fix pointer style */
    int event;
    struct BlkdebugRule *rule;
    int64_t sector;
    BlkdebugIOType iotype;
    Error *local_error = NULL;

    /* Find the right event for the rule */
    event_name = qemu_opt_get(opts, "event");
    if (!event_name) {
        error_setg(errp, "Missing event name for rule");
        return -1;
    }
    event = qapi_enum_parse(&BlkdebugEvent_lookup, event_name, -1, errp);
    if (event < 0) {
        return -1;
    }

    /* Set attributes common for all actions */
    rule = g_malloc0(sizeof(*rule));
    *rule = (struct BlkdebugRule) {
        .event  = event,
        .action = d->action,
        .state  = qemu_opt_get_number(opts, "state", 0),
    };

    /* Parse action-specific options */
    switch (d->action) {
    case ACTION_INJECT_ERROR:
        rule->options.inject.error = qemu_opt_get_number(opts, "errno", EIO);
        rule->options.inject.once  = qemu_opt_get_bool(opts, "once", 0);
        rule->options.inject.immediately =
            qemu_opt_get_bool(opts, "immediately", 0);
        /* "sector" is user-facing; internally rules match byte offsets */
        sector = qemu_opt_get_number(opts, "sector", -1);
        rule->options.inject.offset =
            sector == -1 ? -1 : sector * BDRV_SECTOR_SIZE;

        iotype = qapi_enum_parse(&BlkdebugIOType_lookup,
                                 qemu_opt_get(opts, "iotype"),
                                 BLKDEBUG_IO_TYPE__MAX, &local_error);
        if (local_error) {
            error_propagate(errp, local_error);
            g_free(rule);
            return -1;
        }
        if (iotype != BLKDEBUG_IO_TYPE__MAX) {
            rule->options.inject.iotype_mask = (1ull << iotype);
        } else {
            /* Apply the default: match every I/O type */
            rule->options.inject.iotype_mask =
                (1ull << BLKDEBUG_IO_TYPE_READ)
                | (1ull << BLKDEBUG_IO_TYPE_WRITE)
                | (1ull << BLKDEBUG_IO_TYPE_WRITE_ZEROES)
                | (1ull << BLKDEBUG_IO_TYPE_DISCARD)
                | (1ull << BLKDEBUG_IO_TYPE_FLUSH);
        }
        break;

    case ACTION_SET_STATE:
        rule->options.set_state.new_state =
            qemu_opt_get_number(opts, "new_state", 0);
        break;

    case ACTION_SUSPEND:
        rule->options.suspend.tag =
            g_strdup(qemu_opt_get(opts, "tag"));
        break;
    }   /* dropped stray ';' that followed this brace */

    /* Add the rule */
    QLIST_INSERT_HEAD(&s->rules[event], rule, next);

    return 0;
}
|
|
|
|
|
2012-12-06 14:32:56 +01:00
|
|
|
/*
 * Unlink @rule from its per-event list and free it, including any
 * action-specific heap data it owns.
 */
static void remove_rule(BlkdebugRule *rule)
{
    /* Of the three actions, only suspend rules own extra memory (the tag) */
    if (rule->action == ACTION_SUSPEND) {
        g_free(rule->options.suspend.tag);
    }

    QLIST_REMOVE(rule, next);
    g_free(rule);
}
|
|
|
|
|
2013-12-20 19:28:07 +01:00
|
|
|
/*
 * Populate s->rules from an optional config file and from -blockdev/-drive
 * style options.
 *
 * @s:        device state receiving the rules
 * @filename: path of a blkdebug config file, or NULL to use options only
 * @options:  QDict of rule options, consumed by qemu_config_parse_qdict()
 * @errp:     set on failure
 *
 * Returns 0 on success, a negative errno value on failure.  The global
 * option lists are reset on both paths so a later open starts clean.
 */
static int read_config(BDRVBlkdebugState *s, const char *filename,
                       QDict *options, Error **errp)
{
    FILE *f = NULL;
    int ret;
    struct add_rule_data d;
    Error *local_err = NULL;

    if (filename) {
        f = fopen(filename, "r");
        if (f == NULL) {
            error_setg_errno(errp, errno, "Could not read blkdebug config file");
            return -errno;
        }

        ret = qemu_config_parse(f, config_groups, filename);
        if (ret < 0) {
            error_setg(errp, "Could not parse blkdebug config file");
            goto fail;
        }
    }

    /* Options given on the command line are merged on top of the file */
    qemu_config_parse_qdict(options, config_groups, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    /* Convert each parsed option group into rules via add_rule() */
    d.s = s;
    d.action = ACTION_INJECT_ERROR;
    qemu_opts_foreach(&inject_error_opts, add_rule, &d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    d.action = ACTION_SET_STATE;
    qemu_opts_foreach(&set_state_opts, add_rule, &d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    ret = 0;
fail:
    /* Drop the parsed opts; the rules now live in s->rules */
    qemu_opts_reset(&inject_error_opts);
    qemu_opts_reset(&set_state_opts);
    if (f) {
        fclose(f);
    }
    return ret;
}
|
|
|
|
|
|
|
|
/* Valid blkdebug filenames look like blkdebug:path/to/config:path/to/image */
|
2013-04-10 13:37:33 +02:00
|
|
|
/* Valid blkdebug filenames look like blkdebug:path/to/config:path/to/image */

/*
 * Split a "blkdebug:config:image" filename into the "config" and "x-image"
 * entries of @options.  Without the prefix, the whole string becomes
 * "x-image" and all other options must already be present in the QDict.
 */
static void blkdebug_parse_filename(const char *filename, QDict *options,
                                    Error **errp)
{
    const char *c;

    /* Parse the blkdebug: prefix */
    if (!strstart(filename, "blkdebug:", &filename)) {
        /* There was no prefix; therefore, all options have to be already
           present in the QDict (except for the filename) */
        qdict_put_str(options, "x-image", filename);
        return;
    }

    /* Parse config file path */
    c = strchr(filename, ':');
    if (c == NULL) {
        error_setg(errp, "blkdebug requires both config file and image path");
        return;
    }

    if (c != filename) {
        /* Non-empty config part: "blkdebug::image" means no config file */
        QString *config_path;
        config_path = qstring_from_substr(filename, 0, c - filename);
        qdict_put(options, "config", config_path);
    }

    /* TODO Allow multi-level nesting and set file.filename here */
    filename = c + 1;
    qdict_put_str(options, "x-image", filename);
}
|
|
|
|
|
2019-11-08 13:34:53 +01:00
|
|
|
/*
 * Parse a list of block permission names from @options into a permission
 * bitmask.
 *
 * Keys starting with @prefix (e.g. "take-child-perms.") are extracted from
 * @options, crumpled back into a QAPI list, parsed as a BlockPermissionList
 * and OR-ed into @dest.  An absent/empty list yields *dest == 0.
 *
 * Returns 0 on success, -EINVAL on parse failure (with @errp set).
 */
static int blkdebug_parse_perm_list(uint64_t *dest, QDict *options,
                                    const char *prefix, Error **errp)
{
    int ret = 0;
    QDict *subqdict = NULL;
    QObject *crumpled_subqdict = NULL;
    Visitor *v = NULL;
    BlockPermissionList *perm_list = NULL, *element;

    *dest = 0;

    /* Pull out the "prefix.N" entries; nothing to do if there are none */
    qdict_extract_subqdict(options, &subqdict, prefix);
    if (!qdict_size(subqdict)) {
        goto out;
    }

    /* Rebuild the flat "prefix.N" keys into a proper list QObject */
    crumpled_subqdict = qdict_crumple(subqdict, errp);
    if (!crumpled_subqdict) {
        ret = -EINVAL;
        goto out;
    }

    v = qobject_input_visitor_new(crumpled_subqdict);
    if (!visit_type_BlockPermissionList(v, NULL, &perm_list, errp)) {
        ret = -EINVAL;
        goto out;
    }

    for (element = perm_list; element; element = element->next) {
        *dest |= bdrv_qapi_perm_to_blk_perm(element->value);
    }

out:
    qapi_free_BlockPermissionList(perm_list);
    visit_free(v);
    qobject_unref(subqdict);
    qobject_unref(crumpled_subqdict);
    return ret;
}
|
|
|
|
|
|
|
|
static int blkdebug_parse_perms(BDRVBlkdebugState *s, QDict *options,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = blkdebug_parse_perm_list(&s->take_child_perms, options,
|
|
|
|
"take-child-perms.", errp);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = blkdebug_parse_perm_list(&s->unshare_child_perms, options,
|
|
|
|
"unshare-child-perms.", errp);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-04-10 13:37:33 +02:00
|
|
|
/* Runtime options of the blkdebug driver itself (as opposed to rule options) */
static QemuOptsList runtime_opts = {
    .name = "blkdebug",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "config",
            .type = QEMU_OPT_STRING,
            .help = "Path to the configuration file",
        },
        {
            .name = "x-image",
            .type = QEMU_OPT_STRING,
            .help = "[internal use only, will be removed]",
        },
        {
            .name = "align",
            .type = QEMU_OPT_SIZE,
            .help = "Required alignment in bytes",
        },
        {
            .name = "max-transfer",
            .type = QEMU_OPT_SIZE,
            .help = "Maximum transfer size in bytes",
        },
        {
            .name = "opt-write-zero",
            .type = QEMU_OPT_SIZE,
            .help = "Optimum write zero alignment in bytes",
        },
        {
            .name = "max-write-zero",
            .type = QEMU_OPT_SIZE,
            .help = "Maximum write zero size in bytes",
        },
        {
            .name = "opt-discard",
            .type = QEMU_OPT_SIZE,
            .help = "Optimum discard alignment in bytes",
        },
        {
            .name = "max-discard",
            .type = QEMU_OPT_SIZE,
            .help = "Maximum discard size in bytes",
        },
        { /* end of list */ }
    },
};
|
|
|
|
|
2013-09-05 14:22:29 +02:00
|
|
|
/*
 * Open the blkdebug filter: absorb runtime options, read the rule
 * configuration, parse permission modifiers, open the wrapped image as
 * bs->file, and validate the user-requested geometry overrides.
 *
 * Returns 0 on success, a negative errno value on failure (with @errp set).
 */
static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVBlkdebugState *s = bs->opaque;
    QemuOpts *opts;
    Error *local_err = NULL;
    int ret;
    uint64_t align;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    if (!qemu_opts_absorb_qdict(opts, options, errp)) {
        ret = -EINVAL;
        goto out;
    }

    /* Read rules from config file or command line options */
    s->config_file = g_strdup(qemu_opt_get(opts, "config"));
    ret = read_config(s, s->config_file, options, errp);
    if (ret) {
        goto out;
    }

    /* Set initial state */
    s->state = 1;

    /* Parse permissions modifiers before opening the image file */
    ret = blkdebug_parse_perms(s, options, errp);
    if (ret < 0) {
        goto out;
    }

    /* Open the image file */
    bs->file = bdrv_open_child(qemu_opt_get(opts, "x-image"), options, "image",
                               bs, &child_of_bds,
                               BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                               false, &local_err);
    if (local_err) {
        ret = -EINVAL;
        error_propagate(errp, local_err);
        goto out;
    }

    /* Pass through only the flags the wrapped file actually supports */
    bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
        (BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
    bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
        ((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) &
            bs->file->bs->supported_zero_flags);
    /* Any failure below is an option-validation error */
    ret = -EINVAL;

    /* Set alignment overrides */
    s->align = qemu_opt_get_size(opts, "align", 0);
    if (s->align && (s->align >= INT_MAX || !is_power_of_2(s->align))) {
        error_setg(errp, "Cannot meet constraints with align %" PRIu64,
                   s->align);
        goto out;
    }
    /* Effective alignment the other limits must be multiples of */
    align = MAX(s->align, bs->file->bs->bl.request_alignment);

    s->max_transfer = qemu_opt_get_size(opts, "max-transfer", 0);
    if (s->max_transfer &&
        (s->max_transfer >= INT_MAX ||
         !QEMU_IS_ALIGNED(s->max_transfer, align))) {
        error_setg(errp, "Cannot meet constraints with max-transfer %" PRIu64,
                   s->max_transfer);
        goto out;
    }

    s->opt_write_zero = qemu_opt_get_size(opts, "opt-write-zero", 0);
    if (s->opt_write_zero &&
        (s->opt_write_zero >= INT_MAX ||
         !QEMU_IS_ALIGNED(s->opt_write_zero, align))) {
        error_setg(errp, "Cannot meet constraints with opt-write-zero %" PRIu64,
                   s->opt_write_zero);
        goto out;
    }

    /* The maximum must also be a multiple of the optimum, if one is set */
    s->max_write_zero = qemu_opt_get_size(opts, "max-write-zero", 0);
    if (s->max_write_zero &&
        (s->max_write_zero >= INT_MAX ||
         !QEMU_IS_ALIGNED(s->max_write_zero,
                          MAX(s->opt_write_zero, align)))) {
        error_setg(errp, "Cannot meet constraints with max-write-zero %" PRIu64,
                   s->max_write_zero);
        goto out;
    }

    s->opt_discard = qemu_opt_get_size(opts, "opt-discard", 0);
    if (s->opt_discard &&
        (s->opt_discard >= INT_MAX ||
         !QEMU_IS_ALIGNED(s->opt_discard, align))) {
        error_setg(errp, "Cannot meet constraints with opt-discard %" PRIu64,
                   s->opt_discard);
        goto out;
    }

    s->max_discard = qemu_opt_get_size(opts, "max-discard", 0);
    if (s->max_discard &&
        (s->max_discard >= INT_MAX ||
         !QEMU_IS_ALIGNED(s->max_discard,
                          MAX(s->opt_discard, align)))) {
        error_setg(errp, "Cannot meet constraints with max-discard %" PRIu64,
                   s->max_discard);
        goto out;
    }

    bdrv_debug_event(bs, BLKDBG_NONE);

    ret = 0;
out:
    if (ret < 0) {
        /* On failure .bdrv_close is not called, so free config_file here */
        g_free(s->config_file);
    }
    qemu_opts_del(opts);
    return ret;
}
|
|
|
|
|
2019-05-07 22:35:04 +02:00
|
|
|
/*
 * Check the active rules against a request and inject an error if one
 * matches.
 *
 * @offset/@bytes: byte range of the request
 * @iotype:        kind of request (read/write/zero/discard/flush)
 *
 * Returns 0 if no inject-error rule matches, otherwise the negated errno
 * of the matching rule.  Unless the rule says "immediately", the coroutine
 * yields once (rescheduled on the current AioContext) before failing, so
 * the error is delivered asynchronously.
 */
static int rule_check(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                      BlkdebugIOType iotype)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugRule *rule = NULL;
    int error;
    bool immediately;

    QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
        uint64_t inject_offset = rule->options.inject.offset;

        /* -1 is the "any offset" sentinel; otherwise the rule's offset
         * must fall inside [offset, offset + bytes) */
        if ((inject_offset == -1 ||
             (bytes && inject_offset >= offset &&
              inject_offset < offset + bytes)) &&
            (rule->options.inject.iotype_mask & (1ull << iotype)))
        {
            break;
        }
    }

    if (!rule || !rule->options.inject.error) {
        return 0;
    }

    /* Copy rule fields before possibly freeing the rule below */
    immediately = rule->options.inject.immediately;
    error = rule->options.inject.error;

    if (rule->options.inject.once) {
        /* Remove the rule for good so it cannot fire a second time */
        QSIMPLEQ_REMOVE(&s->active_rules, rule, BlkdebugRule, active_next);
        remove_rule(rule);
    }

    if (!immediately) {
        /* Yield once so the failure is reported asynchronously */
        aio_co_schedule(qemu_get_current_aio_context(), qemu_coroutine_self());
        qemu_coroutine_yield();
    }

    return -error;
}
|
|
|
|
|
2016-11-04 21:13:45 +01:00
|
|
|
static int coroutine_fn
|
|
|
|
blkdebug_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
|
|
|
|
QEMUIOVector *qiov, int flags)
|
2010-02-18 17:48:12 +01:00
|
|
|
{
|
2017-04-29 21:14:15 +02:00
|
|
|
int err;
|
2012-06-06 08:10:43 +02:00
|
|
|
|
2017-04-29 21:14:14 +02:00
|
|
|
/* Sanity check block layer guarantees */
|
|
|
|
assert(QEMU_IS_ALIGNED(offset, bs->bl.request_alignment));
|
|
|
|
assert(QEMU_IS_ALIGNED(bytes, bs->bl.request_alignment));
|
|
|
|
if (bs->bl.max_transfer) {
|
|
|
|
assert(bytes <= bs->bl.max_transfer);
|
|
|
|
}
|
|
|
|
|
2019-05-07 22:35:04 +02:00
|
|
|
err = rule_check(bs, offset, bytes, BLKDEBUG_IO_TYPE_READ);
|
2017-04-29 21:14:15 +02:00
|
|
|
if (err) {
|
|
|
|
return err;
|
2010-02-19 16:24:35 +01:00
|
|
|
}
|
|
|
|
|
2016-11-04 21:13:45 +01:00
|
|
|
return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
|
2010-02-18 17:48:12 +01:00
|
|
|
}
|
|
|
|
|
2016-11-04 21:13:45 +01:00
|
|
|
static int coroutine_fn
|
|
|
|
blkdebug_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
|
|
|
|
QEMUIOVector *qiov, int flags)
|
2010-02-18 17:48:12 +01:00
|
|
|
{
|
2017-04-29 21:14:15 +02:00
|
|
|
int err;
|
2012-06-06 08:10:43 +02:00
|
|
|
|
2017-04-29 21:14:14 +02:00
|
|
|
/* Sanity check block layer guarantees */
|
|
|
|
assert(QEMU_IS_ALIGNED(offset, bs->bl.request_alignment));
|
|
|
|
assert(QEMU_IS_ALIGNED(bytes, bs->bl.request_alignment));
|
|
|
|
if (bs->bl.max_transfer) {
|
|
|
|
assert(bytes <= bs->bl.max_transfer);
|
|
|
|
}
|
|
|
|
|
2019-05-07 22:35:04 +02:00
|
|
|
err = rule_check(bs, offset, bytes, BLKDEBUG_IO_TYPE_WRITE);
|
2017-04-29 21:14:15 +02:00
|
|
|
if (err) {
|
|
|
|
return err;
|
2010-02-19 16:24:35 +01:00
|
|
|
}
|
|
|
|
|
2016-11-04 21:13:45 +01:00
|
|
|
return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
|
2010-02-18 17:48:12 +01:00
|
|
|
}
|
|
|
|
|
2016-11-04 21:13:45 +01:00
|
|
|
static int blkdebug_co_flush(BlockDriverState *bs)
|
2014-08-04 23:11:02 +02:00
|
|
|
{
|
2019-05-07 22:35:04 +02:00
|
|
|
int err = rule_check(bs, 0, 0, BLKDEBUG_IO_TYPE_FLUSH);
|
2014-08-04 23:11:02 +02:00
|
|
|
|
2017-04-29 21:14:15 +02:00
|
|
|
if (err) {
|
|
|
|
return err;
|
2014-08-04 23:11:02 +02:00
|
|
|
}
|
|
|
|
|
2016-11-04 21:13:45 +01:00
|
|
|
return bdrv_co_flush(bs->file->bs);
|
2014-08-04 23:11:02 +02:00
|
|
|
}
|
|
|
|
|
blkdebug: Add pass-through write_zero and discard support
In order to test the effects of artificial geometry constraints
on operations like write zero or discard, we first need blkdebug
to manage these actions. It also allows us to inject errors on
those operations, just like we can for read/write/flush.
We can also test the contract promised by the block layer; namely,
if a device has specified limits on alignment or maximum size,
then those limits must be obeyed (for now, the blkdebug driver
merely inherits limits from whatever it is wrapping, but the next
patch will further enhance it to allow specific limit overrides).
This patch intentionally refuses to service requests smaller than
the requested alignments; this is because an upcoming patch adds
a qemu-iotest to prove that the block layer is correctly handling
fragmentation, but the test only works if there is a way to tell
the difference at artificial alignment boundaries when blkdebug is
using a larger-than-default alignment. If we let the blkdebug
layer always defer to the underlying layer, which potentially has
a smaller granularity, the iotest will be thwarted.
Tested by setting up an NBD server with export 'foo', then invoking:
$ ./qemu-io
qemu-io> open -o driver=blkdebug blkdebug::nbd://localhost:10809/foo
qemu-io> d 0 15M
qemu-io> w -z 0 15M
Pre-patch, the server never sees the discard (it was silently
eaten by the block layer); post-patch it is passed across the
wire. Likewise, pre-patch the write is always passed with
NBD_WRITE (with 15M of zeroes on the wire), while post-patch
it can utilize NBD_WRITE_ZEROES (for less traffic).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20170429191419.30051-7-eblake@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
2017-04-29 21:14:16 +02:00
|
|
|
/*
 * Write-zeroes handler.  Deliberately refuses (-ENOTSUP) requests smaller
 * than the preferred zeroing alignment so that iotests can observe the
 * block layer's fallback to regular writes on unaligned fragments, while
 * asserting that the block layer never hands us an unaligned request that
 * straddles an alignment boundary.
 */
static int coroutine_fn blkdebug_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset, int bytes,
                                                  BdrvRequestFlags flags)
{
    /* Effective alignment: the stricter of the general request alignment
     * and the device's preferred write-zeroes alignment. */
    uint32_t align = MAX(bs->bl.request_alignment,
                         bs->bl.pwrite_zeroes_alignment);
    int err;

    /* Only pass through requests that are larger than requested
     * preferred alignment (so that we test the fallback to writes on
     * unaligned portions), and check that the block layer never hands
     * us anything unaligned that crosses an alignment boundary. */
    if (bytes < align) {
        /* A sub-alignment request must touch at most one alignment
         * "slot": either one end is aligned, or both ends round up to
         * the same boundary. */
        assert(QEMU_IS_ALIGNED(offset, align) ||
               QEMU_IS_ALIGNED(offset + bytes, align) ||
               DIV_ROUND_UP(offset, align) ==
               DIV_ROUND_UP(offset + bytes, align));
        return -ENOTSUP;
    }
    /* Requests at or above the threshold must be fully aligned and
     * within the advertised maximum. */
    assert(QEMU_IS_ALIGNED(offset, align));
    assert(QEMU_IS_ALIGNED(bytes, align));
    if (bs->bl.max_pwrite_zeroes) {
        assert(bytes <= bs->bl.max_pwrite_zeroes);
    }

    /* Error-injection hook: non-zero is the injected -errno. */
    err = rule_check(bs, offset, bytes, BLKDEBUG_IO_TYPE_WRITE_ZEROES);
    if (err) {
        return err;
    }

    return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
}
|
|
|
|
|
|
|
|
static int coroutine_fn blkdebug_co_pdiscard(BlockDriverState *bs,
|
2017-06-09 12:18:08 +02:00
|
|
|
int64_t offset, int bytes)
|
blkdebug: Add pass-through write_zero and discard support
In order to test the effects of artificial geometry constraints
on operations like write zero or discard, we first need blkdebug
to manage these actions. It also allows us to inject errors on
those operations, just like we can for read/write/flush.
We can also test the contract promised by the block layer; namely,
if a device has specified limits on alignment or maximum size,
then those limits must be obeyed (for now, the blkdebug driver
merely inherits limits from whatever it is wrapping, but the next
patch will further enhance it to allow specific limit overrides).
This patch intentionally refuses to service requests smaller than
the requested alignments; this is because an upcoming patch adds
a qemu-iotest to prove that the block layer is correctly handling
fragmentation, but the test only works if there is a way to tell
the difference at artificial alignment boundaries when blkdebug is
using a larger-than-default alignment. If we let the blkdebug
layer always defer to the underlying layer, which potentially has
a smaller granularity, the iotest will be thwarted.
Tested by setting up an NBD server with export 'foo', then invoking:
$ ./qemu-io
qemu-io> open -o driver=blkdebug blkdebug::nbd://localhost:10809/foo
qemu-io> d 0 15M
qemu-io> w -z 0 15M
Pre-patch, the server never sees the discard (it was silently
eaten by the block layer); post-patch it is passed across the
wire. Likewise, pre-patch the write is always passed with
NBD_WRITE (with 15M of zeroes on the wire), while post-patch
it can utilize NBD_WRITE_ZEROES (for less traffic).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20170429191419.30051-7-eblake@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
2017-04-29 21:14:16 +02:00
|
|
|
{
|
|
|
|
uint32_t align = bs->bl.pdiscard_alignment;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
/* Only pass through requests that are larger than requested
|
|
|
|
* minimum alignment, and ensure that unaligned requests do not
|
|
|
|
* cross optimum discard boundaries. */
|
2017-06-09 12:18:08 +02:00
|
|
|
if (bytes < bs->bl.request_alignment) {
|
blkdebug: Add pass-through write_zero and discard support
In order to test the effects of artificial geometry constraints
on operations like write zero or discard, we first need blkdebug
to manage these actions. It also allows us to inject errors on
those operations, just like we can for read/write/flush.
We can also test the contract promised by the block layer; namely,
if a device has specified limits on alignment or maximum size,
then those limits must be obeyed (for now, the blkdebug driver
merely inherits limits from whatever it is wrapping, but the next
patch will further enhance it to allow specific limit overrides).
This patch intentionally refuses to service requests smaller than
the requested alignments; this is because an upcoming patch adds
a qemu-iotest to prove that the block layer is correctly handling
fragmentation, but the test only works if there is a way to tell
the difference at artificial alignment boundaries when blkdebug is
using a larger-than-default alignment. If we let the blkdebug
layer always defer to the underlying layer, which potentially has
a smaller granularity, the iotest will be thwarted.
Tested by setting up an NBD server with export 'foo', then invoking:
$ ./qemu-io
qemu-io> open -o driver=blkdebug blkdebug::nbd://localhost:10809/foo
qemu-io> d 0 15M
qemu-io> w -z 0 15M
Pre-patch, the server never sees the discard (it was silently
eaten by the block layer); post-patch it is passed across the
wire. Likewise, pre-patch the write is always passed with
NBD_WRITE (with 15M of zeroes on the wire), while post-patch
it can utilize NBD_WRITE_ZEROES (for less traffic).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20170429191419.30051-7-eblake@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
2017-04-29 21:14:16 +02:00
|
|
|
assert(QEMU_IS_ALIGNED(offset, align) ||
|
2017-06-09 12:18:08 +02:00
|
|
|
QEMU_IS_ALIGNED(offset + bytes, align) ||
|
blkdebug: Add pass-through write_zero and discard support
In order to test the effects of artificial geometry constraints
on operations like write zero or discard, we first need blkdebug
to manage these actions. It also allows us to inject errors on
those operations, just like we can for read/write/flush.
We can also test the contract promised by the block layer; namely,
if a device has specified limits on alignment or maximum size,
then those limits must be obeyed (for now, the blkdebug driver
merely inherits limits from whatever it is wrapping, but the next
patch will further enhance it to allow specific limit overrides).
This patch intentionally refuses to service requests smaller than
the requested alignments; this is because an upcoming patch adds
a qemu-iotest to prove that the block layer is correctly handling
fragmentation, but the test only works if there is a way to tell
the difference at artificial alignment boundaries when blkdebug is
using a larger-than-default alignment. If we let the blkdebug
layer always defer to the underlying layer, which potentially has
a smaller granularity, the iotest will be thwarted.
Tested by setting up an NBD server with export 'foo', then invoking:
$ ./qemu-io
qemu-io> open -o driver=blkdebug blkdebug::nbd://localhost:10809/foo
qemu-io> d 0 15M
qemu-io> w -z 0 15M
Pre-patch, the server never sees the discard (it was silently
eaten by the block layer); post-patch it is passed across the
wire. Likewise, pre-patch the write is always passed with
NBD_WRITE (with 15M of zeroes on the wire), while post-patch
it can utilize NBD_WRITE_ZEROES (for less traffic).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20170429191419.30051-7-eblake@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
2017-04-29 21:14:16 +02:00
|
|
|
DIV_ROUND_UP(offset, align) ==
|
2017-06-09 12:18:08 +02:00
|
|
|
DIV_ROUND_UP(offset + bytes, align));
|
blkdebug: Add pass-through write_zero and discard support
In order to test the effects of artificial geometry constraints
on operations like write zero or discard, we first need blkdebug
to manage these actions. It also allows us to inject errors on
those operations, just like we can for read/write/flush.
We can also test the contract promised by the block layer; namely,
if a device has specified limits on alignment or maximum size,
then those limits must be obeyed (for now, the blkdebug driver
merely inherits limits from whatever it is wrapping, but the next
patch will further enhance it to allow specific limit overrides).
This patch intentionally refuses to service requests smaller than
the requested alignments; this is because an upcoming patch adds
a qemu-iotest to prove that the block layer is correctly handling
fragmentation, but the test only works if there is a way to tell
the difference at artificial alignment boundaries when blkdebug is
using a larger-than-default alignment. If we let the blkdebug
layer always defer to the underlying layer, which potentially has
a smaller granularity, the iotest will be thwarted.
Tested by setting up an NBD server with export 'foo', then invoking:
$ ./qemu-io
qemu-io> open -o driver=blkdebug blkdebug::nbd://localhost:10809/foo
qemu-io> d 0 15M
qemu-io> w -z 0 15M
Pre-patch, the server never sees the discard (it was silently
eaten by the block layer); post-patch it is passed across the
wire. Likewise, pre-patch the write is always passed with
NBD_WRITE (with 15M of zeroes on the wire), while post-patch
it can utilize NBD_WRITE_ZEROES (for less traffic).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20170429191419.30051-7-eblake@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
2017-04-29 21:14:16 +02:00
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
assert(QEMU_IS_ALIGNED(offset, bs->bl.request_alignment));
|
2017-06-09 12:18:08 +02:00
|
|
|
assert(QEMU_IS_ALIGNED(bytes, bs->bl.request_alignment));
|
|
|
|
if (align && bytes >= align) {
|
blkdebug: Add pass-through write_zero and discard support
In order to test the effects of artificial geometry constraints
on operations like write zero or discard, we first need blkdebug
to manage these actions. It also allows us to inject errors on
those operations, just like we can for read/write/flush.
We can also test the contract promised by the block layer; namely,
if a device has specified limits on alignment or maximum size,
then those limits must be obeyed (for now, the blkdebug driver
merely inherits limits from whatever it is wrapping, but the next
patch will further enhance it to allow specific limit overrides).
This patch intentionally refuses to service requests smaller than
the requested alignments; this is because an upcoming patch adds
a qemu-iotest to prove that the block layer is correctly handling
fragmentation, but the test only works if there is a way to tell
the difference at artificial alignment boundaries when blkdebug is
using a larger-than-default alignment. If we let the blkdebug
layer always defer to the underlying layer, which potentially has
a smaller granularity, the iotest will be thwarted.
Tested by setting up an NBD server with export 'foo', then invoking:
$ ./qemu-io
qemu-io> open -o driver=blkdebug blkdebug::nbd://localhost:10809/foo
qemu-io> d 0 15M
qemu-io> w -z 0 15M
Pre-patch, the server never sees the discard (it was silently
eaten by the block layer); post-patch it is passed across the
wire. Likewise, pre-patch the write is always passed with
NBD_WRITE (with 15M of zeroes on the wire), while post-patch
it can utilize NBD_WRITE_ZEROES (for less traffic).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20170429191419.30051-7-eblake@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
2017-04-29 21:14:16 +02:00
|
|
|
assert(QEMU_IS_ALIGNED(offset, align));
|
2017-06-09 12:18:08 +02:00
|
|
|
assert(QEMU_IS_ALIGNED(bytes, align));
|
blkdebug: Add pass-through write_zero and discard support
In order to test the effects of artificial geometry constraints
on operations like write zero or discard, we first need blkdebug
to manage these actions. It also allows us to inject errors on
those operations, just like we can for read/write/flush.
We can also test the contract promised by the block layer; namely,
if a device has specified limits on alignment or maximum size,
then those limits must be obeyed (for now, the blkdebug driver
merely inherits limits from whatever it is wrapping, but the next
patch will further enhance it to allow specific limit overrides).
This patch intentionally refuses to service requests smaller than
the requested alignments; this is because an upcoming patch adds
a qemu-iotest to prove that the block layer is correctly handling
fragmentation, but the test only works if there is a way to tell
the difference at artificial alignment boundaries when blkdebug is
using a larger-than-default alignment. If we let the blkdebug
layer always defer to the underlying layer, which potentially has
a smaller granularity, the iotest will be thwarted.
Tested by setting up an NBD server with export 'foo', then invoking:
$ ./qemu-io
qemu-io> open -o driver=blkdebug blkdebug::nbd://localhost:10809/foo
qemu-io> d 0 15M
qemu-io> w -z 0 15M
Pre-patch, the server never sees the discard (it was silently
eaten by the block layer); post-patch it is passed across the
wire. Likewise, pre-patch the write is always passed with
NBD_WRITE (with 15M of zeroes on the wire), while post-patch
it can utilize NBD_WRITE_ZEROES (for less traffic).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20170429191419.30051-7-eblake@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
2017-04-29 21:14:16 +02:00
|
|
|
}
|
|
|
|
if (bs->bl.max_pdiscard) {
|
2017-06-09 12:18:08 +02:00
|
|
|
assert(bytes <= bs->bl.max_pdiscard);
|
blkdebug: Add pass-through write_zero and discard support
In order to test the effects of artificial geometry constraints
on operations like write zero or discard, we first need blkdebug
to manage these actions. It also allows us to inject errors on
those operations, just like we can for read/write/flush.
We can also test the contract promised by the block layer; namely,
if a device has specified limits on alignment or maximum size,
then those limits must be obeyed (for now, the blkdebug driver
merely inherits limits from whatever it is wrapping, but the next
patch will further enhance it to allow specific limit overrides).
This patch intentionally refuses to service requests smaller than
the requested alignments; this is because an upcoming patch adds
a qemu-iotest to prove that the block layer is correctly handling
fragmentation, but the test only works if there is a way to tell
the difference at artificial alignment boundaries when blkdebug is
using a larger-than-default alignment. If we let the blkdebug
layer always defer to the underlying layer, which potentially has
a smaller granularity, the iotest will be thwarted.
Tested by setting up an NBD server with export 'foo', then invoking:
$ ./qemu-io
qemu-io> open -o driver=blkdebug blkdebug::nbd://localhost:10809/foo
qemu-io> d 0 15M
qemu-io> w -z 0 15M
Pre-patch, the server never sees the discard (it was silently
eaten by the block layer); post-patch it is passed across the
wire. Likewise, pre-patch the write is always passed with
NBD_WRITE (with 15M of zeroes on the wire), while post-patch
it can utilize NBD_WRITE_ZEROES (for less traffic).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20170429191419.30051-7-eblake@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
2017-04-29 21:14:16 +02:00
|
|
|
}
|
|
|
|
|
2019-05-07 22:35:04 +02:00
|
|
|
err = rule_check(bs, offset, bytes, BLKDEBUG_IO_TYPE_DISCARD);
|
blkdebug: Add pass-through write_zero and discard support
In order to test the effects of artificial geometry constraints
on operations like write zero or discard, we first need blkdebug
to manage these actions. It also allows us to inject errors on
those operations, just like we can for read/write/flush.
We can also test the contract promised by the block layer; namely,
if a device has specified limits on alignment or maximum size,
then those limits must be obeyed (for now, the blkdebug driver
merely inherits limits from whatever it is wrapping, but the next
patch will further enhance it to allow specific limit overrides).
This patch intentionally refuses to service requests smaller than
the requested alignments; this is because an upcoming patch adds
a qemu-iotest to prove that the block layer is correctly handling
fragmentation, but the test only works if there is a way to tell
the difference at artificial alignment boundaries when blkdebug is
using a larger-than-default alignment. If we let the blkdebug
layer always defer to the underlying layer, which potentially has
a smaller granularity, the iotest will be thwarted.
Tested by setting up an NBD server with export 'foo', then invoking:
$ ./qemu-io
qemu-io> open -o driver=blkdebug blkdebug::nbd://localhost:10809/foo
qemu-io> d 0 15M
qemu-io> w -z 0 15M
Pre-patch, the server never sees the discard (it was silently
eaten by the block layer); post-patch it is passed across the
wire. Likewise, pre-patch the write is always passed with
NBD_WRITE (with 15M of zeroes on the wire), while post-patch
it can utilize NBD_WRITE_ZEROES (for less traffic).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20170429191419.30051-7-eblake@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
2017-04-29 21:14:16 +02:00
|
|
|
if (err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-07-10 08:31:17 +02:00
|
|
|
return bdrv_co_pdiscard(bs->file, offset, bytes);
|
blkdebug: Add pass-through write_zero and discard support
In order to test the effects of artificial geometry constraints
on operations like write zero or discard, we first need blkdebug
to manage these actions. It also allows us to inject errors on
those operations, just like we can for read/write/flush.
We can also test the contract promised by the block layer; namely,
if a device has specified limits on alignment or maximum size,
then those limits must be obeyed (for now, the blkdebug driver
merely inherits limits from whatever it is wrapping, but the next
patch will further enhance it to allow specific limit overrides).
This patch intentionally refuses to service requests smaller than
the requested alignments; this is because an upcoming patch adds
a qemu-iotest to prove that the block layer is correctly handling
fragmentation, but the test only works if there is a way to tell
the difference at artificial alignment boundaries when blkdebug is
using a larger-than-default alignment. If we let the blkdebug
layer always defer to the underlying layer, which potentially has
a smaller granularity, the iotest will be thwarted.
Tested by setting up an NBD server with export 'foo', then invoking:
$ ./qemu-io
qemu-io> open -o driver=blkdebug blkdebug::nbd://localhost:10809/foo
qemu-io> d 0 15M
qemu-io> w -z 0 15M
Pre-patch, the server never sees the discard (it was silently
eaten by the block layer); post-patch it is passed across the
wire. Likewise, pre-patch the write is always passed with
NBD_WRITE (with 15M of zeroes on the wire), while post-patch
it can utilize NBD_WRITE_ZEROES (for less traffic).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20170429191419.30051-7-eblake@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
2017-04-29 21:14:16 +02:00
|
|
|
}
|
2012-12-06 14:32:57 +01:00
|
|
|
|
2018-02-13 21:26:43 +01:00
|
|
|
/*
 * Report block status by deferring to the wrapped file, after giving
 * error-injection rules a chance to fail the query.
 */
static int coroutine_fn blkdebug_co_block_status(BlockDriverState *bs,
                                                 bool want_zero,
                                                 int64_t offset,
                                                 int64_t bytes,
                                                 int64_t *pnum,
                                                 int64_t *map,
                                                 BlockDriverState **file)
{
    int ret;

    /* The block layer must have honoured our advertised alignment. */
    assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment));

    ret = rule_check(bs, offset, bytes, BLKDEBUG_IO_TYPE_BLOCK_STATUS);
    if (ret) {
        return ret;
    }

    assert(bs->file && bs->file->bs);

    /* Pass the range straight through: data lives in the child as-is. */
    *pnum = bytes;
    *map = offset;
    *file = bs->file->bs;

    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
}
|
|
|
|
|
2010-02-18 17:48:12 +01:00
|
|
|
/* Release all per-instance state: every rule list and the config path. */
static void blkdebug_close(BlockDriverState *bs)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugRule *r, *r_next;
    int ev;

    /* Drop every remaining rule from each event's list. */
    for (ev = 0; ev < BLKDBG__MAX; ev++) {
        QLIST_FOREACH_SAFE(r, &s->rules[ev], next, r_next) {
            remove_rule(r);
        }
    }

    g_free(s->config_file);
}
|
|
|
|
|
2012-12-06 14:32:57 +01:00
|
|
|
/*
 * Park the calling coroutine until blkdebug_debug_resume() (or
 * blkdebug_debug_remove_breakpoint()) re-enters it via the matching tag.
 * Invoked while processing a rule with ACTION_SUSPEND; must run in
 * coroutine context because it yields.
 */
static void suspend_request(BlockDriverState *bs, BlkdebugRule *rule)
{
    BDRVBlkdebugState *s = bs->opaque;
    /* Stack-allocated: lives exactly as long as the request is suspended. */
    BlkdebugSuspendedReq r;

    r = (BlkdebugSuspendedReq) {
        .co = qemu_coroutine_self(),
        .tag = g_strdup(rule->options.suspend.tag),
    };

    /* A breakpoint fires only once: drop the rule before suspending. */
    remove_rule(rule);
    QLIST_INSERT_HEAD(&s->suspended_reqs, &r, next);

    if (!qtest_enabled()) {
        printf("blkdebug: Suspended request '%s'\n", r.tag);
    }
    /* Execution resumes here once someone enters r.co again. */
    qemu_coroutine_yield();
    if (!qtest_enabled()) {
        printf("blkdebug: Resuming request '%s'\n", r.tag);
    }

    QLIST_REMOVE(&r, next);
    g_free(r.tag);
}
|
|
|
|
|
2012-06-06 08:10:42 +02:00
|
|
|
/*
 * Apply one rule for the event currently being dispatched.
 *
 * @injected tracks whether an ACTION_INJECT_ERROR rule has already matched
 * during this event; the (possibly updated) flag is returned so the caller
 * can thread it through consecutive rules.
 */
static bool process_rule(BlockDriverState *bs, struct BlkdebugRule *rule,
                         bool injected)
{
    BDRVBlkdebugState *s = bs->opaque;

    /* Rules with a nonzero state fire only while that state is current. */
    if (rule->state && rule->state != s->state) {
        return injected;
    }

    if (rule->action == ACTION_INJECT_ERROR) {
        /* Reset the active list once per event, on the first match. */
        if (!injected) {
            QSIMPLEQ_INIT(&s->active_rules);
            injected = true;
        }
        QSIMPLEQ_INSERT_HEAD(&s->active_rules, rule, active_next);
    } else if (rule->action == ACTION_SET_STATE) {
        /* Deferred: committed to s->state after all rules have run. */
        s->new_state = rule->options.set_state.new_state;
    } else if (rule->action == ACTION_SUSPEND) {
        suspend_request(bs, rule);
    }

    return injected;
}
|
|
|
|
|
2015-11-18 09:52:54 +01:00
|
|
|
/*
 * Dispatch a debug event: run every rule registered for it, then commit
 * any state transition that a rule requested.
 */
static void blkdebug_debug_event(BlockDriverState *bs, BlkdebugEvent event)
{
    BDRVBlkdebugState *s = bs->opaque;
    struct BlkdebugRule *r, *r_next;
    bool injected = false;

    assert((int)event >= 0 && event < BLKDBG__MAX);

    /* State changes take effect only after all rules have been seen. */
    s->new_state = s->state;
    QLIST_FOREACH_SAFE(r, &s->rules[event], next, r_next) {
        injected = process_rule(bs, r, injected);
    }
    s->state = s->new_state;
}
|
|
|
|
|
2012-12-06 14:32:57 +01:00
|
|
|
/*
 * Install a breakpoint: a suspend rule that fires in any state for the
 * named event.  Returns -ENOENT if the event name is unknown.
 */
static int blkdebug_debug_breakpoint(BlockDriverState *bs, const char *event,
                                     const char *tag)
{
    BDRVBlkdebugState *s = bs->opaque;
    struct BlkdebugRule *rule;
    int ev;

    /* Translate the event name into its enum value. */
    ev = qapi_enum_parse(&BlkdebugEvent_lookup, event, -1, NULL);
    if (ev < 0) {
        return -ENOENT;
    }

    rule = g_malloc(sizeof(*rule));
    *rule = (struct BlkdebugRule) {
        .event  = ev,
        .action = ACTION_SUSPEND,
        .state  = 0,                 /* 0 == match in every state */
        .options.suspend.tag = g_strdup(tag),
    };
    QLIST_INSERT_HEAD(&s->rules[ev], rule, next);

    return 0;
}
|
|
|
|
|
|
|
|
/*
 * Resume the first suspended request whose tag matches.  Returns 0 on
 * success, -ENOENT if no request with that tag is suspended.
 */
static int blkdebug_debug_resume(BlockDriverState *bs, const char *tag)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugSuspendedReq *req, *req_next;

    QLIST_FOREACH_SAFE(req, &s->suspended_reqs, next, req_next) {
        if (strcmp(req->tag, tag) == 0) {
            /* Re-enter the parked coroutine; it unlinks itself. */
            qemu_coroutine_enter(req->co);
            return 0;
        }
    }

    return -ENOENT;
}
|
|
|
|
|
2013-11-20 03:01:54 +01:00
|
|
|
/*
 * Remove every breakpoint with the given tag: delete matching suspend
 * rules and wake any requests already parked on that tag.  Returns 0 if
 * anything matched, -ENOENT otherwise.
 */
static int blkdebug_debug_remove_breakpoint(BlockDriverState *bs,
                                            const char *tag)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugSuspendedReq *req, *req_next;
    BlkdebugRule *rule, *rule_next;
    int ev;
    int ret = -ENOENT;

    /* Pass 1: drop not-yet-fired suspend rules carrying this tag. */
    for (ev = 0; ev < BLKDBG__MAX; ev++) {
        QLIST_FOREACH_SAFE(rule, &s->rules[ev], next, rule_next) {
            if (rule->action == ACTION_SUSPEND &&
                strcmp(rule->options.suspend.tag, tag) == 0) {
                remove_rule(rule);
                ret = 0;
            }
        }
    }

    /* Pass 2: wake requests that are currently suspended on this tag. */
    QLIST_FOREACH_SAFE(req, &s->suspended_reqs, next, req_next) {
        if (strcmp(req->tag, tag) == 0) {
            qemu_coroutine_enter(req->co);
            ret = 0;
        }
    }

    return ret;
}
|
2012-12-06 14:32:57 +01:00
|
|
|
|
|
|
|
/* True iff a request suspended under @tag currently exists. */
static bool blkdebug_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
    BDRVBlkdebugState *s = bs->opaque;
    BlkdebugSuspendedReq *req;

    QLIST_FOREACH(req, &s->suspended_reqs, next) {
        if (strcmp(req->tag, tag) == 0) {
            return true;
        }
    }

    return false;
}
|
|
|
|
|
2012-06-06 08:10:41 +02:00
|
|
|
/* blkdebug adds no data of its own: report the wrapped image's length. */
static int64_t blkdebug_getlength(BlockDriverState *bs)
{
    BlockDriverState *file_bs = bs->file->bs;

    return bdrv_getlength(file_bs);
}
|
|
|
|
|
2019-02-01 20:29:28 +01:00
|
|
|
/*
 * Reconstruct a plain "blkdebug:<config>:<child-filename>" pseudo-filename,
 * but only when every runtime option can be expressed in that form.
 * Otherwise leave exact_filename empty so only full_open_options is used.
 */
static void blkdebug_refresh_filename(BlockDriverState *bs)
{
    BDRVBlkdebugState *s = bs->opaque;
    const QDictEntry *e;
    int ret;

    /* Without a child filename there is nothing to compose. */
    if (!bs->file->bs->exact_filename[0]) {
        return;
    }

    for (e = qdict_first(bs->full_open_options); e;
         e = qdict_next(bs->full_open_options, e))
    {
        /* Real child options are under "image", but "x-image" may
         * contain a filename */
        if (strcmp(qdict_entry_key(e), "config") &&
            strcmp(qdict_entry_key(e), "image") &&
            strcmp(qdict_entry_key(e), "x-image") &&
            strcmp(qdict_entry_key(e), "driver"))
        {
            /* Any other option cannot be encoded in the flat syntax. */
            return;
        }
    }

    ret = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                   "blkdebug:%s:%s",
                   s->config_file ?: "", bs->file->bs->exact_filename);
    /*
     * snprintf() returns a negative value on encoding error and the
     * would-be length on truncation; check both explicitly instead of
     * relying on the negative int wrapping around in an unsigned
     * comparison against size_t.
     */
    if (ret < 0 || (size_t)ret >= sizeof(bs->exact_filename)) {
        /* An overflow makes the filename unusable, so do not report any */
        bs->exact_filename[0] = 0;
    }
}
|
|
|
|
|
2016-06-24 00:37:13 +02:00
|
|
|
/*
 * Apply user-requested overrides to the advertised block limits.
 * Each field is only overridden when the corresponding option was given
 * (nonzero); otherwise the limits inherited from the child remain.
 */
static void blkdebug_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVBlkdebugState *s = bs->opaque;

    if (s->align) {
        bs->bl.request_alignment = s->align;
    }
    if (s->max_transfer) {
        bs->bl.max_transfer = s->max_transfer;
    }
    if (s->opt_write_zero) {
        bs->bl.pwrite_zeroes_alignment = s->opt_write_zero;
    }
    if (s->max_write_zero) {
        bs->bl.max_pwrite_zeroes = s->max_write_zero;
    }
    if (s->opt_discard) {
        bs->bl.pdiscard_alignment = s->opt_discard;
    }
    if (s->max_discard) {
        bs->bl.max_pdiscard = s->max_discard;
    }
}
|
|
|
|
|
2015-10-29 15:22:27 +01:00
|
|
|
/* Reopen needs no preparation: blkdebug keeps no state it invalidates. */
static int blkdebug_reopen_prepare(BDRVReopenState *reopen_state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}
|
|
|
|
|
2019-11-08 13:34:53 +01:00
|
|
|
/*
 * Compute the permissions for a child: start from the block layer's
 * defaults, then apply the user-configured tweaks (take-child-perms /
 * unshare-child-perms) used to provoke permission conflicts in tests.
 */
static void blkdebug_child_perm(BlockDriverState *bs, BdrvChild *c,
                                BdrvChildRole role,
                                BlockReopenQueue *reopen_queue,
                                uint64_t perm, uint64_t shared,
                                uint64_t *nperm, uint64_t *nshared)
{
    BDRVBlkdebugState *s = bs->opaque;

    bdrv_default_perms(bs, c, role, reopen_queue,
                       perm, shared, nperm, nshared);

    /* Force extra permissions on, and shared permissions off. */
    *nperm |= s->take_child_perms;
    *nshared &= ~s->unshare_child_perms;
}
|
|
|
|
|
2019-02-01 20:29:25 +01:00
|
|
|
/*
 * Options considered "strong" (they affect data contents/behaviour).
 * NOTE(review): entries ending in '.' presumably act as prefixes matching
 * e.g. all "inject-error.*" sub-options — confirm against the
 * strong_runtime_opts contract in the BlockDriver definition.
 */
static const char *const blkdebug_strong_runtime_opts[] = {
    "config",
    "inject-error.",
    "set-state.",
    "align",
    "max-transfer",
    "opt-write-zero",
    "max-write-zero",
    "opt-discard",
    "max-discard",

    NULL
};
|
|
|
|
|
2010-02-18 17:48:12 +01:00
|
|
|
/*
 * Driver definition: a protocol-level filter ("blkdebug:...") that
 * forwards all I/O to bs->file while optionally injecting errors,
 * overriding limits, and suspending requests at debug events.
 */
static BlockDriver bdrv_blkdebug = {
    .format_name            = "blkdebug",
    .protocol_name          = "blkdebug",
    .instance_size          = sizeof(BDRVBlkdebugState),
    .is_filter              = true,

    /* Open/close and option handling */
    .bdrv_parse_filename    = blkdebug_parse_filename,
    .bdrv_file_open         = blkdebug_open,
    .bdrv_close             = blkdebug_close,
    .bdrv_reopen_prepare    = blkdebug_reopen_prepare,
    .bdrv_child_perm        = blkdebug_child_perm,

    .bdrv_getlength         = blkdebug_getlength,
    .bdrv_refresh_filename  = blkdebug_refresh_filename,
    .bdrv_refresh_limits    = blkdebug_refresh_limits,

    /* Pass-through I/O (each may inject errors via the rule engine) */
    .bdrv_co_preadv         = blkdebug_co_preadv,
    .bdrv_co_pwritev        = blkdebug_co_pwritev,
    .bdrv_co_flush_to_disk  = blkdebug_co_flush,
    .bdrv_co_pwrite_zeroes  = blkdebug_co_pwrite_zeroes,
    .bdrv_co_pdiscard       = blkdebug_co_pdiscard,
    .bdrv_co_block_status   = blkdebug_co_block_status,

    /* Debug/test hooks (events, breakpoints, suspend/resume) */
    .bdrv_debug_event           = blkdebug_debug_event,
    .bdrv_debug_breakpoint      = blkdebug_debug_breakpoint,
    .bdrv_debug_remove_breakpoint
                                = blkdebug_debug_remove_breakpoint,
    .bdrv_debug_resume          = blkdebug_debug_resume,
    .bdrv_debug_is_suspended    = blkdebug_debug_is_suspended,

    .strong_runtime_opts        = blkdebug_strong_runtime_opts,
};
|
|
|
|
|
|
|
|
/* Register the blkdebug driver with the block layer. */
static void bdrv_blkdebug_init(void)
{
    bdrv_register(&bdrv_blkdebug);
}

/* Run the registration constructor at QEMU startup. */
block_init(bdrv_blkdebug_init);