qemu-e2k/blockdev.c

/*
* QEMU host block devices
*
* Copyright (c) 2003-2008 Fabrice Bellard
*
* This work is licensed under the terms of the GNU GPL, version 2 or
* later. See the COPYING file in the top-level directory.
*
* This file incorporates work covered by the following copyright and
* permission notice:
*
* Copyright (c) 2003-2008 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "block/blockjob.h"
#include "block/dirty-bitmap.h"
#include "block/qdict.h"
#include "block/throttle-groups.h"
#include "monitor/monitor.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/qemu-print.h"
#include "qemu/config-file.h"
#include "qapi/qapi-commands-block.h"
#include "qapi/qapi-commands-transaction.h"
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qnum.h"
#include "qapi/qmp/qstring.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qobject-output-visitor.h"
#include "sysemu/sysemu.h"
#include "sysemu/iothread.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/runstate.h"
#include "sysemu/replay.h"
#include "qemu/cutils.h"
#include "qemu/help_option.h"
#include "qemu/main-loop.h"
#include "qemu/throttle-options.h"
/* Protected by BQL */
QTAILQ_HEAD(, BlockDriverState) monitor_bdrv_states =
QTAILQ_HEAD_INITIALIZER(monitor_bdrv_states);
void bdrv_set_monitor_owned(BlockDriverState *bs)
{
GLOBAL_STATE_CODE();
QTAILQ_INSERT_TAIL(&monitor_bdrv_states, bs, monitor_list);
}
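/* Legacy -drive interface names, indexed by BlockInterfaceType */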
static const char *const if_name[IF_COUNT] = {
[IF_NONE] = "none",
[IF_IDE] = "ide",
[IF_SCSI] = "scsi",
[IF_FLOPPY] = "floppy",
[IF_PFLASH] = "pflash",
[IF_MTD] = "mtd",
[IF_SD] = "sd",
[IF_VIRTIO] = "virtio",
[IF_XEN] = "xen",
};
static int if_max_devs[IF_COUNT] = {
/*
* Do not change these numbers! They govern how drive option
* index maps to unit and bus. That mapping is ABI.
*
* All controllers used to implement if=T drives need to support
* if_max_devs[T] units, for any T with if_max_devs[T] != 0.
* Otherwise, some index values map to "impossible" bus, unit
* values.
*
* For instance, if you change [IF_SCSI] to 255, -drive
* if=scsi,index=12 no longer means bus=1,unit=5, but
* bus=0,unit=12. With an lsi53c895a controller (7 units max),
* the drive can't be set up. Regression.
*/
[IF_IDE] = 2,
[IF_SCSI] = 7,
};
/**
* Boards may call this to offer board-by-board overrides
* of the default, global values.
*/
void override_max_devs(BlockInterfaceType type, int max_devs)
{
BlockBackend *blk;
DriveInfo *dinfo;
GLOBAL_STATE_CODE();
if (max_devs <= 0) {
return;
}
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
dinfo = blk_legacy_dinfo(blk);
if (dinfo->type == type) {
fprintf(stderr, "Cannot override units-per-bus property of"
" the %s interface, because a drive of that type has"
" already been added.\n", if_name[type]);
g_assert_not_reached();
}
}
if_max_devs[type] = max_devs;
}
/*
* We automatically delete the drive when a device using it gets
* unplugged. Questionable feature, but we can't just drop it.
* Device models call blockdev_mark_auto_del() to schedule the
* automatic deletion, and generic qdev code calls blockdev_auto_del()
* when deletion is actually safe.
*/
void blockdev_mark_auto_del(BlockBackend *blk)
{
DriveInfo *dinfo = blk_legacy_dinfo(blk);
BlockJob *job;
GLOBAL_STATE_CODE();
if (!dinfo) {
return;
}
JOB_LOCK_GUARD();
do {
job = block_job_next_locked(NULL);
while (job && (job->job.cancelled ||
job->job.deferred_to_main_loop ||
!block_job_has_bdrv(job, blk_bs(blk))))
{
job = block_job_next_locked(job);
}
if (job) {
/*
* This drops the job lock temporarily and polls, so we need to
* restart processing the list from the start after this.
*/
job_cancel_locked(&job->job, false);
}
} while (job);
dinfo->auto_del = 1;
}
void blockdev_auto_del(BlockBackend *blk)
{
DriveInfo *dinfo = blk_legacy_dinfo(blk);
GLOBAL_STATE_CODE();
if (dinfo && dinfo->auto_del) {
monitor_remove_blk(blk);
blk_unref(blk);
}
}
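/*
* Decode a -drive "index" into a (bus, unit) address using the
* units-per-bus limit above: bus = index / max_devs,
* unit = index % max_devs.  For example, with if=scsi (7 units per
* bus), index=12 maps to bus=1, unit=5.
*/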
static int drive_index_to_bus_id(BlockInterfaceType type, int index)
{
int max_devs = if_max_devs[type];
return max_devs ? index / max_devs : 0;
}
static int drive_index_to_unit_id(BlockInterfaceType type, int index)
{
int max_devs = if_max_devs[type];
return max_devs ? index % max_devs : index;
}
QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file,
const char *optstr)
{
QemuOpts *opts;
GLOBAL_STATE_CODE();
opts = qemu_opts_parse_noisily(qemu_find_opts("drive"), optstr, false);
if (!opts) {
return NULL;
}
if (type != IF_DEFAULT) {
qemu_opt_set(opts, "if", if_name[type], &error_abort);
}
if (index >= 0) {
qemu_opt_set_number(opts, "index", index, &error_abort);
}
if (file) {
qemu_opt_set(opts, "file", file, &error_abort);
}
return opts;
}
DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit)
{
BlockBackend *blk;
DriveInfo *dinfo;
GLOBAL_STATE_CODE();
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
dinfo = blk_legacy_dinfo(blk);
if (dinfo && dinfo->type == type
&& dinfo->bus == bus && dinfo->unit == unit) {
return dinfo;
}
}
return NULL;
}
/*
* Check that the board claimed every -drive that is meant to be claimed.
* Fatal error if any remain unclaimed.
*/
void drive_check_orphaned(void)
{
BlockBackend *blk;
DriveInfo *dinfo;
Location loc;
bool orphans = false;
GLOBAL_STATE_CODE();
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
dinfo = blk_legacy_dinfo(blk);
/*
* Ignore default drives, because we create certain default
* drives unconditionally and then leave them unclaimed.  Not the
* user's fault.
* Ignore IF_VIRTIO, because it gets desugared into -device,
* so we can leave the failure reporting to -device.
* Ignore IF_NONE, because leaving an unclaimed IF_NONE drive
* available for device_add is a feature.
*/
if (dinfo->is_default || dinfo->type == IF_VIRTIO
|| dinfo->type == IF_NONE) {
continue;
}
if (!blk_get_attached_dev(blk)) {
loc_push_none(&loc);
qemu_opts_loc_restore(dinfo->opts);
error_report("machine type does not support"
" if=%s,bus=%d,unit=%d",
if_name[dinfo->type], dinfo->bus, dinfo->unit);
loc_pop(&loc);
orphans = true;
}
}
if (orphans) {
exit(1);
}
}
DriveInfo *drive_get_by_index(BlockInterfaceType type, int index)
{
GLOBAL_STATE_CODE();
return drive_get(type,
drive_index_to_bus_id(type, index),
drive_index_to_unit_id(type, index));
}
int drive_get_max_bus(BlockInterfaceType type)
{
int max_bus;
BlockBackend *blk;
DriveInfo *dinfo;
GLOBAL_STATE_CODE();
max_bus = -1;
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
dinfo = blk_legacy_dinfo(blk);
if (dinfo && dinfo->type == type && dinfo->bus > max_bus) {
max_bus = dinfo->bus;
}
}
return max_bus;
}
static void bdrv_format_print(void *opaque, const char *name)
{
qemu_printf(" %s", name);
}
typedef struct {
QEMUBH *bh;
BlockDriverState *bs;
} BDRVPutRefBH;
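/*
* Map a -drive werror/rerror string to a BLOCKDEV_ON_ERROR_* value.
* "enospc" is only accepted for write errors.  Returns -1 and sets
* @errp for an invalid action string.
*/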
static int parse_block_error_action(const char *buf, bool is_read, Error **errp)
{
if (!strcmp(buf, "ignore")) {
return BLOCKDEV_ON_ERROR_IGNORE;
} else if (!is_read && !strcmp(buf, "enospc")) {
return BLOCKDEV_ON_ERROR_ENOSPC;
} else if (!strcmp(buf, "stop")) {
return BLOCKDEV_ON_ERROR_STOP;
} else if (!strcmp(buf, "report")) {
return BLOCKDEV_ON_ERROR_REPORT;
} else {
error_setg(errp, "'%s' invalid %s error action",
buf, is_read ? "read" : "write");
return -1;
}
}
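/*
* Add the accounting intervals given in @intervals (a list of strings
* or integers) to @stats.  Each entry must be a positive length no
* larger than UINT_MAX.  Returns true on success, false with @errp set
* otherwise.
*/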
static bool parse_stats_intervals(BlockAcctStats *stats, QList *intervals,
Error **errp)
{
const QListEntry *entry;
for (entry = qlist_first(intervals); entry; entry = qlist_next(entry)) {
switch (qobject_type(entry->value)) {
case QTYPE_QSTRING: {
uint64_t length;
const char *str = qstring_get_str(qobject_to(QString,
entry->value));
if (parse_uint_full(str, 10, &length) == 0 &&
length > 0 && length <= UINT_MAX) {
block_acct_add_interval(stats, (unsigned) length);
} else {
error_setg(errp, "Invalid interval length: %s", str);
return false;
}
break;
}
case QTYPE_QNUM: {
int64_t length = qnum_get_int(qobject_to(QNum, entry->value));
if (length > 0 && length <= UINT_MAX) {
block_acct_add_interval(stats, (unsigned) length);
} else {
error_setg(errp, "Invalid interval length: %" PRId64, length);
return false;
}
break;
}
default:
error_setg(errp, "The specification of stats-intervals is invalid");
return false;
}
}
return true;
}
typedef enum { MEDIA_DISK, MEDIA_CDROM } DriveMediaType;
/* All parameters but @opts are optional and may be set to NULL. */
static void extract_common_blockdev_options(QemuOpts *opts, int *bdrv_flags,
const char **throttling_group, ThrottleConfig *throttle_cfg,
BlockdevDetectZeroesOptions *detect_zeroes, Error **errp)
{
Error *local_error = NULL;
const char *aio;
if (bdrv_flags) {
if (qemu_opt_get_bool(opts, "copy-on-read", false)) {
*bdrv_flags |= BDRV_O_COPY_ON_READ;
}
if ((aio = qemu_opt_get(opts, "aio")) != NULL) {
if (bdrv_parse_aio(aio, bdrv_flags) < 0) {
error_setg(errp, "invalid aio option");
return;
}
}
}
/* disk I/O throttling */
if (throttling_group) {
*throttling_group = qemu_opt_get(opts, "throttling.group");
}
if (throttle_cfg) {
throttle_config_init(throttle_cfg);
throttle_cfg->buckets[THROTTLE_BPS_TOTAL].avg =
qemu_opt_get_number(opts, "throttling.bps-total", 0);
throttle_cfg->buckets[THROTTLE_BPS_READ].avg =
qemu_opt_get_number(opts, "throttling.bps-read", 0);
throttle_cfg->buckets[THROTTLE_BPS_WRITE].avg =
qemu_opt_get_number(opts, "throttling.bps-write", 0);
throttle_cfg->buckets[THROTTLE_OPS_TOTAL].avg =
qemu_opt_get_number(opts, "throttling.iops-total", 0);
throttle_cfg->buckets[THROTTLE_OPS_READ].avg =
qemu_opt_get_number(opts, "throttling.iops-read", 0);
throttle_cfg->buckets[THROTTLE_OPS_WRITE].avg =
qemu_opt_get_number(opts, "throttling.iops-write", 0);
throttle_cfg->buckets[THROTTLE_BPS_TOTAL].max =
qemu_opt_get_number(opts, "throttling.bps-total-max", 0);
throttle_cfg->buckets[THROTTLE_BPS_READ].max =
qemu_opt_get_number(opts, "throttling.bps-read-max", 0);
throttle_cfg->buckets[THROTTLE_BPS_WRITE].max =
qemu_opt_get_number(opts, "throttling.bps-write-max", 0);
throttle_cfg->buckets[THROTTLE_OPS_TOTAL].max =
qemu_opt_get_number(opts, "throttling.iops-total-max", 0);
throttle_cfg->buckets[THROTTLE_OPS_READ].max =
qemu_opt_get_number(opts, "throttling.iops-read-max", 0);
throttle_cfg->buckets[THROTTLE_OPS_WRITE].max =
qemu_opt_get_number(opts, "throttling.iops-write-max", 0);
throttle_cfg->buckets[THROTTLE_BPS_TOTAL].burst_length =
qemu_opt_get_number(opts, "throttling.bps-total-max-length", 1);
throttle_cfg->buckets[THROTTLE_BPS_READ].burst_length =
qemu_opt_get_number(opts, "throttling.bps-read-max-length", 1);
throttle_cfg->buckets[THROTTLE_BPS_WRITE].burst_length =
qemu_opt_get_number(opts, "throttling.bps-write-max-length", 1);
throttle_cfg->buckets[THROTTLE_OPS_TOTAL].burst_length =
qemu_opt_get_number(opts, "throttling.iops-total-max-length", 1);
throttle_cfg->buckets[THROTTLE_OPS_READ].burst_length =
qemu_opt_get_number(opts, "throttling.iops-read-max-length", 1);
throttle_cfg->buckets[THROTTLE_OPS_WRITE].burst_length =
qemu_opt_get_number(opts, "throttling.iops-write-max-length", 1);
throttle_cfg->op_size =
qemu_opt_get_number(opts, "throttling.iops-size", 0);
if (!throttle_is_valid(throttle_cfg, errp)) {
return;
}
}
if (detect_zeroes) {
*detect_zeroes =
qapi_enum_parse(&BlockdevDetectZeroesOptions_lookup,
qemu_opt_get(opts, "detect-zeroes"),
BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF,
&local_error);
if (local_error) {
error_propagate(errp, local_error);
return;
}
}
}
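/* Map an optional boolean option to on/off/auto: an absent option means auto */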
static OnOffAuto account_get_opt(QemuOpts *opts, const char *name)
{
if (!qemu_opt_find(opts, name)) {
return ON_OFF_AUTO_AUTO;
}
if (qemu_opt_get_bool(opts, name, true)) {
return ON_OFF_AUTO_ON;
}
return ON_OFF_AUTO_OFF;
}
/* Takes the ownership of bs_opts */
static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
Error **errp)
{
const char *buf;
int bdrv_flags = 0;
int on_read_error, on_write_error;
OnOffAuto account_invalid, account_failed;
bool writethrough, read_only;
BlockBackend *blk;
BlockDriverState *bs;
ThrottleConfig cfg;
int snapshot = 0;
Error *error = NULL;
QemuOpts *opts;
QDict *interval_dict = NULL;
QList *interval_list = NULL;
const char *id;
BlockdevDetectZeroesOptions detect_zeroes =
BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
const char *throttling_group = NULL;
/* Check common options by copying from bs_opts to opts; all other options
* stay in bs_opts for processing by bdrv_open(). */
id = qdict_get_try_str(bs_opts, "id");
opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, errp);
if (!opts) {
goto err_no_opts;
}
if (!qemu_opts_absorb_qdict(opts, bs_opts, errp)) {
goto early_err;
}
if (id) {
qdict_del(bs_opts, "id");
}
/* extract parameters */
snapshot = qemu_opt_get_bool(opts, "snapshot", 0);
account_invalid = account_get_opt(opts, "stats-account-invalid");
account_failed = account_get_opt(opts, "stats-account-failed");
writethrough = !qemu_opt_get_bool(opts, BDRV_OPT_CACHE_WB, true);
id = qemu_opts_id(opts);
qdict_extract_subqdict(bs_opts, &interval_dict, "stats-intervals.");
qdict_array_split(interval_dict, &interval_list);
if (qdict_size(interval_dict) != 0) {
error_setg(errp, "Invalid option stats-intervals.%s",
qdict_first(interval_dict)->key);
goto early_err;
}
extract_common_blockdev_options(opts, &bdrv_flags, &throttling_group, &cfg,
&detect_zeroes, &error);
if (error) {
error_propagate(errp, error);
goto early_err;
}
if ((buf = qemu_opt_get(opts, "format")) != NULL) {
if (is_help_option(buf)) {
qemu_printf("Supported formats:");
bdrv_iterate_format(bdrv_format_print, NULL, false);
qemu_printf("\nSupported formats (read-only):");
bdrv_iterate_format(bdrv_format_print, NULL, true);
qemu_printf("\n");
goto early_err;
}
if (qdict_haskey(bs_opts, "driver")) {
error_setg(errp, "Cannot specify both 'driver' and 'format'");
goto early_err;
}
qdict_put_str(bs_opts, "driver", buf);
}
on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;
if ((buf = qemu_opt_get(opts, "werror")) != NULL) {
on_write_error = parse_block_error_action(buf, 0, &error);
if (error) {
error_propagate(errp, error);
goto early_err;
}
}
on_read_error = BLOCKDEV_ON_ERROR_REPORT;
if ((buf = qemu_opt_get(opts, "rerror")) != NULL) {
on_read_error = parse_block_error_action(buf, 1, &error);
if (error) {
error_propagate(errp, error);
goto early_err;
}
}
if (snapshot) {
bdrv_flags |= BDRV_O_SNAPSHOT;
}
read_only = qemu_opt_get_bool(opts, BDRV_OPT_READ_ONLY, false);
/* init */
if ((!file || !*file) && !qdict_size(bs_opts)) {
BlockBackendRootState *blk_rs;
blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
blk_rs = blk_get_root_state(blk);
blk_rs->open_flags = bdrv_flags | (read_only ? 0 : BDRV_O_RDWR);
blk_rs->detect_zeroes = detect_zeroes;
qobject_unref(bs_opts);
} else {
if (file && !*file) {
file = NULL;
}
/* bdrv_open() defaults to the values in bdrv_flags (for compatibility
* with other callers) rather than what we want as the real defaults.
* Apply the defaults here instead. */
qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_DIRECT, "off");
qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, "off");
qdict_set_default_str(bs_opts, BDRV_OPT_READ_ONLY,
read_only ? "on" : "off");
qdict_set_default_str(bs_opts, BDRV_OPT_AUTO_READ_ONLY, "on");
assert((bdrv_flags & BDRV_O_CACHE_MASK) == 0);
if (runstate_check(RUN_STATE_INMIGRATE)) {
bdrv_flags |= BDRV_O_INACTIVE;
}
blk = blk_new_open(file, NULL, bs_opts, bdrv_flags, errp);
if (!blk) {
goto err_no_bs_opts;
}
bs = blk_bs(blk);
bs->detect_zeroes = detect_zeroes;
block_acct_setup(blk_get_stats(blk), account_invalid, account_failed);
if (!parse_stats_intervals(blk_get_stats(blk), interval_list, errp)) {
blk_unref(blk);
blk = NULL;
goto err_no_bs_opts;
}
}
/* disk I/O throttling */
if (throttle_enabled(&cfg)) {
if (!throttling_group) {
throttling_group = id;
}
blk_io_limits_enable(blk, throttling_group);
blk_set_io_limits(blk, &cfg);
}
blk_set_enable_write_cache(blk, !writethrough);
blk_set_on_error(blk, on_read_error, on_write_error);
if (!monitor_add_blk(blk, id, errp)) {
blk_unref(blk);
blk = NULL;
goto err_no_bs_opts;
}
err_no_bs_opts:
qemu_opts_del(opts);
qobject_unref(interval_dict);
qobject_unref(interval_list);
return blk;
early_err:
qemu_opts_del(opts);
qobject_unref(interval_dict);
qobject_unref(interval_list);
err_no_opts:
qobject_unref(bs_opts);
return NULL;
}
/* Takes the ownership of bs_opts */
BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp)
{
BlockDriverState *bs;
int bdrv_flags = 0;
GLOBAL_STATE_CODE();
/* bdrv_open() defaults to the values in bdrv_flags (for compatibility
* with other callers) rather than what we want as the real defaults.
* Apply the defaults here instead. */
qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_DIRECT, "off");
qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, "off");
qdict_set_default_str(bs_opts, BDRV_OPT_READ_ONLY, "off");
if (runstate_check(RUN_STATE_INMIGRATE)) {
bdrv_flags |= BDRV_O_INACTIVE;
}
aio_context_acquire(qemu_get_aio_context());
bs = bdrv_open(NULL, NULL, bs_opts, bdrv_flags, errp);
aio_context_release(qemu_get_aio_context());
return bs;
}
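/* Drop the monitor's reference to every monitor-owned BlockDriverState */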
void blockdev_close_all_bdrv_states(void)
{
BlockDriverState *bs, *next_bs;
GLOBAL_STATE_CODE();
QTAILQ_FOREACH_SAFE(bs, &monitor_bdrv_states, monitor_list, next_bs) {
AioContext *ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
bdrv_unref(bs);
aio_context_release(ctx);
}
}
/* Iterates over the list of monitor-owned BlockDriverStates */
BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs)
{
GLOBAL_STATE_CODE();
return bs ? QTAILQ_NEXT(bs, monitor_list)
: QTAILQ_FIRST(&monitor_bdrv_states);
}
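/*
* Rename option @from to @to in @opts.  Fails if both the old and the
* new name are present at the same time.
*/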
static bool qemu_opt_rename(QemuOpts *opts, const char *from, const char *to,
Error **errp)
{
const char *value;
value = qemu_opt_get(opts, from);
if (value) {
if (qemu_opt_find(opts, to)) {
error_setg(errp, "'%s' and its alias '%s' can't be used at the "
"same time", to, from);
return false;
}
}
/* rename all items in opts */
while ((value = qemu_opt_get(opts, from))) {
qemu_opt_set(opts, to, value, &error_abort);
qemu_opt_unset(opts, from);
}
return true;
}
QemuOptsList qemu_legacy_drive_opts = {
.name = "drive",
.head = QTAILQ_HEAD_INITIALIZER(qemu_legacy_drive_opts.head),
.desc = {
{
.name = "bus",
.type = QEMU_OPT_NUMBER,
.help = "bus number",
},{
.name = "unit",
.type = QEMU_OPT_NUMBER,
.help = "unit number (i.e. lun for scsi)",
},{
.name = "index",
.type = QEMU_OPT_NUMBER,
.help = "index number",
},{
.name = "media",
.type = QEMU_OPT_STRING,
.help = "media type (disk, cdrom)",
},{
.name = "if",
.type = QEMU_OPT_STRING,
.help = "interface (ide, scsi, sd, mtd, floppy, pflash, virtio)",
},{
.name = "file",
.type = QEMU_OPT_STRING,
.help = "file name",
},
/* Options that are passed on, but have special semantics with -drive */
{
.name = BDRV_OPT_READ_ONLY,
.type = QEMU_OPT_BOOL,
.help = "open drive file as read-only",
},{
.name = "rerror",
.type = QEMU_OPT_STRING,
.help = "read error action",
},{
.name = "werror",
.type = QEMU_OPT_STRING,
.help = "write error action",
},{
.name = "copy-on-read",
.type = QEMU_OPT_BOOL,
.help = "copy read data from backing file into image file",
},
{ /* end of list */ }
},
};
DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type,
Error **errp)
{
const char *value;
BlockBackend *blk;
DriveInfo *dinfo = NULL;
QDict *bs_opts;
QemuOpts *legacy_opts;
DriveMediaType media = MEDIA_DISK;
BlockInterfaceType type;
int max_devs, bus_id, unit_id, index;
const char *werror, *rerror;
bool read_only = false;
bool copy_on_read;
const char *filename;
int i;
GLOBAL_STATE_CODE();
/* Change legacy command line options into QMP ones */
static const struct {
const char *from;
const char *to;
} opt_renames[] = {
{ "iops", "throttling.iops-total" },
{ "iops_rd", "throttling.iops-read" },
{ "iops_wr", "throttling.iops-write" },
{ "bps", "throttling.bps-total" },
{ "bps_rd", "throttling.bps-read" },
{ "bps_wr", "throttling.bps-write" },
{ "iops_max", "throttling.iops-total-max" },
{ "iops_rd_max", "throttling.iops-read-max" },
{ "iops_wr_max", "throttling.iops-write-max" },
{ "bps_max", "throttling.bps-total-max" },
{ "bps_rd_max", "throttling.bps-read-max" },
{ "bps_wr_max", "throttling.bps-write-max" },
{ "iops_size", "throttling.iops-size" },
{ "group", "throttling.group" },
{ "readonly", BDRV_OPT_READ_ONLY },
};
for (i = 0; i < ARRAY_SIZE(opt_renames); i++) {
if (!qemu_opt_rename(all_opts, opt_renames[i].from,
opt_renames[i].to, errp)) {
return NULL;
}
}
value = qemu_opt_get(all_opts, "cache");
if (value) {
int flags = 0;
bool writethrough;
if (bdrv_parse_cache_mode(value, &flags, &writethrough) != 0) {
error_setg(errp, "invalid cache option");
return NULL;
}
/* Specific options take precedence */
if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_WB)) {
qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_WB,
!writethrough, &error_abort);
}
if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_DIRECT)) {
qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_DIRECT,
!!(flags & BDRV_O_NOCACHE), &error_abort);
}
if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_NO_FLUSH)) {
qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_NO_FLUSH,
!!(flags & BDRV_O_NO_FLUSH), &error_abort);
}
qemu_opt_unset(all_opts, "cache");
}
/* Get a QDict for processing the options */
bs_opts = qdict_new();
qemu_opts_to_qdict(all_opts, bs_opts);
legacy_opts = qemu_opts_create(&qemu_legacy_drive_opts, NULL, 0,
&error_abort);
if (!qemu_opts_absorb_qdict(legacy_opts, bs_opts, errp)) {
goto fail;
}
/* Media type */
value = qemu_opt_get(legacy_opts, "media");
if (value) {
if (!strcmp(value, "disk")) {
media = MEDIA_DISK;
} else if (!strcmp(value, "cdrom")) {
media = MEDIA_CDROM;
read_only = true;
} else {
error_setg(errp, "'%s' invalid media", value);
goto fail;
}
}
/* copy-on-read is disabled with a warning for read-only devices */
read_only |= qemu_opt_get_bool(legacy_opts, BDRV_OPT_READ_ONLY, false);
copy_on_read = qemu_opt_get_bool(legacy_opts, "copy-on-read", false);
if (read_only && copy_on_read) {
warn_report("disabling copy-on-read on read-only drive");
copy_on_read = false;
}
qdict_put_str(bs_opts, BDRV_OPT_READ_ONLY, read_only ? "on" : "off");
qdict_put_str(bs_opts, "copy-on-read", copy_on_read ? "on" : "off");
/* Controller type */
value = qemu_opt_get(legacy_opts, "if");
if (value) {
for (type = 0;
type < IF_COUNT && strcmp(value, if_name[type]);
type++) {
}
if (type == IF_COUNT) {
error_setg(errp, "unsupported bus type '%s'", value);
goto fail;
}
} else {
type = block_default_type;
}
/* Device address specified by bus/unit or index.
* If none was specified, try to find the first free one. */
bus_id = qemu_opt_get_number(legacy_opts, "bus", 0);
unit_id = qemu_opt_get_number(legacy_opts, "unit", -1);
index = qemu_opt_get_number(legacy_opts, "index", -1);
max_devs = if_max_devs[type];
if (index != -1) {
if (bus_id != 0 || unit_id != -1) {
error_setg(errp, "index cannot be used with bus and unit");
goto fail;
}
bus_id = drive_index_to_bus_id(type, index);
unit_id = drive_index_to_unit_id(type, index);
}
if (unit_id == -1) {
unit_id = 0;
while (drive_get(type, bus_id, unit_id) != NULL) {
unit_id++;
if (max_devs && unit_id >= max_devs) {
unit_id -= max_devs;
bus_id++;
}
}
}
if (max_devs && unit_id >= max_devs) {
error_setg(errp, "unit %d too big (max is %d)", unit_id, max_devs - 1);
goto fail;
}
if (drive_get(type, bus_id, unit_id) != NULL) {
error_setg(errp, "drive with bus=%d, unit=%d (index=%d) exists",
bus_id, unit_id, index);
goto fail;
}
/* no id supplied -> create one */
if (qemu_opts_id(all_opts) == NULL) {
char *new_id;
const char *mediastr = "";
if (type == IF_IDE || type == IF_SCSI) {
mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd";
}
if (max_devs) {
new_id = g_strdup_printf("%s%i%s%i", if_name[type], bus_id,
mediastr, unit_id);
} else {
new_id = g_strdup_printf("%s%s%i", if_name[type],
mediastr, unit_id);
}
qdict_put_str(bs_opts, "id", new_id);
g_free(new_id);
}
/* Add virtio block device */
if (type == IF_VIRTIO) {
QemuOpts *devopts;
devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0,
&error_abort);
qemu_opt_set(devopts, "driver", "virtio-blk", &error_abort);
qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"),
&error_abort);
}
filename = qemu_opt_get(legacy_opts, "file");
/* Check werror/rerror compatibility with if=... */
werror = qemu_opt_get(legacy_opts, "werror");
if (werror != NULL) {
if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO &&
type != IF_NONE) {
error_setg(errp, "werror is not supported by this bus type");
goto fail;
}
qdict_put_str(bs_opts, "werror", werror);
}
rerror = qemu_opt_get(legacy_opts, "rerror");
if (rerror != NULL) {
if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI &&
type != IF_NONE) {
error_setg(errp, "rerror is not supported by this bus type");
goto fail;
}
qdict_put_str(bs_opts, "rerror", rerror);
}
/* Actual block device init: Functionality shared with blockdev-add */
blk = blockdev_init(filename, bs_opts, errp);
bs_opts = NULL;
if (!blk) {
goto fail;
}
/* Create legacy DriveInfo */
dinfo = g_malloc0(sizeof(*dinfo));
dinfo->opts = all_opts;
dinfo->type = type;
dinfo->bus = bus_id;
dinfo->unit = unit_id;
blk_set_legacy_dinfo(blk, dinfo);
switch (type) {
case IF_IDE:
case IF_SCSI:
case IF_XEN:
case IF_NONE:
dinfo->media_cd = media == MEDIA_CDROM;
break;
default:
break;
}
fail:
qemu_opts_del(legacy_opts);
qobject_unref(bs_opts);
return dinfo;
}
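/*
* Look up the root block node for @name and check that it has a medium
* inserted.  Returns NULL and sets @errp on failure.
*/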
static BlockDriverState *qmp_get_root_bs(const char *name, Error **errp)
{
BlockDriverState *bs;
AioContext *aio_context;
bs = bdrv_lookup_bs(name, name, errp);
if (bs == NULL) {
return NULL;
}
if (!bdrv_is_root_node(bs)) {
error_setg(errp, "Need a root block node");
return NULL;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
if (!bdrv_is_inserted(bs)) {
error_setg(errp, "Device has no medium");
bs = NULL;
}
aio_context_release(aio_context);
return bs;
}
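/* Run a single transaction action through qmp_transaction() */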
static void blockdev_do_action(TransactionAction *action, Error **errp)
{
TransactionActionList list;
list.value = action;
list.next = NULL;
qmp_transaction(&list, NULL, errp);
}
void qmp_blockdev_snapshot_sync(const char *device, const char *node_name,
const char *snapshot_file,
const char *snapshot_node_name,
const char *format,
bool has_mode, NewImageMode mode, Error **errp)
{
BlockdevSnapshotSync snapshot = {
.device = (char *) device,
.node_name = (char *) node_name,
.snapshot_file = (char *) snapshot_file,
.snapshot_node_name = (char *) snapshot_node_name,
.format = (char *) format,
.has_mode = has_mode,
.mode = mode,
};
TransactionAction action = {
.type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC,
.u.blockdev_snapshot_sync.data = &snapshot,
};
blockdev_do_action(&action, errp);
}
void qmp_blockdev_snapshot(const char *node, const char *overlay,
Error **errp)
{
BlockdevSnapshot snapshot_data = {
.node = (char *) node,
.overlay = (char *) overlay
};
TransactionAction action = {
.type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT,
.u.blockdev_snapshot.data = &snapshot_data,
};
blockdev_do_action(&action, errp);
}
void qmp_blockdev_snapshot_internal_sync(const char *device,
const char *name,
Error **errp)
{
BlockdevSnapshotInternal snapshot = {
.device = (char *) device,
.name = (char *) name
};
TransactionAction action = {
.type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC,
.u.blockdev_snapshot_internal_sync.data = &snapshot,
};
blockdev_do_action(&action, errp);
}
SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device,
const char *id,
const char *name,
Error **errp)
{
BlockDriverState *bs;
AioContext *aio_context;
QEMUSnapshotInfo sn;
Error *local_err = NULL;
SnapshotInfo *info = NULL;
int ret;
bs = qmp_get_root_bs(device, errp);
if (!bs) {
return NULL;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
if (!id && !name) {
error_setg(errp, "Name or id must be provided");
goto out_aio_context;
}
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) {
goto out_aio_context;
}
ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err);
if (local_err) {
error_propagate(errp, local_err);
goto out_aio_context;
}
if (!ret) {
error_setg(errp,
"Snapshot with id '%s' and name '%s' does not exist on "
"device '%s'",
STR_OR_NULL(id), STR_OR_NULL(name), device);
goto out_aio_context;
}
bdrv_snapshot_delete(bs, id, name, &local_err);
if (local_err) {
error_propagate(errp, local_err);
goto out_aio_context;
}
aio_context_release(aio_context);
info = g_new0(SnapshotInfo, 1);
info->id = g_strdup(sn.id_str);
info->name = g_strdup(sn.name);
info->date_nsec = sn.date_nsec;
info->date_sec = sn.date_sec;
info->vm_state_size = sn.vm_state_size;
info->vm_clock_nsec = sn.vm_clock_nsec % 1000000000;
info->vm_clock_sec = sn.vm_clock_nsec / 1000000000;
if (sn.icount != -1ULL) {
info->icount = sn.icount;
info->has_icount = true;
}
return info;
out_aio_context:
aio_context_release(aio_context);
return NULL;
}
/* internal snapshot private data */
typedef struct InternalSnapshotState {
BlockDriverState *bs;
QEMUSnapshotInfo sn;
bool created;
} InternalSnapshotState;
static void internal_snapshot_abort(void *opaque);
static void internal_snapshot_clean(void *opaque);
TransactionActionDrv internal_snapshot_drv = {
.abort = internal_snapshot_abort,
.clean = internal_snapshot_clean,
};
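/*
* Prepare an internal snapshot as part of a transaction: validate the
* device, create the snapshot, and remember it in the per-action state
* so that .abort can delete it again if the transaction fails.
*/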
static void internal_snapshot_action(BlockdevSnapshotInternal *internal,
Transaction *tran, Error **errp)
{
Error *local_err = NULL;
const char *device;
const char *name;
BlockDriverState *bs;
QEMUSnapshotInfo old_sn, *sn;
bool ret;
int64_t rt;
InternalSnapshotState *state = g_new0(InternalSnapshotState, 1);
AioContext *aio_context;
int ret1;
tran_add(tran, &internal_snapshot_drv, state);
device = internal->device;
name = internal->name;
bs = qmp_get_root_bs(device, errp);
if (!bs) {
return;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
state->bs = bs;
/* Paired with .clean() */
bdrv_drained_begin(bs);
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) {
goto out;
}
if (bdrv_is_read_only(bs)) {
error_setg(errp, "Device '%s' is read only", device);
goto out;
}
if (!bdrv_can_snapshot(bs)) {
error_setg(errp, "Block format '%s' used by device '%s' "
"does not support internal snapshots",
bs->drv->format_name, device);
goto out;
}
if (!strlen(name)) {
error_setg(errp, "Name is empty");
goto out;
}
/* check whether a snapshot with this name already exists */
ret = bdrv_snapshot_find_by_id_and_name(bs, NULL, name, &old_sn,
&local_err);
if (local_err) {
error_propagate(errp, local_err);
goto out;
} else if (ret) {
error_setg(errp,
"Snapshot with name '%s' already exists on device '%s'",
name, device);
goto out;
}
/* take the snapshot */
sn = &state->sn;
pstrcpy(sn->name, sizeof(sn->name), name);
rt = g_get_real_time();
sn->date_sec = rt / G_USEC_PER_SEC;
sn->date_nsec = (rt % G_USEC_PER_SEC) * 1000;
sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (replay_mode != REPLAY_MODE_NONE) {
sn->icount = replay_get_current_icount();
} else {
sn->icount = -1ULL;
}
ret1 = bdrv_snapshot_create(bs, sn);
if (ret1 < 0) {
error_setg_errno(errp, -ret1,
"Failed to create snapshot '%s' on device '%s'",
name, device);
goto out;
}
/* success: mark that the snapshot was created */
state->created = true;
out:
aio_context_release(aio_context);
}
static void internal_snapshot_abort(void *opaque)
{
InternalSnapshotState *state = opaque;
BlockDriverState *bs = state->bs;
QEMUSnapshotInfo *sn = &state->sn;
AioContext *aio_context;
Error *local_error = NULL;
if (!state->created) {
return;
}
aio_context = bdrv_get_aio_context(state->bs);
aio_context_acquire(aio_context);
if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) {
error_reportf_err(local_error,
"Failed to delete snapshot with id '%s' and "
"name '%s' on device '%s' in abort: ",
sn->id_str, sn->name,
bdrv_get_device_name(bs));
}
aio_context_release(aio_context);
}
static void internal_snapshot_clean(void *opaque)
{
g_autofree InternalSnapshotState *state = opaque;
AioContext *aio_context;
if (!state->bs) {
return;
}
aio_context = bdrv_get_aio_context(state->bs);
aio_context_acquire(aio_context);
bdrv_drained_end(state->bs);
aio_context_release(aio_context);
}
/* external snapshot private data */
typedef struct ExternalSnapshotState {
BlockDriverState *old_bs;
BlockDriverState *new_bs;
bool overlay_appended;
} ExternalSnapshotState;
static void external_snapshot_commit(void *opaque);
static void external_snapshot_abort(void *opaque);
static void external_snapshot_clean(void *opaque);
TransactionActionDrv external_snapshot_drv = {
.commit = external_snapshot_commit,
.abort = external_snapshot_abort,
.clean = external_snapshot_clean,
};
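/*
* Prepare an external snapshot as part of a transaction.  Used by both
* 'blockdev-snapshot' (which references an existing overlay node) and
* 'blockdev-snapshot-sync' (which creates a new image file).
*/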
static void external_snapshot_action(TransactionAction *action,
Transaction *tran, Error **errp)
{
int ret;
int flags = 0;
QDict *options = NULL;
Error *local_err = NULL;
/* Device and node name of the image to generate the snapshot from */
const char *device;
const char *node_name;
/* Reference to the new image (for 'blockdev-snapshot') */
const char *snapshot_ref;
/* File name of the new image (for 'blockdev-snapshot-sync') */
const char *new_image_file;
ExternalSnapshotState *state = g_new0(ExternalSnapshotState, 1);
AioContext *aio_context;
uint64_t perm, shared;
/* TODO We'll eventually have to take a writer lock in this function */
GRAPH_RDLOCK_GUARD_MAINLOOP();
tran_add(tran, &external_snapshot_drv, state);
/* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have a similar
 * purpose but a different set of parameters */
switch (action->type) {
case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT:
{
BlockdevSnapshot *s = action->u.blockdev_snapshot.data;
device = s->node;
node_name = s->node;
new_image_file = NULL;
snapshot_ref = s->overlay;
}
break;
case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC:
{
BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data;
device = s->device;
node_name = s->node_name;
new_image_file = s->snapshot_file;
snapshot_ref = NULL;
}
break;
default:
g_assert_not_reached();
}
/* start processing */
state->old_bs = bdrv_lookup_bs(device, node_name, errp);
if (!state->old_bs) {
return;
}
aio_context = bdrv_get_aio_context(state->old_bs);
aio_context_acquire(aio_context);
/* Paired with .clean() */
bdrv_drained_begin(state->old_bs);
if (!bdrv_is_inserted(state->old_bs)) {
error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
goto out;
}
if (bdrv_op_is_blocked(state->old_bs,
BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, errp)) {
goto out;
}
if (!bdrv_is_read_only(state->old_bs)) {
if (bdrv_flush(state->old_bs)) {
error_setg(errp, QERR_IO_ERROR);
goto out;
}
}
if (action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC) {
BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data;
const char *format = s->format ?: "qcow2";
enum NewImageMode mode;
const char *snapshot_node_name = s->snapshot_node_name;
if (node_name && !snapshot_node_name) {
error_setg(errp, "New overlay node-name missing");
goto out;
}
if (snapshot_node_name &&
bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) {
error_setg(errp, "New overlay node-name already in use");
goto out;
}
flags = state->old_bs->open_flags;
flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_COPY_ON_READ);
flags |= BDRV_O_NO_BACKING;
/* create new image w/backing file */
mode = s->has_mode ? s->mode : NEW_IMAGE_MODE_ABSOLUTE_PATHS;
if (mode != NEW_IMAGE_MODE_EXISTING) {
int64_t size = bdrv_getlength(state->old_bs);
if (size < 0) {
error_setg_errno(errp, -size, "bdrv_getlength failed");
goto out;
}
bdrv_refresh_filename(state->old_bs);
aio_context_release(aio_context);
bdrv_img_create(new_image_file, format,
state->old_bs->filename,
state->old_bs->drv->format_name,
NULL, size, flags, false, &local_err);
aio_context_acquire(aio_context);
if (local_err) {
error_propagate(errp, local_err);
goto out;
}
}
options = qdict_new();
if (snapshot_node_name) {
qdict_put_str(options, "node-name", snapshot_node_name);
}
qdict_put_str(options, "driver", format);
}
aio_context_release(aio_context);
aio_context_acquire(qemu_get_aio_context());
state->new_bs = bdrv_open(new_image_file, snapshot_ref, options, flags,
errp);
aio_context_release(qemu_get_aio_context());
/* We will manually add the backing_hd field to the bs later */
if (!state->new_bs) {
return;
}
aio_context_acquire(aio_context);
/*
* Allow attaching a backing file to an overlay that's already in use only
* if the parents don't assume that they are already seeing a valid image.
* (Specifically, allow it as a mirror target, which is write-only access.)
*/
bdrv_get_cumulative_perm(state->new_bs, &perm, &shared);
if (perm & BLK_PERM_CONSISTENT_READ) {
error_setg(errp, "The overlay is already in use");
goto out;
}
if (state->new_bs->drv->is_filter) {
error_setg(errp, "Filters cannot be used as overlays");
goto out;
}
if (bdrv_cow_child(state->new_bs)) {
error_setg(errp, "The overlay already has a backing image");
goto out;
}
if (!state->new_bs->drv->supports_backing) {
error_setg(errp, "The overlay does not support backing images");
goto out;
}
ret = bdrv_append(state->new_bs, state->old_bs, errp);
if (ret < 0) {
goto out;
}
state->overlay_appended = true;
out:
aio_context_release(aio_context);
}
static void external_snapshot_commit(void *opaque)
{
ExternalSnapshotState *state = opaque;
AioContext *aio_context;
aio_context = bdrv_get_aio_context(state->old_bs);
aio_context_acquire(aio_context);
/* We don't need (or want) to use the transactional
* bdrv_reopen_multiple() across all the entries at once, because we
* don't want to abort all of them if one of them fails the reopen */
if (!qatomic_read(&state->old_bs->copy_on_read)) {
bdrv_reopen_set_read_only(state->old_bs, true, NULL);
}
aio_context_release(aio_context);
}
static void external_snapshot_abort(void *opaque)
{
ExternalSnapshotState *state = opaque;
if (state->new_bs) {
if (state->overlay_appended) {
AioContext *aio_context;
AioContext *tmp_context;
int ret;
aio_context = bdrv_get_aio_context(state->old_bs);
aio_context_acquire(aio_context);
bdrv_ref(state->old_bs);   /* we can't let bdrv_set_backing_hd()
                              close state->old_bs; we need it */
bdrv_set_backing_hd(state->new_bs, NULL, &error_abort);
/*
* The call to bdrv_set_backing_hd() above returns state->old_bs to
* the main AioContext. As we're still going to be using it, return
* it to the AioContext it was in before.
*/
tmp_context = bdrv_get_aio_context(state->old_bs);
if (aio_context != tmp_context) {
aio_context_release(aio_context);
aio_context_acquire(tmp_context);
ret = bdrv_try_change_aio_context(state->old_bs,
aio_context, NULL, NULL);
assert(ret == 0);
aio_context_release(tmp_context);
aio_context_acquire(aio_context);
}
bdrv_replace_node(state->new_bs, state->old_bs, &error_abort);
bdrv_unref(state->old_bs); /* bdrv_replace_node() ref'ed old_bs */
aio_context_release(aio_context);
}
}
}
static void external_snapshot_clean(void *opaque)
{
g_autofree ExternalSnapshotState *state = opaque;
AioContext *aio_context;
if (!state->old_bs) {
return;
}
aio_context = bdrv_get_aio_context(state->old_bs);
aio_context_acquire(aio_context);
bdrv_drained_end(state->old_bs);
bdrv_unref(state->new_bs);
aio_context_release(aio_context);
}
typedef struct DriveBackupState {
BlockDriverState *bs;
BlockJob *job;
} DriveBackupState;
static BlockJob *do_backup_common(BackupCommon *backup,
BlockDriverState *bs,
BlockDriverState *target_bs,
AioContext *aio_context,
JobTxn *txn, Error **errp);
static void drive_backup_commit(void *opaque);
static void drive_backup_abort(void *opaque);
static void drive_backup_clean(void *opaque);
TransactionActionDrv drive_backup_drv = {
.commit = drive_backup_commit,
.abort = drive_backup_abort,
.clean = drive_backup_clean,
};
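/*
 * Illustrative 'drive-backup' transaction action; device, target and format
 * values are examples only:
 *
 *   { "type": "drive-backup",
 *     "data": { "device": "drive0",
 *               "target": "/tmp/backup.qcow2",
 *               "sync": "full",
 *               "format": "qcow2" } }
 */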
static void drive_backup_action(DriveBackup *backup,
JobTxn *block_job_txn,
Transaction *tran, Error **errp)
{
DriveBackupState *state = g_new0(DriveBackupState, 1);
BlockDriverState *bs;
BlockDriverState *target_bs;
BlockDriverState *source = NULL;
AioContext *aio_context;
AioContext *old_context;
const char *format;
QDict *options;
Error *local_err = NULL;
int flags;
int64_t size;
bool set_backing_hd = false;
int ret;
tran_add(tran, &drive_backup_drv, state);
if (!backup->has_mode) {
backup->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
}
bs = bdrv_lookup_bs(backup->device, backup->device, errp);
if (!bs) {
return;
}
if (!bs->drv) {
error_setg(errp, "Device has no medium");
return;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
state->bs = bs;
/* Paired with .clean() */
bdrv_drained_begin(bs);
format = backup->format;
if (!format && backup->mode != NEW_IMAGE_MODE_EXISTING) {
format = bs->drv->format_name;
}
/* Early check to avoid creating target */
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
goto out;
}
flags = bs->open_flags | BDRV_O_RDWR;
/*
* See if we have a backing HD we can use to create our new image
* on top of.
*/
if (backup->sync == MIRROR_SYNC_MODE_TOP) {
/*
* Backup will not replace the source by the target, so none
* of the filters skipped here will be removed (in contrast to
* mirror). Therefore, we can skip all of them when looking
* for the first COW relationship.
*/
source = bdrv_cow_bs(bdrv_skip_filters(bs));
if (!source) {
backup->sync = MIRROR_SYNC_MODE_FULL;
}
}
if (backup->sync == MIRROR_SYNC_MODE_NONE) {
source = bs;
flags |= BDRV_O_NO_BACKING;
set_backing_hd = true;
}
size = bdrv_getlength(bs);
if (size < 0) {
error_setg_errno(errp, -size, "bdrv_getlength failed");
goto out;
}
if (backup->mode != NEW_IMAGE_MODE_EXISTING) {
assert(format);
if (source) {
/* Implicit filters should not appear in the filename */
BlockDriverState *explicit_backing =
bdrv_skip_implicit_filters(source);
bdrv_refresh_filename(explicit_backing);
bdrv_img_create(backup->target, format,
explicit_backing->filename,
explicit_backing->drv->format_name, NULL,
size, flags, false, &local_err);
} else {
bdrv_img_create(backup->target, format, NULL, NULL, NULL,
size, flags, false, &local_err);
}
}
if (local_err) {
error_propagate(errp, local_err);
goto out;
}
options = qdict_new();
qdict_put_str(options, "discard", "unmap");
qdict_put_str(options, "detect-zeroes", "unmap");
if (format) {
qdict_put_str(options, "driver", format);
}
aio_context_release(aio_context);
aio_context_acquire(qemu_get_aio_context());
target_bs = bdrv_open(backup->target, NULL, options, flags, errp);
aio_context_release(qemu_get_aio_context());
if (!target_bs) {
return;
}
/* Honor bdrv_try_change_aio_context() context acquisition requirements. */
old_context = bdrv_get_aio_context(target_bs);
aio_context_acquire(old_context);
ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
if (ret < 0) {
bdrv_unref(target_bs);
aio_context_release(old_context);
return;
}
aio_context_release(old_context);
aio_context_acquire(aio_context);
if (set_backing_hd) {
if (bdrv_set_backing_hd(target_bs, source, errp) < 0) {
goto unref;
}
}
state->job = do_backup_common(qapi_DriveBackup_base(backup),
bs, target_bs, aio_context,
block_job_txn, errp);
unref:
bdrv_unref(target_bs);
out:
aio_context_release(aio_context);
}
static void drive_backup_commit(void *opaque)
{
DriveBackupState *state = opaque;
AioContext *aio_context;
aio_context = bdrv_get_aio_context(state->bs);
aio_context_acquire(aio_context);
assert(state->job);
job_start(&state->job->job);
aio_context_release(aio_context);
}
static void drive_backup_abort(void *opaque)
{
DriveBackupState *state = opaque;
if (state->job) {
job_cancel_sync(&state->job->job, true);
}
}
static void drive_backup_clean(void *opaque)
{
g_autofree DriveBackupState *state = opaque;
AioContext *aio_context;
if (!state->bs) {
return;
}
aio_context = bdrv_get_aio_context(state->bs);
aio_context_acquire(aio_context);
bdrv_drained_end(state->bs);
aio_context_release(aio_context);
}
typedef struct BlockdevBackupState {
BlockDriverState *bs;
BlockJob *job;
} BlockdevBackupState;
static void blockdev_backup_commit(void *opaque);
static void blockdev_backup_abort(void *opaque);
static void blockdev_backup_clean(void *opaque);
TransactionActionDrv blockdev_backup_drv = {
.commit = blockdev_backup_commit,
.abort = blockdev_backup_abort,
.clean = blockdev_backup_clean,
};
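/*
 * Illustrative 'blockdev-backup' transaction action; unlike drive-backup,
 * the target must already exist as a named node (node names are examples):
 *
 *   { "type": "blockdev-backup",
 *     "data": { "device": "src-node", "target": "tgt-node", "sync": "full" } }
 */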
static void blockdev_backup_action(BlockdevBackup *backup,
JobTxn *block_job_txn,
Transaction *tran, Error **errp)
{
BlockdevBackupState *state = g_new0(BlockdevBackupState, 1);
BlockDriverState *bs;
BlockDriverState *target_bs;
AioContext *aio_context;
AioContext *old_context;
int ret;
tran_add(tran, &blockdev_backup_drv, state);
bs = bdrv_lookup_bs(backup->device, backup->device, errp);
if (!bs) {
return;
}
target_bs = bdrv_lookup_bs(backup->target, backup->target, errp);
if (!target_bs) {
return;
}
/* Honor bdrv_try_change_aio_context() context acquisition requirements. */
aio_context = bdrv_get_aio_context(bs);
old_context = bdrv_get_aio_context(target_bs);
aio_context_acquire(old_context);
ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
if (ret < 0) {
aio_context_release(old_context);
return;
}
aio_context_release(old_context);
aio_context_acquire(aio_context);
state->bs = bs;
/* Paired with .clean() */
bdrv_drained_begin(state->bs);
state->job = do_backup_common(qapi_BlockdevBackup_base(backup),
bs, target_bs, aio_context,
block_job_txn, errp);
aio_context_release(aio_context);
}
static void blockdev_backup_commit(void *opaque)
{
BlockdevBackupState *state = opaque;
AioContext *aio_context;
aio_context = bdrv_get_aio_context(state->bs);
aio_context_acquire(aio_context);
assert(state->job);
job_start(&state->job->job);
aio_context_release(aio_context);
}
static void blockdev_backup_abort(void *opaque)
{
BlockdevBackupState *state = opaque;
if (state->job) {
job_cancel_sync(&state->job->job, true);
}
}
static void blockdev_backup_clean(void *opaque)
{
g_autofree BlockdevBackupState *state = opaque;
AioContext *aio_context;
if (!state->bs) {
return;
}
aio_context = bdrv_get_aio_context(state->bs);
aio_context_acquire(aio_context);
bdrv_drained_end(state->bs);
aio_context_release(aio_context);
}
typedef struct BlockDirtyBitmapState {
BdrvDirtyBitmap *bitmap;
BlockDriverState *bs;
HBitmap *backup;
bool was_enabled;
} BlockDirtyBitmapState;
static void block_dirty_bitmap_add_abort(void *opaque);
TransactionActionDrv block_dirty_bitmap_add_drv = {
.abort = block_dirty_bitmap_add_abort,
.clean = g_free,
};
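/*
 * Illustrative 'block-dirty-bitmap-add' transaction action (example names):
 *
 *   { "type": "block-dirty-bitmap-add",
 *     "data": { "node": "drive0", "name": "bitmap0", "persistent": true } }
 */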
static void block_dirty_bitmap_add_action(BlockDirtyBitmapAdd *action,
Transaction *tran, Error **errp)
{
Error *local_err = NULL;
BlockDirtyBitmapState *state = g_new0(BlockDirtyBitmapState, 1);
tran_add(tran, &block_dirty_bitmap_add_drv, state);
/* AIO context taken and released within qmp_block_dirty_bitmap_add */
qmp_block_dirty_bitmap_add(action->node, action->name,
action->has_granularity, action->granularity,
action->has_persistent, action->persistent,
action->has_disabled, action->disabled,
&local_err);
if (!local_err) {
state->bitmap = block_dirty_bitmap_lookup(action->node, action->name,
NULL, &error_abort);
} else {
error_propagate(errp, local_err);
}
}
static void block_dirty_bitmap_add_abort(void *opaque)
{
BlockDirtyBitmapState *state = opaque;
if (state->bitmap) {
bdrv_release_dirty_bitmap(state->bitmap);
}
}
static void block_dirty_bitmap_restore(void *opaque);
static void block_dirty_bitmap_free_backup(void *opaque);
TransactionActionDrv block_dirty_bitmap_clear_drv = {
.abort = block_dirty_bitmap_restore,
.commit = block_dirty_bitmap_free_backup,
.clean = g_free,
};
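/*
 * Illustrative 'block-dirty-bitmap-clear' transaction action (example names):
 *
 *   { "type": "block-dirty-bitmap-clear",
 *     "data": { "node": "drive0", "name": "bitmap0" } }
 */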
static void block_dirty_bitmap_clear_action(BlockDirtyBitmap *action,
Transaction *tran, Error **errp)
{
BlockDirtyBitmapState *state = g_new0(BlockDirtyBitmapState, 1);
tran_add(tran, &block_dirty_bitmap_clear_drv, state);
state->bitmap = block_dirty_bitmap_lookup(action->node,
action->name,
&state->bs,
errp);
if (!state->bitmap) {
return;
}
if (bdrv_dirty_bitmap_check(state->bitmap, BDRV_BITMAP_DEFAULT, errp)) {
return;
}
bdrv_clear_dirty_bitmap(state->bitmap, &state->backup);
}
static void block_dirty_bitmap_restore(void *opaque)
{
BlockDirtyBitmapState *state = opaque;
if (state->backup) {
bdrv_restore_dirty_bitmap(state->bitmap, state->backup);
}
}
static void block_dirty_bitmap_free_backup(void *opaque)
{
BlockDirtyBitmapState *state = opaque;
hbitmap_free(state->backup);
}
static void block_dirty_bitmap_enable_abort(void *opaque);
TransactionActionDrv block_dirty_bitmap_enable_drv = {
.abort = block_dirty_bitmap_enable_abort,
.clean = g_free,
};
static void block_dirty_bitmap_enable_action(BlockDirtyBitmap *action,
Transaction *tran, Error **errp)
{
BlockDirtyBitmapState *state = g_new0(BlockDirtyBitmapState, 1);
tran_add(tran, &block_dirty_bitmap_enable_drv, state);
state->bitmap = block_dirty_bitmap_lookup(action->node,
action->name,
NULL,
errp);
if (!state->bitmap) {
return;
}
if (bdrv_dirty_bitmap_check(state->bitmap, BDRV_BITMAP_ALLOW_RO, errp)) {
return;
}
state->was_enabled = bdrv_dirty_bitmap_enabled(state->bitmap);
bdrv_enable_dirty_bitmap(state->bitmap);
}
static void block_dirty_bitmap_enable_abort(void *opaque)
{
BlockDirtyBitmapState *state = opaque;
if (!state->was_enabled) {
bdrv_disable_dirty_bitmap(state->bitmap);
}
}
static void block_dirty_bitmap_disable_abort(void *opaque);
TransactionActionDrv block_dirty_bitmap_disable_drv = {
.abort = block_dirty_bitmap_disable_abort,
.clean = g_free,
};
static void block_dirty_bitmap_disable_action(BlockDirtyBitmap *action,
Transaction *tran, Error **errp)
{
BlockDirtyBitmapState *state = g_new0(BlockDirtyBitmapState, 1);
tran_add(tran, &block_dirty_bitmap_disable_drv, state);
state->bitmap = block_dirty_bitmap_lookup(action->node,
action->name,
NULL,
errp);
if (!state->bitmap) {
return;
}
if (bdrv_dirty_bitmap_check(state->bitmap, BDRV_BITMAP_ALLOW_RO, errp)) {
return;
}
state->was_enabled = bdrv_dirty_bitmap_enabled(state->bitmap);
bdrv_disable_dirty_bitmap(state->bitmap);
}
static void block_dirty_bitmap_disable_abort(void *opaque)
{
BlockDirtyBitmapState *state = opaque;
if (state->was_enabled) {
bdrv_enable_dirty_bitmap(state->bitmap);
}
}
TransactionActionDrv block_dirty_bitmap_merge_drv = {
.commit = block_dirty_bitmap_free_backup,
.abort = block_dirty_bitmap_restore,
.clean = g_free,
};
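/*
 * Illustrative 'block-dirty-bitmap-merge' transaction action (example names):
 *
 *   { "type": "block-dirty-bitmap-merge",
 *     "data": { "node": "drive0", "target": "bitmap0",
 *               "bitmaps": [ "bitmap1", "bitmap2" ] } }
 */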
static void block_dirty_bitmap_merge_action(BlockDirtyBitmapMerge *action,
Transaction *tran, Error **errp)
{
BlockDirtyBitmapState *state = g_new0(BlockDirtyBitmapState, 1);
tran_add(tran, &block_dirty_bitmap_merge_drv, state);
state->bitmap = block_dirty_bitmap_merge(action->node, action->target,
action->bitmaps, &state->backup,
errp);
}
static void block_dirty_bitmap_remove_commit(void *opaque);
static void block_dirty_bitmap_remove_abort(void *opaque);
TransactionActionDrv block_dirty_bitmap_remove_drv = {
.commit = block_dirty_bitmap_remove_commit,
.abort = block_dirty_bitmap_remove_abort,
.clean = g_free,
};
static void block_dirty_bitmap_remove_action(BlockDirtyBitmap *action,
Transaction *tran, Error **errp)
{
BlockDirtyBitmapState *state = g_new0(BlockDirtyBitmapState, 1);
tran_add(tran, &block_dirty_bitmap_remove_drv, state);
state->bitmap = block_dirty_bitmap_remove(action->node, action->name,
false, &state->bs, errp);
if (state->bitmap) {
bdrv_dirty_bitmap_skip_store(state->bitmap, true);
bdrv_dirty_bitmap_set_busy(state->bitmap, true);
}
}
static void block_dirty_bitmap_remove_abort(void *opaque)
{
BlockDirtyBitmapState *state = opaque;
if (state->bitmap) {
bdrv_dirty_bitmap_skip_store(state->bitmap, false);
bdrv_dirty_bitmap_set_busy(state->bitmap, false);
}
}
static void block_dirty_bitmap_remove_commit(void *opaque)
{
BlockDirtyBitmapState *state = opaque;
bdrv_dirty_bitmap_set_busy(state->bitmap, false);
bdrv_release_dirty_bitmap(state->bitmap);
}
static void abort_commit(void *opaque);
TransactionActionDrv abort_drv = {
.commit = abort_commit,
};
static void abort_action(Transaction *tran, Error **errp)
{
tran_add(tran, &abort_drv, NULL);
error_setg(errp, "Transaction aborted using Abort action");
}
static void abort_commit(void *opaque)
{
g_assert_not_reached(); /* this action never succeeds */
}
static void transaction_action(TransactionAction *act, JobTxn *block_job_txn,
Transaction *tran, Error **errp)
{
switch (act->type) {
case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT:
case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC:
external_snapshot_action(act, tran, errp);
return;
case TRANSACTION_ACTION_KIND_DRIVE_BACKUP:
drive_backup_action(act->u.drive_backup.data,
block_job_txn, tran, errp);
return;
case TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP:
blockdev_backup_action(act->u.blockdev_backup.data,
block_job_txn, tran, errp);
return;
case TRANSACTION_ACTION_KIND_ABORT:
abort_action(tran, errp);
return;
case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC:
internal_snapshot_action(act->u.blockdev_snapshot_internal_sync.data,
tran, errp);
return;
case TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ADD:
block_dirty_bitmap_add_action(act->u.block_dirty_bitmap_add.data,
tran, errp);
return;
case TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_CLEAR:
block_dirty_bitmap_clear_action(act->u.block_dirty_bitmap_clear.data,
tran, errp);
return;
case TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ENABLE:
block_dirty_bitmap_enable_action(act->u.block_dirty_bitmap_enable.data,
tran, errp);
return;
case TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_DISABLE:
block_dirty_bitmap_disable_action(
act->u.block_dirty_bitmap_disable.data, tran, errp);
return;
case TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_MERGE:
block_dirty_bitmap_merge_action(act->u.block_dirty_bitmap_merge.data,
tran, errp);
return;
case TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_REMOVE:
block_dirty_bitmap_remove_action(act->u.block_dirty_bitmap_remove.data,
tran, errp);
return;
/*
* Where are transactions for MIRROR, COMMIT and STREAM?
* Although these blockjobs use transaction callbacks like the backup job,
* these jobs do not necessarily adhere to transaction semantics.
* These jobs may not fully undo all of their actions on abort, nor do they
* necessarily work in transactions with more than one job in them.
*/
case TRANSACTION_ACTION_KIND__MAX:
default:
g_assert_not_reached();
};
}
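/*
 * Illustrative 'transaction' request combining two backup actions with
 * grouped completion, so that both jobs are cancelled if either one fails
 * (node names are examples only):
 *
 *   { "execute": "transaction",
 *     "arguments": {
 *       "actions": [
 *         { "type": "blockdev-backup",
 *           "data": { "device": "node0", "target": "tgt0", "sync": "full" } },
 *         { "type": "blockdev-backup",
 *           "data": { "device": "node1", "target": "tgt1", "sync": "full" } } ],
 *       "properties": { "completion-mode": "grouped" } } }
 */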
/*
* 'Atomic' group operations. The operations are performed as a set, and if
* any fail then we roll back all operations in the group.
*
* Always run under BQL.
*/
void qmp_transaction(TransactionActionList *actions,
struct TransactionProperties *properties,
Error **errp)
{
TransactionActionList *act;
JobTxn *block_job_txn = NULL;
Error *local_err = NULL;
Transaction *tran;
ActionCompletionMode comp_mode =
properties ? properties->completion_mode :
ACTION_COMPLETION_MODE_INDIVIDUAL;
GLOBAL_STATE_CODE();
/* Does this transaction get canceled as a group on failure?
* If not, we don't really need to make a JobTxn.
*/
if (comp_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
for (act = actions; act; act = act->next) {
TransactionActionKind type = act->value->type;
if (type != TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP &&
type != TRANSACTION_ACTION_KIND_DRIVE_BACKUP)
{
error_setg(errp,
"Action '%s' does not support transaction property "
"completion-mode = %s",
TransactionActionKind_str(type),
ActionCompletionMode_str(comp_mode));
return;
}
}
block_job_txn = job_txn_new();
}
/* drain all i/o before any operations */
bdrv_drain_all();
tran = tran_new();
/* We don't do anything in this loop that commits us to the operations */
for (act = actions; act; act = act->next) {
transaction_action(act->value, block_job_txn, tran, &local_err);
if (local_err) {
error_propagate(errp, local_err);
goto delete_and_fail;
}
}
tran_commit(tran);
/* success */
goto exit;
delete_and_fail:
/* failure, and it is all-or-none; roll back all operations */
tran_abort(tran);
exit:
job_txn_unref(block_job_txn);
}
BlockDirtyBitmapSha256 *qmp_x_debug_block_dirty_bitmap_sha256(const char *node,
const char *name,
Error **errp)
{
BdrvDirtyBitmap *bitmap;
BlockDriverState *bs;
BlockDirtyBitmapSha256 *ret = NULL;
char *sha256;
bitmap = block_dirty_bitmap_lookup(node, name, &bs, errp);
if (!bitmap || !bs) {
return NULL;
}
sha256 = bdrv_dirty_bitmap_sha256(bitmap, errp);
if (sha256 == NULL) {
return NULL;
}
ret = g_new(BlockDirtyBitmapSha256, 1);
ret->sha256 = sha256;
return ret;
}
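/*
 * Illustrative 'block_resize' request (example node name; size in bytes):
 *
 *   { "execute": "block_resize",
 *     "arguments": { "node-name": "node0", "size": 1073741824 } }
 */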
void coroutine_fn qmp_block_resize(const char *device, const char *node_name,
int64_t size, Error **errp)
{
Error *local_err = NULL;
BlockBackend *blk;
BlockDriverState *bs;
AioContext *old_ctx;
bs = bdrv_lookup_bs(device, node_name, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
if (size < 0) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "size", "a >0 size");
return;
}
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
error_setg(errp, QERR_DEVICE_IN_USE, device);
return;
}
blk = blk_co_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL, errp);
if (!blk) {
return;
}
bdrv_co_lock(bs);
bdrv_drained_begin(bs);
bdrv_co_unlock(bs);
old_ctx = bdrv_co_enter(bs);
blk_co_truncate(blk, size, false, PREALLOC_MODE_OFF, 0, errp);
bdrv_co_leave(bs, old_ctx);
bdrv_co_lock(bs);
bdrv_drained_end(bs);
blk_co_unref(blk);
bdrv_co_unlock(bs);
}
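/*
 * Illustrative 'block-stream' request (job and node names are examples):
 *
 *   { "execute": "block-stream",
 *     "arguments": { "job-id": "stream0", "device": "node0",
 *                    "base-node": "base0" } }
 */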
void qmp_block_stream(const char *job_id, const char *device,
const char *base,
const char *base_node,
const char *backing_file,
const char *bottom,
bool has_speed, int64_t speed,
bool has_on_error, BlockdevOnError on_error,
const char *filter_node_name,
bool has_auto_finalize, bool auto_finalize,
bool has_auto_dismiss, bool auto_dismiss,
Error **errp)
{
BlockDriverState *bs, *iter, *iter_end;
BlockDriverState *base_bs = NULL;
BlockDriverState *bottom_bs = NULL;
AioContext *aio_context;
Error *local_err = NULL;
int job_flags = JOB_DEFAULT;
if (base && base_node) {
error_setg(errp, "'base' and 'base-node' cannot be specified "
"at the same time");
return;
}
if (base && bottom) {
error_setg(errp, "'base' and 'bottom' cannot be specified "
"at the same time");
return;
}
if (bottom && base_node) {
error_setg(errp, "'bottom' and 'base-node' cannot be specified "
"at the same time");
return;
}
if (!has_on_error) {
on_error = BLOCKDEV_ON_ERROR_REPORT;
}
bs = bdrv_lookup_bs(device, device, errp);
if (!bs) {
return;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
if (base) {
base_bs = bdrv_find_backing_image(bs, base);
if (base_bs == NULL) {
error_setg(errp, "Can't find '%s' in the backing chain", base);
goto out;
}
assert(bdrv_get_aio_context(base_bs) == aio_context);
}
if (base_node) {
base_bs = bdrv_lookup_bs(NULL, base_node, errp);
if (!base_bs) {
goto out;
}
if (bs == base_bs || !bdrv_chain_contains(bs, base_bs)) {
error_setg(errp, "Node '%s' is not a backing image of '%s'",
base_node, device);
goto out;
}
assert(bdrv_get_aio_context(base_bs) == aio_context);
bdrv_refresh_filename(base_bs);
}
if (bottom) {
bottom_bs = bdrv_lookup_bs(NULL, bottom, errp);
if (!bottom_bs) {
goto out;
}
if (!bottom_bs->drv) {
error_setg(errp, "Node '%s' is not open", bottom);
goto out;
}
if (bottom_bs->drv->is_filter) {
error_setg(errp, "Node '%s' is a filter, use a non-filter node "
"as 'bottom'", bottom);
goto out;
}
if (!bdrv_chain_contains(bs, bottom_bs)) {
error_setg(errp, "Node '%s' is not in a chain starting from '%s'",
bottom, device);
goto out;
}
assert(bdrv_get_aio_context(bottom_bs) == aio_context);
}
/*
* Check for op blockers in the whole chain between bs and base (or bottom)
*/
iter_end = bottom ? bdrv_filter_or_cow_bs(bottom_bs) : base_bs;
for (iter = bs; iter && iter != iter_end;
iter = bdrv_filter_or_cow_bs(iter))
{
if (bdrv_op_is_blocked(iter, BLOCK_OP_TYPE_STREAM, errp)) {
goto out;
}
}
/* if we are streaming the entire chain, the result will have no backing
* file, and specifying one is therefore an error */
if (!base_bs && backing_file) {
error_setg(errp, "backing file specified, but streaming the "
"entire chain");
goto out;
}
if (has_auto_finalize && !auto_finalize) {
job_flags |= JOB_MANUAL_FINALIZE;
}
if (has_auto_dismiss && !auto_dismiss) {
job_flags |= JOB_MANUAL_DISMISS;
}
stream_start(job_id, bs, base_bs, backing_file,
bottom_bs, job_flags, has_speed ? speed : 0, on_error,
filter_node_name, &local_err);
if (local_err) {
error_propagate(errp, local_err);
goto out;
}
trace_qmp_block_stream(bs);
out:
aio_context_release(aio_context);
}
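/*
 * Illustrative 'block-commit' request (job and node names are examples):
 *
 *   { "execute": "block-commit",
 *     "arguments": { "job-id": "commit0", "device": "node0",
 *                    "top-node": "mid0", "base-node": "base0" } }
 */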
void qmp_block_commit(const char *job_id, const char *device,
const char *base_node,
const char *base,
const char *top_node,
const char *top,
const char *backing_file,
bool has_speed, int64_t speed,
bool has_on_error, BlockdevOnError on_error,
const char *filter_node_name,
bool has_auto_finalize, bool auto_finalize,
bool has_auto_dismiss, bool auto_dismiss,
Error **errp)
{
BlockDriverState *bs;
BlockDriverState *iter;
BlockDriverState *base_bs, *top_bs;
AioContext *aio_context;
Error *local_err = NULL;
int job_flags = JOB_DEFAULT;
uint64_t top_perm, top_shared;
/* TODO We'll eventually have to take a writer lock in this function */
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (!has_speed) {
speed = 0;
}
if (!has_on_error) {
on_error = BLOCKDEV_ON_ERROR_REPORT;
}
if (has_auto_finalize && !auto_finalize) {
job_flags |= JOB_MANUAL_FINALIZE;
}
if (has_auto_dismiss && !auto_dismiss) {
job_flags |= JOB_MANUAL_DISMISS;
}
/* Important Note:
* libvirt relies on the DeviceNotFound error class in order to probe for
* live commit feature versions; for this to work, we must make sure to
* perform the device lookup before any generic errors that may occur in a
* scenario in which all optional arguments are omitted. */
bs = qmp_get_root_bs(device, &local_err);
if (!bs) {
bs = bdrv_lookup_bs(device, device, NULL);
if (!bs) {
error_free(local_err);
error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
"Device '%s' not found", device);
} else {
error_propagate(errp, local_err);
}
return;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, errp)) {
goto out;
}
/* default top_bs is the active layer */
top_bs = bs;
if (top_node && top) {
error_setg(errp, "'top-node' and 'top' are mutually exclusive");
goto out;
} else if (top_node) {
top_bs = bdrv_lookup_bs(NULL, top_node, errp);
if (top_bs == NULL) {
goto out;
}
if (!bdrv_chain_contains(bs, top_bs)) {
error_setg(errp, "'%s' is not in this backing file chain",
top_node);
goto out;
}
} else if (top) {
/* This strcmp() is just a shortcut, there is no need to
* refresh @bs's filename. If it mismatches,
* bdrv_find_backing_image() will do the refresh and may still
* return @bs. */
if (strcmp(bs->filename, top) != 0) {
top_bs = bdrv_find_backing_image(bs, top);
}
}
if (top_bs == NULL) {
error_setg(errp, "Top image file %s not found", top ? top : "NULL");
goto out;
}
assert(bdrv_get_aio_context(top_bs) == aio_context);
if (base_node && base) {
error_setg(errp, "'base-node' and 'base' are mutually exclusive");
goto out;
} else if (base_node) {
base_bs = bdrv_lookup_bs(NULL, base_node, errp);
if (base_bs == NULL) {
goto out;
}
if (!bdrv_chain_contains(top_bs, base_bs)) {
error_setg(errp, "'%s' is not in this backing file chain",
base_node);
goto out;
}
} else if (base) {
base_bs = bdrv_find_backing_image(top_bs, base);
if (base_bs == NULL) {
error_setg(errp, "Can't find '%s' in the backing chain", base);
goto out;
}
} else {
base_bs = bdrv_find_base(top_bs);
if (base_bs == NULL) {
error_setg(errp, "There is no backimg image");
goto out;
}
}
assert(bdrv_get_aio_context(base_bs) == aio_context);
for (iter = top_bs; iter != bdrv_filter_or_cow_bs(base_bs);
iter = bdrv_filter_or_cow_bs(iter))
{
if (bdrv_op_is_blocked(iter, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) {
goto out;
}
}
/* Do not allow attempts to commit an image into itself */
if (top_bs == base_bs) {
error_setg(errp, "cannot commit an image into itself");
goto out;
}
/*
* Active commit is required if and only if someone has taken a
* WRITE permission on the top node. Historically, we have always
* used active commit for top nodes, so continue that practice
* lest we possibly break clients that rely on this behavior, e.g.
* to later attach this node to a writing parent.
* (Active commit is never really wrong.)
*/
bdrv_get_cumulative_perm(top_bs, &top_perm, &top_shared);
if (top_perm & BLK_PERM_WRITE ||
bdrv_skip_filters(top_bs) == bdrv_skip_filters(bs))
{
if (backing_file) {
if (bdrv_skip_filters(top_bs) == bdrv_skip_filters(bs)) {
error_setg(errp, "'backing-file' specified,"
" but 'top' is the active layer");
} else {
error_setg(errp, "'backing-file' specified, but 'top' has a "
"writer on it");
}
goto out;
}
if (!job_id) {
/*
* Emulate here what block_job_create() does, because it
* is possible that @bs != @top_bs (the block job should
* be named after @bs, even if @top_bs is the actual
* source)
*/
job_id = bdrv_get_device_name(bs);
}
commit_active_start(job_id, top_bs, base_bs, job_flags, speed, on_error,
filter_node_name, NULL, NULL, false, &local_err);
} else {
BlockDriverState *overlay_bs = bdrv_find_overlay(bs, top_bs);
if (bdrv_op_is_blocked(overlay_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) {
goto out;
}
commit_start(job_id, bs, base_bs, top_bs, job_flags,
speed, on_error, backing_file,
filter_node_name, &local_err);
}
if (local_err != NULL) {
error_propagate(errp, local_err);
goto out;
}
out:
aio_context_release(aio_context);
}
/* Common QMP interface for drive-backup and blockdev-backup */
static BlockJob *do_backup_common(BackupCommon *backup,
BlockDriverState *bs,
BlockDriverState *target_bs,
AioContext *aio_context,
JobTxn *txn, Error **errp)
{
BlockJob *job = NULL;
BdrvDirtyBitmap *bmap = NULL;
BackupPerf perf = { .max_workers = 64 };
int job_flags = JOB_DEFAULT;
if (!backup->has_speed) {
backup->speed = 0;
}
if (!backup->has_on_source_error) {
backup->on_source_error = BLOCKDEV_ON_ERROR_REPORT;
}
if (!backup->has_on_target_error) {
backup->on_target_error = BLOCKDEV_ON_ERROR_REPORT;
}
if (!backup->has_auto_finalize) {
backup->auto_finalize = true;
}
if (!backup->has_auto_dismiss) {
backup->auto_dismiss = true;
}
if (!backup->has_compress) {
backup->compress = false;
}
if (backup->x_perf) {
if (backup->x_perf->has_use_copy_range) {
perf.use_copy_range = backup->x_perf->use_copy_range;
}
if (backup->x_perf->has_max_workers) {
perf.max_workers = backup->x_perf->max_workers;
}
if (backup->x_perf->has_max_chunk) {
perf.max_chunk = backup->x_perf->max_chunk;
}
}
if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) ||
(backup->sync == MIRROR_SYNC_MODE_INCREMENTAL)) {
/* done before desugaring 'incremental' to print the right message */
if (!backup->bitmap) {
error_setg(errp, "must provide a valid bitmap name for "
"'%s' sync mode", MirrorSyncMode_str(backup->sync));
return NULL;
}
}
if (backup->sync == MIRROR_SYNC_MODE_INCREMENTAL) {
if (backup->has_bitmap_mode &&
backup->bitmap_mode != BITMAP_SYNC_MODE_ON_SUCCESS) {
error_setg(errp, "Bitmap sync mode must be '%s' "
"when using sync mode '%s'",
BitmapSyncMode_str(BITMAP_SYNC_MODE_ON_SUCCESS),
MirrorSyncMode_str(backup->sync));
return NULL;
}
backup->has_bitmap_mode = true;
backup->sync = MIRROR_SYNC_MODE_BITMAP;
backup->bitmap_mode = BITMAP_SYNC_MODE_ON_SUCCESS;
}
if (backup->bitmap) {
bmap = bdrv_find_dirty_bitmap(bs, backup->bitmap);
if (!bmap) {
error_setg(errp, "Bitmap '%s' could not be found", backup->bitmap);
return NULL;
}
if (!backup->has_bitmap_mode) {
error_setg(errp, "Bitmap sync mode must be given "
"when providing a bitmap");
return NULL;
}
if (bdrv_dirty_bitmap_check(bmap, BDRV_BITMAP_ALLOW_RO, errp)) {
return NULL;
}
/* This does not produce a useful bitmap artifact: */
if (backup->sync == MIRROR_SYNC_MODE_NONE) {
error_setg(errp, "sync mode '%s' does not produce meaningful bitmap"
" outputs", MirrorSyncMode_str(backup->sync));
return NULL;
}
/* If the bitmap isn't used for input or output, this is useless: */
if (backup->bitmap_mode == BITMAP_SYNC_MODE_NEVER &&
backup->sync != MIRROR_SYNC_MODE_BITMAP) {
error_setg(errp, "Bitmap sync mode '%s' has no meaningful effect"
" when combined with sync mode '%s'",
BitmapSyncMode_str(backup->bitmap_mode),
MirrorSyncMode_str(backup->sync));
return NULL;
}
}
if (!backup->bitmap && backup->has_bitmap_mode) {
error_setg(errp, "Cannot specify bitmap sync mode without a bitmap");
return NULL;
}
if (!backup->auto_finalize) {
job_flags |= JOB_MANUAL_FINALIZE;
}
if (!backup->auto_dismiss) {
job_flags |= JOB_MANUAL_DISMISS;
}
job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
backup->sync, bmap, backup->bitmap_mode,
backup->compress,
backup->filter_node_name,
&perf,
backup->on_source_error,
backup->on_target_error,
job_flags, NULL, NULL, txn, errp);
return job;
}
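
/*
 * QMP handler for drive-backup.  The request is wrapped in a single-element
 * transaction so it goes through the same prepare/commit/abort machinery as
 * the 'transaction' command.
 */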
void qmp_drive_backup(DriveBackup *backup, Error **errp)
{
TransactionAction action = {
.type = TRANSACTION_ACTION_KIND_DRIVE_BACKUP,
.u.drive_backup.data = backup,
};
blockdev_do_action(&action, errp);
}
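
/* QMP handler for query-named-block-nodes.  @flat defaults to false when
 * not given. */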
BlockDeviceInfoList *qmp_query_named_block_nodes(bool has_flat,
bool flat,
Error **errp)
{
bool return_flat = has_flat && flat;
return bdrv_named_nodes_list(return_flat, errp);
}
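
/* QMP handler for x-debug-query-block-graph: returns a dump of the current
 * block graph (nodes and edges) for debugging. */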
XDbgBlockGraph *qmp_x_debug_query_block_graph(Error **errp)
{
return bdrv_get_xdbg_block_graph(errp);
}
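
/*
 * QMP handler for blockdev-backup.  Like drive-backup, the request is
 * funneled through the transaction machinery as a single-element transaction.
 */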
void qmp_blockdev_backup(BlockdevBackup *backup, Error **errp)
{
TransactionAction action = {
.type = TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP,
.u.blockdev_backup.data = backup,
};
blockdev_do_action(&action, errp);
}
/*
 * Parameter check and block job starting for drive mirroring.
 * Caller should hold @device and @target's aio context (must be the same).
 */
static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
BlockDriverState *target,
const char *replaces,
enum MirrorSyncMode sync,
BlockMirrorBackingMode backing_mode,
bool zero_target,
bool has_speed, int64_t speed,
bool has_granularity, uint32_t granularity,
bool has_buf_size, int64_t buf_size,
bool has_on_source_error,
BlockdevOnError on_source_error,
bool has_on_target_error,
BlockdevOnError on_target_error,
bool has_unmap, bool unmap,
const char *filter_node_name,
bool has_copy_mode, MirrorCopyMode copy_mode,
bool has_auto_finalize, bool auto_finalize,
bool has_auto_dismiss, bool auto_dismiss,
Error **errp)
{
BlockDriverState *unfiltered_bs;
int job_flags = JOB_DEFAULT;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (!has_speed) {
speed = 0;
}
if (!has_on_source_error) {
on_source_error = BLOCKDEV_ON_ERROR_REPORT;
}
if (!has_on_target_error) {
on_target_error = BLOCKDEV_ON_ERROR_REPORT;
}
if (!has_granularity) {
granularity = 0;
}
if (!has_buf_size) {
buf_size = 0;
}
if (!has_unmap) {
unmap = true;
}
if (!has_copy_mode) {
copy_mode = MIRROR_COPY_MODE_BACKGROUND;
}
if (has_auto_finalize && !auto_finalize) {
job_flags |= JOB_MANUAL_FINALIZE;
}
if (has_auto_dismiss && !auto_dismiss) {
job_flags |= JOB_MANUAL_DISMISS;
}
if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
"a value in range [512B, 64MB]");
return;
}
if (granularity & (granularity - 1)) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
"a power of 2");
return;
}
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) {
return;
}
if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_MIRROR_TARGET, errp)) {
return;
}
if (!bdrv_backing_chain_next(bs) && sync == MIRROR_SYNC_MODE_TOP) {
sync = MIRROR_SYNC_MODE_FULL;
}
if (!replaces) {
/* We want to mirror from @bs, but keep implicit filters on top */
unfiltered_bs = bdrv_skip_implicit_filters(bs);
if (unfiltered_bs != bs) {
replaces = unfiltered_bs->node_name;
}
}
if (replaces) {
BlockDriverState *to_replace_bs;
AioContext *replace_aio_context;
int64_t bs_size, replace_size;
bs_size = bdrv_getlength(bs);
if (bs_size < 0) {
error_setg_errno(errp, -bs_size, "Failed to query device's size");
return;
}
to_replace_bs = check_to_replace_node(bs, replaces, errp);
if (!to_replace_bs) {
return;
}
replace_aio_context = bdrv_get_aio_context(to_replace_bs);
aio_context_acquire(replace_aio_context);
replace_size = bdrv_getlength(to_replace_bs);
aio_context_release(replace_aio_context);
if (replace_size < 0) {
error_setg_errno(errp, -replace_size,
"Failed to query the replacement node's size");
return;
}
if (bs_size != replace_size) {
error_setg(errp, "cannot replace image with a mirror image of "
"different size");
return;
}
}
    /* Pass the node name to replace to mirror_start() since it's loose
     * coupling, which allows us to check whether the node still exists at
     * mirror completion.
     */
mirror_start(job_id, bs, target,
replaces, job_flags,
speed, granularity, buf_size, sync, backing_mode, zero_target,
on_source_error, on_target_error, unmap, filter_node_name,
copy_mode, errp);
}
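
/*
 * QMP handler for drive-mirror.  Creates the target image (unless
 * mode=existing), opens it, moves it into the source's AioContext and
 * delegates the remaining checks and job creation to
 * blockdev_mirror_common().
 */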
void qmp_drive_mirror(DriveMirror *arg, Error **errp)
{
BlockDriverState *bs;
BlockDriverState *target_backing_bs, *target_bs;
AioContext *aio_context;
AioContext *old_context;
BlockMirrorBackingMode backing_mode;
Error *local_err = NULL;
QDict *options = NULL;
int flags;
int64_t size;
const char *format = arg->format;
bool zero_target;
int ret;
bs = qmp_get_root_bs(arg->device, errp);
if (!bs) {
return;
}
/* Early check to avoid creating target */
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) {
return;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
if (!arg->has_mode) {
arg->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
}
if (!arg->format) {
format = (arg->mode == NEW_IMAGE_MODE_EXISTING
? NULL : bs->drv->format_name);
}
flags = bs->open_flags | BDRV_O_RDWR;
target_backing_bs = bdrv_cow_bs(bdrv_skip_filters(bs));
if (!target_backing_bs && arg->sync == MIRROR_SYNC_MODE_TOP) {
arg->sync = MIRROR_SYNC_MODE_FULL;
}
if (arg->sync == MIRROR_SYNC_MODE_NONE) {
target_backing_bs = bs;
}
size = bdrv_getlength(bs);
if (size < 0) {
error_setg_errno(errp, -size, "bdrv_getlength failed");
goto out;
}
if (arg->replaces) {
if (!arg->node_name) {
error_setg(errp, "a node-name must be provided when replacing a"
" named node of the graph");
goto out;
}
}
if (arg->mode == NEW_IMAGE_MODE_ABSOLUTE_PATHS) {
backing_mode = MIRROR_SOURCE_BACKING_CHAIN;
} else {
backing_mode = MIRROR_OPEN_BACKING_CHAIN;
}
/* Don't open backing image in create() */
flags |= BDRV_O_NO_BACKING;
if ((arg->sync == MIRROR_SYNC_MODE_FULL || !target_backing_bs)
&& arg->mode != NEW_IMAGE_MODE_EXISTING)
{
/* create new image w/o backing file */
assert(format);
bdrv_img_create(arg->target, format,
NULL, NULL, NULL, size, flags, false, &local_err);
} else {
/* Implicit filters should not appear in the filename */
BlockDriverState *explicit_backing =
bdrv_skip_implicit_filters(target_backing_bs);
switch (arg->mode) {
case NEW_IMAGE_MODE_EXISTING:
break;
case NEW_IMAGE_MODE_ABSOLUTE_PATHS:
/* create new image with backing file */
bdrv_refresh_filename(explicit_backing);
bdrv_img_create(arg->target, format,
explicit_backing->filename,
explicit_backing->drv->format_name,
NULL, size, flags, false, &local_err);
break;
default:
abort();
}
}
if (local_err) {
error_propagate(errp, local_err);
goto out;
}
options = qdict_new();
if (arg->node_name) {
qdict_put_str(options, "node-name", arg->node_name);
}
if (format) {
qdict_put_str(options, "driver", format);
}
aio_context_release(aio_context);
/* Mirroring takes care of copy-on-write using the source's backing
* file.
*/
aio_context_acquire(qemu_get_aio_context());
target_bs = bdrv_open(arg->target, NULL, options, flags, errp);
aio_context_release(qemu_get_aio_context());
if (!target_bs) {
return;
}
zero_target = (arg->sync == MIRROR_SYNC_MODE_FULL &&
(arg->mode == NEW_IMAGE_MODE_EXISTING ||
!bdrv_has_zero_init(target_bs)));
/* Honor bdrv_try_change_aio_context() context acquisition requirements. */
old_context = bdrv_get_aio_context(target_bs);
aio_context_acquire(old_context);
ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
if (ret < 0) {
bdrv_unref(target_bs);
aio_context_release(old_context);
return;
}
aio_context_release(old_context);
aio_context_acquire(aio_context);
blockdev_mirror_common(arg->job_id, bs, target_bs,
arg->replaces, arg->sync,
backing_mode, zero_target,
arg->has_speed, arg->speed,
arg->has_granularity, arg->granularity,
arg->has_buf_size, arg->buf_size,
arg->has_on_source_error, arg->on_source_error,
arg->has_on_target_error, arg->on_target_error,
arg->has_unmap, arg->unmap,
NULL,
arg->has_copy_mode, arg->copy_mode,
arg->has_auto_finalize, arg->auto_finalize,
arg->has_auto_dismiss, arg->auto_dismiss,
errp);
bdrv_unref(target_bs);
out:
aio_context_release(aio_context);
}
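
/*
 * QMP handler for blockdev-mirror.  The target must already exist as a
 * named node; it is only moved into the source's AioContext before
 * delegating to blockdev_mirror_common().
 */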
void qmp_blockdev_mirror(const char *job_id,
const char *device, const char *target,
const char *replaces,
MirrorSyncMode sync,
bool has_speed, int64_t speed,
bool has_granularity, uint32_t granularity,
bool has_buf_size, int64_t buf_size,
bool has_on_source_error,
BlockdevOnError on_source_error,
bool has_on_target_error,
BlockdevOnError on_target_error,
const char *filter_node_name,
bool has_copy_mode, MirrorCopyMode copy_mode,
bool has_auto_finalize, bool auto_finalize,
bool has_auto_dismiss, bool auto_dismiss,
Error **errp)
{
BlockDriverState *bs;
BlockDriverState *target_bs;
AioContext *aio_context;
AioContext *old_context;
BlockMirrorBackingMode backing_mode = MIRROR_LEAVE_BACKING_CHAIN;
bool zero_target;
int ret;
bs = qmp_get_root_bs(device, errp);
if (!bs) {
return;
}
target_bs = bdrv_lookup_bs(target, target, errp);
if (!target_bs) {
return;
}
zero_target = (sync == MIRROR_SYNC_MODE_FULL);
/* Honor bdrv_try_change_aio_context() context acquisition requirements. */
old_context = bdrv_get_aio_context(target_bs);
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(old_context);
ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
aio_context_release(old_context);
aio_context_acquire(aio_context);
if (ret < 0) {
goto out;
}
blockdev_mirror_common(job_id, bs, target_bs,
replaces, sync, backing_mode,
zero_target, has_speed, speed,
has_granularity, granularity,
has_buf_size, buf_size,
has_on_source_error, on_source_error,
has_on_target_error, on_target_error,
true, true, filter_node_name,
has_copy_mode, copy_mode,
has_auto_finalize, auto_finalize,
has_auto_dismiss, auto_dismiss,
errp);
out:
aio_context_release(aio_context);
}
/*
* Get a block job using its ID. Called with job_mutex held.
*/
static BlockJob *find_block_job_locked(const char *id, Error **errp)
{
BlockJob *job;
assert(id != NULL);
job = block_job_get_locked(id);
if (!job) {
error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
"Block job '%s' not found", id);
return NULL;
}
return job;
}
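
/* QMP handler for block-job-set-speed: looks the job up and updates its rate
 * limit under the job mutex. */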
void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
{
BlockJob *job;
JOB_LOCK_GUARD();
job = find_block_job_locked(device, errp);
if (!job) {
return;
}
block_job_set_speed_locked(job, speed, errp);
}
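
/*
 * QMP handler for block-job-cancel.  A user-paused job is only cancelled
 * when @force is true, so that a paused job is not torn down by accident.
 */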
void qmp_block_job_cancel(const char *device,
bool has_force, bool force, Error **errp)
{
BlockJob *job;
JOB_LOCK_GUARD();
job = find_block_job_locked(device, errp);
if (!job) {
return;
}
if (!has_force) {
force = false;
}
if (job_user_paused_locked(&job->job) && !force) {
error_setg(errp, "The block job for device '%s' is currently paused",
device);
return;
}
trace_qmp_block_job_cancel(job);
job_user_cancel_locked(&job->job, force, errp);
}
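
/* QMP handler for block-job-pause: requests a user pause of the job. */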
void qmp_block_job_pause(const char *device, Error **errp)
{
BlockJob *job;
JOB_LOCK_GUARD();
job = find_block_job_locked(device, errp);
if (!job) {
return;
}
trace_qmp_block_job_pause(job);
job_user_pause_locked(&job->job, errp);
}
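
/* QMP handler for block-job-resume: lifts a user pause. */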
void qmp_block_job_resume(const char *device, Error **errp)
{
BlockJob *job;
JOB_LOCK_GUARD();
job = find_block_job_locked(device, errp);
if (!job) {
return;
}
trace_qmp_block_job_resume(job);
job_user_resume_locked(&job->job, errp);
}
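
/* QMP handler for block-job-complete: asks a ready job to finish its work
 * (e.g. pivot a mirror job to its target). */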
void qmp_block_job_complete(const char *device, Error **errp)
{
BlockJob *job;
JOB_LOCK_GUARD();
job = find_block_job_locked(device, errp);
if (!job) {
return;
}
trace_qmp_block_job_complete(job);
job_complete_locked(&job->job, errp);
}
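
/*
 * QMP handler for block-job-finalize.  A reference is taken across the call
 * because finalizing can lead to the job being dismissed and freed.
 */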
void qmp_block_job_finalize(const char *id, Error **errp)
{
BlockJob *job;
JOB_LOCK_GUARD();
job = find_block_job_locked(id, errp);
if (!job) {
return;
}
trace_qmp_block_job_finalize(job);
job_ref_locked(&job->job);
job_finalize_locked(&job->job, errp);
job_unref_locked(&job->job);
}
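
/* QMP handler for block-job-dismiss: drops a concluded block job from the
 * job list once the user is done inspecting it. */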
void qmp_block_job_dismiss(const char *id, Error **errp)
{
BlockJob *bjob;
Job *job;
JOB_LOCK_GUARD();
bjob = find_block_job_locked(id, errp);
if (!bjob) {
return;
}
trace_qmp_block_job_dismiss(bjob);
job = &bjob->job;
job_dismiss_locked(&job, errp);
}
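
/*
 * QMP handler for change-backing-file.  Rewrites the backing file string
 * recorded in @image_node_name's image header; the image is temporarily
 * reopened read-write if necessary.
 */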
void qmp_change_backing_file(const char *device,
const char *image_node_name,
const char *backing_file,
Error **errp)
{
BlockDriverState *bs = NULL;
AioContext *aio_context;
BlockDriverState *image_bs = NULL;
Error *local_err = NULL;
bool ro;
int ret;
bs = qmp_get_root_bs(device, errp);
if (!bs) {
return;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
image_bs = bdrv_lookup_bs(NULL, image_node_name, &local_err);
if (local_err) {
error_propagate(errp, local_err);
goto out;
}
if (!image_bs) {
error_setg(errp, "image file not found");
goto out;
}
if (bdrv_find_base(image_bs) == image_bs) {
error_setg(errp, "not allowing backing file change on an image "
"without a backing file");
goto out;
}
/* even though we are not necessarily operating on bs, we need it to
* determine if block ops are currently prohibited on the chain */
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_CHANGE, errp)) {
goto out;
}
/* final sanity check */
if (!bdrv_chain_contains(bs, image_bs)) {
error_setg(errp, "'%s' and image file are not in the same chain",
device);
goto out;
}
/* if not r/w, reopen to make r/w */
ro = bdrv_is_read_only(image_bs);
if (ro) {
if (bdrv_reopen_set_read_only(image_bs, false, errp) != 0) {
goto out;
}
}
ret = bdrv_change_backing_file(image_bs, backing_file,
image_bs->drv ? image_bs->drv->format_name : "",
false);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not change backing file to '%s'",
backing_file);
/* don't exit here, so we can try to restore open flags if
* appropriate */
}
if (ro) {
bdrv_reopen_set_read_only(image_bs, true, errp);
}
out:
aio_context_release(aio_context);
}
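
/*
 * QMP handler for blockdev-add.  The QAPI options object is converted back
 * into a flattened QDict and handed to bds_tree_init(); the resulting node
 * is remembered as monitor-owned so that blockdev-del can remove it later.
 */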
void qmp_blockdev_add(BlockdevOptions *options, Error **errp)
{
BlockDriverState *bs;
QObject *obj;
Visitor *v = qobject_output_visitor_new(&obj);
QDict *qdict;
visit_type_BlockdevOptions(v, NULL, &options, &error_abort);
visit_complete(v, &obj);
qdict = qobject_to(QDict, obj);
qdict_flatten(qdict);
if (!qdict_get_try_str(qdict, "node-name")) {
error_setg(errp, "'node-name' must be specified for the root node");
goto fail;
}
bs = bds_tree_init(qdict, errp);
if (!bs) {
goto fail;
}
bdrv_set_monitor_owned(bs);
fail:
visit_free(v);
}
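
/*
 * QMP handler for blockdev-reopen.  All requested nodes are collected into
 * one reopen queue so that the whole set is prepared and committed together
 * by bdrv_reopen_multiple().
 */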
void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp)
{
BlockReopenQueue *queue = NULL;
/* Add each one of the BDS that we want to reopen to the queue */
for (; reopen_list != NULL; reopen_list = reopen_list->next) {
BlockdevOptions *options = reopen_list->value;
BlockDriverState *bs;
AioContext *ctx;
QObject *obj;
Visitor *v;
QDict *qdict;
/* Check for the selected node name */
if (!options->node_name) {
error_setg(errp, "node-name not specified");
goto fail;
}
bs = bdrv_find_node(options->node_name);
if (!bs) {
error_setg(errp, "Failed to find node with node-name='%s'",
options->node_name);
goto fail;
}
/* Put all options in a QDict and flatten it */
v = qobject_output_visitor_new(&obj);
visit_type_BlockdevOptions(v, NULL, &options, &error_abort);
visit_complete(v, &obj);
visit_free(v);
qdict = qobject_to(QDict, obj);
qdict_flatten(qdict);
ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
queue = bdrv_reopen_queue(queue, bs, qdict, false);
aio_context_release(ctx);
}
/* Perform the reopen operation */
bdrv_reopen_multiple(queue, errp);
queue = NULL;
fail:
bdrv_reopen_queue_free(queue);
}
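
/*
 * QMP handler for blockdev-del.  Only nodes that were added by the monitor
 * and are not referenced by anything else can be deleted.
 */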
void qmp_blockdev_del(const char *node_name, Error **errp)
{
AioContext *aio_context;
BlockDriverState *bs;
GLOBAL_STATE_CODE();
bs = bdrv_find_node(node_name);
if (!bs) {
error_setg(errp, "Failed to find node with node-name='%s'", node_name);
return;
}
if (bdrv_has_blk(bs)) {
error_setg(errp, "Node %s is in use", node_name);
return;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, errp)) {
goto out;
}
if (!QTAILQ_IN_USE(bs, monitor_list)) {
error_setg(errp, "Node %s is not owned by the monitor",
bs->node_name);
goto out;
}
if (bs->refcnt > 1) {
error_setg(errp, "Block device %s is in use",
bdrv_get_device_or_node_name(bs));
goto out;
}
QTAILQ_REMOVE(&monitor_bdrv_states, bs, monitor_list);
bdrv_unref(bs);
out:
aio_context_release(aio_context);
}
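
/* Return @parent_bs's child named @child_name, or NULL if there is none. */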
static BdrvChild * GRAPH_RDLOCK
bdrv_find_child(BlockDriverState *parent_bs, const char *child_name)
{
BdrvChild *child;
QLIST_FOREACH(child, &parent_bs->children, next) {
if (strcmp(child->name, child_name) == 0) {
return child;
}
}
return NULL;
}
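
/*
 * QMP handler for x-blockdev-change.  Exactly one of @child and @node must
 * be given: @child detaches that child from @parent, @node attaches the
 * named node as a new child of @parent.
 */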
void qmp_x_blockdev_change(const char *parent, const char *child,
const char *node, Error **errp)
{
BlockDriverState *parent_bs, *new_bs = NULL;
BdrvChild *p_child;
bdrv_graph_wrlock(NULL);
parent_bs = bdrv_lookup_bs(parent, parent, errp);
if (!parent_bs) {
goto out;
}
if (!child == !node) {
if (child) {
error_setg(errp, "The parameters child and node are in conflict");
} else {
error_setg(errp, "Either child or node must be specified");
}
goto out;
}
if (child) {
p_child = bdrv_find_child(parent_bs, child);
if (!p_child) {
error_setg(errp, "Node '%s' does not have child '%s'",
parent, child);
goto out;
}
bdrv_del_child(parent_bs, p_child, errp);
}
if (node) {
new_bs = bdrv_find_node(node);
if (!new_bs) {
error_setg(errp, "Node '%s' not found", node);
goto out;
}
bdrv_add_child(parent_bs, new_bs, errp);
}
out:
bdrv_graph_wrunlock();
}
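
/* QMP handler for query-block-jobs.  Internal jobs are not reported. */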
BlockJobInfoList *qmp_query_block_jobs(Error **errp)
{
BlockJobInfoList *head = NULL, **tail = &head;
BlockJob *job;
JOB_LOCK_GUARD();
for (job = block_job_next_locked(NULL); job;
job = block_job_next_locked(job)) {
BlockJobInfo *value;
if (block_job_is_internal(job)) {
continue;
}
value = block_job_query_locked(job, errp);
if (!value) {
qapi_free_BlockJobInfoList(head);
return NULL;
}
QAPI_LIST_APPEND(tail, value);
}
return head;
}
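
/*
 * QMP handler for x-blockdev-set-iothread.  Moves @node_name to the given
 * iothread's AioContext, or back to the main loop when @iothread is null.
 */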
void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
bool has_force, bool force, Error **errp)
{
AioContext *old_context;
AioContext *new_context;
BlockDriverState *bs;
bs = bdrv_find_node(node_name);
if (!bs) {
error_setg(errp, "Failed to find node with node-name='%s'", node_name);
return;
}
/* Protects against accidents. */
if (!(has_force && force) && bdrv_has_blk(bs)) {
error_setg(errp, "Node %s is associated with a BlockBackend and could "
"be in use (use force=true to override this check)",
node_name);
return;
}
if (iothread->type == QTYPE_QSTRING) {
IOThread *obj = iothread_by_id(iothread->u.s);
if (!obj) {
error_setg(errp, "Cannot find iothread %s", iothread->u.s);
return;
}
new_context = iothread_get_aio_context(obj);
} else {
new_context = qemu_get_aio_context();
}
old_context = bdrv_get_aio_context(bs);
aio_context_acquire(old_context);
bdrv_try_change_aio_context(bs, new_context, NULL, errp);
aio_context_release(old_context);
}
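
/* -drive options that are interpreted directly by the block layer;
 * device-specific options are validated elsewhere. */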
QemuOptsList qemu_common_drive_opts = {
.name = "drive",
.head = QTAILQ_HEAD_INITIALIZER(qemu_common_drive_opts.head),
.desc = {
{
.name = "snapshot",
.type = QEMU_OPT_BOOL,
.help = "enable/disable snapshot mode",
},{
.name = "aio",
.type = QEMU_OPT_STRING,
.help = "host AIO implementation (threads, native, io_uring)",
},{
.name = BDRV_OPT_CACHE_WB,
.type = QEMU_OPT_BOOL,
.help = "Enable writeback mode",
},{
.name = "format",
.type = QEMU_OPT_STRING,
.help = "disk format (raw, qcow2, ...)",
},{
.name = "rerror",
.type = QEMU_OPT_STRING,
.help = "read error action",
},{
.name = "werror",
.type = QEMU_OPT_STRING,
.help = "write error action",
},{
.name = BDRV_OPT_READ_ONLY,
.type = QEMU_OPT_BOOL,
.help = "open drive file as read-only",
},
THROTTLE_OPTS,
{
.name = "throttling.group",
.type = QEMU_OPT_STRING,
.help = "name of the block throttling group",
},{
.name = "copy-on-read",
.type = QEMU_OPT_BOOL,
.help = "copy read data from backing file into image file",
},{
.name = "detect-zeroes",
.type = QEMU_OPT_STRING,
.help = "try to optimize zero writes (off, on, unmap)",
},{
.name = "stats-account-invalid",
.type = QEMU_OPT_BOOL,
.help = "whether to account for invalid I/O operations "
"in the statistics",
},{
.name = "stats-account-failed",
.type = QEMU_OPT_BOOL,
.help = "whether to account for failed I/O operations "
"in the statistics",
},
{ /* end of list */ }
},
};
QemuOptsList qemu_drive_opts = {
.name = "drive",
.head = QTAILQ_HEAD_INITIALIZER(qemu_drive_opts.head),
.desc = {
/*
* no elements => accept any params
* validation will happen later
*/
{ /* end of list */ }
},
};