/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qjson.h"
#include "qemu/coroutine.h"
#include "qemu/id.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to block_job_do_yield and
 * block_job_enter. */
static QemuMutex block_job_mutex;

static void block_job_lock(void)
{
    qemu_mutex_lock(&block_job_mutex);
}

static void block_job_unlock(void)
{
    qemu_mutex_unlock(&block_job_mutex);
}

static void __attribute__((__constructor__)) block_job_init(void)
{
    qemu_mutex_init(&block_job_mutex);
}

static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));

/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);

/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor.  The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed).  The consistency is achieved with
 * aio_context_acquire/release.  These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer.  These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */
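
/*
 * Illustrative sketch only (not part of this file's API): a monitor-style
 * caller is expected to look the job up and hold its AioContext lock around
 * the operation.  The job ID and speed value below are made up:
 *
 *     Error *local_err = NULL;
 *     BlockJob *job = block_job_get("job0");
 *     if (job) {
 *         AioContext *aio_context = blk_get_aio_context(job->blk);
 *         aio_context_acquire(aio_context);
 *         block_job_set_speed(job, 10 * 1024 * 1024, &local_err);
 *         aio_context_release(aio_context);
 *     }
 */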

BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}

BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}
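
/*
 * A minimal sketch of how a monitor-side caller can walk the job list with
 * block_job_next() (compare the query-block-jobs handling in blockdev.c);
 * what is done with each job is left open here:
 *
 *     BlockJob *job;
 *     for (job = block_job_next(NULL); job; job = block_job_next(job)) {
 *         if (!block_job_is_internal(job)) {
 *             ... e.g. collect block_job_query(job, errp) ...
 *         }
 *     }
 */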

BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}
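
/*
 * Usage sketch, assuming two already-created jobs: a transaction groups
 * jobs so they complete or abort together.  The creating code drops its own
 * reference once the member jobs hold theirs:
 *
 *     BlockJobTxn *txn = block_job_txn_new();   (refcnt: 1)
 *     block_job_txn_add_job(txn, job1);         (refcnt: 2)
 *     block_job_txn_add_job(txn, job2);         (refcnt: 3)
 *     block_job_txn_unref(txn);                 (members keep it alive)
 */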

static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}

void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);

void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        BlockDriverState *bs = blk_bs(job->blk);
        QLIST_REMOVE(job, job_list);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        assert(!timer_pending(&job->sleep_timer));
        g_free(job);
    }
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}

static void block_job_drain(BlockJob *job)
{
    /* If the job is not busy (job->busy == false), this kicks it into the
     * next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}

static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_str(job->driver->job_type),
                           job->id);
}

static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_pause(job);
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_resume(job);
}

static const BdrvChildRole child_job = {
    .get_parent_desc    = child_job_get_parent_desc,
    .drained_begin      = child_job_drained_begin,
    .drained_end        = child_job_drained_end,
    .stay_at_node       = true,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}

int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}
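
/*
 * Sketch of a driver attaching an extra node (a hypothetical target of a
 * copy-style job) so it stays blocked and permission-checked for the job's
 * lifetime; the permission choices below are illustrative only:
 *
 *     ret = block_job_add_bdrv(job, "target", target_bs,
 *                              BLK_PERM_WRITE,
 *                              BLK_PERM_CONSISTENT_READ, errp);
 *     if (ret < 0) {
 *         ... fail job creation ...
 *     }
 */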

bool block_job_is_internal(BlockJob *job)
{
    return (job->id == NULL);
}

static bool block_job_started(BlockJob *job)
{
    return job->co;
}

/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}

static void block_job_sleep_timer_cb(void *opaque)
{
    BlockJob *job = opaque;

    block_job_enter(job);
}

void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}
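
/*
 * A minimal, hypothetical driver entry point of the shape block_job_start()
 * expects in driver->start: a coroutine that loops with cancellation checks
 * and throttled sleeps, then defers its completion to the main loop.  Names
 * prefixed "example_" are made up for this sketch:
 *
 *     static void coroutine_fn example_job_run(BlockJob *job)
 *     {
 *         while (more_work && !block_job_is_cancelled(job)) {
 *             ... do one chunk of I/O ...
 *             block_job_sleep_ns(job, delay_ns);
 *         }
 *         block_job_defer_to_main_loop(job, example_job_complete, NULL);
 *     }
 */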

static void block_job_completed_single(BlockJob *job)
{
    assert(job->completed);

    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    if (job->driver->clean) {
        job->driver->clean(job);
    }

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    if (job->txn) {
        QLIST_REMOVE(job, txn_list);
        block_job_txn_unref(job->txn);
    }
    block_job_unref(job);
}

static void block_job_cancel_async(BlockJob *job)
{
    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        block_job_iostatus_reset(job);
    }
    if (job->user_paused) {
        /* Do not call block_job_enter here, the caller will handle it. */
        job->user_paused = false;
        job->pause_count--;
    }
    job->cancelled = true;
}

static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}

static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    block_job_txn_ref(txn);

    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            block_job_cancel_async(other_job);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = blk_get_aio_context(other_job->blk);
        if (!other_job->completed) {
            assert(other_job->cancelled);
            block_job_finish_sync(other_job, NULL, NULL);
        }
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }

    block_job_txn_unref(txn);
}

static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;
    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }
    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

/* Assumes the block_job_mutex is held */
static bool block_job_timer_pending(BlockJob *job)
{
    return timer_pending(&job->sleep_timer);
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;
    int64_t old_speed = job->speed;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
    if (speed <= old_speed) {
        return;
    }

    /* kick only if a timer is pending */
    block_job_enter_cond(job, block_job_timer_pending);
}

void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job->pause_count || job->cancelled ||
        !block_job_started(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}

void block_job_user_pause(BlockJob *job)
{
    job->user_paused = true;
    block_job_pause(job);
}

bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}

void block_job_user_resume(BlockJob *job)
{
    if (job && job->user_paused && job->pause_count > 0) {
        block_job_iostatus_reset(job);
        job->user_paused = false;
        block_job_resume(job);
    }
}

void block_job_cancel(BlockJob *job)
{
    if (block_job_started(job)) {
        block_job_cancel_async(job);
        block_job_enter(job);
    } else {
        block_job_completed(job, -ECANCELED);
    }
}

/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}

void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}

BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(BlockJobType_str(job->driver->job_type));
    info->device    = g_strdup(job->id);
    info->len       = job->len;
    info->busy      = atomic_read(&job->busy);
    info->paused    = job->pause_count > 0;
    info->offset    = job->offset;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job->ready;
    return info;
}

static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}

static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}

/*
 * API for block job drivers and the block layer.  These functions are
 * declared in blockjob_int.h.
 */

void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver        = driver;
    job->id            = g_strdup(job_id);
    job->blk           = blk;
    job->cb            = cb;
    job->opaque        = opaque;
    job->busy          = false;
    job->paused        = true;
    job->pause_count   = 1;
    job->refcnt        = 1;
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   block_job_sleep_timer_cb, job);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_str(driver->job_type));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_unref(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }
    return job;
}
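
/*
 * Sketch of how a driver's public entry point typically wires this up
 * (hypothetical names; compare the real stream/backup/mirror start
 * functions).  block_job_create() returns a paused job; the caller fills in
 * its driver state and then calls block_job_start():
 *
 *     ExampleJob *s = block_job_create(job_id, &example_job_driver, bs,
 *                                      0, BLK_PERM_ALL, speed,
 *                                      BLOCK_JOB_DEFAULT, cb, opaque, errp);
 *     if (!s) {
 *         return;
 *     }
 *     ... initialize driver-specific fields of s ...
 *     block_job_start(&s->common);
 */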

void block_job_pause_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_ref(job);
        block_job_pause(job);
        aio_context_release(aio_context);
    }
}

void block_job_early_fail(BlockJob *job)
{
    block_job_unref(job);
}

void block_job_completed(BlockJob *job, int ret)
{
    assert(blk_bs(job->blk)->job == job);
    assert(!job->completed);
    job->completed = true;
    job->ret = ret;
    if (!job->txn) {
        block_job_completed_single(job);
    } else if (ret < 0 || block_job_is_cancelled(job)) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}

static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}

/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with block_job_enter() before the timer has
 * expired is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
 * called explicitly. */
static void block_job_do_yield(BlockJob *job, uint64_t ns)
{
    block_job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    block_job_unlock();
    qemu_coroutine_yield();

    /* Set by block_job_enter before re-entering the coroutine.  */
    assert(job->busy);
}

void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        job->paused = true;
        block_job_do_yield(job, -1);
        job->paused = false;
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void block_job_resume_all(void)
{
    BlockJob *job, *next;

    QLIST_FOREACH_SAFE(job, &block_jobs, job_list, next) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        block_job_unref(job);
        aio_context_release(aio_context);
    }
}

/*
 * Conditionally enter the job coroutine.  The job is entered only if fn(),
 * called with the block_job_mutex held, returns true (or if fn is NULL),
 * and only if the job is started, not deferred to the main loop, and not
 * already busy.
 */
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
{
    if (!block_job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    block_job_lock();
    if (job->busy) {
        block_job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        block_job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    block_job_unlock();
    aio_co_wake(job->co);
}

void block_job_enter(BlockJob *job)
{
    block_job_enter_cond(job, NULL);
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}

void block_job_sleep_ns(BlockJob *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    block_job_pause_point(job);
}

void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, -1);
    }

    block_job_pause_point(job);
}
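
/*
 * Typical driver loop shape (a sketch, not a real driver): even with a zero
 * delay, block_job_sleep_ns() yields, so cancellation, pausing and speed
 * changes get a chance to take effect between chunks of work.  "s->limit"
 * is assumed driver-local RateLimit state:
 *
 *     while (!block_job_is_cancelled(job)) {
 *         int64_t delay_ns = ratelimit_calculate_delay(&s->limit, n);
 *         block_job_sleep_ns(job, delay_ns);
 *         ... copy the next chunk of n units ...
 *     }
 */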

void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->user_paused && job->pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

void block_job_event_ready(BlockJob *job)
{
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}

BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* make the pause user visible, which will be resumed from QMP. */
        block_job_user_pause(job);
        block_job_iostatus_set_err(job, error);
    }
    return action;
}
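
/*
 * Sketch of driver-side use (compare the real mirror/backup error paths):
 * on an I/O error the driver asks block_job_error_action() what to do and
 * reacts to the answer; "s", "ret" and "is_read" are assumed driver-local:
 *
 *     if (ret < 0) {
 *         BlockErrorAction action =
 *             block_job_error_action(&s->common, s->on_error, is_read, -ret);
 *         if (action == BLOCK_ERROR_ACTION_REPORT) {
 *             ... give up and complete the job with ret ...
 *         } else {
 *             ... ignore, or wait for user resume, then retry ...
 *         }
 *     }
 */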

typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}

void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}
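
/*
 * Completion sketch: a driver's coroutine hands its exit code over to the
 * main loop, where graph changes are safe to perform.  "ExampleData" and
 * the callback name are hypothetical:
 *
 *     static void example_complete(BlockJob *job, void *opaque)
 *     {
 *         ExampleData *data = opaque;
 *         ... graph manipulation, then: ...
 *         block_job_completed(job, data->ret);
 *         g_free(data);
 *     }
 *
 *     From the job coroutine:
 *
 *     data->ret = ret;
 *     block_job_defer_to_main_loop(job, example_complete, data);
 */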