Jobs patches:

- small fix of job_create()
- refactoring: drop BlockJob.blk field
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEi5wmzbL9FHyIDoahVh8kwfGfefsFAmHMst0ACgkQVh8kwfGf
 efsByBAApnuspO4UbrQOLXRZrtZ5tFhGWjwgafNbrIrBHF9qPhw3dv2ExjS00mr0
 afBuswDPzs8EhDlQqmoTMx27oLNSFOMsEypNVBR5YzBLWjpOEAt7e81BzpN6E7GD
 22eRIwv5DeNtA08+XzsoC7NUZF7M+m/ELZNGzJEbu7W+jt07ctMhuljI3cxLsi7Z
 Cvm4cRq5jK8/bkUxtLG0BhVDOXRr9Nbp7focKRhl3ftDbvVtQgI89pxUKcCzaWzC
 Dw/iCnp/e8KUonbI22Tcav89pIC3DqupJVb4WIu83W7NpssQmkmkilfFlbblxM/K
 g2xEx4vEE7MfFQH8nGOafYKbT0VVdMa900cuZv9waQs4LZ23/Pv0SwN5cE24Omnh
 tvRTxdp8XBjRqgbQwCbBXVIydLvMIvExI/8Q9iKxEMrNsfz6mbBsEzQirRf9RqxK
 pyMWEmmj0zkJ6+Jc7J4oe+XcmB82LGzlm0YrBaDzJe+Bh/WVEUm47+AxYCbzAwGq
 9HnMDI5gNMbdR0H0GyEXMXVmykn1lL0+YtRf6JfRpKWKr6Am1UczqcRVKqEK4Os3
 uBVeIsG726DqzLWkhMgq1LtUjHaxawpyyKv+ZtYSjriHROcelq60EoOnRiOYLYw5
 pzqMgt+eZnNKNuM2/t7Kb1AeO+NJpQjK4rHmRSzqhbn2OhaEBlk=
 =8ze8
 -----END PGP SIGNATURE-----

Merge tag 'pull-jobs-2021-12-29' of https://src.openvz.org/scm/~vsementsov/qemu into staging

Jobs patches:
 - small fix of job_create()
 - refactoring: drop BlockJob.blk field

# gpg: Signature made Wed 29 Dec 2021 11:11:25 AM PST
# gpg:                using RSA key 8B9C26CDB2FD147C880E86A1561F24C1F19F79FB
# gpg: Good signature from "Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 8B9C 26CD B2FD 147C 880E  86A1 561F 24C1 F19F 79FB

* tag 'pull-jobs-2021-12-29' of https://src.openvz.org/scm/~vsementsov/qemu:
  blockjob: drop BlockJob.blk field
  test-bdrv-drain: don't use BlockJob.blk
  block/stream: add own blk
  test-blockjob-txn: don't abuse job->blk
  blockjob: implement and use block_job_get_aio_context
  job.c: add missing notifier initialization

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2021-12-29 14:33:22 -08:00
commit d5a9f35289
12 changed files with 60 additions and 55 deletions
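Before the per-file diffs, here is a minimal sketch of the central API change being merged: BlockJob no longer owns a BlockBackend, so code that used to look up the job's AioContext via blk_get_aio_context(job->blk) now calls the new block_job_get_aio_context() helper. This sketch is not part of the merged patches; the caller function below is hypothetical and the QEMU-internal headers are assumed to be available in-tree.

#include "qemu/osdep.h"
#include "block/blockjob.h"

/*
 * Hypothetical caller, shown only to illustrate the pattern adopted in
 * blockdev.c and qemu-img.c by this series: take the job's AioContext
 * from the generic Job (via the new helper) instead of from job->blk.
 */
static void example_with_job_context(BlockJob *job)
{
    /* before this series: aio_context = blk_get_aio_context(job->blk); */
    AioContext *aio_context = block_job_get_aio_context(job);

    aio_context_acquire(aio_context);
    /* ... query or finalize the job while holding its AioContext ... */
    aio_context_release(aio_context);
}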

block/mirror.c

@@ -771,13 +771,6 @@ static int mirror_exit_common(Job *job)
block_job_remove_all_bdrv(bjob);
bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
/* We just changed the BDS the job BB refers to (with either or both of the
* bdrv_replace_node() calls), so switch the BB back so the cleanup does
* the right thing. We don't need any permissions any more now. */
blk_remove_bs(bjob->blk);
blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);
bs_opaque->job = NULL;
bdrv_drained_end(src);

block/stream.c

@@ -33,6 +33,7 @@ enum {
typedef struct StreamBlockJob {
BlockJob common;
BlockBackend *blk;
BlockDriverState *base_overlay; /* COW overlay (stream from this) */
BlockDriverState *above_base; /* Node directly above the base */
BlockDriverState *cor_filter_bs;
@@ -88,17 +89,18 @@ static int stream_prepare(Job *job)
static void stream_clean(Job *job)
{
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
BlockJob *bjob = &s->common;
if (s->cor_filter_bs) {
bdrv_cor_filter_drop(s->cor_filter_bs);
s->cor_filter_bs = NULL;
}
blk_unref(s->blk);
s->blk = NULL;
/* Reopen the image back in read-only mode if necessary */
if (s->bs_read_only) {
/* Give up write permissions before making it read-only */
blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
bdrv_reopen_set_read_only(s->target_bs, true, NULL);
}
@@ -108,7 +110,6 @@ static void stream_clean(Job *job)
static int coroutine_fn stream_run(Job *job, Error **errp)
{
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
BlockBackend *blk = s->common.blk;
BlockDriverState *unfiltered_bs = bdrv_skip_filters(s->target_bs);
int64_t len;
int64_t offset = 0;
@@ -159,7 +160,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
}
trace_stream_one_iteration(s, offset, n, ret);
if (copy) {
ret = stream_populate(blk, offset, n);
ret = stream_populate(s->blk, offset, n);
}
if (ret < 0) {
BlockErrorAction action =
@@ -294,13 +295,24 @@ void stream_start(const char *job_id, BlockDriverState *bs,
}
s = block_job_create(job_id, &stream_job_driver, NULL, cor_filter_bs,
BLK_PERM_CONSISTENT_READ,
basic_flags | BLK_PERM_WRITE,
0, BLK_PERM_ALL,
speed, creation_flags, NULL, NULL, errp);
if (!s) {
goto fail;
}
s->blk = blk_new_with_bs(cor_filter_bs, BLK_PERM_CONSISTENT_READ,
basic_flags | BLK_PERM_WRITE, errp);
if (!s->blk) {
goto fail;
}
/*
* Disable request queuing in the BlockBackend to avoid deadlocks on drain:
* The job reports that it's busy until it reaches a pause point.
*/
blk_set_disable_request_queuing(s->blk, true);
blk_set_allow_aio_context_change(s->blk, true);
/*
* Prevent concurrent jobs trying to modify the graph structure here, we
* already have our own plans. Also don't allow resize as the image size is

blockdev.c

@@ -3315,7 +3315,7 @@ static BlockJob *find_block_job(const char *id, AioContext **aio_context,
return NULL;
}
*aio_context = blk_get_aio_context(job->blk);
*aio_context = block_job_get_aio_context(job);
aio_context_acquire(*aio_context);
return job;
@@ -3420,7 +3420,7 @@ void qmp_block_job_finalize(const char *id, Error **errp)
* automatically acquires the new one), so make sure we release the correct
* one.
*/
aio_context = blk_get_aio_context(job->blk);
aio_context = block_job_get_aio_context(job);
job_unref(&job->job);
aio_context_release(aio_context);
}
@@ -3711,7 +3711,7 @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp)
if (block_job_is_internal(job)) {
continue;
}
aio_context = blk_get_aio_context(job->blk);
aio_context = block_job_get_aio_context(job);
aio_context_acquire(aio_context);
value = block_job_query(job, errp);
aio_context_release(aio_context);

blockjob.c

@@ -86,7 +86,6 @@ void block_job_free(Job *job)
BlockJob *bjob = container_of(job, BlockJob, job);
block_job_remove_all_bdrv(bjob);
blk_unref(bjob->blk);
ratelimit_destroy(&bjob->limit);
error_free(bjob->blocker);
}
@@ -433,22 +432,16 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
uint64_t shared_perm, int64_t speed, int flags,
BlockCompletionFunc *cb, void *opaque, Error **errp)
{
BlockBackend *blk;
BlockJob *job;
int ret;
if (job_id == NULL && !(flags & JOB_INTERNAL)) {
job_id = bdrv_get_device_name(bs);
}
blk = blk_new_with_bs(bs, perm, shared_perm, errp);
if (!blk) {
return NULL;
}
job = job_create(job_id, &driver->job_driver, txn, blk_get_aio_context(blk),
job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs),
flags, cb, opaque, errp);
if (job == NULL) {
blk_unref(blk);
return NULL;
}
@@ -458,8 +451,6 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
ratelimit_init(&job->limit);
job->blk = blk;
job->finalize_cancelled_notifier.notify = block_job_event_cancelled;
job->finalize_completed_notifier.notify = block_job_event_completed;
job->pending_notifier.notify = block_job_event_pending;
@@ -476,21 +467,23 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
error_setg(&job->blocker, "block device is in use by block job: %s",
job_type_str(&job->job));
block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
ret = block_job_add_bdrv(job, "main node", bs, perm, shared_perm, errp);
if (ret < 0) {
goto fail;
}
bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
/* Disable request queuing in the BlockBackend to avoid deadlocks on drain:
* The job reports that it's busy until it reaches a pause point. */
blk_set_disable_request_queuing(blk, true);
blk_set_allow_aio_context_change(blk, true);
if (!block_job_set_speed(job, speed, errp)) {
job_early_fail(&job->job);
return NULL;
goto fail;
}
return job;
fail:
job_early_fail(&job->job);
return NULL;
}
void block_job_iostatus_reset(BlockJob *job)
@@ -547,3 +540,8 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
}
return action;
}
AioContext *block_job_get_aio_context(BlockJob *job)
{
return job->job.aio_context;
}

include/block/blockjob.h

@@ -43,9 +43,6 @@ typedef struct BlockJob {
/** Data belonging to the generic Job infrastructure */
Job job;
/** The block device on which the job is operating. */
BlockBackend *blk;
/** Status that is published by the query-block-jobs QMP API */
BlockDeviceIoStatus iostatus;
@@ -173,4 +170,11 @@ bool block_job_is_internal(BlockJob *job);
*/
const BlockJobDriver *block_job_driver(BlockJob *job);
/*
* block_job_get_aio_context:
*
* Returns aio context associated with a block job.
*/
AioContext *block_job_get_aio_context(BlockJob *job);
#endif

job.c

@@ -352,6 +352,7 @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
notifier_list_init(&job->on_finalize_completed);
notifier_list_init(&job->on_pending);
notifier_list_init(&job->on_ready);
notifier_list_init(&job->on_idle);
job_state_transition(job, JOB_STATUS_CREATED);
aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,

qemu-img.c

@@ -902,7 +902,7 @@ static void common_block_job_cb(void *opaque, int ret)
static void run_block_job(BlockJob *job, Error **errp)
{
uint64_t progress_current, progress_total;
AioContext *aio_context = blk_get_aio_context(job->blk);
AioContext *aio_context = block_job_get_aio_context(job);
int ret = 0;
aio_context_acquire(aio_context);

tests/qemu-iotests/141.out

@@ -132,7 +132,7 @@ wrote 1048576/1048576 bytes at offset 0
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
{'execute': 'blockdev-del',
'arguments': {'node-name': 'drv0'}}
{"error": {"class": "GenericError", "desc": "Node drv0 is in use"}}
{"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: block device is in use by block job: commit"}}
{'execute': 'block-job-cancel',
'arguments': {'device': 'job0'}}
{"return": {}}

tests/qemu-iotests/283

@@ -93,7 +93,8 @@ vm.qmp_log('blockdev-add', **{
'take-child-perms': ['write']
})
vm.qmp_log('blockdev-backup', sync='full', device='source', target='target')
vm.qmp_log('blockdev-backup', sync='full', device='source', target='target',
job_id="backup0")
vm.shutdown()

tests/qemu-iotests/283.out

@@ -4,7 +4,7 @@
{"return": {}}
{"execute": "blockdev-add", "arguments": {"driver": "blkdebug", "image": "base", "node-name": "other", "take-child-perms": ["write"]}}
{"return": {}}
{"execute": "blockdev-backup", "arguments": {"device": "source", "sync": "full", "target": "target"}}
{"execute": "blockdev-backup", "arguments": {"device": "source", "job-id": "backup0", "sync": "full", "target": "target"}}
{"error": {"class": "GenericError", "desc": "Permission conflict on node 'base': permissions 'write' are both required by node 'other' (uses node 'base' as 'image' child) and unshared by node 'source' (uses node 'base' as 'image' child)."}}
=== copy-before-write filter should be gone after job-finalize ===

tests/unit/test-bdrv-drain.c

@@ -772,6 +772,7 @@ static void test_iothread_drain_subtree(void)
typedef struct TestBlockJob {
BlockJob common;
BlockDriverState *bs;
int run_ret;
int prepare_ret;
bool running;
@@ -783,7 +784,7 @@ static int test_job_prepare(Job *job)
TestBlockJob *s = container_of(job, TestBlockJob, common.job);
/* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
blk_flush(s->common.blk);
bdrv_flush(s->bs);
return s->prepare_ret;
}
@@ -792,7 +793,7 @@ static void test_job_commit(Job *job)
TestBlockJob *s = container_of(job, TestBlockJob, common.job);
/* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
blk_flush(s->common.blk);
bdrv_flush(s->bs);
}
static void test_job_abort(Job *job)
@@ -800,7 +801,7 @@ static void test_job_abort(Job *job)
TestBlockJob *s = container_of(job, TestBlockJob, common.job);
/* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
blk_flush(s->common.blk);
bdrv_flush(s->bs);
}
static int coroutine_fn test_job_run(Job *job, Error **errp)
@@ -915,6 +916,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
tjob = block_job_create("job0", &test_job_driver, NULL, src,
0, BLK_PERM_ALL,
0, 0, NULL, NULL, &error_abort);
tjob->bs = src;
job = &tjob->common;
block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
@@ -1538,6 +1540,7 @@ typedef struct TestDropBackingBlockJob {
bool should_complete;
bool *did_complete;
BlockDriverState *detach_also;
BlockDriverState *bs;
} TestDropBackingBlockJob;
static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp)
@@ -1557,7 +1560,7 @@ static void test_drop_backing_job_commit(Job *job)
TestDropBackingBlockJob *s =
container_of(job, TestDropBackingBlockJob, common.job);
bdrv_set_backing_hd(blk_bs(s->common.blk), NULL, &error_abort);
bdrv_set_backing_hd(s->bs, NULL, &error_abort);
bdrv_set_backing_hd(s->detach_also, NULL, &error_abort);
*s->did_complete = true;
@@ -1657,6 +1660,7 @@ static void test_blockjob_commit_by_drained_end(void)
job = block_job_create("job", &test_drop_backing_job_driver, NULL,
bs_parents[2], 0, BLK_PERM_ALL, 0, 0, NULL, NULL,
&error_abort);
job->bs = bs_parents[2];
job->detach_also = bs_parents[0];
job->did_complete = &job_has_completed;

tests/unit/test-blockjob-txn.c

@@ -25,14 +25,6 @@ typedef struct {
int *result;
} TestBlockJob;
static void test_block_job_clean(Job *job)
{
BlockJob *bjob = container_of(job, BlockJob, job);
BlockDriverState *bs = blk_bs(bjob->blk);
bdrv_unref(bs);
}
static int coroutine_fn test_block_job_run(Job *job, Error **errp)
{
TestBlockJob *s = container_of(job, TestBlockJob, common.job);
@@ -73,7 +65,6 @@ static const BlockJobDriver test_block_job_driver = {
.free = block_job_free,
.user_resume = block_job_user_resume,
.run = test_block_job_run,
.clean = test_block_job_clean,
},
};
@@ -105,6 +96,7 @@ static BlockJob *test_block_job_start(unsigned int iterations,
s = block_job_create(job_id, &test_block_job_driver, txn, bs,
0, BLK_PERM_ALL, 0, JOB_DEFAULT,
test_block_job_cb, data, &error_abort);
bdrv_unref(bs); /* referenced by job now */
s->iterations = iterations;
s->use_timer = use_timer;
s->rc = rc;