blockjob: remove clock argument from block_job_sleep_ns

All callers are using QEMU_CLOCK_REALTIME, and it will not be possible to
support more than one clock when block_job_sleep_ns switches to a single
timer stored in the BlockJob struct.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alberto Garcia <berto@igalia.com>
Tested-By: Jeff Cody <jcody@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Authored by Paolo Bonzini on 2017-11-29 11:25:11 +01:00; committed by Kevin Wolf
parent 02d213009d
commit 5bf1d5a73a
7 changed files with 14 additions and 14 deletions
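
At every call site the change has the same mechanical shape: drop the clock argument and let block_job_sleep_ns use QEMU_CLOCK_REALTIME itself. A minimal before/after sketch (the surrounding driver code is illustrative, not copied from any single file in this diff):

    /* Before this patch: the caller named the clock explicitly. */
    block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);

    /* After this patch: QEMU_CLOCK_REALTIME is implied by the function. */
    block_job_sleep_ns(&job->common, delay_ns);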

@@ -346,9 +346,9 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
         uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
                                                       job->bytes_read);
         job->bytes_read = 0;
-        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
+        block_job_sleep_ns(&job->common, delay_ns);
     } else {
-        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
+        block_job_sleep_ns(&job->common, 0);
     }
 
     if (block_job_is_cancelled(&job->common)) {

@@ -174,7 +174,7 @@ static void coroutine_fn commit_run(void *opaque)
         /* Note that even when no rate limit is applied we need to yield
          * with no pending I/O here so that bdrv_drain_all() returns.
          */
-        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
+        block_job_sleep_ns(&s->common, delay_ns);
         if (block_job_is_cancelled(&s->common)) {
             break;
         }

@@ -598,7 +598,7 @@ static void mirror_throttle(MirrorBlockJob *s)
 
     if (now - s->last_pause_ns > SLICE_TIME) {
         s->last_pause_ns = now;
-        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
+        block_job_sleep_ns(&s->common, 0);
     } else {
         block_job_pause_point(&s->common);
     }
@@ -870,13 +870,13 @@ static void coroutine_fn mirror_run(void *opaque)
         ret = 0;
         trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
         if (!s->synced) {
-            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
+            block_job_sleep_ns(&s->common, delay_ns);
             if (block_job_is_cancelled(&s->common)) {
                 break;
             }
         } else if (!should_complete) {
             delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
-            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
+            block_job_sleep_ns(&s->common, delay_ns);
         }
         s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     }

@@ -141,7 +141,7 @@ static void coroutine_fn stream_run(void *opaque)
         /* Note that even when no rate limit is applied we need to yield
         * with no pending I/O here so that bdrv_drain_all() returns.
         */
-        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
+        block_job_sleep_ns(&s->common, delay_ns);
         if (block_job_is_cancelled(&s->common)) {
             break;
         }

@@ -788,7 +788,7 @@ bool block_job_is_cancelled(BlockJob *job)
     return job->cancelled;
 }
 
-void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
+void block_job_sleep_ns(BlockJob *job, int64_t ns)
 {
     assert(job->busy);
 
@@ -803,7 +803,8 @@ void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
      * it wakes and runs, otherwise we risk double-entry or entry after
      * completion. */
     if (!block_job_should_pause(job)) {
-        co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns);
+        co_aio_sleep_ns(blk_get_aio_context(job->blk),
+                        QEMU_CLOCK_REALTIME, ns);
     }
 
     block_job_pause_point(job);
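
The motivation in the commit message points at a follow-up in which, with only QEMU_CLOCK_REALTIME in play, the sleep is driven by a single timer stored in the BlockJob struct instead of co_aio_sleep_ns. The sketch below is hypothetical and not part of this patch; the sleep_timer field and the helper names are assumptions, used only to show why committing to one clock type matters: an embedded timer has to be initialized on a fixed clock.

/* Hypothetical follow-up sketch (not part of this commit): drive the sleep
 * from a timer embedded in BlockJob.  The sleep_timer field and the helper
 * names below are assumed, not existing QEMU API. */
static void block_job_sleep_timer_cb(void *opaque)
{
    BlockJob *job = opaque;

    block_job_enter(job);               /* wake the sleeping job coroutine */
}

static void coroutine_fn block_job_do_sleep(BlockJob *job, int64_t ns)
{
    aio_timer_init(blk_get_aio_context(job->blk), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   block_job_sleep_timer_cb, job);
    timer_mod(&job->sleep_timer,
              qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    qemu_coroutine_yield();             /* resumed by the timer callback */
    timer_del(&job->sleep_timer);
}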

@@ -139,14 +139,13 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
 /**
  * block_job_sleep_ns:
  * @job: The job that calls the function.
- * @clock: The clock to sleep on.
  * @ns: How many nanoseconds to stop for.
  *
  * Put the job to sleep (assuming that it wasn't canceled) for @ns
- * nanoseconds. Canceling the job will not interrupt the wait, so the
- * cancel will not process until the coroutine wakes up.
+ * %QEMU_CLOCK_REALTIME nanoseconds. Canceling the job will not interrupt
+ * the wait, so the cancel will not process until the coroutine wakes up.
  */
-void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns);
+void block_job_sleep_ns(BlockJob *job, int64_t ns);
 
 /**
  * block_job_yield:
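
Typical use after this change, following the callers updated above: the job coroutine sleeps for the rate-limit delay (or 0 ns just to yield) and re-checks cancellation when it wakes. A condensed sketch of that loop, with names borrowed from the stream/commit drivers and the actual I/O elided:

    for (;;) {
        /* Yield even when no rate limit applies, so that bdrv_drain_all()
         * can make progress while the job runs. */
        block_job_sleep_ns(&s->common, delay_ns);
        if (block_job_is_cancelled(&s->common)) {
            break;
        }

        /* ... copy one chunk and count the bytes processed in n ... */

        delay_ns = s->common.speed ? ratelimit_calculate_delay(&s->limit, n)
                                   : 0;
    }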

@@ -44,7 +44,7 @@ static void coroutine_fn test_block_job_run(void *opaque)
 
     while (s->iterations--) {
         if (s->use_timer) {
-            block_job_sleep_ns(job, QEMU_CLOCK_REALTIME, 0);
+            block_job_sleep_ns(job, 0);
         } else {
             block_job_yield(job);
         }