aio / timers: convert block_job_sleep_ns and co_sleep_ns to new API
Convert block_job_sleep_ns and co_sleep_ns to use the new timer API.

Signed-off-by: Alex Bligh <alex@alex.org.uk>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 7483d1e547
parent 884f17c235
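The conversion is mechanical: the old calls that take a QEMUClock pointer (rt_clock, qemu_new_timer, qemu_mod_timer, qemu_get_clock_ns, qemu_del_timer, qemu_free_timer) are replaced by the QEMUClockType-based equivalents (QEMU_CLOCK_REALTIME, timer_new, timer_mod, qemu_clock_get_ns, timer_del, timer_free). A minimal before/after sketch of the pattern, assuming the QEMU tree's qemu/timer.h and using a hypothetical callback (this is not code from the patch itself):

    #include "qemu/timer.h"            /* assumed header path in the QEMU tree */

    /* Hypothetical callback, present only to make the sketch self-contained. */
    static void example_cb(void *opaque)
    {
    }

    /* Old API: the clock is named by a QEMUClock pointer such as rt_clock. */
    static void arm_timer_old(int64_t ns)
    {
        QEMUTimer *ts = qemu_new_timer(rt_clock, SCALE_NS, example_cb, NULL);
        qemu_mod_timer(ts, qemu_get_clock_ns(rt_clock) + ns);
        /* ... later, once the callback has fired ... */
        qemu_del_timer(ts);
        qemu_free_timer(ts);
    }

    /* New API: the clock is named by a QEMUClockType value. */
    static void arm_timer_new(int64_t ns)
    {
        QEMUTimer *ts = timer_new(QEMU_CLOCK_REALTIME, SCALE_NS, example_cb, NULL);
        timer_mod(ts, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
        /* ... later, once the callback has fired ... */
        timer_del(ts);
        timer_free(ts);
    }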
@@ -272,9 +272,9 @@ static void coroutine_fn backup_run(void *opaque)
             uint64_t delay_ns = ratelimit_calculate_delay(
                     &job->limit, job->sectors_read);
             job->sectors_read = 0;
-            block_job_sleep_ns(&job->common, rt_clock, delay_ns);
+            block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
         } else {
-            block_job_sleep_ns(&job->common, rt_clock, 0);
+            block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
         }
 
         if (block_job_is_cancelled(&job->common)) {
@@ -103,7 +103,7 @@ wait:
         /* Note that even when no rate limit is applied we need to yield
          * with no pending I/O here so that bdrv_drain_all() returns.
          */
-        block_job_sleep_ns(&s->common, rt_clock, delay_ns);
+        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
         if (block_job_is_cancelled(&s->common)) {
             break;
         }
@@ -439,13 +439,13 @@ static void coroutine_fn mirror_run(void *opaque)
                 delay_ns = 0;
             }
 
-            block_job_sleep_ns(&s->common, rt_clock, delay_ns);
+            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
             if (block_job_is_cancelled(&s->common)) {
                 break;
             }
         } else if (!should_complete) {
             delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
-            block_job_sleep_ns(&s->common, rt_clock, delay_ns);
+            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
         } else if (cnt == 0) {
             /* The two disks are in sync. Exit and report successful
              * completion.
@@ -114,7 +114,7 @@ wait:
         /* Note that even when no rate limit is applied we need to yield
          * with no pending I/O here so that bdrv_drain_all() returns.
          */
-        block_job_sleep_ns(&s->common, rt_clock, delay_ns);
+        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
         if (block_job_is_cancelled(&s->common)) {
             break;
         }
@@ -187,7 +187,7 @@ int block_job_cancel_sync(BlockJob *job)
     return (data.cancelled && data.ret == 0) ? -ECANCELED : data.ret;
 }
 
-void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns)
+void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
 {
     assert(job->busy);
 
@@ -200,7 +200,7 @@ void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns)
     if (block_job_is_paused(job)) {
         qemu_coroutine_yield();
     } else {
-        co_sleep_ns(clock, ns);
+        co_sleep_ns(type, ns);
     }
     job->busy = true;
 }
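With the signature change above in place, every block job driver sleeps by naming the clock type directly, as in the caller-side hunks earlier in this patch. A condensed sketch of the rate-limited loop those callers share (simplified from the hunks above, not verbatim driver code):

    for (;;) {
        uint64_t delay_ns = 0;

        if (job->common.speed) {
            /* Ask the rate limiter how long to back off for. */
            delay_ns = ratelimit_calculate_delay(&job->limit, job->sectors_read);
            job->sectors_read = 0;
        }

        /* Sleep on the realtime clock; cancelling the job ends the wait early. */
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);

        if (block_job_is_cancelled(&job->common)) {
            break;
        }

        /* ... perform one chunk of copy work ... */
    }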
@@ -141,7 +141,7 @@ void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs,
  * Put the job to sleep (assuming that it wasn't canceled) for @ns
  * nanoseconds. Canceling the job will interrupt the wait immediately.
  */
-void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns);
+void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns);
 
 /**
  * block_job_completed:
@@ -213,7 +213,7 @@ void qemu_co_rwlock_unlock(CoRwlock *lock);
  * Note this function uses timers and hence only works when a main loop is in
  * use. See main-loop.h and do not use from qemu-tool programs.
  */
-void coroutine_fn co_sleep_ns(QEMUClock *clock, int64_t ns);
+void coroutine_fn co_sleep_ns(QEMUClockType type, int64_t ns);
 
 /**
  * Yield until a file descriptor becomes readable
@@ -26,14 +26,14 @@ static void co_sleep_cb(void *opaque)
     qemu_coroutine_enter(sleep_cb->co, NULL);
 }
 
-void coroutine_fn co_sleep_ns(QEMUClock *clock, int64_t ns)
+void coroutine_fn co_sleep_ns(QEMUClockType type, int64_t ns)
 {
     CoSleepCB sleep_cb = {
         .co = qemu_coroutine_self(),
     };
-    sleep_cb.ts = qemu_new_timer(clock, SCALE_NS, co_sleep_cb, &sleep_cb);
-    qemu_mod_timer(sleep_cb.ts, qemu_get_clock_ns(clock) + ns);
+    sleep_cb.ts = timer_new(type, SCALE_NS, co_sleep_cb, &sleep_cb);
+    timer_mod(sleep_cb.ts, qemu_clock_get_ns(type) + ns);
     qemu_coroutine_yield();
-    qemu_del_timer(sleep_cb.ts);
-    qemu_free_timer(sleep_cb.ts);
+    timer_del(sleep_cb.ts);
+    timer_free(sleep_cb.ts);
 }
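For completeness, a caller of the converted co_sleep_ns simply passes the clock type. A minimal sketch of a hypothetical coroutine using it (assuming block/coroutine.h and qemu/timer.h from this tree, and that a main loop is running, as the header comment above requires):

    #include "block/coroutine.h"   /* declares co_sleep_ns() and coroutine_fn */
    #include "qemu/timer.h"        /* declares QEMU_CLOCK_REALTIME */

    /* Hypothetical coroutine that pauses itself for 100 ms of real time. */
    static void coroutine_fn example_pause(void *opaque)
    {
        /* Valid only in coroutine context while the main loop is running. */
        co_sleep_ns(QEMU_CLOCK_REALTIME, 100 * 1000 * 1000);
    }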