qemu-e2k/include/qemu/ratelimit.h
Paolo Bonzini 4951967d84 ratelimit: protect with a mutex
Right now, rate limiting is protected by the AioContext mutex, which is
taken, for example, both by the block jobs and by qmp_block_job_set_speed
(via find_block_job).

We would like to remove the block layer code's dependency on the
AioContext mutex, since most drivers and the core I/O code already do
not rely on it.  However, there is no existing lock that can easily
be taken by both ratelimit_set_speed and ratelimit_calculate_delay,
especially because the latter might run in coroutine context (and
therefore under a CoMutex) while the former will not.

Since concurrent calls to ratelimit_calculate_delay are not possible,
one idea would be to use a seqlock to get a snapshot of slice_ns and
slice_quota.  But for now, keep it simple and just add a mutex to the
RateLimit struct; block jobs are generally not performance-critical to
the point of optimizing the clock cycles spent on synchronization.

This also requires the introduction of init/destroy functions, so
add them to the two users of ratelimit.h.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2021-05-04 14:15:35 +02:00
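
The commit notes that init/destroy calls had to be added to the two
users of ratelimit.h.  Below is a minimal sketch of how a caller is
expected to drive the updated API; the ExampleJob type, the speed and
slice values, and the sleep step are illustrative assumptions, not
taken from the actual users touched by the patch.

#include "qemu/ratelimit.h"

/* Hypothetical caller; names and numbers are illustrative only. */
typedef struct {
    RateLimit limit;
} ExampleJob;

static void example_job_run(ExampleJob *job)
{
    int64_t delay_ns;

    ratelimit_init(&job->limit);            /* new with this patch */
    ratelimit_set_speed(&job->limit, 1048576 /* bytes/s */,
                        100 * 1000 * 1000 /* 100 ms slices */);

    /* Account for 64 KiB of I/O; delay if the slice quota is used up. */
    delay_ns = ratelimit_calculate_delay(&job->limit, 64 * 1024);
    if (delay_ns > 0) {
        /* e.g. sleep for delay_ns before issuing the next request */
    }

    ratelimit_destroy(&job->limit);         /* new with this patch */
}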

/*
 * Ratelimiting calculations
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *   Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#ifndef QEMU_RATELIMIT_H
#define QEMU_RATELIMIT_H

#include "qemu/lockable.h"
#include "qemu/timer.h"

typedef struct {
    QemuMutex lock;
    int64_t slice_start_time;
    int64_t slice_end_time;
    uint64_t slice_quota;
    uint64_t slice_ns;
    uint64_t dispatched;
} RateLimit;

/** Calculate and return delay for next request in ns
 *
 * Record that we sent @n data units (where @n matches the scale chosen
 * during ratelimit_set_speed). If we may send more data units
 * in the current time slice, return 0 (i.e. no delay). Otherwise
 * return the amount of time (in ns) until the start of the next time
 * slice that will permit sending the next chunk of data.
 *
 * Recording sent data units even after exceeding the quota is
 * permitted; the time slice will be extended accordingly.
 */
static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    double delay_slices;

    QEMU_LOCK_GUARD(&limit->lock);
    assert(limit->slice_quota && limit->slice_ns);

    if (limit->slice_end_time < now) {
        /* Previous, possibly extended, time slice finished; reset the
         * accounting. */
        limit->slice_start_time = now;
        limit->slice_end_time = now + limit->slice_ns;
        limit->dispatched = 0;
    }

    limit->dispatched += n;
    if (limit->dispatched < limit->slice_quota) {
        /* We may send further data within the current time slice, no
         * need to delay the next request. */
        return 0;
    }

    /* Quota exceeded. Wait based on the excess amount and then start a new
     * slice. */
    delay_slices = (double)limit->dispatched / limit->slice_quota;
    limit->slice_end_time = limit->slice_start_time +
                            (uint64_t)(delay_slices * limit->slice_ns);
    return limit->slice_end_time - now;
}
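
/*
 * Worked example (numbers are illustrative, not from the original
 * header): with slice_ns = 100 ms and slice_quota = 10 units, a call
 * that brings dispatched to 25 yields delay_slices = 25 / 10 = 2.5,
 * so slice_end_time moves to slice_start_time + 250 ms and the caller
 * is asked to wait until then before sending more data.
 */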

static inline void ratelimit_init(RateLimit *limit)
{
    qemu_mutex_init(&limit->lock);
}

static inline void ratelimit_destroy(RateLimit *limit)
{
    qemu_mutex_destroy(&limit->lock);
}
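
/** Set the rate limit to @speed data units per second
 *
 * The quota for each slice is @speed scaled by @slice_ns, clamped to a
 * minimum of 1 so that progress is possible even at very low speeds.
 */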
static inline void ratelimit_set_speed(RateLimit *limit, uint64_t speed,
                                       uint64_t slice_ns)
{
    QEMU_LOCK_GUARD(&limit->lock);

    limit->slice_ns = slice_ns;
    limit->slice_quota = MAX(((double)speed * slice_ns) / 1000000000ULL, 1);
}

#endif