iothread: add aio-max-batch parameter
The `aio-max-batch` parameter is propagated to the AIO engines and controls the maximum number of queued requests. When the number of queued requests reaches `aio-max-batch`, the engine invokes the system call to forward the requests to the kernel. Limiting the batch size bounds the latency that requests can accumulate while sitting in the AIO engine's queue. If `aio-max-batch` is 0 (the default), the AIO engine uses its own default maximum batch size. Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> Message-id: 20210721094211.69853-3-sgarzare@redhat.com Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
parent
0445409d74
commit
1793ad0247
@ -232,6 +232,9 @@ struct AioContext {
|
||||
int64_t poll_grow; /* polling time growth factor */
|
||||
int64_t poll_shrink; /* polling time shrink factor */
|
||||
|
||||
/* AIO engine parameters */
|
||||
int64_t aio_max_batch; /* maximum number of requests in a batch */
|
||||
|
||||
/*
|
||||
* List of handlers participating in userspace polling. Protected by
|
||||
* ctx->list_lock. Iterated and modified mostly by the event loop thread
|
||||
@ -755,4 +758,13 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
|
||||
int64_t grow, int64_t shrink,
|
||||
Error **errp);
|
||||
|
||||
/**
|
||||
* aio_context_set_aio_params:
|
||||
* @ctx: the aio context
|
||||
* @max_batch: maximum number of requests in a batch, 0 means that the
|
||||
* engine will use its default
|
||||
*/
|
||||
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
|
||||
Error **errp);
|
||||
|
||||
#endif
|
||||
|
@ -37,6 +37,9 @@ struct IOThread {
|
||||
int64_t poll_max_ns;
|
||||
int64_t poll_grow;
|
||||
int64_t poll_shrink;
|
||||
|
||||
/* AioContext AIO engine parameters */
|
||||
int64_t aio_max_batch;
|
||||
};
|
||||
typedef struct IOThread IOThread;
|
||||
|
||||
|
55
iothread.c
55
iothread.c
@ -152,6 +152,24 @@ static void iothread_init_gcontext(IOThread *iothread)
|
||||
iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
|
||||
}
|
||||
|
||||
/*
 * Apply the IOThread's event-loop tuning parameters to its AioContext.
 *
 * The polling parameters (poll-max-ns, poll-grow, poll-shrink) are applied
 * first; on failure we return immediately and leave the AIO engine
 * parameters untouched.  Otherwise aio-max-batch is propagated to the AIO
 * engine via aio_context_set_aio_params().
 *
 * @iothread: IOThread whose ctx receives the parameters
 * @errp:     error out-parameter; ERRP_GUARD() guarantees *errp is
 *            inspectable locally even when the caller passed NULL
 */
static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
{
    ERRP_GUARD();

    aio_context_set_poll_params(iothread->ctx,
                                iothread->poll_max_ns,
                                iothread->poll_grow,
                                iothread->poll_shrink,
                                errp);
    if (*errp) {
        return;
    }

    aio_context_set_aio_params(iothread->ctx,
                               iothread->aio_max_batch,
                               errp);
}
|
||||
|
||||
static void iothread_complete(UserCreatable *obj, Error **errp)
|
||||
{
|
||||
Error *local_error = NULL;
|
||||
@ -171,11 +189,7 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
|
||||
*/
|
||||
iothread_init_gcontext(iothread);
|
||||
|
||||
aio_context_set_poll_params(iothread->ctx,
|
||||
iothread->poll_max_ns,
|
||||
iothread->poll_grow,
|
||||
iothread->poll_shrink,
|
||||
&local_error);
|
||||
iothread_set_aio_context_params(iothread, &local_error);
|
||||
if (local_error) {
|
||||
error_propagate(errp, local_error);
|
||||
aio_context_unref(iothread->ctx);
|
||||
@ -212,6 +226,9 @@ static PollParamInfo poll_grow_info = {
|
||||
static PollParamInfo poll_shrink_info = {
|
||||
"poll-shrink", offsetof(IOThread, poll_shrink),
|
||||
};
|
||||
static PollParamInfo aio_max_batch_info = {
|
||||
"aio-max-batch", offsetof(IOThread, aio_max_batch),
|
||||
};
|
||||
|
||||
static void iothread_get_param(Object *obj, Visitor *v,
|
||||
const char *name, void *opaque, Error **errp)
|
||||
@ -271,6 +288,29 @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * QOM property getter for AIO engine parameters (e.g. "aio-max-batch").
 * @opaque describes the int64_t IOThread field to visit; the actual work
 * is delegated to the shared iothread_get_param() helper.
 */
static void iothread_get_aio_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{

    iothread_get_param(obj, v, name, opaque, errp);
}
|
||||
|
||||
/*
 * QOM property setter for AIO engine parameters (e.g. "aio-max-batch").
 *
 * Stores the new value into the IOThread field described by @opaque and,
 * if the AioContext has already been created (iothread->ctx non-NULL,
 * i.e. the property is changed at run time rather than at construction),
 * applies it to the AIO engine immediately.
 */
static void iothread_set_aio_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);

    if (!iothread_set_param(obj, v, name, opaque, errp)) {
        return;
    }

    if (iothread->ctx) {
        aio_context_set_aio_params(iothread->ctx,
                                   iothread->aio_max_batch,
                                   errp);
    }
}
|
||||
|
||||
static void iothread_class_init(ObjectClass *klass, void *class_data)
|
||||
{
|
||||
UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
|
||||
@ -288,6 +328,10 @@ static void iothread_class_init(ObjectClass *klass, void *class_data)
|
||||
iothread_get_poll_param,
|
||||
iothread_set_poll_param,
|
||||
NULL, &poll_shrink_info);
|
||||
object_class_property_add(klass, "aio-max-batch", "int",
|
||||
iothread_get_aio_param,
|
||||
iothread_set_aio_param,
|
||||
NULL, &aio_max_batch_info);
|
||||
}
|
||||
|
||||
static const TypeInfo iothread_info = {
|
||||
@ -337,6 +381,7 @@ static int query_one_iothread(Object *object, void *opaque)
|
||||
info->poll_max_ns = iothread->poll_max_ns;
|
||||
info->poll_grow = iothread->poll_grow;
|
||||
info->poll_shrink = iothread->poll_shrink;
|
||||
info->aio_max_batch = iothread->aio_max_batch;
|
||||
|
||||
QAPI_LIST_APPEND(*tail, info);
|
||||
return 0;
|
||||
|
@ -1893,6 +1893,8 @@ void hmp_info_iothreads(Monitor *mon, const QDict *qdict)
|
||||
monitor_printf(mon, " poll-max-ns=%" PRId64 "\n", value->poll_max_ns);
|
||||
monitor_printf(mon, " poll-grow=%" PRId64 "\n", value->poll_grow);
|
||||
monitor_printf(mon, " poll-shrink=%" PRId64 "\n", value->poll_shrink);
|
||||
monitor_printf(mon, " aio-max-batch=%" PRId64 "\n",
|
||||
value->aio_max_batch);
|
||||
}
|
||||
|
||||
qapi_free_IOThreadInfoList(info_list);
|
||||
|
@ -86,6 +86,9 @@
|
||||
# @poll-shrink: how many ns will be removed from polling time, 0 means that
|
||||
# it's not configured (since 2.9)
|
||||
#
|
||||
# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
|
||||
# 0 means that the engine will use its default (since 6.1)
|
||||
#
|
||||
# Since: 2.0
|
||||
##
|
||||
{ 'struct': 'IOThreadInfo',
|
||||
@ -93,7 +96,8 @@
|
||||
'thread-id': 'int',
|
||||
'poll-max-ns': 'int',
|
||||
'poll-grow': 'int',
|
||||
'poll-shrink': 'int' } }
|
||||
'poll-shrink': 'int',
|
||||
'aio-max-batch': 'int' } }
|
||||
|
||||
##
|
||||
# @query-iothreads:
|
||||
|
@ -516,12 +516,17 @@
|
||||
# algorithm detects it is spending too long polling without
|
||||
# encountering events. 0 selects a default behaviour (default: 0)
|
||||
#
|
||||
# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
|
||||
# 0 means that the engine will use its default
|
||||
# (default:0, since 6.1)
|
||||
#
|
||||
# Since: 2.0
|
||||
##
|
||||
{ 'struct': 'IothreadProperties',
|
||||
'data': { '*poll-max-ns': 'int',
|
||||
'*poll-grow': 'int',
|
||||
'*poll-shrink': 'int' } }
|
||||
'*poll-shrink': 'int',
|
||||
'*aio-max-batch': 'int' } }
|
||||
|
||||
##
|
||||
# @MemoryBackendProperties:
|
||||
|
@ -5301,7 +5301,7 @@ SRST
|
||||
|
||||
CN=laptop.example.com,O=Example Home,L=London,ST=London,C=GB
|
||||
|
||||
``-object iothread,id=id,poll-max-ns=poll-max-ns,poll-grow=poll-grow,poll-shrink=poll-shrink``
|
||||
``-object iothread,id=id,poll-max-ns=poll-max-ns,poll-grow=poll-grow,poll-shrink=poll-shrink,aio-max-batch=aio-max-batch``
|
||||
Creates a dedicated event loop thread that devices can be
|
||||
assigned to. This is known as an IOThread. By default device
|
||||
emulation happens in vCPU threads or the main event loop thread.
|
||||
@ -5337,7 +5337,11 @@ SRST
|
||||
the polling time when the algorithm detects it is spending too
|
||||
long polling without encountering events.
|
||||
|
||||
The polling parameters can be modified at run-time using the
|
||||
The ``aio-max-batch`` parameter is the maximum number of requests
|
||||
in a batch for the AIO engine, 0 means that the engine will use
|
||||
its default.
|
||||
|
||||
The IOThread parameters can be modified at run-time using the
|
||||
``qom-set`` command (where ``iothread1`` is the IOThread's
|
||||
``id``):
|
||||
|
||||
|
@ -716,3 +716,15 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
|
||||
|
||||
aio_notify(ctx);
|
||||
}
|
||||
|
||||
/*
 * Set the AIO engine parameters for @ctx (POSIX implementation).
 * @max_batch: maximum number of requests in a batch; 0 lets the engine
 *             pick its own default.  aio_notify() kicks the event loop so
 *             the new value is observed promptly.
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp)
{
    /*
     * No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->aio_max_batch = max_batch;

    aio_notify(ctx);
}
|
||||
|
@ -440,3 +440,8 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
|
||||
error_setg(errp, "AioContext polling is not implemented on Windows");
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Win32 stub: the Windows event loop has no AIO engine request batching,
 * so @max_batch is silently ignored (no error is set, unlike the win32
 * aio_context_set_poll_params() path above).
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp)
{
}
|
||||
|
@ -554,6 +554,8 @@ AioContext *aio_context_new(Error **errp)
|
||||
ctx->poll_grow = 0;
|
||||
ctx->poll_shrink = 0;
|
||||
|
||||
ctx->aio_max_batch = 0;
|
||||
|
||||
return ctx;
|
||||
fail:
|
||||
g_source_destroy(&ctx->source);
|
||||
|
Loading…
Reference in New Issue
Block a user