Merge remote-tracking branch 'remotes/stefanha-gitlab/tags/block-pull-request' into staging

Pull request

Stefano's performance regression fix for commit 2558cb8dd4 ("linux-aio:
increasing MAX_EVENTS to a larger hardcoded value").

# gpg: Signature made Wed 21 Jul 2021 14:12:47 BST
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha-gitlab/tags/block-pull-request:
  linux-aio: limit the batch size using `aio-max-batch` parameter
  iothread: add aio-max-batch parameter
  iothread: generalize iothread_set_param/iothread_get_param

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2021-07-21 14:13:32 +01:00
commit 29c7daa007
11 changed files with 134 additions and 14 deletions
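
For orientation before the per-file diffs: the series adds an ``aio-max-batch`` property to IOThread
objects. A usage sketch (the iothread id and the value 16 are illustrative, not taken from this commit):

    -object iothread,id=iothread1,aio-max-batch=16

Leaving the property at 0 (the default) keeps the AIO engine's built-in limit, which for Linux AIO is
the DEFAULT_MAX_BATCH of 32 introduced below.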

block/linux-aio.c

@@ -28,6 +28,9 @@
*/
#define MAX_EVENTS 1024
/* Maximum number of requests in a batch. (default value) */
#define DEFAULT_MAX_BATCH 32
struct qemu_laiocb {
Coroutine *co;
LinuxAioState *ctx;
@@ -351,6 +354,10 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
LinuxAioState *s = laiocb->ctx;
struct iocb *iocbs = &laiocb->iocb;
QEMUIOVector *qiov = laiocb->qiov;
int64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;
/* limit the batch with the number of available events */
max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);
switch (type) {
case QEMU_AIO_WRITE:
@@ -371,7 +378,7 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
s->io_q.in_queue++;
if (!s->io_q.blocked &&
(!s->io_q.plugged ||
-     s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
+     s->io_q.in_queue >= max_batch)) {
ioq_submit(s);
}
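
A worked example of the new limit (numbers chosen for illustration): with aio_max_batch left at 0,
DEFAULT_MAX_BATCH (32) applies; if 1000 of the 1024 MAX_EVENTS slots are already in flight, then

    max_batch = MIN_NON_ZERO(1024 - 1000, 32) = 24

so the batch is capped by the free event slots rather than by the configured batch size, and
ioq_submit() now runs once io_q.in_queue reaches that threshold instead of waiting for MAX_EVENTS
to fill up.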

include/block/aio.h

@@ -232,6 +232,9 @@ struct AioContext {
int64_t poll_grow; /* polling time growth factor */
int64_t poll_shrink; /* polling time shrink factor */
/* AIO engine parameters */
int64_t aio_max_batch; /* maximum number of requests in a batch */
/*
* List of handlers participating in userspace polling. Protected by
* ctx->list_lock. Iterated and modified mostly by the event loop thread
@@ -755,4 +758,13 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
int64_t grow, int64_t shrink,
Error **errp);
/**
* aio_context_set_aio_params:
* @ctx: the aio context
* @max_batch: maximum number of requests in a batch, 0 means that the
* engine will use its default
*/
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
Error **errp);
#endif
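
A minimal illustrative call of the new helper (the ``ctx`` variable and the use of ``error_abort`` are
assumptions for this sketch, not part of the patch):

    aio_context_set_aio_params(ctx, 16, &error_abort);  /* cap batches at 16 requests */
    aio_context_set_aio_params(ctx, 0, &error_abort);   /* 0 restores the engine default */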

include/sysemu/iothread.h

@@ -37,6 +37,9 @@ struct IOThread {
int64_t poll_max_ns;
int64_t poll_grow;
int64_t poll_shrink;
/* AioContext AIO engine parameters */
int64_t aio_max_batch;
};
typedef struct IOThread IOThread;

iothread.c

@@ -152,6 +152,24 @@ static void iothread_init_gcontext(IOThread *iothread)
iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
}
static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
{
ERRP_GUARD();
aio_context_set_poll_params(iothread->ctx,
iothread->poll_max_ns,
iothread->poll_grow,
iothread->poll_shrink,
errp);
if (*errp) {
return;
}
aio_context_set_aio_params(iothread->ctx,
iothread->aio_max_batch,
errp);
}
static void iothread_complete(UserCreatable *obj, Error **errp)
{
Error *local_error = NULL;
@@ -171,11 +189,7 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
*/
iothread_init_gcontext(iothread);
- aio_context_set_poll_params(iothread->ctx,
-                             iothread->poll_max_ns,
-                             iothread->poll_grow,
-                             iothread->poll_shrink,
-                             &local_error);
+ iothread_set_aio_context_params(iothread, &local_error);
if (local_error) {
error_propagate(errp, local_error);
aio_context_unref(iothread->ctx);
@@ -212,8 +226,11 @@ static PollParamInfo poll_grow_info = {
static PollParamInfo poll_shrink_info = {
"poll-shrink", offsetof(IOThread, poll_shrink),
};
static PollParamInfo aio_max_batch_info = {
"aio-max-batch", offsetof(IOThread, aio_max_batch),
};
- static void iothread_get_poll_param(Object *obj, Visitor *v,
+ static void iothread_get_param(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp)
{
IOThread *iothread = IOTHREAD(obj);
@@ -223,7 +240,7 @@ static void iothread_get_poll_param(Object *obj, Visitor *v,
visit_type_int64(v, name, field, errp);
}
- static void iothread_set_poll_param(Object *obj, Visitor *v,
+ static bool iothread_set_param(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp)
{
IOThread *iothread = IOTHREAD(obj);
@@ -232,17 +249,36 @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
int64_t value;
if (!visit_type_int64(v, name, &value, errp)) {
- return;
+ return false;
}
if (value < 0) {
error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
info->name, INT64_MAX);
- return;
+ return false;
}
}
*field = value;
return true;
}
static void iothread_get_poll_param(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp)
{
iothread_get_param(obj, v, name, opaque, errp);
}
static void iothread_set_poll_param(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp)
{
IOThread *iothread = IOTHREAD(obj);
if (!iothread_set_param(obj, v, name, opaque, errp)) {
return;
}
if (iothread->ctx) {
aio_context_set_poll_params(iothread->ctx,
iothread->poll_max_ns,
@@ -252,6 +288,29 @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
}
}
static void iothread_get_aio_param(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp)
{
iothread_get_param(obj, v, name, opaque, errp);
}
static void iothread_set_aio_param(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp)
{
IOThread *iothread = IOTHREAD(obj);
if (!iothread_set_param(obj, v, name, opaque, errp)) {
return;
}
if (iothread->ctx) {
aio_context_set_aio_params(iothread->ctx,
iothread->aio_max_batch,
errp);
}
}
static void iothread_class_init(ObjectClass *klass, void *class_data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
@@ -269,6 +328,10 @@ static void iothread_class_init(ObjectClass *klass, void *class_data)
iothread_get_poll_param,
iothread_set_poll_param,
NULL, &poll_shrink_info);
object_class_property_add(klass, "aio-max-batch", "int",
iothread_get_aio_param,
iothread_set_aio_param,
NULL, &aio_max_batch_info);
}
static const TypeInfo iothread_info = {
@@ -318,6 +381,7 @@ static int query_one_iothread(Object *object, void *opaque)
info->poll_max_ns = iothread->poll_max_ns;
info->poll_grow = iothread->poll_grow;
info->poll_shrink = iothread->poll_shrink;
info->aio_max_batch = iothread->aio_max_batch;
QAPI_LIST_APPEND(*tail, info);
return 0;

monitor/hmp-cmds.c

@@ -1893,6 +1893,8 @@ void hmp_info_iothreads(Monitor *mon, const QDict *qdict)
monitor_printf(mon, " poll-max-ns=%" PRId64 "\n", value->poll_max_ns);
monitor_printf(mon, " poll-grow=%" PRId64 "\n", value->poll_grow);
monitor_printf(mon, " poll-shrink=%" PRId64 "\n", value->poll_shrink);
monitor_printf(mon, " aio-max-batch=%" PRId64 "\n",
value->aio_max_batch);
}
qapi_free_IOThreadInfoList(info_list);

qapi/misc.json

@@ -86,6 +86,9 @@
# @poll-shrink: how many ns will be removed from polling time, 0 means that
# it's not configured (since 2.9)
#
# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
# 0 means that the engine will use its default (since 6.1)
#
# Since: 2.0
##
{ 'struct': 'IOThreadInfo',
@@ -93,7 +96,8 @@
'thread-id': 'int',
'poll-max-ns': 'int',
'poll-grow': 'int',
- 'poll-shrink': 'int' } }
+ 'poll-shrink': 'int',
+ 'aio-max-batch': 'int' } }
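
A hedged illustration of how the new member surfaces over QMP (the thread-id and polling values are
made up):

    -> { "execute": "query-iothreads" }
    <- { "return": [ { "id": "iothread1", "thread-id": 3134,
                       "poll-max-ns": 32768, "poll-grow": 0,
                       "poll-shrink": 0, "aio-max-batch": 0 } ] }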
##
# @query-iothreads:

qapi/qom.json

@@ -516,12 +516,17 @@
# algorithm detects it is spending too long polling without
# encountering events. 0 selects a default behaviour (default: 0)
#
# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
# 0 means that the engine will use its default
# (default:0, since 6.1)
#
# Since: 2.0
##
{ 'struct': 'IothreadProperties',
'data': { '*poll-max-ns': 'int',
'*poll-grow': 'int',
- '*poll-shrink': 'int' } }
+ '*poll-shrink': 'int',
+ '*aio-max-batch': 'int' } }
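
Since IothreadProperties also backs object-add, a hedged QMP sketch of setting the new option at
creation time (the id and value are invented for the example):

    -> { "execute": "object-add",
         "arguments": { "qom-type": "iothread", "id": "iothread1",
                        "aio-max-batch": 16 } }
    <- { "return": {} }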
##
# @MemoryBackendProperties:

qemu-options.hx

@@ -5301,7 +5301,7 @@ SRST
CN=laptop.example.com,O=Example Home,L=London,ST=London,C=GB
- ``-object iothread,id=id,poll-max-ns=poll-max-ns,poll-grow=poll-grow,poll-shrink=poll-shrink``
+ ``-object iothread,id=id,poll-max-ns=poll-max-ns,poll-grow=poll-grow,poll-shrink=poll-shrink,aio-max-batch=aio-max-batch``
Creates a dedicated event loop thread that devices can be
assigned to. This is known as an IOThread. By default device
emulation happens in vCPU threads or the main event loop thread.
@@ -5337,7 +5337,11 @@ SRST
the polling time when the algorithm detects it is spending too
long polling without encountering events.
- The polling parameters can be modified at run-time using the
+ The ``aio-max-batch`` parameter is the maximum number of requests
+ in a batch for the AIO engine, 0 means that the engine will use
+ its default.
+ The IOThread parameters can be modified at run-time using the
``qom-set`` command (where ``iothread1`` is the IOThread's
``id``):
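
The qom-set examples that follow this line in the manual are cut off by the hunk; as an illustration
of the run-time path for the new parameter (id and value are made up):

    (qemu) qom-set /objects/iothread1 aio-max-batch 16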

util/aio-posix.c

@@ -716,3 +716,15 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
aio_notify(ctx);
}
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
Error **errp)
{
/*
* No thread synchronization here, it doesn't matter if an incorrect value
* is used once.
*/
ctx->aio_max_batch = max_batch;
aio_notify(ctx);
}

util/aio-win32.c

@@ -440,3 +440,8 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
error_setg(errp, "AioContext polling is not implemented on Windows");
}
}
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
Error **errp)
{
}

util/async.c

@@ -554,6 +554,8 @@ AioContext *aio_context_new(Error **errp)
ctx->poll_grow = 0;
ctx->poll_shrink = 0;
ctx->aio_max_batch = 0;
return ctx;
fail:
g_source_destroy(&ctx->source);