file-posix: add `aio-max-batch` option
Commit d7ddd0a161 ("linux-aio: limit the batch size using
`aio-max-batch` parameter") added a way to limit the batch size
of the Linux AIO backend for the entire AIO context.
The same AIO context can be shared by multiple devices, so
latency-sensitive devices may want to limit the batch size even
more to avoid increasing latency.
For this reason we add the `aio-max-batch` option to the file
backend, which will be used by the next commits to limit the size of
batches including requests generated by this device.
Suggested-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-Id: <20211026162346.253081-2-sgarzare@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
parent 3043320390
commit 684960d462
@@ -150,6 +150,8 @@ typedef struct BDRVRawState {
     uint64_t locked_perm;
     uint64_t locked_shared_perm;
 
+    uint64_t aio_max_batch;
+
     int perm_change_fd;
     int perm_change_flags;
     BDRVReopenState *reopen_state;
@@ -530,6 +532,11 @@ static QemuOptsList raw_runtime_opts = {
             .type = QEMU_OPT_STRING,
             .help = "host AIO implementation (threads, native, io_uring)",
         },
+        {
+            .name = "aio-max-batch",
+            .type = QEMU_OPT_NUMBER,
+            .help = "AIO max batch size (0 = auto handled by AIO backend, default: 0)",
+        },
         {
             .name = "locking",
             .type = QEMU_OPT_STRING,
@@ -609,6 +616,8 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
     s->use_linux_io_uring = (aio == BLOCKDEV_AIO_OPTIONS_IO_URING);
 #endif
 
+    s->aio_max_batch = qemu_opt_get_number(opts, "aio-max-batch", 0);
+
     locking = qapi_enum_parse(&OnOffAuto_lookup,
                               qemu_opt_get(opts, "locking"),
                               ON_OFF_AUTO_AUTO, &local_err);
@@ -2939,6 +2939,12 @@
 #              for this device (default: none, forward the commands via SG_IO;
 #              since 2.11)
 # @aio: AIO backend (default: threads) (since: 2.8)
+# @aio-max-batch: maximum number of requests to batch together into a single
+#                 submission in the AIO backend. The smallest value between
+#                 this and the aio-max-batch value of the IOThread object is
+#                 chosen.
+#                 0 means that the AIO backend will handle it automatically.
+#                 (default: 0, since 6.2)
 # @locking: whether to enable file locking. If set to 'auto', only enable
 #           when Open File Descriptor (OFD) locking API is available
 #           (default: auto, since 2.10)
@@ -2968,6 +2974,7 @@
             '*pr-manager': 'str',
             '*locking': 'OnOffAuto',
             '*aio': 'BlockdevAioOptions',
+            '*aio-max-batch': 'int',
             '*drop-cache': {'type': 'bool',
                             'if': 'CONFIG_LINUX'},
             '*x-check-cache-dropped': { 'type': 'bool',
Loading…
Reference in New Issue