Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging

Pull request

Bug fixes for 7.0.

# gpg: Signature made Thu 17 Mar 2022 16:42:22 GMT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* tag 'block-pull-request' of https://gitlab.com/stefanha/qemu:
  aio-posix: fix spurious ->poll_ready() callbacks in main loop
  aio-posix: fix build failure io_uring 2.2

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 2028ab513b
Peter Maydell <peter.maydell@linaro.org>, 2022-03-21 15:27:13 +00:00

3 changed files with 23 additions and 14 deletions

util/aio-posix.c

@@ -23,15 +23,6 @@
 #include "trace.h"
 #include "aio-posix.h"
 
-/*
- * G_IO_IN and G_IO_OUT are not appropriate revents values for polling, since
- * the handler may not need to access the file descriptor. For example, the
- * handler doesn't need to read from an EventNotifier if it polled a memory
- * location and a read syscall would be slow. Define our own unique revents
- * value to indicate that polling determined this AioHandler is ready.
- */
-#define REVENTS_POLL_READY 0
-
 /* Stop userspace polling on a handler if it isn't active for some time */
 #define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND)
 
@@ -49,6 +40,14 @@ void aio_add_ready_handler(AioHandlerList *ready_list,
     QLIST_INSERT_HEAD(ready_list, node, node_ready);
 }
 
+static void aio_add_poll_ready_handler(AioHandlerList *ready_list,
+                                        AioHandler *node)
+{
+    QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
+    node->poll_ready = true;
+    QLIST_INSERT_HEAD(ready_list, node, node_ready);
+}
+
 static AioHandler *find_aio_handler(AioContext *ctx, int fd)
 {
     AioHandler *node;
@@ -76,6 +75,7 @@ static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
     }
 
     node->pfd.revents = 0;
+    node->poll_ready = false;
 
     /* If the fd monitor has already marked it deleted, leave it alone */
     if (QLIST_IS_INSERTED(node, node_deleted)) {
@@ -247,7 +247,7 @@ static bool poll_set_started(AioContext *ctx, AioHandlerList *ready_list,
 
         /* Poll one last time in case ->io_poll_end() raced with the event */
         if (!started && node->io_poll(node->opaque)) {
-            aio_add_ready_handler(ready_list, node, REVENTS_POLL_READY);
+            aio_add_poll_ready_handler(ready_list, node);
             progress = true;
         }
     }
@@ -282,6 +282,7 @@ bool aio_pending(AioContext *ctx)
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         int revents;
 
+        /* TODO should this check poll ready? */
         revents = node->pfd.revents & node->pfd.events;
         if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
             aio_node_check(ctx, node->is_external)) {
@@ -323,11 +324,15 @@ static void aio_free_deleted_handlers(AioContext *ctx)
 static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
 {
     bool progress = false;
+    bool poll_ready;
     int revents;
 
     revents = node->pfd.revents & node->pfd.events;
     node->pfd.revents = 0;
 
+    poll_ready = node->poll_ready;
+    node->poll_ready = false;
+
     /*
      * Start polling AioHandlers when they become ready because activity is
      * likely to continue. Note that starvation is theoretically possible when
@@ -344,7 +349,7 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
         QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
     }
     if (!QLIST_IS_INSERTED(node, node_deleted) &&
-        revents == 0 &&
+        poll_ready && revents == 0 &&
         aio_node_check(ctx, node->is_external) &&
         node->io_poll_ready) {
         node->io_poll_ready(node->opaque);
@@ -432,7 +437,7 @@ static bool run_poll_handlers_once(AioContext *ctx,
     QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
         if (aio_node_check(ctx, node->is_external) &&
             node->io_poll(node->opaque)) {
-            aio_add_ready_handler(ready_list, node, REVENTS_POLL_READY);
+            aio_add_poll_ready_handler(ready_list, node);
 
             node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;
 
@@ -491,8 +496,7 @@ static bool remove_idle_poll_handlers(AioContext *ctx,
              * this causes progress.
              */
             if (node->io_poll(node->opaque)) {
-                aio_add_ready_handler(ready_list, node,
-                                      REVENTS_POLL_READY);
+                aio_add_poll_ready_handler(ready_list, node);
                 progress = true;
             }
         }

util/aio-posix.h

@@ -37,6 +37,7 @@ struct AioHandler {
     unsigned flags; /* see fdmon-io_uring.c */
 #endif
     int64_t poll_idle_timeout; /* when to stop userspace polling */
+    bool poll_ready; /* has polling detected an event? */
     bool is_external;
 };
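
The two hunks above stop overloading revents to signal poll readiness: REVENTS_POLL_READY was defined as 0, so during main-loop dispatch a handler whose fd simply had no activity (revents == 0) was indistinguishable from one that ->io_poll() had reported ready, and its ->io_poll_ready() callback could be invoked spuriously. The new AioHandler.poll_ready flag records readiness explicitly; it is set by aio_add_poll_ready_handler() and consumed in aio_dispatch_handler(). The sketch below is not part of this commit: it only illustrates how a handler using the ->io_poll()/->io_poll_ready() pair might be registered, assuming the 7.0-era aio_set_fd_handler() prototype. MyDev and the my_dev_* names are invented for illustration.

/*
 * Illustrative only -- not from this commit.  Assumes the QEMU 7.0-era
 * aio_set_fd_handler() prototype; MyDev and the my_dev_* names are made up.
 */
#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "block/aio.h"

typedef struct MyDev {
    int fd;             /* e.g. an eventfd, assumed O_NONBLOCK */
    bool work_pending;  /* set by a producer, polled below */
} MyDev;

/* Called in a busy-wait loop while the AioContext polls in userspace. */
static bool my_dev_poll(void *opaque)
{
    MyDev *d = opaque;

    return qatomic_read(&d->work_pending); /* true => handler is ready */
}

/*
 * With this fix, runs only when my_dev_poll() actually returned true, not
 * merely because the fd reported no revents during dispatch.
 */
static void my_dev_poll_ready(void *opaque)
{
    MyDev *d = opaque;

    qatomic_set(&d->work_pending, false);
    /* ... handle the work without a read() syscall on d->fd ... */
}

/* Fallback path when polling is disabled and the fd becomes readable. */
static void my_dev_read(void *opaque)
{
    MyDev *d = opaque;
    uint64_t cnt;

    if (read(d->fd, &cnt, sizeof(cnt)) < 0) {
        /* EAGAIN is fine: the event may already have been consumed */
    }
    qatomic_set(&d->work_pending, false);
    /* ... handle the work ... */
}

static void my_dev_attach(AioContext *ctx, MyDev *d)
{
    aio_set_fd_handler(ctx, d->fd, false /* is_external */,
                       my_dev_read, NULL /* io_write */,
                       my_dev_poll, my_dev_poll_ready, d);
}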

util/fdmon-io_uring.c

@@ -179,7 +179,11 @@ static void add_poll_remove_sqe(AioContext *ctx, AioHandler *node)
 {
     struct io_uring_sqe *sqe = get_sqe(ctx);
 
+#ifdef LIBURING_HAVE_DATA64
+    io_uring_prep_poll_remove(sqe, (__u64)(uintptr_t)node);
+#else
     io_uring_prep_poll_remove(sqe, node);
+#endif
 }
 
 /* Add a timeout that self-cancels when another cqe becomes ready */
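
This hunk adapts to liburing 2.2, where io_uring_prep_poll_remove() takes a 64-bit user_data value instead of a pointer and LIBURING_HAVE_DATA64 is defined to advertise the new prototype. The standalone sketch below is not QEMU code; it shows the same compatibility guard and why the value passed to the remove request has to match the user_data attached to the original poll-add. The poll_add()/poll_remove() helpers and the tag parameter are hypothetical.

#include <liburing.h>
#include <poll.h>
#include <stdint.h>

/* Arm a poll request and tag it so it can be cancelled later. */
static void poll_add(struct io_uring *ring, int fd, void *tag)
{
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring); /* assumes SQ not full */

    io_uring_prep_poll_add(sqe, fd, POLLIN);
    io_uring_sqe_set_data(sqe, tag); /* user_data identifies this request */
}

/* Cancel the poll request previously tagged with 'tag'. */
static void poll_remove(struct io_uring *ring, void *tag)
{
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring); /* assumes SQ not full */

#ifdef LIBURING_HAVE_DATA64
    /* liburing >= 2.2: user_data is a __u64 */
    io_uring_prep_poll_remove(sqe, (__u64)(uintptr_t)tag);
#else
    /* older liburing: user_data is a pointer */
    io_uring_prep_poll_remove(sqe, tag);
#endif
    io_uring_sqe_set_data(sqe, NULL);
}

Casting through uintptr_t, as the patch does, keeps the pointer-to-integer conversion well-defined on platforms where pointers are narrower than 64 bits.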