Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

Pull request

# gpg: Signature made Wed 11 Mar 2020 12:40:36 GMT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  aio-posix: remove idle poll handlers to improve scalability
  aio-posix: support userspace polling of fd monitoring
  aio-posix: add io_uring fd monitoring implementation
  aio-posix: simplify FDMonOps->update() prototype
  aio-posix: extract ppoll(2) and epoll(7) fd monitoring
  aio-posix: move RCU_READ_LOCK() into run_poll_handlers()
  aio-posix: completely stop polling when disabled
  aio-posix: remove confusing QLIST_SAFE_REMOVE()
  qemu/queue.h: clear linked list pointers on remove

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 6e8a73e911
Author: Peter Maydell <peter.maydell@linaro.org>
Date:   2020-03-11 14:41:27 +00:00

11 changed files with 914 additions and 312 deletions

MAINTAINERS

@@ -1885,6 +1885,8 @@ L: qemu-block@nongnu.org
S: Supported
F: util/async.c
F: util/aio-*.c
F: util/aio-*.h
F: util/fdmon-*.c
F: block/io.c
F: migration/block*
F: include/block/aio.h

configure (5 lines changed)

@@ -4093,6 +4093,11 @@ if test "$linux_io_uring" != "no" ; then
linux_io_uring_cflags=$($pkg_config --cflags liburing)
linux_io_uring_libs=$($pkg_config --libs liburing)
linux_io_uring=yes
# io_uring is used in libqemuutil.a where per-file -libs variables are not
# seen by programs linking the archive. It's not ideal, but just add the
# library dependency globally.
LIBS="$linux_io_uring_libs $LIBS"
else
if test "$linux_io_uring" = "yes" ; then
feature_not_found "linux io_uring" "Install liburing devel"

include/block/aio.h

@@ -14,6 +14,9 @@
#ifndef QEMU_AIO_H
#define QEMU_AIO_H
#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
@@ -52,6 +55,56 @@ struct ThreadPool;
struct LinuxAioState;
struct LuringState;
/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);
/* Callbacks for file descriptor monitoring implementations */
typedef struct {
/*
* update:
* @ctx: the AioContext
* @old_node: the existing handler or NULL if this file descriptor is being
* monitored for the first time
* @new_node: the new handler or NULL if this file descriptor is being
* removed
*
* Add/remove/modify a monitored file descriptor.
*
* Called with ctx->list_lock acquired.
*/
void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);
/*
* wait:
* @ctx: the AioContext
* @ready_list: list for handlers that become ready
* @timeout: maximum duration to wait, in nanoseconds
*
* Wait for file descriptors to become ready and place them on ready_list.
*
* Called with ctx->list_lock incremented but not locked.
*
* Returns: number of ready file descriptors.
*/
int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);
/*
* need_wait:
* @ctx: the AioContext
*
* Tell aio_poll() when to stop userspace polling early because ->wait()
* has fds ready.
*
* File descriptor monitoring implementations that cannot poll fd readiness
* from userspace should use aio_poll_disabled() here. This ensures that
* file descriptors are not starved by handlers that frequently make
* progress via userspace polling.
*
* Returns: true if ->wait() should be called, false otherwise.
*/
bool (*need_wait)(AioContext *ctx);
} FDMonOps;
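The FDMonOps table is the seam that lets aio_poll() stay ignorant of which monitoring backend is active. As a rough illustration of the dispatch pattern, here is a self-contained sketch with stand-in types (Ctx, Ops, and null_ops are invented for illustration, not QEMU API):

    /* Toy model of vtable-based fd monitor dispatch (stand-in types) */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct Ctx Ctx;

    typedef struct {
        int (*wait)(Ctx *ctx, int64_t timeout);   /* like FDMonOps->wait() */
        bool (*need_wait)(Ctx *ctx);              /* like FDMonOps->need_wait() */
    } Ops;

    struct Ctx {
        const Ops *ops;   /* like AioContext's fdmon_ops, swappable at runtime */
    };

    static int null_wait(Ctx *ctx, int64_t timeout) { return 0; }
    static bool always_need_wait(Ctx *ctx) { return true; }
    static const Ops null_ops = { null_wait, always_need_wait };

    int main(void)
    {
        Ctx ctx = { .ops = &null_ops };

        /* the event loop never names a backend; it calls through the table */
        if (ctx.ops->need_wait(&ctx)) {
            printf("ready fds: %d\n", ctx.ops->wait(&ctx, 0));
        }
        return 0;
    }

Swapping the ops pointer at runtime is exactly how the series upgrades from ppoll(2) to epoll(7), or selects io_uring at setup time.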
/*
* Each aio_bh_poll() call carves off a slice of the BH list, so that newly
* scheduled BHs are not processed until the next aio_bh_poll() call. All
@@ -65,6 +118,8 @@ struct BHListSlice {
QSIMPLEQ_ENTRY(BHListSlice) next;
};
typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;
struct AioContext {
GSource source;
@@ -150,6 +205,10 @@ struct AioContext {
* locking.
*/
struct LuringState *linux_io_uring;
/* State for file descriptor monitoring using Linux io_uring */
struct io_uring fdmon_io_uring;
AioHandlerSList submit_list;
#endif
/* TimerLists for calling timers - one per clock type. Has its own
@@ -168,13 +227,21 @@ struct AioContext {
int64_t poll_grow; /* polling time growth factor */
int64_t poll_shrink; /* polling time shrink factor */
/*
* List of handlers participating in userspace polling. Protected by
* ctx->list_lock. Iterated and modified mostly by the event loop thread
* from aio_poll() with ctx->list_lock incremented. aio_set_fd_handler()
* only touches the list to delete nodes if ctx->list_lock's count is zero.
*/
AioHandlerList poll_aio_handlers;
/* Are we in polling mode or monitoring file descriptors? */
bool poll_started;
/* epoll(7) state used when built with CONFIG_EPOLL */
int epollfd;
bool epoll_enabled;
bool epoll_available;
const FDMonOps *fdmon_ops;
};
/**

include/qemu/queue.h

@@ -142,6 +142,8 @@ struct { \
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = (elm)->field.le_next; \
(elm)->field.le_next = NULL; \
(elm)->field.le_prev = NULL; \
} while (/*CONSTCOND*/0)
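Clearing the pointers is what makes list membership testable after removal; QLIST_IS_INSERTED() and QLIST_SAFE_REMOVE() depend on it. A minimal self-contained re-implementation of the idea (a simplified stand-in, not the real qemu/queue.h macros):

    /* Model of QLIST_REMOVE clearing its links so membership is testable */
    #include <assert.h>
    #include <stddef.h>

    typedef struct Node {
        struct Node *next;
        struct Node **prev;   /* address of the pointer that points at us */
    } Node;

    static void list_insert_head(Node **head, Node *n)
    {
        n->next = *head;
        if (n->next) {
            n->next->prev = &n->next;
        }
        *head = n;
        n->prev = head;
    }

    static void list_remove(Node *n)
    {
        if (n->next) {
            n->next->prev = n->prev;
        }
        *n->prev = n->next;
        n->next = NULL;
        n->prev = NULL;       /* the new behavior: mark as "not inserted" */
    }

    static int is_inserted(Node *n) { return n->prev != NULL; }

    int main(void)
    {
        Node *head = NULL;
        Node a = {0}, b = {0};

        list_insert_head(&head, &a);
        list_insert_head(&head, &b);
        list_remove(&a);
        assert(!is_inserted(&a));   /* safe to test, safe to skip a 2nd remove */
        assert(is_inserted(&b));
        return 0;
    }

Before this change a removed element kept stale links, so asking "is this node still on a list?" was impossible.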
/*
@@ -225,12 +227,15 @@ struct { \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_HEAD(head, field) do { \
-    (head)->slh_first = (head)->slh_first->field.sle_next;             \
+    typeof((head)->slh_first) elm = (head)->slh_first;                 \
+    (head)->slh_first = elm->field.sle_next;                           \
+    elm->field.sle_next = NULL;                                        \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_AFTER(slistelm, field) do { \
-    (slistelm)->field.sle_next =                                       \
-        QSLIST_NEXT(QSLIST_NEXT((slistelm), field), field);            \
+    typeof(slistelm) next = (slistelm)->field.sle_next;                \
+    (slistelm)->field.sle_next = next->field.sle_next;                 \
+    next->field.sle_next = NULL;                                       \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE(head, elm, type, field) do { \
@@ -241,6 +246,7 @@ struct { \
while (curelm->field.sle_next != (elm)) \
curelm = curelm->field.sle_next; \
curelm->field.sle_next = curelm->field.sle_next->field.sle_next; \
(elm)->field.sle_next = NULL; \
} \
} while (/*CONSTCOND*/0)
@@ -304,8 +310,10 @@ struct { \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_REMOVE_HEAD(head, field) do { \
-    if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL)\
+    typeof((head)->sqh_first) elm = (head)->sqh_first;                 \
+    if (((head)->sqh_first = elm->field.sqe_next) == NULL)             \
         (head)->sqh_last = &(head)->sqh_first;                         \
+    elm->field.sqe_next = NULL;                                        \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_SPLIT_AFTER(head, elm, field, removed) do { \
@@ -329,6 +337,7 @@ struct { \
if ((curelm->field.sqe_next = \
curelm->field.sqe_next->field.sqe_next) == NULL) \
(head)->sqh_last = &(curelm)->field.sqe_next; \
(elm)->field.sqe_next = NULL; \
} \
} while (/*CONSTCOND*/0)
@@ -446,6 +455,8 @@ union { \
(head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \
(elm)->field.tqe_circ.tql_prev->tql_next = (elm)->field.tqe_next; \
(elm)->field.tqe_circ.tql_prev = NULL; \
(elm)->field.tqe_circ.tql_next = NULL; \
(elm)->field.tqe_next = NULL; \
} while (/*CONSTCOND*/0)
/* remove @left, @right and all elements in between from @head */

util/Makefile.objs

@@ -5,6 +5,9 @@ util-obj-y += aiocb.o async.o aio-wait.o thread-pool.o qemu-timer.o
util-obj-y += main-loop.o
util-obj-$(call lnot,$(CONFIG_ATOMIC64)) += atomic64.o
util-obj-$(CONFIG_POSIX) += aio-posix.o
util-obj-$(CONFIG_POSIX) += fdmon-poll.o
util-obj-$(CONFIG_EPOLL_CREATE1) += fdmon-epoll.o
util-obj-$(CONFIG_LINUX_IO_URING) += fdmon-io_uring.o
util-obj-$(CONFIG_POSIX) += compatfd.o
util-obj-$(CONFIG_POSIX) += event_notifier-posix.o
util-obj-$(CONFIG_POSIX) += mmap-alloc.o

util/aio-posix.c

@@ -20,191 +20,25 @@
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
-#ifdef CONFIG_EPOLL_CREATE1
-#include <sys/epoll.h>
-#endif
+#include "aio-posix.h"
 
-struct AioHandler
-{
-    GPollFD pfd;
-    IOHandler *io_read;
-    IOHandler *io_write;
-    AioPollFn *io_poll;
-    IOHandler *io_poll_begin;
-    IOHandler *io_poll_end;
-    void *opaque;
-    bool is_external;
-    QLIST_ENTRY(AioHandler) node;
-    QLIST_ENTRY(AioHandler) node_ready; /* only used during aio_poll() */
-    QLIST_ENTRY(AioHandler) node_deleted;
-};
+/* Stop userspace polling on a handler if it isn't active for some time */
+#define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND)
+
+bool aio_poll_disabled(AioContext *ctx)
+{
+    return atomic_read(&ctx->poll_disable_cnt);
+}
 
 /* Add a handler to a ready list */
-static void add_ready_handler(AioHandlerList *ready_list,
-                              AioHandler *node,
-                              int revents)
+void aio_add_ready_handler(AioHandlerList *ready_list,
+                           AioHandler *node,
+                           int revents)
 {
     QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
     node->pfd.revents = revents;
     QLIST_INSERT_HEAD(ready_list, node, node_ready);
 }
 
-#ifdef CONFIG_EPOLL_CREATE1
-
-/* The fd number threshold to switch to epoll */
-#define EPOLL_ENABLE_THRESHOLD 64
-
-static void aio_epoll_disable(AioContext *ctx)
-{
-    ctx->epoll_enabled = false;
-    if (!ctx->epoll_available) {
-        return;
-    }
-    ctx->epoll_available = false;
-    close(ctx->epollfd);
-}
-
-static inline int epoll_events_from_pfd(int pfd_events)
-{
-    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
-           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
-           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
-           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
-}
-
-static bool aio_epoll_try_enable(AioContext *ctx)
-{
-    AioHandler *node;
-    struct epoll_event event;
-
-    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
-        int r;
-        if (QLIST_IS_INSERTED(node, node_deleted) || !node->pfd.events) {
-            continue;
-        }
-        event.events = epoll_events_from_pfd(node->pfd.events);
-        event.data.ptr = node;
-        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
-        if (r) {
-            return false;
-        }
-    }
-    ctx->epoll_enabled = true;
-    return true;
-}
-
-static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
-{
-    struct epoll_event event;
-    int r;
-    int ctl;
-
-    if (!ctx->epoll_enabled) {
-        return;
-    }
-    if (!node->pfd.events) {
-        ctl = EPOLL_CTL_DEL;
-    } else {
-        event.data.ptr = node;
-        event.events = epoll_events_from_pfd(node->pfd.events);
-        ctl = is_new ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
-    }
-
-    r = epoll_ctl(ctx->epollfd, ctl, node->pfd.fd, &event);
-    if (r) {
-        aio_epoll_disable(ctx);
-    }
-}
-
-static int aio_epoll(AioContext *ctx, AioHandlerList *ready_list,
-                     int64_t timeout)
-{
-    GPollFD pfd = {
-        .fd = ctx->epollfd,
-        .events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR,
-    };
-    AioHandler *node;
-    int i, ret = 0;
-    struct epoll_event events[128];
-
-    if (timeout > 0) {
-        ret = qemu_poll_ns(&pfd, 1, timeout);
-        if (ret > 0) {
-            timeout = 0;
-        }
-    }
-    if (timeout <= 0 || ret > 0) {
-        ret = epoll_wait(ctx->epollfd, events,
-                         ARRAY_SIZE(events),
-                         timeout);
-        if (ret <= 0) {
-            goto out;
-        }
-        for (i = 0; i < ret; i++) {
-            int ev = events[i].events;
-            int revents = (ev & EPOLLIN ? G_IO_IN : 0) |
-                          (ev & EPOLLOUT ? G_IO_OUT : 0) |
-                          (ev & EPOLLHUP ? G_IO_HUP : 0) |
-                          (ev & EPOLLERR ? G_IO_ERR : 0);
-            node = events[i].data.ptr;
-            add_ready_handler(ready_list, node, revents);
-        }
-    }
-out:
-    return ret;
-}
-
-static bool aio_epoll_enabled(AioContext *ctx)
-{
-    /* Fall back to ppoll when external clients are disabled. */
-    return !aio_external_disabled(ctx) && ctx->epoll_enabled;
-}
-
-static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
-                                 unsigned npfd, int64_t timeout)
-{
-    if (!ctx->epoll_available) {
-        return false;
-    }
-    if (aio_epoll_enabled(ctx)) {
-        return true;
-    }
-    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
-        if (aio_epoll_try_enable(ctx)) {
-            return true;
-        } else {
-            aio_epoll_disable(ctx);
-        }
-    }
-    return false;
-}
-
-#else
-
-static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
-{
-}
-
-static int aio_epoll(AioContext *ctx, AioHandlerList *ready_list,
-                     int64_t timeout)
-{
-    assert(false);
-}
-
-static bool aio_epoll_enabled(AioContext *ctx)
-{
-    return false;
-}
-
-static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
-                                 unsigned npfd, int64_t timeout)
-{
-    return false;
-}
-
-#endif
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
AioHandler *node;
@@ -231,16 +65,23 @@ static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
g_source_remove_poll(&ctx->source, &node->pfd);
}
+    node->pfd.revents = 0;
+
+    /* If the fd monitor has already marked it deleted, leave it alone */
+    if (QLIST_IS_INSERTED(node, node_deleted)) {
+        return false;
+    }
+
     /* If a read is in progress, just mark the node as deleted */
     if (qemu_lockcnt_count(&ctx->list_lock)) {
         QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted);
-        node->pfd.revents = 0;
         return false;
     }
 
     /* Otherwise, delete it for real.  We can't just mark it as
      * deleted because deleted nodes are only cleaned up while
      * no one is walking the handlers list.
      */
+    QLIST_SAFE_REMOVE(node, node_poll);
QLIST_REMOVE(node, node);
return true;
}
@@ -300,9 +141,6 @@ void aio_set_fd_handler(AioContext *ctx,
QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node);
}
-    if (node) {
-        deleted = aio_remove_fd_handler(ctx, node);
-    }
/* No need to order poll_disable_cnt writes against other updates;
* the counter is only used to avoid wasting time and latency on
@@ -313,11 +151,9 @@ void aio_set_fd_handler(AioContext *ctx,
atomic_set(&ctx->poll_disable_cnt,
atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);
-    if (new_node) {
-        aio_epoll_update(ctx, new_node, is_new);
-    } else if (node) {
-        /* Unregister deleted fd_handler */
-        aio_epoll_update(ctx, node, false);
+    ctx->fdmon_ops->update(ctx, node, new_node);
+
+    if (node) {
+        deleted = aio_remove_fd_handler(ctx, node);
     }
qemu_lockcnt_unlock(&ctx->list_lock);
aio_notify(ctx);
@@ -361,18 +197,19 @@ void aio_set_event_notifier_poll(AioContext *ctx,
(IOHandler *)io_poll_end);
}
-static void poll_set_started(AioContext *ctx, bool started)
+static bool poll_set_started(AioContext *ctx, bool started)
 {
     AioHandler *node;
+    bool progress = false;
 
     if (started == ctx->poll_started) {
-        return;
+        return false;
     }
 
     ctx->poll_started = started;
 
     qemu_lockcnt_inc(&ctx->list_lock);
-    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
+    QLIST_FOREACH(node, &ctx->poll_aio_handlers, node_poll) {
IOHandler *fn;
if (QLIST_IS_INSERTED(node, node_deleted)) {
@@ -388,8 +225,15 @@ static void poll_set_started(AioContext *ctx, bool started)
if (fn) {
fn(node->opaque);
}
/* Poll one last time in case ->io_poll_end() raced with the event */
if (!started) {
progress = node->io_poll(node->opaque) || progress;
}
}
qemu_lockcnt_dec(&ctx->list_lock);
return progress;
}
@@ -446,6 +290,7 @@ static void aio_free_deleted_handlers(AioContext *ctx)
while ((node = QLIST_FIRST_RCU(&ctx->deleted_aio_handlers))) {
QLIST_REMOVE(node, node);
QLIST_REMOVE(node, node_deleted);
QLIST_SAFE_REMOVE(node, node_poll);
g_free(node);
}
@@ -460,6 +305,22 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
revents = node->pfd.revents & node->pfd.events;
node->pfd.revents = 0;
/*
* Start polling AioHandlers when they become ready because activity is
* likely to continue. Note that starvation is theoretically possible when
* fdmon_supports_polling(), but only until the fd fires for the first
* time.
*/
if (!QLIST_IS_INSERTED(node, node_deleted) &&
!QLIST_IS_INSERTED(node, node_poll) &&
node->io_poll) {
trace_poll_add(ctx, node, node->pfd.fd, revents);
if (ctx->poll_started && node->io_poll_begin) {
node->io_poll_begin(node->opaque);
}
QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
}
if (!QLIST_IS_INSERTED(node, node_deleted) &&
(revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
aio_node_check(ctx, node->is_external) &&
@@ -493,7 +354,7 @@ static bool aio_dispatch_ready_handlers(AioContext *ctx,
AioHandler *node;
while ((node = QLIST_FIRST(ready_list))) {
-        QLIST_SAFE_REMOVE(node, node_ready);
+        QLIST_REMOVE(node, node_ready);
progress = aio_dispatch_handler(ctx, node) || progress;
}
@@ -524,71 +385,19 @@ void aio_dispatch(AioContext *ctx)
timerlistgroup_run_timers(&ctx->tlg);
}
-/* These thread-local variables are used only in a small part of aio_poll
- * around the call to the poll() system call.  In particular they are not
- * used while aio_poll is performing callbacks, which makes it much easier
- * to think about reentrancy!
- *
- * Stack-allocated arrays would be perfect but they have size limitations;
- * heap allocation is expensive enough that we want to reuse arrays across
- * calls to aio_poll().  And because poll() has to be called without holding
- * any lock, the arrays cannot be stored in AioContext.  Thread-local data
- * has none of the disadvantages of these three options.
- */
-static __thread GPollFD *pollfds;
-static __thread AioHandler **nodes;
-static __thread unsigned npfd, nalloc;
-static __thread Notifier pollfds_cleanup_notifier;
-
-static void pollfds_cleanup(Notifier *n, void *unused)
-{
-    g_assert(npfd == 0);
-    g_free(pollfds);
-    g_free(nodes);
-    nalloc = 0;
-}
-
-static void add_pollfd(AioHandler *node)
-{
-    if (npfd == nalloc) {
-        if (nalloc == 0) {
-            pollfds_cleanup_notifier.notify = pollfds_cleanup;
-            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
-            nalloc = 8;
-        } else {
-            g_assert(nalloc <= INT_MAX);
-            nalloc *= 2;
-        }
-        pollfds = g_renew(GPollFD, pollfds, nalloc);
-        nodes = g_renew(AioHandler *, nodes, nalloc);
-    }
-
-    nodes[npfd] = node;
-    pollfds[npfd] = (GPollFD) {
-        .fd = node->pfd.fd,
-        .events = node->pfd.events,
-    };
-    npfd++;
-}
-
-static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
+static bool run_poll_handlers_once(AioContext *ctx,
+                                   int64_t now,
+                                   int64_t *timeout)
 {
     bool progress = false;
     AioHandler *node;
+    AioHandler *tmp;
 
-    /*
-     * Optimization: ->io_poll() handlers often contain RCU read critical
-     * sections and we therefore see many rcu_read_lock() -> rcu_read_unlock()
-     * -> rcu_read_lock() -> ... sequences with expensive memory
-     * synchronization primitives.  Make the entire polling loop an RCU
-     * critical section because nested rcu_read_lock()/rcu_read_unlock() calls
-     * are cheap.
-     */
-    RCU_READ_LOCK_GUARD();
-
-    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
-        if (!QLIST_IS_INSERTED(node, node_deleted) && node->io_poll &&
-            aio_node_check(ctx, node->is_external) &&
+    QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
+        if (aio_node_check(ctx, node->is_external) &&
             node->io_poll(node->opaque)) {
+            node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;
/*
* Polling was successful, exit try_poll_mode immediately
* to adjust the next polling time.
@@ -605,6 +414,50 @@ static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
return progress;
}
static bool fdmon_supports_polling(AioContext *ctx)
{
return ctx->fdmon_ops->need_wait != aio_poll_disabled;
}
static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now)
{
AioHandler *node;
AioHandler *tmp;
bool progress = false;
/*
* File descriptor monitoring implementations without userspace polling
* support suffer from starvation when a subset of handlers is polled
* because fds will not be processed in a timely fashion. Don't remove
* idle poll handlers.
*/
if (!fdmon_supports_polling(ctx)) {
return false;
}
QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
if (node->poll_idle_timeout == 0LL) {
node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;
} else if (now >= node->poll_idle_timeout) {
trace_poll_remove(ctx, node, node->pfd.fd);
node->poll_idle_timeout = 0LL;
QLIST_SAFE_REMOVE(node, node_poll);
if (ctx->poll_started && node->io_poll_end) {
node->io_poll_end(node->opaque);
/*
* Final poll in case ->io_poll_end() races with an event.
* Never mind about re-adding the handler in the rare case where
* this causes progress.
*/
progress = node->io_poll(node->opaque) || progress;
}
}
}
return progress;
}
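The mechanism is a per-handler deadline, with 0 serving as "no deadline scheduled yet". A self-contained sketch of the same bookkeeping (stand-in Handler type, not QEMU's):

    #include <assert.h>
    #include <stdint.h>

    #define POLL_IDLE_INTERVAL_NS (7LL * 1000000000LL)

    typedef struct {
        int64_t poll_idle_timeout;   /* 0 = no deadline scheduled yet */
        int in_poll_set;
    } Handler;

    /* called when the handler's io_poll() made progress */
    static void on_progress(Handler *h, int64_t now)
    {
        h->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;
    }

    /* called periodically from the polling loop */
    static void reap_if_idle(Handler *h, int64_t now)
    {
        if (h->poll_idle_timeout == 0) {
            h->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;
        } else if (now >= h->poll_idle_timeout) {
            h->in_poll_set = 0;      /* like QLIST_SAFE_REMOVE(node, node_poll) */
            h->poll_idle_timeout = 0;
        }
    }

    int main(void)
    {
        Handler h = { .in_poll_set = 1 };

        reap_if_idle(&h, 100);                           /* schedules a deadline */
        reap_if_idle(&h, 100 + POLL_IDLE_INTERVAL_NS);   /* idle for too long */
        assert(!h.in_poll_set);
        (void)on_progress;
        return 0;
    }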
/* run_poll_handlers:
* @ctx: the AioContext
* @max_ns: maximum time to poll for, in nanoseconds
@@ -628,13 +481,28 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
trace_run_poll_handlers_begin(ctx, max_ns, *timeout);
/*
* Optimization: ->io_poll() handlers often contain RCU read critical
* sections and we therefore see many rcu_read_lock() -> rcu_read_unlock()
* -> rcu_read_lock() -> ... sequences with expensive memory
* synchronization primitives. Make the entire polling loop an RCU
* critical section because nested rcu_read_lock()/rcu_read_unlock() calls
* are cheap.
*/
RCU_READ_LOCK_GUARD();
start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
do {
-        progress = run_poll_handlers_once(ctx, timeout);
+        progress = run_poll_handlers_once(ctx, start_time, timeout);
elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
max_ns = qemu_soonest_timeout(*timeout, max_ns);
assert(!(max_ns && progress));
-    } while (elapsed_time < max_ns && !atomic_read(&ctx->poll_disable_cnt));
+    } while (elapsed_time < max_ns && !ctx->fdmon_ops->need_wait(ctx));
if (remove_idle_poll_handlers(ctx, start_time + elapsed_time)) {
*timeout = 0;
progress = true;
}
/* If time has passed with no successful polling, adjust *timeout to
* keep the same ending time.
@@ -660,9 +528,14 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
*/
static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
{
-    int64_t max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
+    int64_t max_ns;
 
-    if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
+    if (QLIST_EMPTY_RCU(&ctx->poll_aio_handlers)) {
+        return false;
+    }
+
+    max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
+    if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
poll_set_started(ctx, true);
if (run_poll_handlers(ctx, max_ns, timeout)) {
@@ -670,19 +543,17 @@ static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
}
}
-    poll_set_started(ctx, false);
+    if (poll_set_started(ctx, false)) {
+        *timeout = 0;
+        return true;
+    }
 
-    /* Even if we don't run busy polling, try polling once in case it can make
-     * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
-     */
-    return run_poll_handlers_once(ctx, timeout);
+    return false;
}
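Taken together, try_poll_mode() and run_poll_handlers() implement busy-poll-until-deadline with a fallback to blocking fd monitoring. A self-contained approximation of that shape using plain poll(2) (a sketch only; QEMU's deadline ctx->poll_ns is adaptive and the fallback goes through the FDMonOps backend):

    #include <poll.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static int64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    typedef bool (*PollFn)(void *opaque);   /* like AioHandler's io_poll */

    /* Busy-poll io_poll() for up to max_ns, then block on the fd. */
    static bool wait_for_work(PollFn io_poll, void *opaque, int fd,
                              int64_t max_ns)
    {
        int64_t deadline = now_ns() + max_ns;

        while (now_ns() < deadline) {
            if (io_poll(opaque)) {
                return true;             /* progress with zero syscalls */
            }
        }

        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        return poll(&pfd, 1, -1) > 0;    /* fall back to fd monitoring */
    }

    static bool never_ready(void *opaque) { return false; }

    int main(void)
    {
        /* busy-polls for 1ms, then blocks until stdin is readable */
        wait_for_work(never_ready, NULL, STDIN_FILENO, 1000000);
        printf("stdin ready\n");
        return 0;
    }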
bool aio_poll(AioContext *ctx, bool blocking)
{
AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
-    AioHandler *node;
-    int i;
int ret = 0;
bool progress;
int64_t timeout;
@@ -714,27 +585,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
/* If polling is allowed, non-blocking aio_poll does not need the
* system call---a single round of run_poll_handlers_once suffices.
*/
-    if (timeout || atomic_read(&ctx->poll_disable_cnt)) {
-        assert(npfd == 0);
-
-        /* fill pollfds */
-        if (!aio_epoll_enabled(ctx)) {
-            QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
-                if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events
-                    && aio_node_check(ctx, node->is_external)) {
-                    add_pollfd(node);
-                }
-            }
-        }
-
-        /* wait until next event */
-        if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
-            npfd = 0; /* pollfds[] is not being used */
-            ret = aio_epoll(ctx, &ready_list, timeout);
-        } else {
-            ret = qemu_poll_ns(pollfds, npfd, timeout);
-        }
+    if (timeout || ctx->fdmon_ops->need_wait(ctx)) {
+        ret = ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
     }
if (blocking) {
@@ -783,19 +635,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
}
}
-    /* if we have any readable fds, dispatch event */
-    if (ret > 0) {
-        for (i = 0; i < npfd; i++) {
-            int revents = pollfds[i].revents;
-
-            if (revents) {
-                add_ready_handler(&ready_list, nodes[i], revents);
-            }
-        }
-    }
-
-    npfd = 0;
progress |= aio_bh_poll(ctx);
if (ret > 0) {
@@ -813,23 +652,21 @@ bool aio_poll(AioContext *ctx, bool blocking)
void aio_context_setup(AioContext *ctx)
{
-#ifdef CONFIG_EPOLL_CREATE1
-    assert(!ctx->epollfd);
-    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
-    if (ctx->epollfd == -1) {
-        fprintf(stderr, "Failed to create epoll instance: %s", strerror(errno));
-        ctx->epoll_available = false;
-    } else {
-        ctx->epoll_available = true;
-    }
-#endif
+    ctx->fdmon_ops = &fdmon_poll_ops;
+    ctx->epollfd = -1;
+
+    /* Use the fastest fd monitoring implementation if available */
+    if (fdmon_io_uring_setup(ctx)) {
+        return;
+    }
+
+    fdmon_epoll_setup(ctx);
}
void aio_context_destroy(AioContext *ctx)
{
-#ifdef CONFIG_EPOLL_CREATE1
-    aio_epoll_disable(ctx);
-#endif
+    fdmon_io_uring_destroy(ctx);
+    fdmon_epoll_disable(ctx);
}
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,

util/aio-posix.h (new file, 81 lines)

@@ -0,0 +1,81 @@
/*
* AioContext POSIX event loop implementation internal APIs
*
* Copyright IBM, Corp. 2008
* Copyright Red Hat, Inc. 2020
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
* Contributions after 2012-01-13 are licensed under the terms of the
* GNU GPL, version 2 or (at your option) any later version.
*/
#ifndef AIO_POSIX_H
#define AIO_POSIX_H
#include "block/aio.h"
struct AioHandler {
GPollFD pfd;
IOHandler *io_read;
IOHandler *io_write;
AioPollFn *io_poll;
IOHandler *io_poll_begin;
IOHandler *io_poll_end;
void *opaque;
QLIST_ENTRY(AioHandler) node;
QLIST_ENTRY(AioHandler) node_ready; /* only used during aio_poll() */
QLIST_ENTRY(AioHandler) node_deleted;
QLIST_ENTRY(AioHandler) node_poll;
#ifdef CONFIG_LINUX_IO_URING
QSLIST_ENTRY(AioHandler) node_submitted;
unsigned flags; /* see fdmon-io_uring.c */
#endif
int64_t poll_idle_timeout; /* when to stop userspace polling */
bool is_external;
};
/* Add a handler to a ready list */
void aio_add_ready_handler(AioHandlerList *ready_list, AioHandler *node,
int revents);
extern const FDMonOps fdmon_poll_ops;
#ifdef CONFIG_EPOLL_CREATE1
bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd);
void fdmon_epoll_setup(AioContext *ctx);
void fdmon_epoll_disable(AioContext *ctx);
#else
static inline bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd)
{
return false;
}
static inline void fdmon_epoll_setup(AioContext *ctx)
{
}
static inline void fdmon_epoll_disable(AioContext *ctx)
{
}
#endif /* !CONFIG_EPOLL_CREATE1 */
#ifdef CONFIG_LINUX_IO_URING
bool fdmon_io_uring_setup(AioContext *ctx);
void fdmon_io_uring_destroy(AioContext *ctx);
#else
static inline bool fdmon_io_uring_setup(AioContext *ctx)
{
return false;
}
static inline void fdmon_io_uring_destroy(AioContext *ctx)
{
}
#endif /* !CONFIG_LINUX_IO_URING */
#endif /* AIO_POSIX_H */
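Note that AioHandler carries a separate link field for each list it can sit on (node, node_ready, node_deleted, node_poll): intrusive lists need one entry per membership. A stripped-down illustration with stand-in types:

    #include <assert.h>
    #include <stddef.h>

    typedef struct Handler Handler;

    struct Handler {
        int fd;
        Handler *next_all;    /* like QLIST_ENTRY(AioHandler) node */
        Handler *next_poll;   /* like QLIST_ENTRY(AioHandler) node_poll */
    };

    int main(void)
    {
        Handler *all_head = NULL, *poll_head = NULL;
        Handler h = { .fd = 42 };

        /* the same object joins two lists through two independent links */
        h.next_all = all_head;   all_head = &h;
        h.next_poll = poll_head; poll_head = &h;

        assert(all_head == &h && poll_head == &h);
        return 0;
    }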

util/fdmon-epoll.c (new file, 155 lines)

@@ -0,0 +1,155 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* epoll(7) file descriptor monitoring
*/
#include "qemu/osdep.h"
#include <sys/epoll.h>
#include "qemu/rcu_queue.h"
#include "aio-posix.h"
/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64
void fdmon_epoll_disable(AioContext *ctx)
{
if (ctx->epollfd >= 0) {
close(ctx->epollfd);
ctx->epollfd = -1;
}
/* Switch back */
ctx->fdmon_ops = &fdmon_poll_ops;
}
static inline int epoll_events_from_pfd(int pfd_events)
{
return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
(pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
(pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
(pfd_events & G_IO_ERR ? EPOLLERR : 0);
}
static void fdmon_epoll_update(AioContext *ctx,
AioHandler *old_node,
AioHandler *new_node)
{
struct epoll_event event = {
.data.ptr = new_node,
.events = new_node ? epoll_events_from_pfd(new_node->pfd.events) : 0,
};
int r;
if (!new_node) {
r = epoll_ctl(ctx->epollfd, EPOLL_CTL_DEL, old_node->pfd.fd, &event);
} else if (!old_node) {
r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, new_node->pfd.fd, &event);
} else {
r = epoll_ctl(ctx->epollfd, EPOLL_CTL_MOD, new_node->pfd.fd, &event);
}
if (r) {
fdmon_epoll_disable(ctx);
}
}
static int fdmon_epoll_wait(AioContext *ctx, AioHandlerList *ready_list,
int64_t timeout)
{
GPollFD pfd = {
.fd = ctx->epollfd,
.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR,
};
AioHandler *node;
int i, ret = 0;
struct epoll_event events[128];
/* Fall back while external clients are disabled */
if (atomic_read(&ctx->external_disable_cnt)) {
return fdmon_poll_ops.wait(ctx, ready_list, timeout);
}
if (timeout > 0) {
ret = qemu_poll_ns(&pfd, 1, timeout);
if (ret > 0) {
timeout = 0;
}
}
if (timeout <= 0 || ret > 0) {
ret = epoll_wait(ctx->epollfd, events,
ARRAY_SIZE(events),
timeout);
if (ret <= 0) {
goto out;
}
for (i = 0; i < ret; i++) {
int ev = events[i].events;
int revents = (ev & EPOLLIN ? G_IO_IN : 0) |
(ev & EPOLLOUT ? G_IO_OUT : 0) |
(ev & EPOLLHUP ? G_IO_HUP : 0) |
(ev & EPOLLERR ? G_IO_ERR : 0);
node = events[i].data.ptr;
aio_add_ready_handler(ready_list, node, revents);
}
}
out:
return ret;
}
static const FDMonOps fdmon_epoll_ops = {
.update = fdmon_epoll_update,
.wait = fdmon_epoll_wait,
.need_wait = aio_poll_disabled,
};
static bool fdmon_epoll_try_enable(AioContext *ctx)
{
AioHandler *node;
struct epoll_event event;
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
int r;
if (QLIST_IS_INSERTED(node, node_deleted) || !node->pfd.events) {
continue;
}
event.events = epoll_events_from_pfd(node->pfd.events);
event.data.ptr = node;
r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
if (r) {
return false;
}
}
ctx->fdmon_ops = &fdmon_epoll_ops;
return true;
}
bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd)
{
if (ctx->epollfd < 0) {
return false;
}
/* Do not upgrade while external clients are disabled */
if (atomic_read(&ctx->external_disable_cnt)) {
return false;
}
if (npfd >= EPOLL_ENABLE_THRESHOLD) {
if (fdmon_epoll_try_enable(ctx)) {
return true;
} else {
fdmon_epoll_disable(ctx);
}
}
return false;
}
void fdmon_epoll_setup(AioContext *ctx)
{
ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
if (ctx->epollfd == -1) {
fprintf(stderr, "Failed to create epoll instance: %s", strerror(errno));
}
}
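For readers unfamiliar with epoll(7), the system-call pattern used above reduces to the following self-contained program (one pipe fd, minimal error handling):

    #include <stdio.h>
    #include <sys/epoll.h>
    #include <unistd.h>

    int main(void)
    {
        int fds[2];
        if (pipe(fds)) { perror("pipe"); return 1; }

        int epollfd = epoll_create1(EPOLL_CLOEXEC);
        if (epollfd < 0) { perror("epoll_create1"); return 1; }

        /* register the read end, like fdmon_epoll_update()'s EPOLL_CTL_ADD */
        struct epoll_event ev = { .events = EPOLLIN, .data.fd = fds[0] };
        if (epoll_ctl(epollfd, EPOLL_CTL_ADD, fds[0], &ev)) {
            perror("epoll_ctl");
            return 1;
        }

        if (write(fds[1], "x", 1) != 1) {   /* make the read end ready */
            return 1;
        }

        struct epoll_event events[128];
        int n = epoll_wait(epollfd, events, 128, -1);
        for (int i = 0; i < n; i++) {
            printf("fd %d ready, events 0x%x\n", events[i].data.fd,
                   events[i].events);
        }

        close(epollfd);
        close(fds[0]);
        close(fds[1]);
        return 0;
    }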

util/fdmon-io_uring.c (new file, 332 lines)

@@ -0,0 +1,332 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Linux io_uring file descriptor monitoring
*
* The Linux io_uring API supports file descriptor monitoring with a few
* advantages over existing APIs like poll(2) and epoll(7):
*
* 1. Userspace polling of events is possible because the completion queue (cq
* ring) is shared between the kernel and userspace. This allows
* applications that rely on userspace polling to also monitor file
* descriptors in the same userspace polling loop.
*
* 2. Submission and completion are batched and done together in a single system
* call. This minimizes the number of system calls.
*
* 3. File descriptor monitoring is O(1) like epoll(7) so it scales better than
* poll(2).
*
* 4. Nanosecond timeouts are supported so it requires fewer syscalls than
* epoll(7).
*
* This code only monitors file descriptors and does not do asynchronous disk
* I/O. Implementing disk I/O efficiently has other requirements and should
* use a separate io_uring so it does not make sense to unify the code.
*
* File descriptor monitoring is implemented using the following operations:
*
* 1. IORING_OP_POLL_ADD - adds a file descriptor to be monitored.
* 2. IORING_OP_POLL_REMOVE - removes a file descriptor being monitored. When
* the poll mask changes for a file descriptor it is first removed and then
* re-added with the new poll mask, so this operation is also used as part
* of modifying an existing monitored file descriptor.
* 3. IORING_OP_TIMEOUT - added every time a blocking syscall is made to wait
* for events. This operation self-cancels if another event completes
* before the timeout.
*
* io_uring calls the submission queue the "sq ring" and the completion queue
* the "cq ring". Ring entries are called "sqe" and "cqe", respectively.
*
* The code is structured so that sq/cq rings are only modified within
* fdmon_io_uring_wait(). Changes to AioHandlers are made by enqueuing them on
* ctx->submit_list so that fdmon_io_uring_wait() can submit IORING_OP_POLL_ADD
* and/or IORING_OP_POLL_REMOVE sqes for them.
*/
#include "qemu/osdep.h"
#include <poll.h>
#include "qemu/rcu_queue.h"
#include "aio-posix.h"
enum {
FDMON_IO_URING_ENTRIES = 128, /* sq/cq ring size */
/* AioHandler::flags */
FDMON_IO_URING_PENDING = (1 << 0),
FDMON_IO_URING_ADD = (1 << 1),
FDMON_IO_URING_REMOVE = (1 << 2),
};
static inline int poll_events_from_pfd(int pfd_events)
{
return (pfd_events & G_IO_IN ? POLLIN : 0) |
(pfd_events & G_IO_OUT ? POLLOUT : 0) |
(pfd_events & G_IO_HUP ? POLLHUP : 0) |
(pfd_events & G_IO_ERR ? POLLERR : 0);
}
static inline int pfd_events_from_poll(int poll_events)
{
return (poll_events & POLLIN ? G_IO_IN : 0) |
(poll_events & POLLOUT ? G_IO_OUT : 0) |
(poll_events & POLLHUP ? G_IO_HUP : 0) |
(poll_events & POLLERR ? G_IO_ERR : 0);
}
/*
* Returns an sqe for submitting a request. May only be called from within
* fdmon_io_uring_wait().
*/
static struct io_uring_sqe *get_sqe(AioContext *ctx)
{
struct io_uring *ring = &ctx->fdmon_io_uring;
struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
int ret;
if (likely(sqe)) {
return sqe;
}
/* No free sqes left, submit pending sqes first */
ret = io_uring_submit(ring);
assert(ret > 1);
sqe = io_uring_get_sqe(ring);
assert(sqe);
return sqe;
}
/* Atomically enqueue an AioHandler for sq ring submission */
static void enqueue(AioHandlerSList *head, AioHandler *node, unsigned flags)
{
unsigned old_flags;
old_flags = atomic_fetch_or(&node->flags, FDMON_IO_URING_PENDING | flags);
if (!(old_flags & FDMON_IO_URING_PENDING)) {
QSLIST_INSERT_HEAD_ATOMIC(head, node, node_submitted);
}
}
/* Dequeue an AioHandler for sq ring submission. Called by fill_sq_ring(). */
static AioHandler *dequeue(AioHandlerSList *head, unsigned *flags)
{
AioHandler *node = QSLIST_FIRST(head);
if (!node) {
return NULL;
}
/* Doesn't need to be atomic since fill_sq_ring() moves the list */
QSLIST_REMOVE_HEAD(head, node_submitted);
/*
* Don't clear FDMON_IO_URING_REMOVE. It's sticky so it can serve two
* purposes: telling fill_sq_ring() to submit IORING_OP_POLL_REMOVE and
* telling process_cqe() to delete the AioHandler when its
* IORING_OP_POLL_ADD completes.
*/
*flags = atomic_fetch_and(&node->flags, ~(FDMON_IO_URING_PENDING |
FDMON_IO_URING_ADD));
return node;
}
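The PENDING bit is what keeps a node from being queued twice when updates race, and REMOVE stays sticky across dequeue as the comment above explains. A self-contained, single-threaded model of the flag protocol in C11 atomics (stand-in types, not QEMU's code):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stddef.h>

    enum { PENDING = 1 << 0, ADD = 1 << 1, REMOVE = 1 << 2 };

    typedef struct Node {
        _Atomic unsigned flags;
        struct Node *next;
    } Node;

    static Node *head;

    static void enqueue(Node *n, unsigned f)
    {
        unsigned old = atomic_fetch_or(&n->flags, PENDING | f);
        if (!(old & PENDING)) {    /* first request actually queues the node */
            n->next = head;
            head = n;              /* QSLIST_INSERT_HEAD_ATOMIC in the real code */
        }
    }

    static Node *dequeue(unsigned *f)
    {
        Node *n = head;
        if (!n) {
            return NULL;
        }
        head = n->next;
        /* clear PENDING and ADD; REMOVE stays sticky for later completion */
        *f = atomic_fetch_and(&n->flags, ~(unsigned)(PENDING | ADD));
        return n;
    }

    int main(void)
    {
        Node n = { 0 };
        unsigned f;

        enqueue(&n, ADD);
        enqueue(&n, ADD);                  /* deduplicated: still queued once */
        assert(dequeue(&f) == &n && (f & ADD));
        assert(dequeue(&f) == NULL);
        return 0;
    }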
static void fdmon_io_uring_update(AioContext *ctx,
AioHandler *old_node,
AioHandler *new_node)
{
if (new_node) {
enqueue(&ctx->submit_list, new_node, FDMON_IO_URING_ADD);
}
if (old_node) {
/*
* Deletion is tricky because IORING_OP_POLL_ADD and
* IORING_OP_POLL_REMOVE are async. We need to wait for the original
* IORING_OP_POLL_ADD to complete before this handler can be freed
* safely.
*
* It's possible that the file descriptor becomes ready and the
* IORING_OP_POLL_ADD cqe is enqueued before IORING_OP_POLL_REMOVE is
* submitted, too.
*
* Mark this handler deleted right now but don't place it on
* ctx->deleted_aio_handlers yet. Instead, manually fudge the list
* entry to make QLIST_IS_INSERTED() think this handler has been
* inserted and other code recognizes this AioHandler as deleted.
*
* Once the original IORING_OP_POLL_ADD completes we enqueue the
* handler on the real ctx->deleted_aio_handlers list to be freed.
*/
assert(!QLIST_IS_INSERTED(old_node, node_deleted));
old_node->node_deleted.le_prev = &old_node->node_deleted.le_next;
enqueue(&ctx->submit_list, old_node, FDMON_IO_URING_REMOVE);
}
}
static void add_poll_add_sqe(AioContext *ctx, AioHandler *node)
{
struct io_uring_sqe *sqe = get_sqe(ctx);
int events = poll_events_from_pfd(node->pfd.events);
io_uring_prep_poll_add(sqe, node->pfd.fd, events);
io_uring_sqe_set_data(sqe, node);
}
static void add_poll_remove_sqe(AioContext *ctx, AioHandler *node)
{
struct io_uring_sqe *sqe = get_sqe(ctx);
io_uring_prep_poll_remove(sqe, node);
}
/* Add a timeout that self-cancels when another cqe becomes ready */
static void add_timeout_sqe(AioContext *ctx, int64_t ns)
{
struct io_uring_sqe *sqe;
struct __kernel_timespec ts = {
.tv_sec = ns / NANOSECONDS_PER_SECOND,
.tv_nsec = ns % NANOSECONDS_PER_SECOND,
};
sqe = get_sqe(ctx);
io_uring_prep_timeout(sqe, &ts, 1, 0);
}
/* Add sqes from ctx->submit_list for submission */
static void fill_sq_ring(AioContext *ctx)
{
AioHandlerSList submit_list;
AioHandler *node;
unsigned flags;
QSLIST_MOVE_ATOMIC(&submit_list, &ctx->submit_list);
while ((node = dequeue(&submit_list, &flags))) {
/* Order matters, just in case both flags were set */
if (flags & FDMON_IO_URING_ADD) {
add_poll_add_sqe(ctx, node);
}
if (flags & FDMON_IO_URING_REMOVE) {
add_poll_remove_sqe(ctx, node);
}
}
}
/* Returns true if a handler became ready */
static bool process_cqe(AioContext *ctx,
AioHandlerList *ready_list,
struct io_uring_cqe *cqe)
{
AioHandler *node = io_uring_cqe_get_data(cqe);
unsigned flags;
/* poll_timeout and poll_remove have a zero user_data field */
if (!node) {
return false;
}
/*
* Deletion can only happen when IORING_OP_POLL_ADD completes. If we race
* with enqueue() here then we can safely clear the FDMON_IO_URING_REMOVE
* bit before IORING_OP_POLL_REMOVE is submitted.
*/
flags = atomic_fetch_and(&node->flags, ~FDMON_IO_URING_REMOVE);
if (flags & FDMON_IO_URING_REMOVE) {
QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted);
return false;
}
aio_add_ready_handler(ready_list, node, pfd_events_from_poll(cqe->res));
/* IORING_OP_POLL_ADD is one-shot so we must re-arm it */
add_poll_add_sqe(ctx, node);
return true;
}
static int process_cq_ring(AioContext *ctx, AioHandlerList *ready_list)
{
struct io_uring *ring = &ctx->fdmon_io_uring;
struct io_uring_cqe *cqe;
unsigned num_cqes = 0;
unsigned num_ready = 0;
unsigned head;
io_uring_for_each_cqe(ring, head, cqe) {
if (process_cqe(ctx, ready_list, cqe)) {
num_ready++;
}
num_cqes++;
}
io_uring_cq_advance(ring, num_cqes);
return num_ready;
}
static int fdmon_io_uring_wait(AioContext *ctx, AioHandlerList *ready_list,
int64_t timeout)
{
unsigned wait_nr = 1; /* block until at least one cqe is ready */
int ret;
/* Fall back while external clients are disabled */
if (atomic_read(&ctx->external_disable_cnt)) {
return fdmon_poll_ops.wait(ctx, ready_list, timeout);
}
if (timeout == 0) {
wait_nr = 0; /* non-blocking */
} else if (timeout > 0) {
add_timeout_sqe(ctx, timeout);
}
fill_sq_ring(ctx);
ret = io_uring_submit_and_wait(&ctx->fdmon_io_uring, wait_nr);
assert(ret >= 0);
return process_cq_ring(ctx, ready_list);
}
static bool fdmon_io_uring_need_wait(AioContext *ctx)
{
return io_uring_cq_ready(&ctx->fdmon_io_uring);
}
static const FDMonOps fdmon_io_uring_ops = {
.update = fdmon_io_uring_update,
.wait = fdmon_io_uring_wait,
.need_wait = fdmon_io_uring_need_wait,
};
bool fdmon_io_uring_setup(AioContext *ctx)
{
int ret;
ret = io_uring_queue_init(FDMON_IO_URING_ENTRIES, &ctx->fdmon_io_uring, 0);
if (ret != 0) {
return false;
}
QSLIST_INIT(&ctx->submit_list);
ctx->fdmon_ops = &fdmon_io_uring_ops;
return true;
}
void fdmon_io_uring_destroy(AioContext *ctx)
{
if (ctx->fdmon_ops == &fdmon_io_uring_ops) {
AioHandler *node;
io_uring_queue_exit(&ctx->fdmon_io_uring);
/* No need to submit these anymore, just free them. */
while ((node = QSLIST_FIRST_RCU(&ctx->submit_list))) {
QSLIST_REMOVE_HEAD_RCU(&ctx->submit_list, node_submitted);
QLIST_REMOVE(node, node);
g_free(node);
}
ctx->fdmon_ops = &fdmon_poll_ops;
}
}
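A minimal standalone program showing the liburing calls this file is built on, including the re-arm after each completion that process_cqe() performs because IORING_OP_POLL_ADD is one-shot (a sketch; assumes liburing is installed, link with -luring):

    #include <liburing.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct io_uring ring;
        int fds[2];

        if (pipe(fds) || io_uring_queue_init(8, &ring, 0) < 0) {
            return 1;
        }

        if (write(fds[1], "x", 1) != 1) {   /* keep the read end ready */
            return 1;
        }

        for (int i = 0; i < 2; i++) {
            /* IORING_OP_POLL_ADD is one-shot: re-arm after every completion */
            struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
            io_uring_prep_poll_add(sqe, fds[0], POLLIN);
            io_uring_submit(&ring);

            struct io_uring_cqe *cqe;
            if (io_uring_wait_cqe(&ring, &cqe) < 0) {
                break;
            }
            printf("poll completed, revents 0x%x\n", (unsigned)cqe->res);
            io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        return 0;
    }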

util/fdmon-poll.c (new file, 107 lines)

@@ -0,0 +1,107 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* poll(2) file descriptor monitoring
*
* Uses ppoll(2) when available, g_poll() otherwise.
*/
#include "qemu/osdep.h"
#include "aio-posix.h"
#include "qemu/rcu_queue.h"
/*
* These thread-local variables are used only in fdmon_poll_wait() around the
* call to the poll() system call. In particular they are not used while
* aio_poll is performing callbacks, which makes it much easier to think about
* reentrancy!
*
* Stack-allocated arrays would be perfect but they have size limitations;
* heap allocation is expensive enough that we want to reuse arrays across
* calls to aio_poll(). And because poll() has to be called without holding
* any lock, the arrays cannot be stored in AioContext. Thread-local data
* has none of the disadvantages of these three options.
*/
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;
static void pollfds_cleanup(Notifier *n, void *unused)
{
g_assert(npfd == 0);
g_free(pollfds);
g_free(nodes);
nalloc = 0;
}
static void add_pollfd(AioHandler *node)
{
if (npfd == nalloc) {
if (nalloc == 0) {
pollfds_cleanup_notifier.notify = pollfds_cleanup;
qemu_thread_atexit_add(&pollfds_cleanup_notifier);
nalloc = 8;
} else {
g_assert(nalloc <= INT_MAX);
nalloc *= 2;
}
pollfds = g_renew(GPollFD, pollfds, nalloc);
nodes = g_renew(AioHandler *, nodes, nalloc);
}
nodes[npfd] = node;
pollfds[npfd] = (GPollFD) {
.fd = node->pfd.fd,
.events = node->pfd.events,
};
npfd++;
}
static int fdmon_poll_wait(AioContext *ctx, AioHandlerList *ready_list,
int64_t timeout)
{
AioHandler *node;
int ret;
assert(npfd == 0);
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events
&& aio_node_check(ctx, node->is_external)) {
add_pollfd(node);
}
}
/* epoll(7) is faster above a certain number of fds */
if (fdmon_epoll_try_upgrade(ctx, npfd)) {
return ctx->fdmon_ops->wait(ctx, ready_list, timeout);
}
ret = qemu_poll_ns(pollfds, npfd, timeout);
if (ret > 0) {
int i;
for (i = 0; i < npfd; i++) {
int revents = pollfds[i].revents;
if (revents) {
aio_add_ready_handler(ready_list, nodes[i], revents);
}
}
}
npfd = 0;
return ret;
}
static void fdmon_poll_update(AioContext *ctx,
AioHandler *old_node,
AioHandler *new_node)
{
/* Do nothing, AioHandler already contains the state we'll need */
}
const FDMonOps fdmon_poll_ops = {
.update = fdmon_poll_update,
.wait = fdmon_poll_wait,
.need_wait = aio_poll_disabled,
};
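qemu_poll_ns() bottoms out in ppoll(2), whose struct timespec timeout is what gives this backend nanosecond granularity rather than poll(2)'s milliseconds. A self-contained illustration:

    #define _GNU_SOURCE
    #include <poll.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
        struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };
        int64_t timeout_ns = 250000000;             /* 250ms, to the nanosecond */
        struct timespec ts = {
            .tv_sec  = timeout_ns / 1000000000LL,
            .tv_nsec = timeout_ns % 1000000000LL,
        };

        int ret = ppoll(&pfd, 1, &ts, NULL);        /* NULL: keep the sigmask */
        if (ret == 0) {
            printf("timed out\n");
        } else if (ret > 0) {
            printf("stdin ready, revents 0x%x\n", pfd.revents);
        }
        return 0;
    }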

util/trace-events

@@ -5,6 +5,8 @@ run_poll_handlers_begin(void *ctx, int64_t max_ns, int64_t timeout) "ctx %p max_
run_poll_handlers_end(void *ctx, bool progress, int64_t timeout) "ctx %p progress %d new timeout %"PRId64
poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
poll_add(void *ctx, void *node, int fd, unsigned revents) "ctx %p node %p fd %d revents 0x%x"
poll_remove(void *ctx, void *node, int fd) "ctx %p node %p fd %d"
# async.c
aio_co_schedule(void *ctx, void *co) "ctx %p co %p"