aio: remove aio_disable_external() API

All callers now pass is_external=false to aio_set_fd_handler() and
aio_set_event_notifier(). The aio_disable_external() API that
temporarily disables fd handlers that were registered with is_external=true
is therefore dead code.

Remove aio_disable_external(), aio_enable_external(), and the
is_external arguments to aio_set_fd_handler() and
aio_set_event_notifier().

The entire test-fdmon-epoll test is removed because its sole purpose was
testing aio_disable_external().

Parts of this patch were generated using the following Coccinelle
(https://coccinelle.lip6.fr/) semantic patch:

  @@
  expression ctx, fd, is_external, io_read, io_write, io_poll, io_poll_ready, opaque;
  @@
  - aio_set_fd_handler(ctx, fd, is_external, io_read, io_write, io_poll, io_poll_ready, opaque)
  + aio_set_fd_handler(ctx, fd, io_read, io_write, io_poll, io_poll_ready, opaque)

  @@
  expression ctx, notifier, is_external, io_read, io_poll, io_poll_ready;
  @@
  - aio_set_event_notifier(ctx, notifier, is_external, io_read, io_poll, io_poll_ready)
  + aio_set_event_notifier(ctx, notifier, io_read, io_poll, io_poll_ready)
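
As a concrete instance of the semantic patch, the qemu_set_fd_handler() call
site in util/main-loop.c (taken from the hunk further below) changes from:

  aio_set_fd_handler(iohandler_ctx, fd, false,
                     fd_read, fd_write, NULL, NULL, opaque);

to:

  aio_set_fd_handler(iohandler_ctx, fd, fd_read, fd_write, NULL, NULL,
                     opaque);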

Reviewed-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230516190238.8401-21-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Authored by Stefan Hajnoczi on 2023-05-16 15:02:38 -04:00; committed by Kevin Wolf
commit 60f782b6b7, parent 03d7162a21
36 changed files with 80 additions and 298 deletions

--- a/block.c
+++ b/block.c
@@ -7305,9 +7305,6 @@ static void bdrv_detach_aio_context(BlockDriverState *bs)
         bs->drv->bdrv_detach_aio_context(bs);
     }
-    if (bs->quiesce_counter) {
-        aio_enable_external(bs->aio_context);
-    }
     bs->aio_context = NULL;
 }
@@ -7317,10 +7314,6 @@ static void bdrv_attach_aio_context(BlockDriverState *bs,
     BdrvAioNotifier *ban, *ban_tmp;
     GLOBAL_STATE_CODE();
-    if (bs->quiesce_counter) {
-        aio_disable_external(new_context);
-    }
     bs->aio_context = new_context;
     if (bs->drv && bs->drv->bdrv_attach_aio_context) {

--- a/block/blkio.c
+++ b/block/blkio.c
@@ -306,23 +306,18 @@ static void blkio_attach_aio_context(BlockDriverState *bs,
 {
     BDRVBlkioState *s = bs->opaque;
-    aio_set_fd_handler(new_context,
-                       s->completion_fd,
-                       false,
-                       blkio_completion_fd_read,
-                       NULL,
+    aio_set_fd_handler(new_context, s->completion_fd,
+                       blkio_completion_fd_read, NULL,
                        blkio_completion_fd_poll,
-                       blkio_completion_fd_poll_ready,
-                       bs);
+                       blkio_completion_fd_poll_ready, bs);
 }
 
 static void blkio_detach_aio_context(BlockDriverState *bs)
 {
     BDRVBlkioState *s = bs->opaque;
-    aio_set_fd_handler(bdrv_get_aio_context(bs),
-                       s->completion_fd,
-                       false, NULL, NULL, NULL, NULL, NULL);
+    aio_set_fd_handler(bdrv_get_aio_context(bs), s->completion_fd, NULL, NULL,
+                       NULL, NULL, NULL);
 }
 
 /* Call with s->blkio_lock held to submit I/O after enqueuing a new request */

--- a/block/curl.c
+++ b/block/curl.c
@@ -132,7 +132,7 @@ static gboolean curl_drop_socket(void *key, void *value, void *opaque)
     CURLSocket *socket = value;
     BDRVCURLState *s = socket->s;
-    aio_set_fd_handler(s->aio_context, socket->fd, false,
+    aio_set_fd_handler(s->aio_context, socket->fd,
                        NULL, NULL, NULL, NULL, NULL);
     return true;
 }
@@ -180,20 +180,20 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
     trace_curl_sock_cb(action, (int)fd);
     switch (action) {
     case CURL_POLL_IN:
-        aio_set_fd_handler(s->aio_context, fd, false,
+        aio_set_fd_handler(s->aio_context, fd,
                            curl_multi_do, NULL, NULL, NULL, socket);
         break;
     case CURL_POLL_OUT:
-        aio_set_fd_handler(s->aio_context, fd, false,
+        aio_set_fd_handler(s->aio_context, fd,
                            NULL, curl_multi_do, NULL, NULL, socket);
         break;
     case CURL_POLL_INOUT:
-        aio_set_fd_handler(s->aio_context, fd, false,
+        aio_set_fd_handler(s->aio_context, fd,
                            curl_multi_do, curl_multi_do,
                            NULL, NULL, socket);
         break;
     case CURL_POLL_REMOVE:
-        aio_set_fd_handler(s->aio_context, fd, false,
+        aio_set_fd_handler(s->aio_context, fd,
                            NULL, NULL, NULL, NULL, NULL);
         break;
     }

--- a/block/export/fuse.c
+++ b/block/export/fuse.c
@@ -84,7 +84,7 @@ static void fuse_export_drained_begin(void *opaque)
     FuseExport *exp = opaque;
     aio_set_fd_handler(exp->common.ctx,
-                       fuse_session_fd(exp->fuse_session), false,
+                       fuse_session_fd(exp->fuse_session),
                        NULL, NULL, NULL, NULL, NULL);
     exp->fd_handler_set_up = false;
 }
@@ -97,7 +97,7 @@ static void fuse_export_drained_end(void *opaque)
     exp->common.ctx = blk_get_aio_context(exp->common.blk);
     aio_set_fd_handler(exp->common.ctx,
-                       fuse_session_fd(exp->fuse_session), false,
+                       fuse_session_fd(exp->fuse_session),
                        read_from_fuse_export, NULL, NULL, NULL, exp);
     exp->fd_handler_set_up = true;
 }
@@ -270,7 +270,7 @@ static int setup_fuse_export(FuseExport *exp, const char *mountpoint,
     g_hash_table_insert(exports, g_strdup(mountpoint), NULL);
     aio_set_fd_handler(exp->common.ctx,
-                       fuse_session_fd(exp->fuse_session), false,
+                       fuse_session_fd(exp->fuse_session),
                        read_from_fuse_export, NULL, NULL, NULL, exp);
     exp->fd_handler_set_up = true;
@@ -320,7 +320,7 @@ static void fuse_export_shutdown(BlockExport *blk_exp)
     if (exp->fd_handler_set_up) {
         aio_set_fd_handler(exp->common.ctx,
-                           fuse_session_fd(exp->fuse_session), false,
+                           fuse_session_fd(exp->fuse_session),
                            NULL, NULL, NULL, NULL, NULL);
         exp->fd_handler_set_up = false;
     }

--- a/block/export/vduse-blk.c
+++ b/block/export/vduse-blk.c
@@ -137,7 +137,7 @@ static void vduse_blk_enable_queue(VduseDev *dev, VduseVirtq *vq)
     }
     aio_set_fd_handler(vblk_exp->export.ctx, vduse_queue_get_fd(vq),
-                       false, on_vduse_vq_kick, NULL, NULL, NULL, vq);
+                       on_vduse_vq_kick, NULL, NULL, NULL, vq);
     /* Make sure we don't miss any kick afer reconnecting */
     eventfd_write(vduse_queue_get_fd(vq), 1);
 }
@@ -151,7 +151,7 @@ static void vduse_blk_disable_queue(VduseDev *dev, VduseVirtq *vq)
         return;
     }
-    aio_set_fd_handler(vblk_exp->export.ctx, fd, false,
+    aio_set_fd_handler(vblk_exp->export.ctx, fd,
                        NULL, NULL, NULL, NULL, NULL);
 }
@@ -170,7 +170,7 @@ static void on_vduse_dev_kick(void *opaque)
 static void vduse_blk_attach_ctx(VduseBlkExport *vblk_exp, AioContext *ctx)
 {
     aio_set_fd_handler(vblk_exp->export.ctx, vduse_dev_get_fd(vblk_exp->dev),
-                       false, on_vduse_dev_kick, NULL, NULL, NULL,
+                       on_vduse_dev_kick, NULL, NULL, NULL,
                        vblk_exp->dev);
     /* Virtqueues are handled by vduse_blk_drained_end() */
@@ -179,7 +179,7 @@ static void vduse_blk_attach_ctx(VduseBlkExport *vblk_exp, AioContext *ctx)
 static void vduse_blk_detach_ctx(VduseBlkExport *vblk_exp)
 {
     aio_set_fd_handler(vblk_exp->export.ctx, vduse_dev_get_fd(vblk_exp->dev),
-                       false, NULL, NULL, NULL, NULL, NULL);
+                       NULL, NULL, NULL, NULL, NULL);
     /* Virtqueues are handled by vduse_blk_drained_begin() */
 }
@@ -364,7 +364,7 @@ static int vduse_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
         vduse_dev_setup_queue(vblk_exp->dev, i, queue_size);
     }
-    aio_set_fd_handler(exp->ctx, vduse_dev_get_fd(vblk_exp->dev), false,
+    aio_set_fd_handler(exp->ctx, vduse_dev_get_fd(vblk_exp->dev),
                        on_vduse_dev_kick, NULL, NULL, NULL, vblk_exp->dev);
     blk_add_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach,

--- a/block/io.c
+++ b/block/io.c
@@ -362,7 +362,6 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
     /* Stop things in parent-to-child order */
     if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
-        aio_disable_external(bdrv_get_aio_context(bs));
         bdrv_parent_drained_begin(bs, parent);
         if (bs->drv && bs->drv->bdrv_drain_begin) {
             bs->drv->bdrv_drain_begin(bs);
@@ -418,7 +417,6 @@ static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
             bs->drv->bdrv_drain_end(bs);
         }
         bdrv_parent_drained_end(bs, parent);
-        aio_enable_external(bdrv_get_aio_context(bs));
     }
 }

--- a/block/io_uring.c
+++ b/block/io_uring.c
@@ -410,7 +410,7 @@ int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
 void luring_detach_aio_context(LuringState *s, AioContext *old_context)
 {
-    aio_set_fd_handler(old_context, s->ring.ring_fd, false,
+    aio_set_fd_handler(old_context, s->ring.ring_fd,
                        NULL, NULL, NULL, NULL, s);
     qemu_bh_delete(s->completion_bh);
     s->aio_context = NULL;
@@ -420,7 +420,7 @@ void luring_attach_aio_context(LuringState *s, AioContext *new_context)
 {
     s->aio_context = new_context;
     s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s);
-    aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false,
+    aio_set_fd_handler(s->aio_context, s->ring.ring_fd,
                        qemu_luring_completion_cb, NULL,
                        qemu_luring_poll_cb, qemu_luring_poll_ready, s);
 }

--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -363,7 +363,6 @@ iscsi_set_events(IscsiLun *iscsilun)
     if (ev != iscsilun->events) {
         aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsi),
-                           false,
                            (ev & POLLIN) ? iscsi_process_read : NULL,
                            (ev & POLLOUT) ? iscsi_process_write : NULL,
                            NULL, NULL,
@@ -1540,7 +1539,7 @@ static void iscsi_detach_aio_context(BlockDriverState *bs)
     IscsiLun *iscsilun = bs->opaque;
     aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi),
-                       false, NULL, NULL, NULL, NULL, NULL);
+                       NULL, NULL, NULL, NULL, NULL);
     iscsilun->events = 0;
     if (iscsilun->nop_timer) {

--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -446,7 +446,7 @@ int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
 void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
 {
-    aio_set_event_notifier(old_context, &s->e, false, NULL, NULL, NULL);
+    aio_set_event_notifier(old_context, &s->e, NULL, NULL, NULL);
     qemu_bh_delete(s->completion_bh);
     s->aio_context = NULL;
 }
@@ -455,7 +455,7 @@ void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
 {
     s->aio_context = new_context;
     s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
-    aio_set_event_notifier(new_context, &s->e, false,
+    aio_set_event_notifier(new_context, &s->e,
                            qemu_laio_completion_cb,
                            qemu_laio_poll_cb,
                            qemu_laio_poll_ready);

--- a/block/nfs.c
+++ b/block/nfs.c
@@ -195,7 +195,6 @@ static void nfs_set_events(NFSClient *client)
     int ev = nfs_which_events(client->context);
     if (ev != client->events) {
         aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
-                           false,
                            (ev & POLLIN) ? nfs_process_read : NULL,
                            (ev & POLLOUT) ? nfs_process_write : NULL,
                            NULL, NULL, client);
@@ -373,7 +372,7 @@ static void nfs_detach_aio_context(BlockDriverState *bs)
     NFSClient *client = bs->opaque;
     aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
-                       false, NULL, NULL, NULL, NULL, NULL);
+                       NULL, NULL, NULL, NULL, NULL);
     client->events = 0;
 }
@@ -391,7 +390,7 @@ static void nfs_client_close(NFSClient *client)
     if (client->context) {
         qemu_mutex_lock(&client->mutex);
         aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
-                           false, NULL, NULL, NULL, NULL, NULL);
+                           NULL, NULL, NULL, NULL, NULL);
         qemu_mutex_unlock(&client->mutex);
         if (client->fh) {
             nfs_close(client->context, client->fh);

--- a/block/nvme.c
+++ b/block/nvme.c
@@ -862,7 +862,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
     }
     aio_set_event_notifier(bdrv_get_aio_context(bs),
                            &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
-                           false, nvme_handle_event, nvme_poll_cb,
+                           nvme_handle_event, nvme_poll_cb,
                            nvme_poll_ready);
     if (!nvme_identify(bs, namespace, errp)) {
@@ -948,7 +948,7 @@ static void nvme_close(BlockDriverState *bs)
     g_free(s->queues);
     aio_set_event_notifier(bdrv_get_aio_context(bs),
                            &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
-                           false, NULL, NULL, NULL);
+                           NULL, NULL, NULL);
     event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
     qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map,
                             0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE);
@@ -1546,7 +1546,7 @@ static void nvme_detach_aio_context(BlockDriverState *bs)
     aio_set_event_notifier(bdrv_get_aio_context(bs),
                            &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
-                           false, NULL, NULL, NULL);
+                           NULL, NULL, NULL);
 }
 
 static void nvme_attach_aio_context(BlockDriverState *bs,
@@ -1556,7 +1556,7 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
     s->aio_context = new_context;
     aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
-                           false, nvme_handle_event, nvme_poll_cb,
+                           nvme_handle_event, nvme_poll_cb,
                            nvme_poll_ready);
     for (unsigned i = 0; i < s->queue_count; i++) {

--- a/block/ssh.c
+++ b/block/ssh.c
@@ -1019,7 +1019,7 @@ static void restart_coroutine(void *opaque)
     AioContext *ctx = bdrv_get_aio_context(bs);
     trace_ssh_restart_coroutine(restart->co);
-    aio_set_fd_handler(ctx, s->sock, false, NULL, NULL, NULL, NULL, NULL);
+    aio_set_fd_handler(ctx, s->sock, NULL, NULL, NULL, NULL, NULL);
     aio_co_wake(restart->co);
 }
@@ -1049,7 +1049,7 @@ static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs)
     trace_ssh_co_yield(s->sock, rd_handler, wr_handler);
     aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
-                       false, rd_handler, wr_handler, NULL, NULL, &restart);
+                       rd_handler, wr_handler, NULL, NULL, &restart);
     qemu_coroutine_yield();
     trace_ssh_co_yield_back(s->sock);
 }

--- a/block/win32-aio.c
+++ b/block/win32-aio.c
@@ -174,7 +174,7 @@ int win32_aio_attach(QEMUWin32AIOState *aio, HANDLE hfile)
 void win32_aio_detach_aio_context(QEMUWin32AIOState *aio,
                                   AioContext *old_context)
 {
-    aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL, NULL);
+    aio_set_event_notifier(old_context, &aio->e, NULL, NULL, NULL);
     aio->aio_ctx = NULL;
 }
@@ -182,8 +182,8 @@ void win32_aio_attach_aio_context(QEMUWin32AIOState *aio,
                                   AioContext *new_context)
 {
     aio->aio_ctx = new_context;
-    aio_set_event_notifier(new_context, &aio->e, false,
-                           win32_aio_completion_cb, NULL, NULL);
+    aio_set_event_notifier(new_context, &aio->e, win32_aio_completion_cb,
+                           NULL, NULL);
 }
 
 QEMUWin32AIOState *win32_aio_init(void)

--- a/hw/i386/kvm/xen_xenstore.c
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -133,7 +133,7 @@ static void xen_xenstore_realize(DeviceState *dev, Error **errp)
         error_setg(errp, "Xenstore evtchn port init failed");
         return;
     }
-    aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh), false,
+    aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh),
                        xen_xenstore_event, NULL, NULL, NULL, s);
     s->impl = xs_impl_create(xen_domid);

--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -3491,7 +3491,7 @@ static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
 void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
 {
-    aio_set_event_notifier(ctx, &vq->host_notifier, false,
+    aio_set_event_notifier(ctx, &vq->host_notifier,
                            virtio_queue_host_notifier_read,
                            virtio_queue_host_notifier_aio_poll,
                            virtio_queue_host_notifier_aio_poll_ready);
@@ -3508,14 +3508,14 @@ void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
  */
 void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
 {
-    aio_set_event_notifier(ctx, &vq->host_notifier, false,
+    aio_set_event_notifier(ctx, &vq->host_notifier,
                            virtio_queue_host_notifier_read,
                            NULL, NULL);
 }
 
 void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
 {
-    aio_set_event_notifier(ctx, &vq->host_notifier, false, NULL, NULL, NULL);
+    aio_set_event_notifier(ctx, &vq->host_notifier, NULL, NULL, NULL);
 }
 
 void virtio_queue_host_notifier_read(EventNotifier *n)

--- a/hw/xen/xen-bus.c
+++ b/hw/xen/xen-bus.c
@@ -842,14 +842,14 @@ void xen_device_set_event_channel_context(XenDevice *xendev,
     }
     if (channel->ctx)
-        aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), false,
+        aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh),
                            NULL, NULL, NULL, NULL, NULL);
     channel->ctx = ctx;
     if (ctx) {
         aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh),
-                           false, xen_device_event, NULL, xen_device_poll,
-                           NULL, channel);
+                           xen_device_event, NULL, xen_device_poll, NULL,
+                           channel);
     }
 }
@@ -923,7 +923,7 @@ void xen_device_unbind_event_channel(XenDevice *xendev,
     QLIST_REMOVE(channel, list);
-    aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), false,
+    aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh),
                        NULL, NULL, NULL, NULL, NULL);
     if (qemu_xen_evtchn_unbind(channel->xeh, channel->local_port) < 0) {

--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -225,8 +225,6 @@ struct AioContext {
      */
     QEMUTimerListGroup tlg;
 
-    int external_disable_cnt;
-
     /* Number of AioHandlers without .io_poll() */
     int poll_disable_cnt;
@@ -481,7 +479,6 @@ bool aio_poll(AioContext *ctx, bool blocking);
  */
 void aio_set_fd_handler(AioContext *ctx,
                         int fd,
-                        bool is_external,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         AioPollFn *io_poll,
@@ -497,7 +494,6 @@ void aio_set_fd_handler(AioContext *ctx,
  */
 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *notifier,
-                            bool is_external,
                             EventNotifierHandler *io_read,
                             AioPollFn *io_poll,
                             EventNotifierHandler *io_poll_ready);
@@ -626,59 +622,6 @@ static inline void aio_timer_init(AioContext *ctx,
  */
 int64_t aio_compute_timeout(AioContext *ctx);
 
-/**
- * aio_disable_external:
- * @ctx: the aio context
- *
- * Disable the further processing of external clients.
- */
-static inline void aio_disable_external(AioContext *ctx)
-{
-    qatomic_inc(&ctx->external_disable_cnt);
-}
-
-/**
- * aio_enable_external:
- * @ctx: the aio context
- *
- * Enable the processing of external clients.
- */
-static inline void aio_enable_external(AioContext *ctx)
-{
-    int old;
-
-    old = qatomic_fetch_dec(&ctx->external_disable_cnt);
-    assert(old > 0);
-    if (old == 1) {
-        /* Kick event loop so it re-arms file descriptors */
-        aio_notify(ctx);
-    }
-}
-
-/**
- * aio_external_disabled:
- * @ctx: the aio context
- *
- * Return true if the external clients are disabled.
- */
-static inline bool aio_external_disabled(AioContext *ctx)
-{
-    return qatomic_read(&ctx->external_disable_cnt);
-}
-
-/**
- * aio_node_check:
- * @ctx: the aio context
- * @is_external: Whether or not the checked node is an external event source.
- *
- * Check if the node's is_external flag is okay to be polled by the ctx at this
- * moment. True means green light.
- */
-static inline bool aio_node_check(AioContext *ctx, bool is_external)
-{
-    return !is_external || !qatomic_read(&ctx->external_disable_cnt);
-}
-
 /**
  * aio_co_schedule:
  * @ctx: the aio context

--- a/io/channel-command.c
+++ b/io/channel-command.c
@@ -337,10 +337,8 @@ static void qio_channel_command_set_aio_fd_handler(QIOChannel *ioc,
                                                     void *opaque)
 {
     QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc);
-    aio_set_fd_handler(ctx, cioc->readfd, false,
-                       io_read, NULL, NULL, NULL, opaque);
-    aio_set_fd_handler(ctx, cioc->writefd, false,
-                       NULL, io_write, NULL, NULL, opaque);
+    aio_set_fd_handler(ctx, cioc->readfd, io_read, NULL, NULL, NULL, opaque);
+    aio_set_fd_handler(ctx, cioc->writefd, NULL, io_write, NULL, NULL, opaque);
 }

--- a/io/channel-file.c
+++ b/io/channel-file.c
@@ -198,8 +198,7 @@ static void qio_channel_file_set_aio_fd_handler(QIOChannel *ioc,
                                                 void *opaque)
 {
     QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc);
-    aio_set_fd_handler(ctx, fioc->fd, false, io_read, io_write,
-                       NULL, NULL, opaque);
+    aio_set_fd_handler(ctx, fioc->fd, io_read, io_write, NULL, NULL, opaque);
 }
 
 static GSource *qio_channel_file_create_watch(QIOChannel *ioc,

--- a/io/channel-socket.c
+++ b/io/channel-socket.c
@@ -899,8 +899,7 @@ static void qio_channel_socket_set_aio_fd_handler(QIOChannel *ioc,
                                                   void *opaque)
 {
     QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
-    aio_set_fd_handler(ctx, sioc->fd, false,
-                       io_read, io_write, NULL, NULL, opaque);
+    aio_set_fd_handler(ctx, sioc->fd, io_read, io_write, NULL, NULL, opaque);
 }
 
 static GSource *qio_channel_socket_create_watch(QIOChannel *ioc,

--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -3110,15 +3110,15 @@ static void qio_channel_rdma_set_aio_fd_handler(QIOChannel *ioc,
 {
     QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
     if (io_read) {
-        aio_set_fd_handler(ctx, rioc->rdmain->recv_comp_channel->fd,
-                           false, io_read, io_write, NULL, NULL, opaque);
-        aio_set_fd_handler(ctx, rioc->rdmain->send_comp_channel->fd,
-                           false, io_read, io_write, NULL, NULL, opaque);
+        aio_set_fd_handler(ctx, rioc->rdmain->recv_comp_channel->fd, io_read,
+                           io_write, NULL, NULL, opaque);
+        aio_set_fd_handler(ctx, rioc->rdmain->send_comp_channel->fd, io_read,
+                           io_write, NULL, NULL, opaque);
     } else {
-        aio_set_fd_handler(ctx, rioc->rdmaout->recv_comp_channel->fd,
-                           false, io_read, io_write, NULL, NULL, opaque);
-        aio_set_fd_handler(ctx, rioc->rdmaout->send_comp_channel->fd,
-                           false, io_read, io_write, NULL, NULL, opaque);
+        aio_set_fd_handler(ctx, rioc->rdmaout->recv_comp_channel->fd, io_read,
+                           io_write, NULL, NULL, opaque);
+        aio_set_fd_handler(ctx, rioc->rdmaout->send_comp_channel->fd, io_read,
+                           io_write, NULL, NULL, opaque);
     }
 }

--- a/tests/unit/meson.build
+++ b/tests/unit/meson.build
@@ -125,9 +125,6 @@ if have_block
   if nettle.found() or gcrypt.found()
     tests += {'test-crypto-pbkdf': [io]}
   endif
-  if config_host_data.get('CONFIG_EPOLL_CREATE1')
-    tests += {'test-fdmon-epoll': [testblock]}
-  endif
 endif
 if have_system

--- a/tests/unit/test-aio.c
+++ b/tests/unit/test-aio.c
@@ -130,7 +130,7 @@ static void *test_acquire_thread(void *opaque)
 static void set_event_notifier(AioContext *ctx, EventNotifier *notifier,
                                EventNotifierHandler *handler)
 {
-    aio_set_event_notifier(ctx, notifier, false, handler, NULL, NULL);
+    aio_set_event_notifier(ctx, notifier, handler, NULL, NULL);
 }
 
 static void dummy_notifier_read(EventNotifier *n)
@@ -383,30 +383,6 @@ static void test_flush_event_notifier(void)
     event_notifier_cleanup(&data.e);
 }
 
-static void test_aio_external_client(void)
-{
-    int i, j;
-
-    for (i = 1; i < 3; i++) {
-        EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
-        event_notifier_init(&data.e, false);
-        aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL, NULL);
-        event_notifier_set(&data.e);
-        for (j = 0; j < i; j++) {
-            aio_disable_external(ctx);
-        }
-        for (j = 0; j < i; j++) {
-            assert(!aio_poll(ctx, false));
-            assert(event_notifier_test_and_clear(&data.e));
-            event_notifier_set(&data.e);
-            aio_enable_external(ctx);
-        }
-        assert(aio_poll(ctx, false));
-        set_event_notifier(ctx, &data.e, NULL);
-        event_notifier_cleanup(&data.e);
-    }
-}
-
 static void test_wait_event_notifier_noflush(void)
 {
     EventNotifierTestData data = { .n = 0 };
@@ -935,7 +911,6 @@ int main(int argc, char **argv)
     g_test_add_func("/aio/event/wait", test_wait_event_notifier);
     g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
     g_test_add_func("/aio/event/flush", test_flush_event_notifier);
-    g_test_add_func("/aio/external-client", test_aio_external_client);
     g_test_add_func("/aio/timer/schedule", test_timer_schedule);
     g_test_add_func("/aio/coroutine/queue-chaining", test_queue_chaining);

--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -473,7 +473,6 @@ static void test_graph_change_drain_all(void)
     g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
     g_assert_cmpint(b_s->drain_count, ==, 0);
-    g_assert_cmpint(qemu_get_aio_context()->external_disable_cnt, ==, 0);
     bdrv_unref(bs_b);
     blk_unref(blk_b);

--- a/tests/unit/test-fdmon-epoll.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * fdmon-epoll tests
- *
- * Copyright (c) 2020 Red Hat, Inc.
- */
-
-#include "qemu/osdep.h"
-#include "block/aio.h"
-#include "qapi/error.h"
-#include "qemu/main-loop.h"
-
-static AioContext *ctx;
-
-static void dummy_fd_handler(EventNotifier *notifier)
-{
-    event_notifier_test_and_clear(notifier);
-}
-
-static void add_event_notifiers(EventNotifier *notifiers, size_t n)
-{
-    for (size_t i = 0; i < n; i++) {
-        event_notifier_init(&notifiers[i], false);
-        aio_set_event_notifier(ctx, &notifiers[i], false,
-                               dummy_fd_handler, NULL, NULL);
-    }
-}
-
-static void remove_event_notifiers(EventNotifier *notifiers, size_t n)
-{
-    for (size_t i = 0; i < n; i++) {
-        aio_set_event_notifier(ctx, &notifiers[i], false, NULL, NULL, NULL);
-        event_notifier_cleanup(&notifiers[i]);
-    }
-}
-
-/* Check that fd handlers work when external clients are disabled */
-static void test_external_disabled(void)
-{
-    EventNotifier notifiers[100];
-
-    /* fdmon-epoll is only enabled when many fd handlers are registered */
-    add_event_notifiers(notifiers, G_N_ELEMENTS(notifiers));
-
-    event_notifier_set(&notifiers[0]);
-    assert(aio_poll(ctx, true));
-
-    aio_disable_external(ctx);
-    event_notifier_set(&notifiers[0]);
-    assert(aio_poll(ctx, true));
-    aio_enable_external(ctx);
-
-    remove_event_notifiers(notifiers, G_N_ELEMENTS(notifiers));
-}
-
-int main(int argc, char **argv)
-{
-    /*
-     * This code relies on the fact that fdmon-io_uring disables itself when
-     * the glib main loop is in use. The main loop uses fdmon-poll and upgrades
-     * to fdmon-epoll when the number of fds exceeds a threshold.
-     */
-    qemu_init_main_loop(&error_fatal);
-    ctx = qemu_get_aio_context();
-
-    while (g_main_context_iteration(NULL, false)) {
-        /* Do nothing */
-    }
-
-    g_test_init(&argc, &argv, NULL);
-    g_test_add_func("/fdmon-epoll/external-disabled", test_external_disabled);
-    return g_test_run();
-}

--- a/tests/unit/test-nested-aio-poll.c
+++ b/tests/unit/test-nested-aio-poll.c
@@ -91,12 +91,12 @@ static void test(void)
     /* Make the event notifier active (set) right away */
     event_notifier_init(&td.poll_notifier, 1);
-    aio_set_event_notifier(td.ctx, &td.poll_notifier, false,
+    aio_set_event_notifier(td.ctx, &td.poll_notifier,
                            io_read, io_poll_true, io_poll_ready);
     /* This event notifier will be used later */
     event_notifier_init(&td.dummy_notifier, 0);
-    aio_set_event_notifier(td.ctx, &td.dummy_notifier, false,
+    aio_set_event_notifier(td.ctx, &td.dummy_notifier,
                            io_read, io_poll_false, io_poll_never_ready);
     /* Consume aio_notify() */
@@ -114,9 +114,8 @@ static void test(void)
     /* Run io_poll()/io_poll_ready() one more time to show it keeps working */
     g_assert(aio_poll(td.ctx, true));
-    aio_set_event_notifier(td.ctx, &td.dummy_notifier, false,
-                           NULL, NULL, NULL);
-    aio_set_event_notifier(td.ctx, &td.poll_notifier, false, NULL, NULL, NULL);
+    aio_set_event_notifier(td.ctx, &td.dummy_notifier, NULL, NULL, NULL);
+    aio_set_event_notifier(td.ctx, &td.poll_notifier, NULL, NULL, NULL);
     event_notifier_cleanup(&td.dummy_notifier);
     event_notifier_cleanup(&td.poll_notifier);
     aio_context_unref(td.ctx);

--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -99,7 +99,6 @@ static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
 void aio_set_fd_handler(AioContext *ctx,
                         int fd,
-                        bool is_external,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         AioPollFn *io_poll,
@@ -144,7 +143,6 @@ void aio_set_fd_handler(AioContext *ctx,
         new_node->io_poll = io_poll;
         new_node->io_poll_ready = io_poll_ready;
         new_node->opaque = opaque;
-        new_node->is_external = is_external;
         if (is_new) {
             new_node->pfd.fd = fd;
@@ -196,12 +194,11 @@ static void aio_set_fd_poll(AioContext *ctx, int fd,
 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *notifier,
-                            bool is_external,
                             EventNotifierHandler *io_read,
                             AioPollFn *io_poll,
                             EventNotifierHandler *io_poll_ready)
 {
-    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
+    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                        (IOHandler *)io_read, NULL, io_poll,
                        (IOHandler *)io_poll_ready, notifier);
 }
@@ -285,13 +282,11 @@ bool aio_pending(AioContext *ctx)
         /* TODO should this check poll ready? */
         revents = node->pfd.revents & node->pfd.events;
-        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
-            aio_node_check(ctx, node->is_external)) {
+        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
             result = true;
             break;
         }
-        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
-            aio_node_check(ctx, node->is_external)) {
+        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
             result = true;
             break;
         }
@@ -350,9 +345,7 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
         QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
     }
     if (!QLIST_IS_INSERTED(node, node_deleted) &&
-        poll_ready && revents == 0 &&
-        aio_node_check(ctx, node->is_external) &&
-        node->io_poll_ready) {
+        poll_ready && revents == 0 && node->io_poll_ready) {
         /*
          * Remove temporarily to avoid infinite loops when ->io_poll_ready()
          * calls aio_poll() before clearing the condition that made the poll
@@ -375,7 +368,6 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
     if (!QLIST_IS_INSERTED(node, node_deleted) &&
         (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
-        aio_node_check(ctx, node->is_external) &&
         node->io_read) {
         node->io_read(node->opaque);
@@ -386,7 +378,6 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
     }
     if (!QLIST_IS_INSERTED(node, node_deleted) &&
         (revents & (G_IO_OUT | G_IO_ERR)) &&
-        aio_node_check(ctx, node->is_external) &&
         node->io_write) {
         node->io_write(node->opaque);
         progress = true;
@@ -447,8 +438,7 @@ static bool run_poll_handlers_once(AioContext *ctx,
     AioHandler *tmp;
     QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
-        if (aio_node_check(ctx, node->is_external) &&
-            node->io_poll(node->opaque)) {
+        if (node->io_poll(node->opaque)) {
             aio_add_poll_ready_handler(ready_list, node);
             node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;

--- a/util/aio-posix.h
+++ b/util/aio-posix.h
@@ -38,7 +38,6 @@ struct AioHandler {
 #endif
     int64_t poll_idle_timeout; /* when to stop userspace polling */
     bool poll_ready; /* has polling detected an event? */
-    bool is_external;
 };
 
 /* Add a handler to a ready list */

--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -32,7 +32,6 @@ struct AioHandler {
     GPollFD pfd;
     int deleted;
     void *opaque;
-    bool is_external;
     QLIST_ENTRY(AioHandler) node;
 };
@@ -64,7 +63,6 @@ static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
 void aio_set_fd_handler(AioContext *ctx,
                         int fd,
-                        bool is_external,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         AioPollFn *io_poll,
@@ -111,7 +109,6 @@ void aio_set_fd_handler(AioContext *ctx,
         node->opaque = opaque;
         node->io_read = io_read;
         node->io_write = io_write;
-        node->is_external = is_external;
         if (io_read) {
             bitmask |= FD_READ | FD_ACCEPT | FD_CLOSE;
@@ -135,7 +132,6 @@ void aio_set_fd_handler(AioContext *ctx,
 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *e,
-                            bool is_external,
                             EventNotifierHandler *io_notify,
                             AioPollFn *io_poll,
                             EventNotifierHandler *io_poll_ready)
@@ -161,7 +157,6 @@ void aio_set_event_notifier(AioContext *ctx,
             node->e = e;
             node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
             node->pfd.events = G_IO_IN;
-            node->is_external = is_external;
             QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
             g_source_add_poll(&ctx->source, &node->pfd);
@@ -368,8 +363,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     /* fill fd sets */
     count = 0;
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
-        if (!node->deleted && node->io_notify
-            && aio_node_check(ctx, node->is_external)) {
+        if (!node->deleted && node->io_notify) {
             assert(count < MAXIMUM_WAIT_OBJECTS);
             events[count++] = event_notifier_get_handle(node->e);
         }

--- a/util/async.c
+++ b/util/async.c
@@ -409,7 +409,7 @@ aio_ctx_finalize(GSource *source)
         g_free(bh);
     }
-    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
+    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL, NULL);
     event_notifier_cleanup(&ctx->notifier);
     qemu_rec_mutex_destroy(&ctx->lock);
     qemu_lockcnt_destroy(&ctx->list_lock);
@@ -593,7 +593,6 @@ AioContext *aio_context_new(Error **errp)
     QSLIST_INIT(&ctx->scheduled_coroutines);
     aio_set_event_notifier(ctx, &ctx->notifier,
-                           false,
                            aio_context_notifier_cb,
                            aio_context_notifier_poll,
                            aio_context_notifier_poll_ready);

--- a/util/fdmon-epoll.c
+++ b/util/fdmon-epoll.c
@@ -64,11 +64,6 @@ static int fdmon_epoll_wait(AioContext *ctx, AioHandlerList *ready_list,
     int i, ret = 0;
     struct epoll_event events[128];
 
-    /* Fall back while external clients are disabled */
-    if (qatomic_read(&ctx->external_disable_cnt)) {
-        return fdmon_poll_ops.wait(ctx, ready_list, timeout);
-    }
-
     if (timeout > 0) {
         ret = qemu_poll_ns(&pfd, 1, timeout);
         if (ret > 0) {
@@ -133,11 +128,6 @@ bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd)
         return false;
     }
 
-    /* Do not upgrade while external clients are disabled */
-    if (qatomic_read(&ctx->external_disable_cnt)) {
-        return false;
-    }
-
     if (npfd < EPOLL_ENABLE_THRESHOLD) {
         return false;
     }

--- a/util/fdmon-io_uring.c
+++ b/util/fdmon-io_uring.c
@@ -276,11 +276,6 @@ static int fdmon_io_uring_wait(AioContext *ctx, AioHandlerList *ready_list,
     unsigned wait_nr = 1; /* block until at least one cqe is ready */
     int ret;
 
-    /* Fall back while external clients are disabled */
-    if (qatomic_read(&ctx->external_disable_cnt)) {
-        return fdmon_poll_ops.wait(ctx, ready_list, timeout);
-    }
-
     if (timeout == 0) {
         wait_nr = 0; /* non-blocking */
     } else if (timeout > 0) {
@@ -315,8 +310,7 @@ static bool fdmon_io_uring_need_wait(AioContext *ctx)
         return true;
     }
-    /* Are we falling back to fdmon-poll? */
-    return qatomic_read(&ctx->external_disable_cnt);
+    return false;
 }
 
 static const FDMonOps fdmon_io_uring_ops = {

--- a/util/fdmon-poll.c
+++ b/util/fdmon-poll.c
@@ -65,8 +65,7 @@ static int fdmon_poll_wait(AioContext *ctx, AioHandlerList *ready_list,
     assert(npfd == 0);
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
-        if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events
-            && aio_node_check(ctx, node->is_external)) {
+        if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events) {
             add_pollfd(node);
         }
     }

--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -644,14 +644,13 @@ void qemu_set_fd_handler(int fd,
                          void *opaque)
 {
     iohandler_init();
-    aio_set_fd_handler(iohandler_ctx, fd, false,
-                       fd_read, fd_write, NULL, NULL, opaque);
+    aio_set_fd_handler(iohandler_ctx, fd, fd_read, fd_write, NULL, NULL,
+                       opaque);
 }
 
 void event_notifier_set_handler(EventNotifier *e,
                                 EventNotifierHandler *handler)
 {
     iohandler_init();
-    aio_set_event_notifier(iohandler_ctx, e, false,
-                           handler, NULL, NULL);
+    aio_set_event_notifier(iohandler_ctx, e, handler, NULL, NULL);
 }

--- a/util/qemu-coroutine-io.c
+++ b/util/qemu-coroutine-io.c
@@ -74,8 +74,7 @@ typedef struct {
 static void fd_coroutine_enter(void *opaque)
 {
     FDYieldUntilData *data = opaque;
-    aio_set_fd_handler(data->ctx, data->fd, false,
-                       NULL, NULL, NULL, NULL, NULL);
+    aio_set_fd_handler(data->ctx, data->fd, NULL, NULL, NULL, NULL, NULL);
     qemu_coroutine_enter(data->co);
 }
@@ -87,7 +86,7 @@ void coroutine_fn yield_until_fd_readable(int fd)
     data.ctx = qemu_get_current_aio_context();
     data.co = qemu_coroutine_self();
     data.fd = fd;
-    aio_set_fd_handler(
-        data.ctx, fd, false, fd_coroutine_enter, NULL, NULL, NULL, &data);
+    aio_set_fd_handler(data.ctx, fd, fd_coroutine_enter, NULL, NULL, NULL,
+                       &data);
     qemu_coroutine_yield();
 }

--- a/util/vhost-user-server.c
+++ b/util/vhost-user-server.c
@@ -278,7 +278,7 @@ set_watch(VuDev *vu_dev, int fd, int vu_evt,
         vu_fd_watch->fd = fd;
         vu_fd_watch->cb = cb;
         qemu_socket_set_nonblock(fd);
-        aio_set_fd_handler(server->ioc->ctx, fd, false, kick_handler,
+        aio_set_fd_handler(server->ioc->ctx, fd, kick_handler,
                            NULL, NULL, NULL, vu_fd_watch);
         vu_fd_watch->vu_dev = vu_dev;
         vu_fd_watch->pvt = pvt;
@@ -299,8 +299,7 @@ static void remove_watch(VuDev *vu_dev, int fd)
     if (!vu_fd_watch) {
        return;
     }
-    aio_set_fd_handler(server->ioc->ctx, fd, false,
-                       NULL, NULL, NULL, NULL, NULL);
+    aio_set_fd_handler(server->ioc->ctx, fd, NULL, NULL, NULL, NULL, NULL);
     QTAILQ_REMOVE(&server->vu_fd_watches, vu_fd_watch, next);
     g_free(vu_fd_watch);
@@ -362,7 +361,7 @@ void vhost_user_server_stop(VuServer *server)
         VuFdWatch *vu_fd_watch;
         QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
-            aio_set_fd_handler(server->ctx, vu_fd_watch->fd, false,
+            aio_set_fd_handler(server->ctx, vu_fd_watch->fd,
                                NULL, NULL, NULL, NULL, vu_fd_watch);
         }
@@ -403,7 +402,7 @@ void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx)
     qio_channel_attach_aio_context(server->ioc, ctx);
     QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
-        aio_set_fd_handler(ctx, vu_fd_watch->fd, false, kick_handler, NULL,
+        aio_set_fd_handler(ctx, vu_fd_watch->fd, kick_handler, NULL,
                            NULL, NULL, vu_fd_watch);
     }
@@ -417,7 +416,7 @@ void vhost_user_server_detach_aio_context(VuServer *server)
         VuFdWatch *vu_fd_watch;
         QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
-            aio_set_fd_handler(server->ctx, vu_fd_watch->fd, false,
+            aio_set_fd_handler(server->ctx, vu_fd_watch->fd,
                                NULL, NULL, NULL, NULL, vu_fd_watch);
         }