dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel

If the DMA request's owning thread cancels the I/O while the bounce buffer's
owning thread is notifying the "cpu client list", a use-after-free happens:

     continue_after_map_failure               dma_aio_cancel
     ------------------------------------------------------------------
     aio_bh_new
                                              qemu_bh_delete
     qemu_bh_schedule (use after free)

Also, the old code doesn't run the bh in the right AioContext.

Fix both problems by passing a QEMUBH to cpu_register_map_client.

Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1426496617-10702-6-git-send-email-famz@redhat.com>
[Remove unnecessary forward declaration. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Fam Zheng 2015-03-16 17:03:37 +08:00 committed by Paolo Bonzini
parent 33b6c2edf6
commit e95205e1f9
3 changed files with 31 additions and 23 deletions
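
For readers skimming the diff, this is the resulting caller-side contract,
condensed from the dma-helpers.c hunks below (not a standalone program: it
relies on QEMU-internal APIs and the DMAAIOCB fields shown in the patch):

    /* In dma_blk_cb(): the bounce buffer is busy, so park this request.
     * The BH is created in the block device's own AioContext, which also
     * addresses the "wrong AioContext" problem mentioned above. */
    dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk), reschedule_dma, dbs);
    cpu_register_map_client(dbs->bh);

    /* In dma_aio_cancel(): unregister before deleting.  Registration,
     * notification and unregistration all take map_client_list_lock in
     * exec.c, so once cpu_unregister_map_client() returns, the notifier
     * can no longer schedule this BH and it is safe to delete it. */
    if (dbs->bh) {
        cpu_unregister_map_client(dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }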

dma-helpers.c

@@ -92,14 +92,6 @@ static void reschedule_dma(void *opaque)
     dma_blk_cb(dbs, 0);
 }
 
-static void continue_after_map_failure(void *opaque)
-{
-    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
-
-    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
-    qemu_bh_schedule(dbs->bh);
-}
-
 static void dma_blk_unmap(DMAAIOCB *dbs)
 {
     int i;
@@ -161,7 +153,9 @@ static void dma_blk_cb(void *opaque, int ret)
     if (dbs->iov.size == 0) {
         trace_dma_map_wait(dbs);
-        cpu_register_map_client(dbs, continue_after_map_failure);
+        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
+                             reschedule_dma, dbs);
+        cpu_register_map_client(dbs->bh);
         return;
     }
@@ -183,6 +177,11 @@ static void dma_aio_cancel(BlockAIOCB *acb)
     if (dbs->acb) {
         blk_aio_cancel_async(dbs->acb);
     }
+    if (dbs->bh) {
+        cpu_unregister_map_client(dbs->bh);
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
 }

exec.c

@@ -2479,8 +2479,7 @@ typedef struct {
 static BounceBuffer bounce;
 
 typedef struct MapClient {
-    void *opaque;
-    void (*callback)(void *opaque);
+    QEMUBH *bh;
     QLIST_ENTRY(MapClient) link;
 } MapClient;
@@ -2488,31 +2487,34 @@ QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
-static void cpu_unregister_map_client(void *_client);
+static void cpu_unregister_map_client_do(MapClient *client)
+{
+    QLIST_REMOVE(client, link);
+    g_free(client);
+}
 
 static void cpu_notify_map_clients_locked(void)
 {
     MapClient *client;
 
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
-        client->callback(client->opaque);
-        cpu_unregister_map_client(client);
+        qemu_bh_schedule(client->bh);
+        cpu_unregister_map_client_do(client);
     }
 }
 
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+void cpu_register_map_client(QEMUBH *bh)
 {
     MapClient *client = g_malloc(sizeof(*client));
 
     qemu_mutex_lock(&map_client_list_lock);
-    client->opaque = opaque;
-    client->callback = callback;
+    client->bh = bh;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
     if (!atomic_read(&bounce.in_use)) {
         cpu_notify_map_clients_locked();
     }
     qemu_mutex_unlock(&map_client_list_lock);
-    return client;
 }
@@ -2523,12 +2525,18 @@ void cpu_exec_init_all(void)
     qemu_mutex_init(&map_client_list_lock);
 }
 
-static void cpu_unregister_map_client(void *_client)
+void cpu_unregister_map_client(QEMUBH *bh)
 {
-    MapClient *client = (MapClient *)_client;
+    MapClient *client;
 
-    QLIST_REMOVE(client, link);
-    g_free(client);
+    qemu_mutex_lock(&map_client_list_lock);
+    QLIST_FOREACH(client, &map_client_list, link) {
+        if (client->bh == bh) {
+            cpu_unregister_map_client_do(client);
+            break;
+        }
+    }
+    qemu_mutex_unlock(&map_client_list_lock);
 }
 
 static void cpu_notify_map_clients(void)

include/exec/cpu-common.h

@@ -82,7 +82,8 @@ void *cpu_physical_memory_map(hwaddr addr,
                               int is_write);
 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                                int is_write, hwaddr access_len);
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
+void cpu_register_map_client(QEMUBH *bh);
+void cpu_unregister_map_client(QEMUBH *bh);
 bool cpu_physical_memory_is_io(hwaddr phys_addr);