threadpool: drop global thread pool

Now that each AioContext has a ThreadPool and the main loop AioContext
can be fetched with bdrv_get_aio_context(), we can eliminate the concept
of a global thread pool from thread-pool.c.
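
For context, aio_get_thread_pool() (added by the parent commit) lazily creates the
per-AioContext pool, roughly like this; this is a sketch of the lazy-creation
pattern, not code from this commit, and the exact AioContext field name may differ:

    ThreadPool *aio_get_thread_pool(AioContext *ctx)
    {
        /* Create the context's pool on first use */
        if (!ctx->thread_pool) {
            ctx->thread_pool = thread_pool_new(ctx);
        }
        return ctx->thread_pool;
    }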

The submit functions must take a ThreadPool* argument.

block/raw-posix.c and block/raw-win32.c use
aio_get_thread_pool(bdrv_get_aio_context(bs)) to fetch the main loop's
ThreadPool.
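
In both drivers the per-request submission path thus becomes (the same two lines
that appear in the hunks below):

    ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
    return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);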

tests/test-thread-pool.c must be updated to reflect the new
thread_pool_submit() function prototypes.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Stefan Hajnoczi
Date:   2013-03-07 13:41:49 +01:00
parent 85d126f3ee
commit c4d9d19645
5 changed files with 43 additions and 46 deletions

block/raw-posix.c

@@ -750,6 +750,7 @@ static BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd,
         BlockDriverCompletionFunc *cb, void *opaque, int type)
 {
     RawPosixAIOData *acb = g_slice_new(RawPosixAIOData);
+    ThreadPool *pool;
 
     acb->bs = bs;
     acb->aio_type = type;
@@ -763,7 +764,8 @@ static BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd,
     acb->aio_offset = sector_num * 512;
 
     trace_paio_submit(acb, opaque, sector_num, nb_sectors, type);
-    return thread_pool_submit_aio(aio_worker, acb, cb, opaque);
+    pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
+    return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
 }
 
 static BlockDriverAIOCB *raw_aio_submit(BlockDriverState *bs,
@@ -1413,6 +1415,7 @@ static BlockDriverAIOCB *hdev_aio_ioctl(BlockDriverState *bs,
 {
     BDRVRawState *s = bs->opaque;
     RawPosixAIOData *acb;
+    ThreadPool *pool;
 
     if (fd_open(bs) < 0)
         return NULL;
@@ -1424,7 +1427,8 @@ static BlockDriverAIOCB *hdev_aio_ioctl(BlockDriverState *bs,
     acb->aio_offset = 0;
     acb->aio_ioctl_buf = buf;
     acb->aio_ioctl_cmd = req;
-    return thread_pool_submit_aio(aio_worker, acb, cb, opaque);
+    pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
+    return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
 }
 
 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)

block/raw-win32.c

@@ -144,6 +144,7 @@ static BlockDriverAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile,
         BlockDriverCompletionFunc *cb, void *opaque, int type)
 {
     RawWin32AIOData *acb = g_slice_new(RawWin32AIOData);
+    ThreadPool *pool;
 
     acb->bs = bs;
     acb->hfile = hfile;
@@ -157,7 +158,8 @@ static BlockDriverAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile,
     acb->aio_offset = sector_num * 512;
 
     trace_paio_submit(acb, opaque, sector_num, nb_sectors, type);
-    return thread_pool_submit_aio(aio_worker, acb, cb, opaque);
+    pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
+    return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
 }
 
 int qemu_ftruncate64(int fd, int64_t length)

include/block/thread-pool.h

@@ -31,9 +31,11 @@ typedef struct ThreadPool ThreadPool;
 ThreadPool *thread_pool_new(struct AioContext *ctx);
 void thread_pool_free(ThreadPool *pool);
 
-BlockDriverAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
-        BlockDriverCompletionFunc *cb, void *opaque);
-int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg);
-void thread_pool_submit(ThreadPoolFunc *func, void *arg);
+BlockDriverAIOCB *thread_pool_submit_aio(ThreadPool *pool,
+        ThreadPoolFunc *func, void *arg,
+        BlockDriverCompletionFunc *cb, void *opaque);
+int coroutine_fn thread_pool_submit_co(ThreadPool *pool,
+        ThreadPoolFunc *func, void *arg);
+void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg);
 
 #endif
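
A minimal usage sketch of the updated interface from coroutine context, assuming
an AioContext *ctx is already available; my_worker and my_arg are illustrative
placeholders, not part of this commit:

    ThreadPool *pool = aio_get_thread_pool(ctx);
    /* Yields the calling coroutine until my_worker() has run in a pool thread */
    int ret = thread_pool_submit_co(pool, my_worker, my_arg);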

tests/test-thread-pool.c

@@ -4,6 +4,8 @@
 #include "block/thread-pool.h"
 #include "block/block.h"
 
+static AioContext *ctx;
+static ThreadPool *pool;
 static int active;
 
 typedef struct {
@@ -38,19 +40,10 @@ static void done_cb(void *opaque, int ret)
     active--;
 }
 
-/* A non-blocking poll of the main AIO context (we cannot use aio_poll
- * because we do not know the AioContext).
- */
-static void qemu_aio_wait_nonblocking(void)
-{
-    qemu_notify_event();
-    qemu_aio_wait();
-}
-
 /* Wait until all aio and bh activity has finished */
 static void qemu_aio_wait_all(void)
 {
-    while (qemu_aio_wait()) {
+    while (aio_poll(ctx, true)) {
         /* Do nothing */
     }
 }
@@ -58,7 +51,7 @@ static void qemu_aio_wait_all(void)
 static void test_submit(void)
 {
     WorkerTestData data = { .n = 0 };
-    thread_pool_submit(worker_cb, &data);
+    thread_pool_submit(pool, worker_cb, &data);
     qemu_aio_wait_all();
     g_assert_cmpint(data.n, ==, 1);
 }
@@ -66,7 +59,8 @@ static void test_submit(void)
 static void test_submit_aio(void)
 {
     WorkerTestData data = { .n = 0, .ret = -EINPROGRESS };
-    data.aiocb = thread_pool_submit_aio(worker_cb, &data, done_cb, &data);
+    data.aiocb = thread_pool_submit_aio(pool, worker_cb, &data,
+                                        done_cb, &data);
 
     /* The callbacks are not called until after the first wait.  */
     active = 1;
@@ -84,7 +78,7 @@ static void co_test_cb(void *opaque)
     active = 1;
     data->n = 0;
     data->ret = -EINPROGRESS;
-    thread_pool_submit_co(worker_cb, data);
+    thread_pool_submit_co(pool, worker_cb, data);
 
     /* The test continues in test_submit_co, after qemu_coroutine_enter... */
 
@@ -126,12 +120,12 @@ static void test_submit_many(void)
     for (i = 0; i < 100; i++) {
         data[i].n = 0;
         data[i].ret = -EINPROGRESS;
-        thread_pool_submit_aio(worker_cb, &data[i], done_cb, &data[i]);
+        thread_pool_submit_aio(pool, worker_cb, &data[i], done_cb, &data[i]);
     }
 
     active = 100;
     while (active > 0) {
-        qemu_aio_wait();
+        aio_poll(ctx, true);
     }
     for (i = 0; i < 100; i++) {
         g_assert_cmpint(data[i].n, ==, 1);
@@ -154,7 +148,7 @@ static void test_cancel(void)
     for (i = 0; i < 100; i++) {
         data[i].n = 0;
         data[i].ret = -EINPROGRESS;
-        data[i].aiocb = thread_pool_submit_aio(long_cb, &data[i],
+        data[i].aiocb = thread_pool_submit_aio(pool, long_cb, &data[i],
                                                done_cb, &data[i]);
     }
 
@@ -162,7 +156,8 @@ static void test_cancel(void)
      * run, but do not waste too much time...
      */
     active = 100;
-    qemu_aio_wait_nonblocking();
+    aio_notify(ctx);
+    aio_poll(ctx, false);
 
     /* Wait some time for the threads to start, with some sanity
      * testing on the behavior of the scheduler...
@@ -208,11 +203,10 @@ static void test_cancel(void)
 
 int main(int argc, char **argv)
 {
-    /* These should be removed once each AioContext has its thread pool.
-     * The test should create its own AioContext.
-     */
-    qemu_init_main_loop();
-    bdrv_init();
+    int ret;
+
+    ctx = aio_context_new();
+    pool = aio_get_thread_pool(ctx);
 
     g_test_init(&argc, &argv, NULL);
     g_test_add_func("/thread-pool/submit", test_submit);
@@ -220,5 +214,9 @@ int main(int argc, char **argv)
     g_test_add_func("/thread-pool/submit-co", test_submit_co);
     g_test_add_func("/thread-pool/submit-many", test_submit_many);
     g_test_add_func("/thread-pool/cancel", test_cancel);
-    return g_test_run();
+
+    ret = g_test_run();
+
+    aio_context_unref(ctx);
+    return ret;
 }

thread-pool.c

@@ -78,9 +78,6 @@ struct ThreadPool {
     bool stopping;
 };
 
-/* Currently there is only one thread pool instance. */
-static ThreadPool global_pool;
-
 static void *worker_thread(void *opaque)
 {
     ThreadPool *pool = opaque;
@@ -239,10 +236,10 @@ static const AIOCBInfo thread_pool_aiocb_info = {
     .cancel             = thread_pool_cancel,
 };
 
-BlockDriverAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
+BlockDriverAIOCB *thread_pool_submit_aio(ThreadPool *pool,
+        ThreadPoolFunc *func, void *arg,
         BlockDriverCompletionFunc *cb, void *opaque)
 {
-    ThreadPool *pool = &global_pool;
     ThreadPoolElement *req;
 
     req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque);
@@ -278,18 +275,19 @@ static void thread_pool_co_cb(void *opaque, int ret)
     qemu_coroutine_enter(co->co, NULL);
 }
 
-int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg)
+int coroutine_fn thread_pool_submit_co(ThreadPool *pool, ThreadPoolFunc *func,
+                                       void *arg)
 {
     ThreadPoolCo tpc = { .co = qemu_coroutine_self(), .ret = -EINPROGRESS };
     assert(qemu_in_coroutine());
-    thread_pool_submit_aio(func, arg, thread_pool_co_cb, &tpc);
+    thread_pool_submit_aio(pool, func, arg, thread_pool_co_cb, &tpc);
     qemu_coroutine_yield();
    return tpc.ret;
 }
 
-void thread_pool_submit(ThreadPoolFunc *func, void *arg)
+void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg)
 {
-    thread_pool_submit_aio(func, arg, NULL, NULL);
+    thread_pool_submit_aio(pool, func, arg, NULL, NULL);
 }
 
 static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
@@ -354,10 +352,3 @@ void thread_pool_free(ThreadPool *pool)
     event_notifier_cleanup(&pool->notifier);
     g_free(pool);
 }
-
-static void thread_pool_init(void)
-{
-    thread_pool_init_one(&global_pool, NULL);
-}
-
-block_init(thread_pool_init)