gluster: allocate GlusterAIOCBs on the stack

This is simpler now that the driver has been converted to coroutines.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
Paolo Bonzini 2015-10-01 13:04:38 +02:00 committed by Jeff Cody
parent 3c07587d49
commit c833d1e8f5
1 changed file with 33 additions and 53 deletions

View File

@@ -429,28 +429,23 @@ static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, BdrvRequestFlags flags) int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{ {
int ret; int ret;
GlusterAIOCB *acb = g_slice_new(GlusterAIOCB); GlusterAIOCB acb;
BDRVGlusterState *s = bs->opaque; BDRVGlusterState *s = bs->opaque;
off_t size = nb_sectors * BDRV_SECTOR_SIZE; off_t size = nb_sectors * BDRV_SECTOR_SIZE;
off_t offset = sector_num * BDRV_SECTOR_SIZE; off_t offset = sector_num * BDRV_SECTOR_SIZE;
acb->size = size; acb.size = size;
acb->ret = 0; acb.ret = 0;
acb->coroutine = qemu_coroutine_self(); acb.coroutine = qemu_coroutine_self();
acb->aio_context = bdrv_get_aio_context(bs); acb.aio_context = bdrv_get_aio_context(bs);
ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb); ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
if (ret < 0) { if (ret < 0) {
ret = -errno; return -errno;
goto out;
} }
qemu_coroutine_yield(); qemu_coroutine_yield();
ret = acb->ret; return acb.ret;
out:
g_slice_free(GlusterAIOCB, acb);
return ret;
} }
static inline bool gluster_supports_zerofill(void) static inline bool gluster_supports_zerofill(void)
@@ -541,35 +536,30 @@ static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write) int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{ {
int ret; int ret;
GlusterAIOCB *acb = g_slice_new(GlusterAIOCB); GlusterAIOCB acb;
BDRVGlusterState *s = bs->opaque; BDRVGlusterState *s = bs->opaque;
size_t size = nb_sectors * BDRV_SECTOR_SIZE; size_t size = nb_sectors * BDRV_SECTOR_SIZE;
off_t offset = sector_num * BDRV_SECTOR_SIZE; off_t offset = sector_num * BDRV_SECTOR_SIZE;
acb->size = size; acb.size = size;
acb->ret = 0; acb.ret = 0;
acb->coroutine = qemu_coroutine_self(); acb.coroutine = qemu_coroutine_self();
acb->aio_context = bdrv_get_aio_context(bs); acb.aio_context = bdrv_get_aio_context(bs);
if (write) { if (write) {
ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0, ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
&gluster_finish_aiocb, acb); gluster_finish_aiocb, &acb);
} else { } else {
ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0, ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
&gluster_finish_aiocb, acb); gluster_finish_aiocb, &acb);
} }
if (ret < 0) { if (ret < 0) {
ret = -errno; return -errno;
goto out;
} }
qemu_coroutine_yield(); qemu_coroutine_yield();
ret = acb->ret; return acb.ret;
out:
g_slice_free(GlusterAIOCB, acb);
return ret;
} }
static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset) static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
@@ -600,26 +590,21 @@ static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs) static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{ {
int ret; int ret;
GlusterAIOCB *acb = g_slice_new(GlusterAIOCB); GlusterAIOCB acb;
BDRVGlusterState *s = bs->opaque; BDRVGlusterState *s = bs->opaque;
acb->size = 0; acb.size = 0;
acb->ret = 0; acb.ret = 0;
acb->coroutine = qemu_coroutine_self(); acb.coroutine = qemu_coroutine_self();
acb->aio_context = bdrv_get_aio_context(bs); acb.aio_context = bdrv_get_aio_context(bs);
ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb); ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
if (ret < 0) { if (ret < 0) {
ret = -errno; return -errno;
goto out;
} }
qemu_coroutine_yield(); qemu_coroutine_yield();
ret = acb->ret; return acb.ret;
out:
g_slice_free(GlusterAIOCB, acb);
return ret;
} }
#ifdef CONFIG_GLUSTERFS_DISCARD #ifdef CONFIG_GLUSTERFS_DISCARD
@@ -627,28 +612,23 @@ static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
int64_t sector_num, int nb_sectors) int64_t sector_num, int nb_sectors)
{ {
int ret; int ret;
GlusterAIOCB *acb = g_slice_new(GlusterAIOCB); GlusterAIOCB acb;
BDRVGlusterState *s = bs->opaque; BDRVGlusterState *s = bs->opaque;
size_t size = nb_sectors * BDRV_SECTOR_SIZE; size_t size = nb_sectors * BDRV_SECTOR_SIZE;
off_t offset = sector_num * BDRV_SECTOR_SIZE; off_t offset = sector_num * BDRV_SECTOR_SIZE;
acb->size = 0; acb.size = 0;
acb->ret = 0; acb.ret = 0;
acb->coroutine = qemu_coroutine_self(); acb.coroutine = qemu_coroutine_self();
acb->aio_context = bdrv_get_aio_context(bs); acb.aio_context = bdrv_get_aio_context(bs);
ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb); ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
if (ret < 0) { if (ret < 0) {
ret = -errno; return -errno;
goto out;
} }
qemu_coroutine_yield(); qemu_coroutine_yield();
ret = acb->ret; return acb.ret;
out:
g_slice_free(GlusterAIOCB, acb);
return ret;
} }
#endif #endif