block/nbd-client: rename read_reply_co to connection_co

This coroutine will serve nbd reconnects, so rename it to something
more generic.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20190201130138.94525-7-vsementsov@virtuozzo.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
commit bc5a03350c (parent 88ed4e1bf0)
Author: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Date:   2019-02-01 16:01:38 +03:00
Committer: Eric Blake <eblake@redhat.com>

2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/block/nbd-client.c b/block/nbd-client.c
--- a/block/nbd-client.c
+++ b/block/nbd-client.c

@@ -59,7 +59,7 @@ static void nbd_teardown_connection(BlockDriverState *bs)
     qio_channel_shutdown(client->ioc,
                          QIO_CHANNEL_SHUTDOWN_BOTH,
                          NULL);
-    BDRV_POLL_WHILE(bs, client->read_reply_co);
+    BDRV_POLL_WHILE(bs, client->connection_co);
 
     nbd_client_detach_aio_context(bs);
     object_unref(OBJECT(client->sioc));
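
Review note (not part of the patch): the BDRV_POLL_WHILE() above terminates
because the coroutine clears the pointer itself on exit (see the -116 hunk
below) and then kicks waiters. Condensed, the pairing looks like this:

    /* nbd_teardown_connection(): spin the event loop until the
     * connection coroutine has really finished... */
    BDRV_POLL_WHILE(bs, client->connection_co);

    /* ...which happens in the last lines of nbd_connection_entry(): */
    s->connection_co = NULL;
    aio_wait_kick();    /* wake pollers such as BDRV_POLL_WHILE() */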
@@ -68,7 +68,7 @@ static void nbd_teardown_connection(BlockDriverState *bs)
     client->ioc = NULL;
 }
 
-static coroutine_fn void nbd_read_reply_entry(void *opaque)
+static coroutine_fn void nbd_connection_entry(void *opaque)
 {
     NBDClientSession *s = opaque;
     uint64_t i;
@@ -100,14 +100,14 @@ static coroutine_fn void nbd_read_reply_entry(void *opaque)
         }
 
         /* We're woken up again by the request itself.  Note that there
-         * is no race between yielding and reentering read_reply_co.  This
+         * is no race between yielding and reentering connection_co.  This
          * is because:
          *
         * - if the request runs on the same AioContext, it is only
         *   entered after we yield
         *
         * - if the request runs on a different AioContext, reentering
-         * read_reply_co happens through a bottom half, which can only
+         * connection_co happens through a bottom half, which can only
         * run after we yield.
         */
        aio_co_wake(s->requests[i].coroutine);
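
Review note (not part of the patch): the "no race" comment above leans on
how aio_co_wake() enters a coroutine. A condensed sketch of those rules,
loosely modeled on QEMU's util/async.c; this is simplified for illustration,
and wake_sketch/co_home_ctx are made-up names:

    /* Wake 'co', whose home context is co_home_ctx, from wherever the
     * caller happens to be running (simplified model, not real code). */
    static void wake_sketch(Coroutine *co, AioContext *co_home_ctx)
    {
        if (qemu_get_current_aio_context() == co_home_ctx) {
            /* Same AioContext: when the caller is itself a coroutine,
             * 'co' is queued and entered only once the caller yields,
             * hence "entered after we yield" above. */
            aio_co_enter(co_home_ctx, co);
        } else {
            /* Different AioContext: entry is deferred to a bottom half
             * in co's home context, and that bottom half cannot run
             * until the coroutine active there has yielded. */
            aio_co_schedule(co_home_ctx, co);
        }
    }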
@@ -116,7 +116,7 @@ static coroutine_fn void nbd_read_reply_entry(void *opaque)
 
     s->quit = true;
     nbd_recv_coroutines_wake_all(s);
-    s->read_reply_co = NULL;
+    s->connection_co = NULL;
     aio_wait_kick();
 }
 
@@ -420,7 +420,7 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
     }
     *request_ret = 0;
 
-    /* Wait until we're woken up by nbd_read_reply_entry. */
+    /* Wait until we're woken up by nbd_connection_entry. */
     s->requests[i].receiving = true;
     qemu_coroutine_yield();
     s->requests[i].receiving = false;
@@ -495,7 +495,7 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
     }
 
 /* nbd_co_receive_one_chunk
- * Read reply, wake up read_reply_co and set s->quit if needed.
+ * Read reply, wake up connection_co and set s->quit if needed.
  * Return value is a fatal error code or normal nbd reply error code
  */
 static coroutine_fn int nbd_co_receive_one_chunk(
@@ -509,15 +509,15 @@ static coroutine_fn int nbd_co_receive_one_chunk(
     if (ret < 0) {
         s->quit = true;
     } else {
-        /* For assert at loop start in nbd_read_reply_entry */
+        /* For assert at loop start in nbd_connection_entry */
         if (reply) {
             *reply = s->reply;
         }
         s->reply.handle = 0;
     }
 
-    if (s->read_reply_co) {
-        aio_co_wake(s->read_reply_co);
+    if (s->connection_co) {
+        aio_co_wake(s->connection_co);
     }
 
     return ret;
@@ -970,7 +970,7 @@ void nbd_client_attach_aio_context(BlockDriverState *bs,
 {
     NBDClientSession *client = nbd_get_client_session(bs);
     qio_channel_attach_aio_context(QIO_CHANNEL(client->ioc), new_context);
-    aio_co_schedule(new_context, client->read_reply_co);
+    aio_co_schedule(new_context, client->connection_co);
 }
 
 void nbd_client_close(BlockDriverState *bs)
@@ -1075,7 +1075,7 @@ static int nbd_client_connect(BlockDriverState *bs,
     /* Now that we're connected, set the socket to be non-blocking and
      * kick the reply mechanism. */
     qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
-    client->read_reply_co = qemu_coroutine_create(nbd_read_reply_entry, client);
+    client->connection_co = qemu_coroutine_create(nbd_connection_entry, client);
     nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs));
 
     logout("Established connection with NBD server\n");

diff --git a/block/nbd-client.h b/block/nbd-client.h
--- a/block/nbd-client.h
+++ b/block/nbd-client.h

@@ -20,7 +20,7 @@
 typedef struct {
     Coroutine *coroutine;
     uint64_t offset;        /* original offset of the request */
-    bool receiving;         /* waiting for read_reply_co? */
+    bool receiving;         /* waiting for connection_co? */
 } NBDClientRequest;
 
 typedef struct NBDClientSession {
@@ -30,7 +30,7 @@ typedef struct NBDClientSession {
 
     CoMutex send_mutex;
     CoQueue free_sema;
-    Coroutine *read_reply_co;
+    Coroutine *connection_co;
     int in_flight;
 
     NBDClientRequest requests[MAX_NBD_REQUESTS];
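
Reviewer postscript: taken together, the renamed pieces implement a simple
handoff between the single connection coroutine and the per-request
coroutines. A condensed sketch of that protocol, assuming QEMU's coroutine
API; the Session type and both function names here are illustrative, not
the driver's real ones:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    typedef struct Session {
        Coroutine *connection_co;  /* reads reply headers off the socket */
        Coroutine *request_co;     /* one in-flight request, for brevity */
        bool receiving;            /* request parked, awaiting its reply */
    } Session;

    /* connection_co side: dispatch each incoming reply to the waiter. */
    static coroutine_fn void connection_entry(void *opaque)
    {
        Session *s = opaque;
        for (;;) {
            /* ... read one reply header from the socket ... */
            aio_co_wake(s->request_co);  /* hand the reply over */
            qemu_coroutine_yield();      /* re-woken once it is consumed */
        }
    }

    /* request side: park until connection_co delivers the reply, then
     * let it go back to reading the next header. */
    static coroutine_fn void receive_one_chunk(Session *s)
    {
        s->receiving = true;
        qemu_coroutine_yield();          /* woken by connection_entry() */
        s->receiving = false;
        /* ... read this request's payload ... */
        if (s->connection_co) {
            aio_co_wake(s->connection_co);
        }
    }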