nbd/server: Fix race in draining the export
When draining an NBD export, nbd_drained_begin() first sets
client->quiescing so that nbd_client_receive_next_request() won't start
any new request coroutines. Then nbd_drained_poll() tries to make sure
that we wait for any existing request coroutines by checking that
client->nb_requests has become 0.
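
As a reminder of the mechanism, the drain side looks roughly like this
(a condensed editorial sketch based on the description above, not the
verbatim nbd/server.c code; the real nbd_drained_poll() also has to
kick a quiesced receive coroutine, which is omitted here):

    static void nbd_drained_begin(void *opaque)
    {
        NBDExport *exp = opaque;
        NBDClient *client;

        QTAILQ_FOREACH(client, &exp->clients, next) {
            WITH_QEMU_LOCK_GUARD(&client->lock) {
                /* Stop nbd_client_receive_next_request() from
                 * starting new request coroutines. */
                client->quiescing = true;
            }
        }
    }

    static bool nbd_drained_poll(void *opaque)
    {
        NBDExport *exp = opaque;
        NBDClient *client;

        QTAILQ_FOREACH(client, &exp->clients, next) {
            WITH_QEMU_LOCK_GUARD(&client->lock) {
                if (client->nb_requests != 0) {
                    /* Keep polling: request coroutines in flight. */
                    return true;
                }
            }
        }
        return false;
    }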
However, there is a small window between creating a new request
coroutine and increasing client->nb_requests. If a coroutine is in this
state, it won't be waited for and drain returns too early.
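
Condensed from the pre-patch code in the diff below, the window looks
like this (the comments are editorial):

    static void nbd_client_receive_next_request(NBDClient *client)
    {
        if (!client->recv_coroutine && client->nb_requests < MAX_NBD_REQUESTS &&
            !client->quiescing) {
            nbd_client_get(client);
            client->recv_coroutine = qemu_coroutine_create(nbd_trip, client);
            aio_co_schedule(client->exp->common.ctx, client->recv_coroutine);
            /*
             * Window: the coroutine is created and scheduled, but
             * client->nb_requests is still 0 until nbd_trip() runs.
             * nbd_drained_poll() called now sees no requests in flight
             * and lets the drain complete too early.
             */
        }
    }

    static coroutine_fn void nbd_trip(void *opaque)
    {
        NBDClient *client = opaque;
        NBDRequestData *req = NULL;
        /* ... */
        req = nbd_request_get(client);  /* nb_requests++ only happens here */
        /* ... */
    }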
In the context of switching to a different AioContext, this means that
blk_aio_attached() will see client->recv_coroutine != NULL and fail its
assertion.
Fix this by increasing client->nb_requests immediately when starting the
coroutine. Doing this after checking whether we should create a new
coroutine is safe because client->lock is held.
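
What matters is that nbd_request_get() is where the counter is bumped;
a minimal sketch of that helper (approximate, from memory of
nbd/server.c): with client->lock held by the caller, the
quiescing/nb_requests checks and this increment form a single critical
section:

    /* Must be called with client->lock held. */
    static NBDRequestData *nbd_request_get(NBDClient *client)
    {
        NBDRequestData *req;

        assert(client->nb_requests <= MAX_NBD_REQUESTS - 1);
        client->nb_requests++;          /* visible to nbd_drained_poll() */

        req = g_new0(NBDRequestData, 1);
        req->client = client;
        return req;
    }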
Cc: qemu-stable@nongnu.org
Fixes: fd6afc501a ("nbd/server: Use drained block ops to quiesce the server")
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20240314165825.40261-2-kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
parent ae5a40e858
commit 9c707525cb

 nbd/server.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -3007,8 +3007,8 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
 /* Owns a reference to the NBDClient passed as opaque. */
 static coroutine_fn void nbd_trip(void *opaque)
 {
-    NBDClient *client = opaque;
-    NBDRequestData *req = NULL;
+    NBDRequestData *req = opaque;
+    NBDClient *client = req->client;
     NBDRequest request = { 0 };    /* GCC thinks it can be used uninitialized */
     int ret;
     Error *local_err = NULL;
@@ -3037,8 +3037,6 @@ static coroutine_fn void nbd_trip(void *opaque)
         goto done;
     }
 
-    req = nbd_request_get(client);
-
     /*
      * nbd_co_receive_request() returns -EAGAIN when nbd_drained_begin() has
      * set client->quiescing but by the time we get back nbd_drained_end() may
@@ -3112,9 +3110,7 @@ static coroutine_fn void nbd_trip(void *opaque)
     }
 
 done:
-    if (req) {
-        nbd_request_put(req);
-    }
+    nbd_request_put(req);
 
     qemu_mutex_unlock(&client->lock);
 
@@ -3143,10 +3139,13 @@ disconnect:
  */
 static void nbd_client_receive_next_request(NBDClient *client)
 {
+    NBDRequestData *req;
+
     if (!client->recv_coroutine && client->nb_requests < MAX_NBD_REQUESTS &&
         !client->quiescing) {
         nbd_client_get(client);
-        client->recv_coroutine = qemu_coroutine_create(nbd_trip, client);
+        req = nbd_request_get(client);
+        client->recv_coroutine = qemu_coroutine_create(nbd_trip, req);
         aio_co_schedule(client->exp->common.ctx, client->recv_coroutine);
     }
 }