coroutine-lock: Reimplement CoRwlock to fix downgrade bug
An invariant of the current rwlock is that if multiple coroutines hold a reader lock, all must be runnable. The unlock implementation relies on this, choosing to wake a single coroutine when the final read lock holder exits the critical section, assuming that it will wake a coroutine attempting to acquire a write lock. The downgrade implementation violates this assumption by creating a read lock owning coroutine that is exclusively runnable - any other coroutines that are waiting to acquire a read lock are *not* made runnable when the write lock holder converts its ownership to read only. More generally, the old implementation had many other fairness bugs. The root cause of the bugs was that CoQueue would wake up readers even if there were pending writers, and would wake up writers even if there were readers. In that case, the coroutine would go back to sleep *at the end* of the CoQueue, losing its place at the head of the line. To fix this, keep the queue of waiters explicitly in the CoRwlock instead of using CoQueue, and store for each whether it is a potential reader or a writer. This way, downgrade can look at the first queued coroutine and wake it only if it is a reader, causing all other readers in line to be released in turn. Reported-by: David Edmondson <david.edmondson@oracle.com> Reviewed-by: David Edmondson <david.edmondson@oracle.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Message-id: 20210325112941.365238-5-pbonzini@redhat.com Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
parent
2f6ef0393b
commit
050de36b13
@ -237,11 +237,15 @@ bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock);
|
|||||||
bool qemu_co_queue_empty(CoQueue *queue);
|
bool qemu_co_queue_empty(CoQueue *queue);
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct CoRwTicket CoRwTicket;
|
||||||
typedef struct CoRwlock {
|
typedef struct CoRwlock {
|
||||||
int pending_writer;
|
|
||||||
int reader;
|
|
||||||
CoMutex mutex;
|
CoMutex mutex;
|
||||||
CoQueue queue;
|
|
||||||
|
/* Number of readers, or -1 if owned for writing. */
|
||||||
|
int owners;
|
||||||
|
|
||||||
|
/* Waiting coroutines. */
|
||||||
|
QSIMPLEQ_HEAD(, CoRwTicket) tickets;
|
||||||
} CoRwlock;
|
} CoRwlock;
|
||||||
|
|
||||||
/**
 * Write Locks the CoRwlock from a reader. This is a bit more efficient than
 * @qemu_co_rwlock_unlock followed by a separate @qemu_co_rwlock_wrlock.
 * Note that if the lock cannot be upgraded immediately, control is transferred
 * to the caller of the current coroutine; another writer might run while
 * @qemu_co_rwlock_upgrade blocks.
 */
void qemu_co_rwlock_upgrade(CoRwlock *lock);
||||||
|
|
||||||
|
@ -327,11 +327,51 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
|
|||||||
trace_qemu_co_mutex_unlock_return(mutex, self);
|
trace_qemu_co_mutex_unlock_return(mutex, self);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct CoRwTicket {
|
||||||
|
bool read;
|
||||||
|
Coroutine *co;
|
||||||
|
QSIMPLEQ_ENTRY(CoRwTicket) next;
|
||||||
|
};
|
||||||
|
|
||||||
/* Initialize @lock: no owners, no waiters. */
void qemu_co_rwlock_init(CoRwlock *lock)
{
    qemu_co_mutex_init(&lock->mutex);
    lock->owners = 0;
    QSIMPLEQ_INIT(&lock->tickets);
}
|
||||||
|
|
||||||
|
/* Releases the internal CoMutex. */
|
||||||
|
static void qemu_co_rwlock_maybe_wake_one(CoRwlock *lock)
|
||||||
|
{
|
||||||
|
CoRwTicket *tkt = QSIMPLEQ_FIRST(&lock->tickets);
|
||||||
|
Coroutine *co = NULL;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Setting lock->owners here prevents rdlock and wrlock from
|
||||||
|
* sneaking in between unlock and wake.
|
||||||
|
*/
|
||||||
|
|
||||||
|
if (tkt) {
|
||||||
|
if (tkt->read) {
|
||||||
|
if (lock->owners >= 0) {
|
||||||
|
lock->owners++;
|
||||||
|
co = tkt->co;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (lock->owners == 0) {
|
||||||
|
lock->owners = -1;
|
||||||
|
co = tkt->co;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (co) {
|
||||||
|
QSIMPLEQ_REMOVE_HEAD(&lock->tickets, next);
|
||||||
|
qemu_co_mutex_unlock(&lock->mutex);
|
||||||
|
aio_co_wake(co);
|
||||||
|
} else {
|
||||||
|
qemu_co_mutex_unlock(&lock->mutex);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
 * Take a read lock.  For fairness, block behind any queued waiter (even a
 * reader queued behind a writer) instead of overtaking it; when woken, wake
 * the next queued reader in turn so all compatible readers are released.
 */
void qemu_co_rwlock_rdlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    /* For fairness, wait if a writer is in line.  */
    if (lock->owners == 0 || (lock->owners > 0 && QSIMPLEQ_EMPTY(&lock->tickets))) {
        lock->owners++;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { true, self };

        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_mutex_unlock(&lock->mutex);
        qemu_coroutine_yield();
        /* The waker already counted us in lock->owners. */
        assert(lock->owners >= 1);

        /* Possibly wake another reader, which will wake the next in line.  */
        qemu_co_mutex_lock(&lock->mutex);
        qemu_co_rwlock_maybe_wake_one(lock);
    }

    self->locks_held++;
}
|
||||||
|
|
||||||
@ -355,69 +404,64 @@ void qemu_co_rwlock_unlock(CoRwlock *lock)
|
|||||||
Coroutine *self = qemu_coroutine_self();
|
Coroutine *self = qemu_coroutine_self();
|
||||||
|
|
||||||
assert(qemu_in_coroutine());
|
assert(qemu_in_coroutine());
|
||||||
if (!lock->reader) {
|
self->locks_held--;
|
||||||
/* The critical section started in qemu_co_rwlock_wrlock. */
|
|
||||||
qemu_co_queue_restart_all(&lock->queue);
|
|
||||||
} else {
|
|
||||||
self->locks_held--;
|
|
||||||
|
|
||||||
qemu_co_mutex_lock(&lock->mutex);
|
qemu_co_mutex_lock(&lock->mutex);
|
||||||
lock->reader--;
|
if (lock->owners > 0) {
|
||||||
assert(lock->reader >= 0);
|
lock->owners--;
|
||||||
/* Wakeup only one waiting writer */
|
} else {
|
||||||
if (!lock->reader) {
|
assert(lock->owners == -1);
|
||||||
qemu_co_queue_next(&lock->queue);
|
lock->owners = 0;
|
||||||
}
|
|
||||||
}
|
}
|
||||||
qemu_co_mutex_unlock(&lock->mutex);
|
|
||||||
|
qemu_co_rwlock_maybe_wake_one(lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
 * Convert a held write lock into a read lock.  After switching owners from
 * -1 to 1, wake the first queued waiter if it is a reader; that reader will
 * in turn wake the next one, so every reader in line is released.
 * No change to self->locks_held: we held one lock before and still do.
 */
void qemu_co_rwlock_downgrade(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->owners == -1);
    lock->owners = 1;

    /* Possibly wake another reader, which will wake the next in line.  */
    qemu_co_rwlock_maybe_wake_one(lock);
}
|
||||||
|
|
||||||
/*
 * Take the write lock.  Fast path when the lock is completely free;
 * otherwise queue a writer ticket and yield until unlock/downgrade
 * hands ownership to us.
 */
void qemu_co_rwlock_wrlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    if (lock->owners == 0) {
        lock->owners = -1;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { false, qemu_coroutine_self() };

        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_mutex_unlock(&lock->mutex);
        qemu_coroutine_yield();
        /* The waker set owners to -1 on our behalf. */
        assert(lock->owners == -1);
    }

    self->locks_held++;
}
|
||||||
|
|
||||||
/*
 * Convert a held read lock into the write lock.  If we are the sole reader
 * and nobody is queued, upgrade immediately; otherwise drop our reader
 * count, queue a writer ticket, possibly wake the next waiter, and yield
 * until ownership is handed to us.  self->locks_held is not touched: the
 * one lock we held is merely converted, never released.
 */
void qemu_co_rwlock_upgrade(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->owners > 0);
    /* For fairness, wait if a writer is in line.  */
    if (lock->owners == 1 && QSIMPLEQ_EMPTY(&lock->tickets)) {
        lock->owners = -1;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { false, qemu_coroutine_self() };

        lock->owners--;
        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        /* Releases lock->mutex; may wake a waiter ahead of us. */
        qemu_co_rwlock_maybe_wake_one(lock);
        qemu_coroutine_yield();
        assert(lock->owners == -1);
    }
}
|
||||||
|
Loading…
Reference in New Issue
Block a user