/*
 * Seqlock implementation for QEMU
 *
 * Copyright Red Hat, Inc. 2013
 *
 * Author:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
|
2016-06-29 15:29:06 +02:00
|
|
|
|
2013-09-25 08:20:57 +02:00
|
|
|
#ifndef QEMU_SEQLOCK_H
|
2016-06-29 15:29:06 +02:00
|
|
|
#define QEMU_SEQLOCK_H
|
2013-09-25 08:20:57 +02:00
|
|
|
|
2016-06-22 19:11:19 +02:00
|
|
|
#include "qemu/atomic.h"
|
|
|
|
#include "qemu/thread.h"
|
2018-08-14 09:48:29 +02:00
|
|
|
#include "qemu/lockable.h"
|
2013-09-25 08:20:57 +02:00
|
|
|
|
|
|
|
typedef struct QemuSeqLock QemuSeqLock;
|
|
|
|
|
|
|
|
struct QemuSeqLock {
|
|
|
|
unsigned sequence;
|
|
|
|
};
|
|
|
|
|
2016-06-08 20:55:20 +02:00
|
|
|
static inline void seqlock_init(QemuSeqLock *sl)
|
2013-09-25 08:20:57 +02:00
|
|
|
{
|
|
|
|
sl->sequence = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Lock out other writers and update the count. */
|
2016-06-08 20:55:21 +02:00
|
|
|
static inline void seqlock_write_begin(QemuSeqLock *sl)
|
2013-09-25 08:20:57 +02:00
|
|
|
{
|
2016-09-30 23:30:56 +02:00
|
|
|
atomic_set(&sl->sequence, sl->sequence + 1);
|
2013-09-25 08:20:57 +02:00
|
|
|
|
|
|
|
/* Write sequence before updating other fields. */
|
|
|
|
smp_wmb();
|
|
|
|
}
|
|
|
|
|
2016-06-08 20:55:21 +02:00
|
|
|
static inline void seqlock_write_end(QemuSeqLock *sl)
|
2013-09-25 08:20:57 +02:00
|
|
|
{
|
|
|
|
/* Write other fields before finalizing sequence. */
|
|
|
|
smp_wmb();
|
|
|
|
|
2016-09-30 23:30:56 +02:00
|
|
|
atomic_set(&sl->sequence, sl->sequence + 1);
|
2013-09-25 08:20:57 +02:00
|
|
|
}
|
|
|
|
|
2018-08-14 09:48:29 +02:00
|
|
|
/* Lock out other writers and update the count. */
|
|
|
|
static inline void seqlock_write_lock_impl(QemuSeqLock *sl, QemuLockable *lock)
|
|
|
|
{
|
|
|
|
qemu_lockable_lock(lock);
|
|
|
|
seqlock_write_begin(sl);
|
|
|
|
}
|
|
|
|
#define seqlock_write_lock(sl, lock) \
|
|
|
|
seqlock_write_lock_impl(sl, QEMU_MAKE_LOCKABLE(lock))
|
|
|
|
|
|
|
|
/* Lock out other writers and update the count. */
|
|
|
|
static inline void seqlock_write_unlock_impl(QemuSeqLock *sl, QemuLockable *lock)
|
|
|
|
{
|
|
|
|
qemu_lockable_unlock(lock);
|
|
|
|
seqlock_write_begin(sl);
|
|
|
|
}
|
|
|
|
#define seqlock_write_unlock(sl, lock) \
|
|
|
|
seqlock_write_unlock_impl(sl, QEMU_MAKE_LOCKABLE(lock))
|
|
|
|
|
|
|
|
|
2018-08-18 06:25:12 +02:00
|
|
|
static inline unsigned seqlock_read_begin(const QemuSeqLock *sl)
|
2013-09-25 08:20:57 +02:00
|
|
|
{
|
|
|
|
/* Always fail if a write is in progress. */
|
2015-08-24 02:23:36 +02:00
|
|
|
unsigned ret = atomic_read(&sl->sequence);
|
2013-09-25 08:20:57 +02:00
|
|
|
|
|
|
|
/* Read sequence before reading other fields. */
|
|
|
|
smp_rmb();
|
2015-08-24 02:23:36 +02:00
|
|
|
return ret & ~1;
|
2013-09-25 08:20:57 +02:00
|
|
|
}
|
|
|
|
|
2015-08-24 02:23:35 +02:00
|
|
|
static inline int seqlock_read_retry(const QemuSeqLock *sl, unsigned start)
|
2013-09-25 08:20:57 +02:00
|
|
|
{
|
|
|
|
/* Read other fields before reading final sequence. */
|
|
|
|
smp_rmb();
|
2015-08-24 02:23:36 +02:00
|
|
|
return unlikely(atomic_read(&sl->sequence) != start);
|
2013-09-25 08:20:57 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|