#ifndef QEMU_THREAD_H
#define QEMU_THREAD_H

#include "qemu/processor.h"
#include "qemu/atomic.h"

typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
typedef struct QemuEvent QemuEvent;
typedef struct QemuLockCnt QemuLockCnt;
typedef struct QemuThread QemuThread;

#ifdef _WIN32
#include "qemu/thread-win32.h"
#else
#include "qemu/thread-posix.h"
#endif

/* include QSP header once QemuMutex, QemuCond etc. are defined */
#include "qemu/qsp.h"

#define QEMU_THREAD_JOINABLE 0
#define QEMU_THREAD_DETACHED 1

void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);

void qemu_rec_mutex_init(QemuRecMutex *mutex);
void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line);
void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line);

typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
                                 int l);
typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
                                      const char *f, int l);

extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
extern QemuCondWaitFunc qemu_cond_wait_func;
extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;
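
/*
 * These indirection pointers let a profiler (see the QSP header included
 * above, and the "__raw" macros below that bypass it) interpose on mutex
 * and condition-variable operations at run time.
 */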

/* convenience macros to bypass the profiler */
#define qemu_mutex_lock__raw(m)                         \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock__raw(m)                      \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)

#ifdef __COVERITY__
/*
 * Coverity is severely confused by the indirect function calls;
 * hide them.
 */
#define qemu_mutex_lock(m)                                              \
            qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m)                                           \
            qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m)                                          \
            qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m)                                       \
            qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m)                                            \
            qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
#define qemu_cond_timedwait(c, m, ms)                                   \
            qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
#define qemu_mutex_lock(m) ({                                           \
            QemuMutexLockFunc _f = qatomic_read(&qemu_mutex_lock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_mutex_trylock(m) ({                                              \
            QemuMutexTrylockFunc _f = qatomic_read(&qemu_mutex_trylock_func); \
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_lock(m) ({                                             \
            QemuRecMutexLockFunc _f = qatomic_read(&qemu_rec_mutex_lock_func);\
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_trylock(m) ({                         \
            QemuRecMutexTrylockFunc _f;                      \
            _f = qatomic_read(&qemu_rec_mutex_trylock_func); \
            _f(m, __FILE__, __LINE__);                       \
        })

#define qemu_cond_wait(c, m) ({                                         \
            QemuCondWaitFunc _f = qatomic_read(&qemu_cond_wait_func);   \
            _f(c, m, __FILE__, __LINE__);                               \
        })

#define qemu_cond_timedwait(c, m, ms) ({                                       \
            QemuCondTimedWaitFunc _f = qatomic_read(&qemu_cond_timedwait_func);\
            _f(c, m, ms, __FILE__, __LINE__);                                  \
        })
#endif

#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)

#define qemu_rec_mutex_unlock(mutex) \
        qemu_rec_mutex_unlock_impl(mutex, __FILE__, __LINE__)
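
/*
 * The function names below are wrapped in parentheses to suppress
 * expansion of the function-like macros defined above; this provides
 * real functions of the same name, e.g. for taking their address.
 */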

static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}

static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}

static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}

static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}

static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}

static inline void (qemu_rec_mutex_unlock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_unlock(mutex);
}

void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);

/*
 * IMPORTANT: The implementation does not guarantee that pthread_cond_signal
 * and pthread_cond_broadcast can be called except while the same mutex is
 * held as in the corresponding pthread_cond_wait calls!
 */
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                         const char *file, const int line);
bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line);

static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}

/* Returns true if timeout has not expired, and false otherwise */
static inline bool (qemu_cond_timedwait)(QemuCond *cond, QemuMutex *mutex,
                                         int ms)
{
    return qemu_cond_timedwait(cond, mutex, ms);
}
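
/*
 * Illustrative wait-loop sketch (the "req_pending" predicate is
 * hypothetical): re-check the condition after every wakeup, and hold the
 * same mutex around both the wait and the state change.
 *
 *     qemu_mutex_lock(&lock);
 *     while (!req_pending) {
 *         qemu_cond_wait(&cond, &lock);
 *     }
 *     req_pending = false;
 *     qemu_mutex_unlock(&lock);
 *
 * The signalling side sets req_pending under the same lock and then
 * calls qemu_cond_signal(&cond).
 */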

void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
void qemu_sem_destroy(QemuSemaphore *sem);
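
/*
 * Illustrative sketch: a semaphore initialized to 0 acts as a simple
 * producer/consumer handoff (names are hypothetical).
 *
 *     QemuSemaphore sem;
 *
 *     qemu_sem_init(&sem, 0);
 *     qemu_sem_post(&sem);      // producer: publish one item
 *     qemu_sem_wait(&sem);      // consumer: blocks until posted
 *
 * qemu_sem_timedwait() takes a timeout in milliseconds; treating a zero
 * return value as success is an assumption to verify against the
 * implementation.
 */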

void qemu_event_init(QemuEvent *ev, bool init);
void qemu_event_set(QemuEvent *ev);
void qemu_event_reset(QemuEvent *ev);
void qemu_event_wait(QemuEvent *ev);
void qemu_event_destroy(QemuEvent *ev);
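
/*
 * Illustrative sketch: an event is a latch that waiters block on until
 * it is set, and that can be re-armed with reset.
 *
 *     QemuEvent ev;
 *
 *     qemu_event_init(&ev, false);   // start in the "not set" state
 *     qemu_event_wait(&ev);          // waiter: blocks until set
 *     qemu_event_set(&ev);           // signaller: releases waiters
 *     qemu_event_reset(&ev);         // re-arm for the next round
 */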

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
void qemu_thread_exit(void *retval) QEMU_NORETURN;
void qemu_thread_naming(bool enable);
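
/*
 * Illustrative sketch (the worker routine is hypothetical):
 *
 *     static void *worker_fn(void *arg)
 *     {
 *         return NULL;
 *     }
 *
 *     QemuThread worker;
 *
 *     qemu_thread_create(&worker, "worker", worker_fn, NULL,
 *                        QEMU_THREAD_JOINABLE);
 *     qemu_thread_join(&worker);   // only valid for joinable threads
 */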

struct Notifier;

/**
 * qemu_thread_atexit_add:
 * @notifier: Notifier to add
 *
 * Add the specified notifier to a list which will be run via
 * notifier_list_notify() when this thread exits (either by calling
 * qemu_thread_exit() or by returning from its start_routine).
 * The usual usage is that the caller passes a Notifier which is
 * a per-thread variable; it can then use the callback to free
 * other per-thread data.
 *
 * If the thread exits as part of the entire process exiting,
 * it is unspecified whether notifiers are called or not.
 */
void qemu_thread_atexit_add(struct Notifier *notifier);

/**
 * qemu_thread_atexit_remove:
 * @notifier: Notifier to remove
 *
 * Remove the specified notifier from the thread-exit notification
 * list.  It is not valid to try to remove a notifier which is not
 * on the list.
 */
void qemu_thread_atexit_remove(struct Notifier *notifier);
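
/*
 * Illustrative sketch, assuming the Notifier type from "qemu/notify.h"
 * (the callback and per-thread data here are hypothetical):
 *
 *     static __thread Notifier thread_exit_notifier;
 *     static __thread void *per_thread_data;
 *
 *     static void thread_exit_cb(Notifier *n, void *data)
 *     {
 *         g_free(per_thread_data);
 *     }
 *
 *     thread_exit_notifier.notify = thread_exit_cb;
 *     qemu_thread_atexit_add(&thread_exit_notifier);
 */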

#ifdef CONFIG_TSAN
#include <sanitizer/tsan_interface.h>
#endif

struct QemuSpin {
    int value;
};

static inline void qemu_spin_init(QemuSpin *spin)
{
    __sync_lock_release(&spin->value);
#ifdef CONFIG_TSAN
    __tsan_mutex_create(spin, __tsan_mutex_not_static);
#endif
}

/* const parameter because the only purpose here is the TSAN annotation */
static inline void qemu_spin_destroy(const QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_destroy((void *)spin, __tsan_mutex_not_static);
#endif
}

static inline void qemu_spin_lock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, 0);
#endif
    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
        while (qatomic_read(&spin->value)) {
            cpu_relax();
        }
    }
#ifdef CONFIG_TSAN
    __tsan_mutex_post_lock(spin, 0, 0);
#endif
}

static inline bool qemu_spin_trylock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
#endif
    bool busy = __sync_lock_test_and_set(&spin->value, true);
#ifdef CONFIG_TSAN
    unsigned flags = __tsan_mutex_try_lock;
    flags |= busy ? __tsan_mutex_try_lock_failed : 0;
    __tsan_mutex_post_lock(spin, flags, 0);
#endif
    return busy;
}

static inline bool qemu_spin_locked(QemuSpin *spin)
{
    return qatomic_read(&spin->value);
}

static inline void qemu_spin_unlock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_unlock(spin, 0);
#endif
    __sync_lock_release(&spin->value);
#ifdef CONFIG_TSAN
    __tsan_mutex_post_unlock(spin, 0);
#endif
}
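
/*
 * Illustrative sketch (the counter is hypothetical).  Note that, as the
 * code above shows, qemu_spin_trylock() returns true when the lock was
 * busy, i.e. when acquisition failed.
 *
 *     static QemuSpin lock;
 *     static unsigned counter;
 *
 *     qemu_spin_init(&lock);
 *     qemu_spin_lock(&lock);
 *     counter++;
 *     qemu_spin_unlock(&lock);
 */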

struct QemuLockCnt {
#ifndef CONFIG_LINUX
    QemuMutex mutex;
#endif
    unsigned count;
};

/**
 * qemu_lockcnt_init: initialize a QemuLockCnt
 * @lockcnt: the lockcnt to initialize
 *
 * Initialize lockcnt's counter to zero and prepare its mutex
 * for usage.
 */
void qemu_lockcnt_init(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_destroy: destroy a QemuLockCnt
 * @lockcnt: the lockcnt to destroy
 *
 * Destroy lockcnt's mutex.
 */
void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc: increment a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 *
 * If the lockcnt's count is zero, wait for critical sections
 * to finish and increment lockcnt's count to 1.  If the count
 * is not zero, just increment it.
 *
 * Because this function can wait on the mutex, it must not be
 * called while the lockcnt's mutex is held by the current thread.
 * For the same reason, qemu_lockcnt_inc can also contribute to
 * AB-BA deadlocks.  This is a sample deadlock scenario:
 *
 *            thread 1                      thread 2
 *            -------------------------------------------------------
 *            qemu_lockcnt_lock(&lc1);
 *                                          qemu_lockcnt_lock(&lc2);
 *            qemu_lockcnt_inc(&lc2);
 *                                          qemu_lockcnt_inc(&lc1);
 */
void qemu_lockcnt_inc(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 */
void qemu_lockcnt_dec(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
 * possibly lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * Decrement lockcnt's count.  If the new count is zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
 * lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * If the count is 1, decrement the count to zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on
 *
 * Remember that concurrent visits are not blocked unless the count is
 * also zero.  You can use qemu_lockcnt_count to check for this inside a
 * critical section.
 */
void qemu_lockcnt_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on.
 */
void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
 * @lockcnt: the lockcnt to operate on.
 *
 * This is the same as
 *
 *     qemu_lockcnt_unlock(lockcnt);
 *     qemu_lockcnt_inc(lockcnt);
 *
 * but more efficient.
 */
void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_count: query a QemuLockCnt's count.
 * @lockcnt: the lockcnt to query.
 *
 * Note that the count can change at any time.  Still, while the
 * lockcnt is locked, one can usefully check whether the count
 * is non-zero.
 */
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);
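
/*
 * Illustrative visit/reclaim sketch (the shared pointer "xyz" and its
 * lockcnt are hypothetical).  Readers mark their visit with inc/dec;
 * the reclaiming side locks the lockcnt and checks the count:
 *
 *     // reader
 *     qemu_lockcnt_inc(&xyz_lockcnt);
 *     if (xyz) {
 *         ... access xyz ...
 *     }
 *     qemu_lockcnt_dec(&xyz_lockcnt);
 *
 *     // reclaimer
 *     qemu_lockcnt_lock(&xyz_lockcnt);
 *     if (!qemu_lockcnt_count(&xyz_lockcnt)) {
 *         g_free(xyz);
 *         xyz = NULL;
 *     }
 *     qemu_lockcnt_unlock(&xyz_lockcnt);
 */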

#endif