|
|
|
#ifndef QEMU_RCU_H
|
|
|
|
#define QEMU_RCU_H
|
|
|
|
|
|
|
|
/*
|
|
|
|
* urcu-mb.h
|
|
|
|
*
|
|
|
|
* Userspace RCU header with explicit memory barrier.
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
|
|
*
|
|
|
|
* IBM's contributions to this file may be relicensed under LGPLv2 or later.
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
#include "qemu/thread.h"
|
|
|
|
#include "qemu/queue.h"
|
|
|
|
#include "qemu/atomic.h"
|
|
|
|
#include "qemu/notify.h"
|
|
|
|
#include "qemu/sys_membarrier.h"
|
|
|
|
#include "qemu/coroutine-tls.h"
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Important !
|
|
|
|
*
|
|
|
|
* Each thread containing read-side critical sections must be registered
|
|
|
|
* with rcu_register_thread() before calling rcu_read_lock().
|
|
|
|
* rcu_unregister_thread() should be called before the thread exits.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * rcu_assert() expands to a real assert() only when DEBUG_RCU is
 * defined; otherwise it expands to nothing, so normal builds pay no
 * cost for RCU sanity checks.
 */
#ifdef DEBUG_RCU
#define rcu_assert(args...)    assert(args)
#else
#define rcu_assert(args...)
#endif
|
|
|
|
|
|
|
|
/*
 * Global quiescent period (grace period) counter with low-order bits
 * unused.  Using an unsigned long rather than a char eliminates false
 * register dependencies causing stalls on some architectures.
 */
extern unsigned long rcu_gp_ctr;

/*
 * Event used by the reader side to wake up a waiting writer: the last
 * reader to leave a critical section sets it when a waiter is pending
 * (see rcu_read_unlock()).
 */
extern QemuEvent rcu_gp_event;
|
|
|
|
|
|
|
|
/* Per-thread RCU reader state; one instance per registered thread. */
struct rcu_reader_data {
    /* Data used by both reader and synchronize_rcu() */
    unsigned long ctr;  /* Snapshot of rcu_gp_ctr taken by the outermost
                         * rcu_read_lock(); 0 while outside any read-side
                         * critical section (see rcu_read_unlock()). */
    bool waiting;       /* Cleared by rcu_read_unlock(), which then signals
                         * rcu_gp_event.  Presumably set by the writer side
                         * while it waits for this reader -- verify in rcu.c. */

    /* Data used by reader only */
    unsigned depth;     /* Nesting level of rcu_read_lock() calls. */

    /* Data used for registry, protected by rcu_registry_lock */
    QLIST_ENTRY(rcu_reader_data) node;

    /*
     * NotifierList used to force an RCU grace period.  Accessed under
     * rcu_registry_lock.  Note that the notifier is called _outside_
     * the thread!
     */
    NotifierList force_rcu;
};
|
|
|
|
|
2022-02-22 15:01:49 +01:00
|
|
|
/* Per-thread (coroutine-aware TLS) instance of struct rcu_reader_data,
 * accessed through get_ptr_rcu_reader(). */
QEMU_DECLARE_CO_TLS(struct rcu_reader_data, rcu_reader)
|
|
|
|
|
|
|
|
/*
 * Enter an RCU read-side critical section.  Sections nest: only the
 * outermost call publishes the global counter.  The calling thread must
 * have been registered with rcu_register_thread() beforehand.
 */
static inline void rcu_read_lock(void)
{
    struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader();
    /* NOTE(review): `unsigned` truncates the `unsigned long` global
     * counter on LP64 hosts -- confirm this is intentional. */
    unsigned ctr;

    /* Nested call: the outermost lock already published our counter. */
    if (p_rcu_reader->depth++ > 0) {
        return;
    }

    /* Publish the current grace-period counter so the writer side can
     * tell that this thread's critical section began at or after it. */
    ctr = qatomic_read(&rcu_gp_ctr);
    qatomic_set(&p_rcu_reader->ctr, ctr);

    /*
     * Read rcu_gp_ctr and write p_rcu_reader->ctr before reading
     * RCU-protected pointers.
     */
    smp_mb_placeholder();
}
|
|
|
|
|
|
|
|
/*
 * Leave an RCU read-side critical section.  Only the outermost call
 * clears the published counter; if a writer is waiting for this reader
 * to become quiescent, wake it through rcu_gp_event.
 */
static inline void rcu_read_unlock(void)
{
    struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader();

    /* Unbalanced unlock is a caller bug. */
    assert(p_rcu_reader->depth != 0);
    if (--p_rcu_reader->depth > 0) {
        return;
    }

    /* Ensure that the critical section is seen to precede the
     * store to p_rcu_reader->ctr.  Together with the following
     * smp_mb_placeholder(), this ensures writes to p_rcu_reader->ctr
     * are sequentially consistent.
     */
    qatomic_store_release(&p_rcu_reader->ctr, 0);

    /* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting. */
    smp_mb_placeholder();
    if (unlikely(qatomic_read(&p_rcu_reader->waiting))) {
        /* A writer is waiting on us; clear the flag and wake it. */
        qatomic_set(&p_rcu_reader->waiting, false);
        qemu_event_set(&rcu_gp_event);
    }
}
|
|
|
|
|
2023-03-20 14:21:29 +01:00
|
|
|
/* Block until all currently-running RCU read-side critical sections
 * have completed, i.e. wait for a full grace period. */
void synchronize_rcu(void);
|
|
|
|
|
|
|
|
/*
 * Reader thread registration.
 *
 * Every thread that takes rcu_read_lock() must call
 * rcu_register_thread() first, and rcu_unregister_thread() before it
 * exits (see the "Important!" note above).
 */
void rcu_register_thread(void);
void rcu_unregister_thread(void);
|
|
|
|
|
|
|
|
/*
 * Support for fork().  fork() support is enabled at startup.
 * Presumably these toggle pthread_atfork handlers -- see rcu.c.
 */
void rcu_enable_atfork(void);
void rcu_disable_atfork(void);
|
2013-05-13 13:29:47 +02:00
|
|
|
|
|
|
|
/*
 * Embedded callback node for call_rcu1(): link one of these into a
 * structure that must be reclaimed only after a grace period.
 */
struct rcu_head;
typedef void RCUCBFunc(struct rcu_head *head);

struct rcu_head {
    struct rcu_head *next;  /* next callback -- presumably the pending
                             * queue managed by call_rcu1(); see rcu.c */
    RCUCBFunc *func;        /* function invoked with this head after a
                             * grace period */
};
|
|
|
|
|
2023-03-20 14:21:29 +01:00
|
|
|
/* Schedule func(head) to run after the next grace period. */
void call_rcu1(struct rcu_head *head, RCUCBFunc *func);
/* Wait until previously enqueued call_rcu1() callbacks have run. */
void drain_call_rcu(void);
|
|
|
|
|
|
|
|
/*
 * call_rcu(head, func, field): schedule func(head) after a grace
 * period.  @field names the struct rcu_head member of *@head; the two
 * compile-time checks below reject a @field that is not the first
 * member, and a @func whose argument type does not match @head (a
 * negative array size or a pointer-subtraction type clash makes the
 * build fail).
 *
 * The operands of the minus operator must have the same type,
 * which must be the one that we specify in the cast.
 */
#define call_rcu(head, func, field) \
    call_rcu1(({ \
        char __attribute__((unused)) \
            offset_must_be_zero[-offsetof(typeof(*(head)), field)], \
            func_type_invalid = (func) - (void (*)(typeof(head)))(func); \
        &(head)->field; \
      }), \
      (RCUCBFunc *)(func))
|
|
|
|
|
|
|
|
/*
 * g_free_rcu(obj, field): free @obj with g_free() after a grace
 * period.  @field names the struct rcu_head member of *@obj; the
 * compile-time check below rejects a @field that is not the first
 * member (g_free() must receive the start of the allocation).
 *
 * Fix: the expansion previously ended with a semicolon, which made
 * "if (c) g_free_rcu(o, f); else ..." a syntax error and was
 * inconsistent with call_rcu() above; the caller now supplies the ';'.
 */
#define g_free_rcu(obj, field) \
    call_rcu1(({ \
        char __attribute__((unused)) \
            offset_must_be_zero[-offsetof(typeof(*(obj)), field)]; \
        &(obj)->field; \
      }), \
      (RCUCBFunc *)g_free)
|
|
|
|
|
|
|
|
/* Opaque token type for g_autoptr()-based RCU read guards. */
typedef void RCUReadAuto;

/*
 * Take the RCU read lock and return a dummy token whose only purpose
 * is to be non-NULL, so that the autoptr cleanup function runs when
 * the token goes out of scope.
 */
static inline RCUReadAuto *rcu_read_auto_lock(void)
{
    rcu_read_lock();
    /* Any non-NULL value works; NULL would suppress the cleanup call. */
    return (RCUReadAuto *)(uintptr_t)1;
}
|
|
|
|
|
|
|
|
/*
 * g_autoptr() cleanup hook paired with rcu_read_auto_lock(): drops the
 * RCU read lock.  The token itself carries no information.
 */
static inline void rcu_read_auto_unlock(RCUReadAuto *r)
{
    (void)r; /* dummy token, deliberately unused */
    rcu_read_unlock();
}
|
|
|
|
|
|
|
|
/* Register rcu_read_auto_unlock() as the g_autoptr() cleanup function
 * for RCUReadAuto tokens. */
G_DEFINE_AUTOPTR_CLEANUP_FUNC(RCUReadAuto, rcu_read_auto_unlock)

/*
 * WITH_RCU_READ_LOCK_GUARD() { ... } holds the RCU read lock around the
 * attached statement.  __COUNTER__ makes the hidden loop variable
 * unique so that guards can appear several times in one scope.
 */
#define WITH_RCU_READ_LOCK_GUARD() \
    WITH_RCU_READ_LOCK_GUARD_(glue(_rcu_read_auto, __COUNTER__))

/* Helper: a one-iteration for loop that locks on entry and unlocks on
 * exit (setting var to NULL also disarms the autoptr cleanup). */
#define WITH_RCU_READ_LOCK_GUARD_(var) \
    for (g_autoptr(RCUReadAuto) var = rcu_read_auto_lock(); \
        (var); rcu_read_auto_unlock(var), (var) = NULL)

/* Hold the RCU read lock from here to the end of the enclosing scope. */
#define RCU_READ_LOCK_GUARD() \
    g_autoptr(RCUReadAuto) _rcu_read_auto __attribute__((unused)) = rcu_read_auto_lock()
|
|
|
|
|
|
|
|
/*
 * Force-RCU notifiers tell readers that they should exit their
 * read-side critical section.  They feed the per-thread force_rcu
 * NotifierList in struct rcu_reader_data; presumably they act on the
 * calling thread's reader state -- verify in rcu.c.
 */
void rcu_add_force_rcu_notifier(Notifier *n);
void rcu_remove_force_rcu_notifier(Notifier *n);
|
|
|
|
|
|
|
|
#endif /* QEMU_RCU_H */
|