membarrier: introduce qemu/sys_membarrier.h
This new header file provides heavy-weight "global" memory barriers that enforce memory ordering on every running thread belonging to the current process. For now, use a dummy implementation that simply issues a real memory barrier on both sides (matching what QEMU has been doing so far).

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
77a8b8462b
commit
c8d3877e48
@ -27,6 +27,7 @@
|
||||
#include "qemu/thread.h"
|
||||
#include "qemu/queue.h"
|
||||
#include "qemu/atomic.h"
|
||||
#include "qemu/sys_membarrier.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
@ -82,7 +83,7 @@ static inline void rcu_read_lock(void)
|
||||
atomic_set(&p_rcu_reader->ctr, ctr);
|
||||
|
||||
/* Write p_rcu_reader->ctr before reading RCU-protected pointers. */
|
||||
smp_mb();
|
||||
smp_mb_placeholder();
|
||||
}
|
||||
|
||||
static inline void rcu_read_unlock(void)
|
||||
@ -96,13 +97,13 @@ static inline void rcu_read_unlock(void)
|
||||
|
||||
/* Ensure that the critical section is seen to precede the
|
||||
* store to p_rcu_reader->ctr. Together with the following
|
||||
* smp_mb(), this ensures writes to p_rcu_reader->ctr
|
||||
* smp_mb_placeholder(), this ensures writes to p_rcu_reader->ctr
|
||||
* are sequentially consistent.
|
||||
*/
|
||||
atomic_store_release(&p_rcu_reader->ctr, 0);
|
||||
|
||||
/* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting. */
|
||||
smp_mb();
|
||||
smp_mb_placeholder();
|
||||
if (unlikely(atomic_read(&p_rcu_reader->waiting))) {
|
||||
atomic_set(&p_rcu_reader->waiting, false);
|
||||
qemu_event_set(&rcu_gp_event);
|
||||
|
17
include/qemu/sys_membarrier.h
Normal file
17
include/qemu/sys_membarrier.h
Normal file
@ -0,0 +1,17 @@
|
||||
/*
 * Process-global memory barriers
 *
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 */

#ifndef QEMU_SYS_MEMBARRIER_H
#define QEMU_SYS_MEMBARRIER_H 1

/* Keep it simple, execute a real memory barrier on both sides.
 *
 * smp_mb_global() is the barrier issued on the slow, infrequent side
 * (this commit uses it in wait_for_readers() and synchronize_rcu()),
 * while smp_mb_placeholder() is its counterpart on the fast side
 * (used in rcu_read_lock() and rcu_read_unlock()).  With this dummy
 * implementation both expand to a plain smp_mb(), so there is nothing
 * for smp_mb_global_init() to set up.
 *
 * NOTE(review): the header assumes smp_mb() is already in scope at
 * every inclusion site (e.g. via qemu/atomic.h) — confirm include
 * ordering in callers.
 */
static inline void smp_mb_global_init(void) {}
#define smp_mb_global() smp_mb()
#define smp_mb_placeholder() smp_mb()

#endif
|
@ -93,10 +93,10 @@ static void wait_for_readers(void)
|
||||
}
|
||||
|
||||
/* Here, order the stores to index->waiting before the loads of
|
||||
* index->ctr. Pairs with smp_mb() in rcu_read_unlock(),
|
||||
* index->ctr. Pairs with smp_mb_placeholder() in rcu_read_unlock(),
|
||||
* ensuring that the loads of index->ctr are sequentially consistent.
|
||||
*/
|
||||
smp_mb();
|
||||
smp_mb_global();
|
||||
|
||||
QLIST_FOREACH_SAFE(index, ®istry, node, tmp) {
|
||||
if (!rcu_gp_ongoing(&index->ctr)) {
|
||||
@ -145,9 +145,9 @@ void synchronize_rcu(void)
|
||||
qemu_mutex_lock(&rcu_sync_lock);
|
||||
|
||||
/* Write RCU-protected pointers before reading p_rcu_reader->ctr.
|
||||
* Pairs with smp_mb() in rcu_read_lock().
|
||||
* Pairs with smp_mb_placeholder() in rcu_read_lock().
|
||||
*/
|
||||
smp_mb();
|
||||
smp_mb_global();
|
||||
|
||||
qemu_mutex_lock(&rcu_registry_lock);
|
||||
if (!QLIST_EMPTY(®istry)) {
|
||||
@ -376,6 +376,7 @@ static void rcu_init_child(void)
|
||||
|
||||
static void __attribute__((__constructor__)) rcu_init(void)
|
||||
{
|
||||
smp_mb_global_init();
|
||||
#ifdef CONFIG_POSIX
|
||||
pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child);
|
||||
#endif
|
||||
|
Loading…
Reference in New Issue
Block a user