libbpf: remove dependency on barrier.h in xsk.h
The use of smp_rmb() and smp_wmb() creates a Linux header dependency
on barrier.h that is otherwise unnecessary. This patch instead
implements the few small defines that are needed from barrier.h
directly in libbpf. As a bonus, the new implementations are faster
than the defaults, which emit sfence and lfence on x86, while only a
compiler barrier is needed in our case, just as when the same ring
access code is compiled in the kernel.
Fixes: 1cad078842 ("libbpf: add support for using AF_XDP sockets")
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
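
For context on why these barriers must live in a libbpf header rather than behind barrier.h: the ring accessors in xsk.h are static inlines, so the barrier instructions are compiled into the application that includes the header, not into libbpf itself. Below is a minimal, hypothetical sketch of the ordering contract the new macros are meant to provide; the slot/producer_idx variables and the produce()/consume() helpers are invented for illustration, and it assumes the libbpf_smp_*() definitions added by this patch are reachable via libbpf_util.h.

#include <stdint.h>
#include "libbpf_util.h"                /* assumed on the include path; provides libbpf_smp_rmb()/wmb() */

static uint32_t slot;                   /* hypothetical payload written by the producer */
static volatile uint32_t producer_idx;  /* hypothetical index the consumer polls */

static void produce(uint32_t val)
{
	slot = val;             /* 1. write the data */
	libbpf_smp_wmb();       /* 2. order the data before the index update */
	producer_idx++;         /* 3. publish to the consumer */
}

static int consume(uint32_t *val)
{
	if (!producer_idx)
		return 0;       /* nothing published yet */
	libbpf_smp_rmb();       /* do not read the data before observing the index */
	*val = slot;
	return 1;
}

On x86 both macros above expand to a plain compiler barrier, since the hardware already preserves this store-store and load-load ordering; the generic smp_[rw]mb() defaults would have emitted sfence and lfence instead, which is the overhead the commit message refers to.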
libbpf_util.h:
@@ -23,6 +23,31 @@ do { \
 #define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
 #define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
 
+/* Use these barrier functions instead of smp_[rw]mb() when they are
+ * used in a libbpf header file. That way they can be built into the
+ * application that uses libbpf.
+ */
+#if defined(__i386__) || defined(__x86_64__)
+# define libbpf_smp_rmb() asm volatile("" : : : "memory")
+# define libbpf_smp_wmb() asm volatile("" : : : "memory")
+# define libbpf_smp_mb() \
+	asm volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc")
+#elif defined(__aarch64__)
+# define libbpf_smp_rmb() asm volatile("dmb ishld" : : : "memory")
+# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
+# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
+#elif defined(__arm__)
+/* These are only valid for armv7 and above */
+# define libbpf_smp_rmb() asm volatile("dmb ish" : : : "memory")
+# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
+# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
+#else
+# warning Architecture missing native barrier functions in libbpf_util.h.
+# define libbpf_smp_rmb() __sync_synchronize()
+# define libbpf_smp_wmb() __sync_synchronize()
+# define libbpf_smp_mb() __sync_synchronize()
+#endif
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
xsk.h:
@@ -16,6 +16,7 @@
 #include <linux/if_xdp.h>
 
 #include "libbpf.h"
+#include "libbpf_util.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -123,7 +124,7 @@ static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, size_t nb)
 	/* Make sure everything has been written to the ring before indicating
 	 * this to the kernel by writing the producer pointer.
 	 */
-	smp_wmb();
+	libbpf_smp_wmb();
 
 	*prod->producer += nb;
 }
@@ -137,7 +138,7 @@ static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons,
 	/* Make sure we do not speculatively read the data before
 	 * we have received the packet buffers from the ring.
 	 */
-	smp_rmb();
+	libbpf_smp_rmb();
 
 	*idx = cons->cached_cons;
 	cons->cached_cons += entries;
@@ -151,7 +152,7 @@ static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, size_t nb)
 	/* Make sure data has been read before indicating we are done
 	 * with the entries by updating the consumer pointer.
 	 */
-	smp_mb();
+	libbpf_smp_mb();
 
 	*cons->consumer += nb;
 }
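
As a usage note rather than part of the patch: since the accessors are header inlines, an application picks up the new barriers when it is compiled against this xsk.h. A rough RX-side sketch using only the two consumer accessors touched above could look like this; the drain() helper and its budget parameter are invented, and the xsk_ring_cons__peek() signature (a size_t count plus a __u32 index out-parameter) is assumed to match this version of the header.

#include <stddef.h>
#include <linux/types.h>
#include "xsk.h"        /* assumed on the include path together with libbpf.h/libbpf_util.h */

/* hypothetical helper: consume up to 'budget' RX descriptors from one ring */
static size_t drain(struct xsk_ring_cons *rx, size_t budget)
{
	__u32 idx;
	size_t rcvd = xsk_ring_cons__peek(rx, budget, &idx);    /* runs libbpf_smp_rmb() */

	if (!rcvd)
		return 0;

	/* ... process the rcvd descriptors starting at idx ... */

	xsk_ring_cons__release(rx, rcvd);                       /* runs libbpf_smp_mb() */
	return rcvd;
}

Because both calls are inlined from the header, the barrier instructions end up in the application binary, which is exactly why they can no longer depend on the kernel-internal barrier.h.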