author    | Andrii Nakryiko <andrii@kernel.org> | 2021-03-08 08:51:01 -0800
committer | Andrii Nakryiko <andrii@kernel.org> | 2021-03-08 08:52:45 -0800
commit    | bbb41728e61a602ec76cbfec2a49ccc763d305b7 (patch)
tree      | af863fd112a199148f11c746556393b48d9029d7 /tools/lib/bpf/libbpf_util.h
parent    | 299194a91451263020c73dd2a3b7e0218c88dbd0 (diff)
parent    | 291471dd1559528a4c2ad5026eff94ed1030562b (diff)
Merge branch 'load-acquire/store-release barriers for AF_XDP rings'
Björn Töpel says:
====================
This two-patch series introduces load-acquire/store-release barriers
for the AF_XDP rings.
For most contemporary architectures, this is more effective than an
SPSC (single-producer/single-consumer) ring based on smp_{r,w,}mb()
barriers. More importantly, load-acquire/store-release semantics make
the ring code easier to follow.
This is effectively the change done in commit 6c43c091bdc5
("documentation: Update circular buffer for
load-acquire/store-release"), but for the AF_XDP rings.
Both libbpf and the kernel side are updated.
Full details are outlined in the commits!
Thanks to the LKMM-folks (Paul/Alan/Will) for helping me out in this
complicated matter!
Changelog
v1[1]->v2:
* Expanded the commit message for patch 1, and included the LKMM
litmus tests. Hopefully this clears things up. (Daniel)
* Clarified why the smp_mb()/smp_load_acquire() is not needed at (A):
there is a control dependency from the load to the store; see the
sketch below. (Toke)
[1] https://lore.kernel.org/bpf/20210301104318.263262-1-bjorn.topel@gmail.com/
Thanks,
Björn
====================
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
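
To illustrate the control-dependency argument referenced in the v2
changelog above, here is a minimal producer-side sketch. It is not code
from the patch set: the struct and function names are illustrative
only, and plain GCC atomic builtins stand in for the kernel/libbpf
primitives. The point is that the store into the ring only executes on
a branch that depends on the load of the consumer index, so the store
cannot be observed before that load.

#include <stdint.h>

/* Illustrative ring layout; not the actual AF_XDP structures. */
struct demo_ring {
	uint32_t producer;	/* written by producer, read by consumer */
	uint32_t consumer;	/* written by consumer, read by producer */
	uint32_t size;		/* power of two */
	uint64_t *desc;
};

static int demo_ring_produce(struct demo_ring *r, uint64_t val)
{
	/* (A): a plain/relaxed load of the consumer index suffices. */
	uint32_t cons = __atomic_load_n(&r->consumer, __ATOMIC_RELAXED);

	if (r->producer - cons == r->size)
		return -1;	/* ring full: the store below never runs */

	/* This store is control-dependent on the load at (A), so no
	 * smp_mb()/load-acquire is needed between them. */
	r->desc[r->producer & (r->size - 1)] = val;

	/* Publish: release orders the descriptor write before the index
	 * update, pairing with the consumer's load-acquire. */
	__atomic_store_n(&r->producer, r->producer + 1, __ATOMIC_RELEASE);
	return 0;
}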
Diffstat (limited to 'tools/lib/bpf/libbpf_util.h')
-rw-r--r-- | tools/lib/bpf/libbpf_util.h | 72
1 file changed, 50 insertions, 22 deletions
diff --git a/tools/lib/bpf/libbpf_util.h b/tools/lib/bpf/libbpf_util.h
index 59c779c5790c..94a0d7bb6f3c 100644
--- a/tools/lib/bpf/libbpf_util.h
+++ b/tools/lib/bpf/libbpf_util.h
@@ -5,6 +5,7 @@
 #define __LIBBPF_LIBBPF_UTIL_H
 
 #include <stdbool.h>
+#include <linux/compiler.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -15,29 +16,56 @@ extern "C" {
  * application that uses libbpf.
  */
 #if defined(__i386__) || defined(__x86_64__)
-# define libbpf_smp_rmb() asm volatile("" : : : "memory")
-# define libbpf_smp_wmb() asm volatile("" : : : "memory")
-# define libbpf_smp_mb() \
-	asm volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc")
-/* Hinders stores to be observed before older loads. */
-# define libbpf_smp_rwmb() asm volatile("" : : : "memory")
+# define libbpf_smp_store_release(p, v)				\
+	do {							\
+		asm volatile("" : : : "memory");		\
+		WRITE_ONCE(*p, v);				\
+	} while (0)
+# define libbpf_smp_load_acquire(p)				\
+	({							\
+		typeof(*p) ___p1 = READ_ONCE(*p);		\
+		asm volatile("" : : : "memory");		\
+		___p1;						\
+	})
 #elif defined(__aarch64__)
-# define libbpf_smp_rmb() asm volatile("dmb ishld" : : : "memory")
-# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
-# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
-# define libbpf_smp_rwmb() libbpf_smp_mb()
-#elif defined(__arm__)
-/* These are only valid for armv7 and above */
-# define libbpf_smp_rmb() asm volatile("dmb ish" : : : "memory")
-# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
-# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
-# define libbpf_smp_rwmb() libbpf_smp_mb()
-#else
-/* Architecture missing native barrier functions. */
-# define libbpf_smp_rmb() __sync_synchronize()
-# define libbpf_smp_wmb() __sync_synchronize()
-# define libbpf_smp_mb() __sync_synchronize()
-# define libbpf_smp_rwmb() __sync_synchronize()
+# define libbpf_smp_store_release(p, v)				\
+		asm volatile ("stlr %w1, %0" : "=Q" (*p) : "r" (v) : "memory")
+# define libbpf_smp_load_acquire(p)				\
+	({							\
+		typeof(*p) ___p1;				\
+		asm volatile ("ldar %w0, %1"			\
+			      : "=r" (___p1) : "Q" (*p) : "memory"); \
+		___p1;						\
+	})
+#elif defined(__riscv)
+# define libbpf_smp_store_release(p, v)				\
+	do {							\
+		asm volatile ("fence rw,w" : : : "memory");	\
+		WRITE_ONCE(*p, v);				\
+	} while (0)
+# define libbpf_smp_load_acquire(p)				\
+	({							\
+		typeof(*p) ___p1 = READ_ONCE(*p);		\
+		asm volatile ("fence r,rw" : : : "memory");	\
+		___p1;						\
+	})
+#endif
+
+#ifndef libbpf_smp_store_release
+#define libbpf_smp_store_release(p, v)				\
+	do {							\
+		__sync_synchronize();				\
+		WRITE_ONCE(*p, v);				\
+	} while (0)
+#endif
+
+#ifndef libbpf_smp_load_acquire
+#define libbpf_smp_load_acquire(p)				\
+	({							\
+		typeof(*p) ___p1 = READ_ONCE(*p);		\
+		__sync_synchronize();				\
+		___p1;						\
+	})
 #endif
 
 #ifdef __cplusplus
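
For context, here is a minimal consumer-side sketch of how these new
macros pair up on the ring indices, matching the cover letter's point
that acquire/release semantics make the ring code easier to follow. The
actual callers live in the xsk ring code, outside this diffstat-limited
view; the struct and function names below are hypothetical, and the
sketch assumes libbpf_util.h is on the include path.

#include <stdint.h>
#include "libbpf_util.h"	/* libbpf_smp_{load_acquire,store_release}() */

/* Illustrative single-consumer view of a ring; not the xsk structures. */
struct demo_ring {
	uint32_t producer;	/* written by producer, read by consumer */
	uint32_t consumer;	/* written by consumer, read by producer */
	uint32_t size;		/* power of two */
	uint64_t *desc;
};

static int demo_ring_consume(struct demo_ring *r, uint64_t *val)
{
	/* Pairs with the producer's store-release of 'producer': if the
	 * new index is visible, so is the descriptor it publishes. */
	uint32_t prod = libbpf_smp_load_acquire(&r->producer);

	if (r->consumer == prod)
		return -1;	/* ring empty */

	*val = r->desc[r->consumer & (r->size - 1)];

	/* Hand the slot back only after the descriptor has been read. */
	libbpf_smp_store_release(&r->consumer, r->consumer + 1);
	return 0;
}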