path: root/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
author		Alexei Starovoitov <ast@kernel.org>	2023-09-05 17:41:44 -0700
committer	Alexei Starovoitov <ast@kernel.org>	2023-09-08 08:42:18 -0700
commit		1e4a6d975e5cd114509aa447750d68d295a501a7 (patch)
tree		c1501208a0bd5ca5d3613c400329655fb172c581 /tools/testing/selftests/bpf/progs/percpu_alloc_array.c
parent		3903802bb99a263a3c26422c3d30a121b1f6f939 (diff)
parent		9bc95a95abbe91e9315c1fe27dc124019bd2592c (diff)
Merge branch 'bpf-add-support-for-local-percpu-kptr'
Yonghong Song says:

====================
bpf: Add support for local percpu kptr

Patch set [1] implemented cgroup local storage BPF_MAP_TYPE_CGRP_STORAGE,
similar to sk/task/inode local storage, and the old BPF_MAP_TYPE_CGROUP_STORAGE
map was marked as deprecated since it can only work with the current cgroup.

Similarly, the existing BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE map is a percpu
version of BPF_MAP_TYPE_CGROUP_STORAGE and also only works with the current
cgroup, but there is no replacement that can work with an arbitrary cgroup.
This patch set solves that problem by adding support for local percpu kptrs.
The map value can have a percpu kptr field which holds a BPF-program-allocated
percpu data area. Below is an example:

  struct percpu_val_t {
    ... fields ...
  };

  struct map_value_t {
    struct percpu_val_t __percpu_kptr *percpu_data_ptr;
  };

Here, 'map_value_t' is the map value type for a BPF_MAP_TYPE_CGRP_STORAGE map.
A user can access 'percpu_data_ptr' and then read/write the percpu data. This
covers BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE and more, so the
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE map type is also marked as deprecated.

In addition, local percpu kptrs support the same map types as other kptrs,
including hash, lru_hash, array, and sk/inode/task/cgrp local storage.
Currently, the percpu data structure does not support non-scalars or special
fields (e.g., bpf_spin_lock, bpf_rb_root, etc.); these can be supported in the
future if use cases arise. Please see the individual patches for details.

[1] https://lore.kernel.org/all/20221026042835.672317-1-yhs@fb.com/

Changelog:
v2 -> v3:
  - fix libbpf_str test failure.
v1 -> v2:
  - do not support special fields in the percpu data structure.
  - rename the __percpu attribute to __percpu_kptr.
  - rename BPF_KPTR_PERCPU_REF to BPF_KPTR_PERCPU.
  - better code to handle the bpf_{this,per}_cpu_ptr() helpers.
  - add more negative tests.
  - fix a bpftool-related test failure.
====================

Link: https://lore.kernel.org/r/20230827152729.1995219-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
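To make the cover letter's example concrete, here is a minimal sketch of a
BPF_MAP_TYPE_CGRP_STORAGE map whose value carries a percpu kptr, loosely
modeled on the in-tree cgrp local storage selftests. The type, map, field,
and program names are illustrative assumptions, not taken from this series:

#include "bpf_experimental.h"

struct percpu_val_t {
	long cnt;
};

struct map_value_t {
	struct percpu_val_t __percpu_kptr *percpu_data_ptr;
};

struct {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value_t);
} cgrp_map SEC(".maps");

SEC("tp_btf/sys_enter")
int BPF_PROG(count_per_cpu, struct pt_regs *regs, long id)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct percpu_val_t __percpu_kptr *p, *old;
	struct percpu_val_t *cur;
	struct map_value_t *v;

	/* storage for the current task's cgroup, created on demand */
	v = bpf_cgrp_storage_get(&cgrp_map, task->cgroups->dfl_cgrp, 0,
				 BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!v)
		return 0;

	if (!v->percpu_data_ptr) {
		p = bpf_percpu_obj_new(struct percpu_val_t);
		if (!p)
			return 0;
		old = bpf_kptr_xchg(&v->percpu_data_ptr, p);
		if (old)
			/* another CPU won the race; drop our copy */
			bpf_percpu_obj_drop(old);
	}

	/* non-sleepable progs run under RCU, so the kptr can be read directly */
	p = v->percpu_data_ptr;
	if (p) {
		cur = bpf_this_cpu_ptr(p);
		cur->cnt++;
	}

	return 0;
}

char _license[] SEC("license") = "GPL";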
Diffstat (limited to 'tools/testing/selftests/bpf/progs/percpu_alloc_array.c')
-rw-r--r--	tools/testing/selftests/bpf/progs/percpu_alloc_array.c	183
1 file changed, 183 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_array.c b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
new file mode 100644
index 000000000000..bbc45346e006
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
@@ -0,0 +1,183 @@
+#include "bpf_experimental.h"
+
+struct val_t {
+	long b, c, d;
+};
+
+struct elem {
+	long sum;
+	struct val_t __percpu_kptr *pc;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, struct elem);
+} array SEC(".maps");
+
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
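+/* set by the user-space part of the test before the skeleton is loaded */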
+const volatile int nr_cpus;
+
+/* Initialize the percpu object */
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(test_array_map_1)
+{
+	struct val_t __percpu_kptr *p;
+	struct elem *e;
+	int index = 0;
+
+	e = bpf_map_lookup_elem(&array, &index);
+	if (!e)
+		return 0;
+
+	p = bpf_percpu_obj_new(struct val_t);
+	if (!p)
+		return 0;
+
+	p = bpf_kptr_xchg(&e->pc, p);
+	if (p)
+		bpf_percpu_obj_drop(p);
+
+	return 0;
+}
+
+/* Update percpu data */
+SEC("?fentry/bpf_fentry_test2")
+int BPF_PROG(test_array_map_2)
+{
+	struct val_t __percpu_kptr *p;
+	struct val_t *v;
+	struct elem *e;
+	int index = 0;
+
+	e = bpf_map_lookup_elem(&array, &index);
+	if (!e)
+		return 0;
+
+	p = e->pc;
+	if (!p)
+		return 0;
+
+	v = bpf_per_cpu_ptr(p, 0);
+	if (!v)
+		return 0;
+	v->c = 1;
+	v->d = 2;
+
+	return 0;
+}
+
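+/* result fields read back by the user-space part of the test */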
+int cpu0_field_d, sum_field_c;
+
+/* Summarize percpu data */
+SEC("?fentry/bpf_fentry_test3")
+int BPF_PROG(test_array_map_3)
+{
+	struct val_t __percpu_kptr *p;
+	int i, index = 0;
+	struct val_t *v;
+	struct elem *e;
+
+	e = bpf_map_lookup_elem(&array, &index);
+	if (!e)
+		return 0;
+
+	p = e->pc;
+	if (!p)
+		return 0;
+
+	bpf_for(i, 0, nr_cpus) {
+		v = bpf_per_cpu_ptr(p, i);
+		if (v) {
+			if (i == 0)
+				cpu0_field_d = v->d;
+			sum_field_c += v->c;
+		}
+	}
+
+	return 0;
+}
+
+/* Explicitly free allocated percpu data */
+SEC("?fentry/bpf_fentry_test4")
+int BPF_PROG(test_array_map_4)
+{
+	struct val_t __percpu_kptr *p;
+	struct elem *e;
+	int index = 0;
+
+	e = bpf_map_lookup_elem(&array, &index);
+	if (!e)
+		return 0;
+
+	/* delete */
+	p = bpf_kptr_xchg(&e->pc, NULL);
+	if (p) {
+		bpf_percpu_obj_drop(p);
+	}
+
+	return 0;
+}
+
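+/* Sleepable program ("fentry.s"): RCU protection must be taken explicitly */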
+SEC("?fentry.s/bpf_fentry_test1")
+int BPF_PROG(test_array_map_10)
+{
+	struct val_t __percpu_kptr *p, *p1;
+	int i, index = 0;
+	struct val_t *v;
+	struct elem *e;
+
+	e = bpf_map_lookup_elem(&array, &index);
+	if (!e)
+		return 0;
+
+	bpf_rcu_read_lock();
+	p = e->pc;
+	if (!p) {
+		p = bpf_percpu_obj_new(struct val_t);
+		if (!p)
+			goto out;
+
+		p1 = bpf_kptr_xchg(&e->pc, p);
+		if (p1) {
+			/* race condition */
+			bpf_percpu_obj_drop(p1);
+		}
+	}
+
+	v = bpf_this_cpu_ptr(p);
+	v->c = 3;
+	v = bpf_this_cpu_ptr(p);
+	v->c = 0;
+
+	v = bpf_per_cpu_ptr(p, 0);
+	if (!v)
+		goto out;
+	v->c = 1;
+	v->d = 2;
+
+	/* delete */
+	p1 = bpf_kptr_xchg(&e->pc, NULL);
+	if (!p1)
+		goto out;
+
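+	/* e->pc is now NULL, but p (== p1) stays valid: we hold the
+	 * reference in p1 and are still inside the RCU read section.
+	 */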
+	bpf_for(i, 0, nr_cpus) {
+		v = bpf_per_cpu_ptr(p, i);
+		if (v) {
+			if (i == 0)
+				cpu0_field_d = v->d;
+			sum_field_c += v->c;
+		}
+	}
+
+	/* finally release p */
+	bpf_percpu_obj_drop(p1);
+out:
+	bpf_rcu_read_unlock();
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
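
For reference, a minimal sketch of how these programs might be driven from
user space with a libbpf skeleton. The skeleton calls assume bpftool-generated
output for this object file; the authoritative harness is the in-tree
prog_tests code for this series, so treat the details below (autoload choices,
trigger mechanism) as assumptions for illustration:

#include <stdio.h>
#include <bpf/libbpf.h>
#include "percpu_alloc_array.skel.h" /* from: bpftool gen skeleton */

int main(void)
{
	struct percpu_alloc_array *skel;
	int err;

	skel = percpu_alloc_array__open();
	if (!skel)
		return 1;

	/* nr_cpus is const volatile (.rodata), so set it before load */
	skel->rodata->nr_cpus = libbpf_num_possible_cpus();

	/* SEC("?...") programs are not loaded by default; opt them in */
	bpf_program__set_autoload(skel->progs.test_array_map_1, true);
	bpf_program__set_autoload(skel->progs.test_array_map_2, true);
	bpf_program__set_autoload(skel->progs.test_array_map_3, true);
	bpf_program__set_autoload(skel->progs.test_array_map_4, true);

	err = percpu_alloc_array__load(skel);
	if (err)
		goto out;
	err = percpu_alloc_array__attach(skel);
	if (err)
		goto out;

	/* The fentry hooks fire when the kernel's bpf_fentry_test*()
	 * functions run, which the real harness triggers via the
	 * BPF_PROG_TEST_RUN facility; results then land in .bss.
	 */
	printf("cpu0_field_d=%d sum_field_c=%d\n",
	       skel->bss->cpu0_field_d, skel->bss->sum_field_c);
out:
	percpu_alloc_array__destroy(skel);
	return err;
}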