Diffstat (limited to 'tools/perf/include/bpf/bpf.h')
-rw-r--r--  tools/perf/include/bpf/bpf.h | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h
index 52b6d87fe822..e667577207dc 100644
--- a/tools/perf/include/bpf/bpf.h
+++ b/tools/perf/include/bpf/bpf.h
@@ -18,6 +18,25 @@ struct bpf_map {
unsigned int numa_node;
};
+/*
+ * FIXME: this should receive .max_entries as a parameter, as careful
+ * tuning of these limits is needed to avoid hitting limits that
+ * prevent other BPF constructs, such as tracepoint handlers, from
+ * getting installed, with cryptic messages from libbpf, etc.
+ * For the current need, 'perf trace --filter-pids', 64 should
+ * be good enough, but this surely needs to be revisited.
+ */
+#define pid_map(name, value_type) \
+struct bpf_map SEC("maps") name = { \
+ .type = BPF_MAP_TYPE_HASH, \
+ .key_size = sizeof(pid_t), \
+ .value_size = sizeof(value_type), \
+ .max_entries = 64, \
+}
+
+static int (*bpf_map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags) = (void *)BPF_FUNC_map_update_elem;
+static void *(*bpf_map_lookup_elem)(struct bpf_map *map, void *key) = (void *)BPF_FUNC_map_lookup_elem;
+
#define SEC(NAME) __attribute__((section(NAME), used))
#define probe(function, vars) \
@@ -36,4 +55,6 @@ int _version SEC("version") = LINUX_VERSION_CODE;
static int (*probe_read)(void *dst, int size, const void *unsafe_addr) = (void *)BPF_FUNC_probe_read;
static int (*probe_read_str)(void *dst, int size, const void *unsafe_addr) = (void *)BPF_FUNC_probe_read_str;
+static int (*perf_event_output)(void *, struct bpf_map *, int, void *, unsigned long) = (void *)BPF_FUNC_perf_event_output;
+
#endif /* _PERF_BPF_H */
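
A minimal sketch (not part of this patch) of how a perf BPF script might use the new pid_map() macro together with the bpf_map_lookup_elem() helper declaration to skip events for a set of filtered pids. The map name 'filtered_pids', the "raw_syscalls:sys_enter" section name and the extra bpf_get_current_pid_tgid declaration are assumptions made for the example; only pid_map(), bpf_map_lookup_elem() and license() come from this header.

#include <bpf.h>

/*
 * Not declared by bpf.h at this point; declared here the same way the
 * header declares its other helpers, using the uapi BPF_FUNC_ enum.
 */
static unsigned long long (*bpf_get_current_pid_tgid)(void) =
	(void *)BPF_FUNC_get_current_pid_tgid;

/* Hash map keyed by pid_t, 64 entries, one int flag per filtered pid. */
pid_map(filtered_pids, int);

SEC("raw_syscalls:sys_enter")
int sys_enter(void *args)
{
	/* Lower 32 bits of the return value are the thread's pid. */
	int pid = bpf_get_current_pid_tgid();
	int *filtered = bpf_map_lookup_elem(&filtered_pids, &pid);

	/*
	 * Returning 0 drops the event for pids that the perf tool put in
	 * the map, e.g. its own pid for 'perf trace --filter-pids'.
	 */
	return filtered ? 0 : 1;
}

license(GPL);

The map itself would be populated from user space by the perf tool (via the bpf map update syscall on the map's fd), while the in-kernel bpf_map_update_elem() declaration added by this patch lets BPF programs update entries as well.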