Diffstat (limited to 'tools/lib')
-rw-r--r--  tools/lib/bpf/Makefile            |   35
-rw-r--r--  tools/lib/bpf/bpf.c               |   63
-rw-r--r--  tools/lib/bpf/bpf_core_read.h     |    2
-rw-r--r--  tools/lib/bpf/bpf_gen_internal.h  |   24
-rw-r--r--  tools/lib/bpf/bpf_tracing.h       |   32
-rw-r--r--  tools/lib/bpf/btf.c               |  321
-rw-r--r--  tools/lib/bpf/btf.h               |   39
-rw-r--r--  tools/lib/bpf/btf_dump.c          |   64
-rw-r--r--  tools/lib/bpf/gen_loader.c        |  419
-rw-r--r--  tools/lib/bpf/libbpf.c            | 1241
-rw-r--r--  tools/lib/bpf/libbpf.h            |   85
-rw-r--r--  tools/lib/bpf/libbpf.map          |   13
-rw-r--r--  tools/lib/bpf/libbpf_internal.h   |   64
-rw-r--r--  tools/lib/bpf/libbpf_legacy.h     |    9
-rw-r--r--  tools/lib/bpf/libbpf_probes.c     |    2
-rw-r--r--  tools/lib/bpf/linker.c            |   45
-rw-r--r--  tools/lib/bpf/relo_core.c         |    2
-rw-r--r--  tools/lib/bpf/xsk.c               |    6
-rw-r--r--  tools/lib/bpf/xsk.h               |   90
19 files changed, 1762 insertions(+), 794 deletions(-)
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 0f766345506f..b393b5e82380 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -146,12 +146,6 @@ $(BPF_IN_SHARED): force $(BPF_GENERATED)
@(test -f ../../include/uapi/linux/bpf_common.h -a -f ../../../include/uapi/linux/bpf_common.h && ( \
(diff -B ../../include/uapi/linux/bpf_common.h ../../../include/uapi/linux/bpf_common.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf_common.h' differs from latest version at 'include/uapi/linux/bpf_common.h'" >&2 )) || true
- @(test -f ../../include/uapi/linux/netlink.h -a -f ../../../include/uapi/linux/netlink.h && ( \
- (diff -B ../../include/uapi/linux/netlink.h ../../../include/uapi/linux/netlink.h >/dev/null) || \
- echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/netlink.h' differs from latest version at 'include/uapi/linux/netlink.h'" >&2 )) || true
- @(test -f ../../include/uapi/linux/if_link.h -a -f ../../../include/uapi/linux/if_link.h && ( \
- (diff -B ../../include/uapi/linux/if_link.h ../../../include/uapi/linux/if_link.h >/dev/null) || \
- echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_link.h' differs from latest version at 'include/uapi/linux/if_link.h'" >&2 )) || true
@(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \
(diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
@@ -208,8 +202,8 @@ check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT)
exit 1; \
fi
-HDR_MAJ_VERSION := $(shell grep -oE '^\#define LIBBPF_MAJOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)
-HDR_MIN_VERSION := $(shell grep -oE '^\#define LIBBPF_MINOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)
+HDR_MAJ_VERSION := $(shell grep -oE '^$(pound)define LIBBPF_MAJOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)
+HDR_MIN_VERSION := $(shell grep -oE '^$(pound)define LIBBPF_MINOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)
check_version: $(VERSION_SCRIPT) libbpf_version.h
@if [ "$(HDR_MAJ_VERSION)" != "$(LIBBPF_MAJOR_VERSION)" ]; then \
@@ -241,15 +235,24 @@ install_lib: all_cmd
$(call do_install_mkdir,$(libdir_SQ)); \
cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
-INSTALL_HEADERS = bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \
- bpf_helpers.h $(BPF_GENERATED) bpf_tracing.h \
- bpf_endian.h bpf_core_read.h skel_internal.h \
- libbpf_version.h
+SRC_HDRS := bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \
+ bpf_helpers.h bpf_tracing.h bpf_endian.h bpf_core_read.h \
+ skel_internal.h libbpf_version.h
+GEN_HDRS := $(BPF_GENERATED)
-install_headers: $(BPF_GENERATED)
- $(call QUIET_INSTALL, headers) \
- $(foreach hdr,$(INSTALL_HEADERS), \
- $(call do_install,$(hdr),$(prefix)/include/bpf,644);)
+INSTALL_PFX := $(DESTDIR)$(prefix)/include/bpf
+INSTALL_SRC_HDRS := $(addprefix $(INSTALL_PFX)/, $(SRC_HDRS))
+INSTALL_GEN_HDRS := $(addprefix $(INSTALL_PFX)/, $(notdir $(GEN_HDRS)))
+
+$(INSTALL_SRC_HDRS): $(INSTALL_PFX)/%.h: %.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(prefix)/include/bpf,644)
+
+$(INSTALL_GEN_HDRS): $(INSTALL_PFX)/%.h: $(OUTPUT)%.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(prefix)/include/bpf,644)
+
+install_headers: $(BPF_GENERATED) $(INSTALL_SRC_HDRS) $(INSTALL_GEN_HDRS)
install_pkgconfig: $(PC_FILE)
$(call QUIET_INSTALL, $(PC_FILE)) \
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 2401fad090c5..c09cbb868c9f 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -65,19 +65,28 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
return syscall(__NR_bpf, cmd, attr, size);
}
+static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
+ unsigned int size)
+{
+ int fd;
+
+ fd = sys_bpf(cmd, attr, size);
+ return ensure_good_fd(fd);
+}
+
static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
{
int retries = 5;
int fd;
do {
- fd = sys_bpf(BPF_PROG_LOAD, attr, size);
+ fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
} while (fd < 0 && errno == EAGAIN && retries-- > 0);
return fd;
}
-int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
+int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr)
{
union bpf_attr attr;
int fd;
@@ -102,11 +111,36 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
create_attr->btf_vmlinux_value_type_id;
else
attr.inner_map_fd = create_attr->inner_map_fd;
+ attr.map_extra = create_attr->map_extra;
- fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
+int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
+{
+ struct bpf_create_map_params p = {};
+
+ p.map_type = create_attr->map_type;
+ p.key_size = create_attr->key_size;
+ p.value_size = create_attr->value_size;
+ p.max_entries = create_attr->max_entries;
+ p.map_flags = create_attr->map_flags;
+ p.name = create_attr->name;
+ p.numa_node = create_attr->numa_node;
+ p.btf_fd = create_attr->btf_fd;
+ p.btf_key_type_id = create_attr->btf_key_type_id;
+ p.btf_value_type_id = create_attr->btf_value_type_id;
+ p.map_ifindex = create_attr->map_ifindex;
+ if (p.map_type == BPF_MAP_TYPE_STRUCT_OPS)
+ p.btf_vmlinux_value_type_id =
+ create_attr->btf_vmlinux_value_type_id;
+ else
+ p.inner_map_fd = create_attr->inner_map_fd;
+
+ return libbpf__bpf_create_map_xattr(&p);
+}
+
int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
int key_size, int value_size, int max_entries,
__u32 map_flags, int node)
@@ -181,7 +215,7 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
attr.numa_node = node;
}
- fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
@@ -264,6 +298,7 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
attr.line_info_rec_size = load_attr->line_info_rec_size;
attr.line_info_cnt = load_attr->line_info_cnt;
attr.line_info = ptr_to_u64(load_attr->line_info);
+ attr.fd_array = ptr_to_u64(load_attr->fd_array);
if (load_attr->name)
memcpy(attr.prog_name, load_attr->name,
@@ -608,7 +643,7 @@ int bpf_obj_get(const char *pathname)
memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
- fd = sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
@@ -719,7 +754,7 @@ int bpf_link_create(int prog_fd, int target_fd,
break;
}
proceed:
- fd = sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
@@ -762,7 +797,7 @@ int bpf_iter_create(int link_fd)
memset(&attr, 0, sizeof(attr));
attr.iter_create.link_fd = link_fd;
- fd = sys_bpf(BPF_ITER_CREATE, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
@@ -920,7 +955,7 @@ int bpf_prog_get_fd_by_id(__u32 id)
memset(&attr, 0, sizeof(attr));
attr.prog_id = id;
- fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
@@ -932,7 +967,7 @@ int bpf_map_get_fd_by_id(__u32 id)
memset(&attr, 0, sizeof(attr));
attr.map_id = id;
- fd = sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
@@ -944,7 +979,7 @@ int bpf_btf_get_fd_by_id(__u32 id)
memset(&attr, 0, sizeof(attr));
attr.btf_id = id;
- fd = sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
@@ -956,7 +991,7 @@ int bpf_link_get_fd_by_id(__u32 id)
memset(&attr, 0, sizeof(attr));
attr.link_id = id;
- fd = sys_bpf(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
@@ -987,7 +1022,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
attr.raw_tracepoint.name = ptr_to_u64(name);
attr.raw_tracepoint.prog_fd = prog_fd;
- fd = sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
@@ -1007,7 +1042,7 @@ retry:
attr.btf_log_buf = ptr_to_u64(log_buf);
}
- fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, sizeof(attr));
if (fd < 0 && !do_log && log_buf && log_buf_size) {
do_log = true;
@@ -1049,7 +1084,7 @@ int bpf_enable_stats(enum bpf_stats_type type)
memset(&attr, 0, sizeof(attr));
attr.enable_stats.type = type;
- fd = sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr));
return libbpf_err_errno(fd);
}
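
Note on the sys_bpf_fd() change above: every FD-returning BPF command is now funneled through one wrapper, so the returned descriptor can be sanitized in a single place. A minimal sketch of what such a sanitizer can look like, assuming it mirrors libbpf's internal ensure_good_fd(); the body below is an illustration, not part of this diff:

    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Sketch: remap fds 0-2 to >= 3 with CLOEXEC, so the library never
     * hands back a descriptor that aliases stdin/stdout/stderr. */
    static int ensure_good_fd(int fd)
    {
        int old_fd = fd, saved_errno;

        if (fd < 0)
            return fd;
        if (fd < 3) {
            fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
            saved_errno = errno;
            close(old_fd);
            errno = saved_errno;
        }
        return fd;
    }
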
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index 09ebe3db5f2f..e4aa9996a550 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -40,7 +40,7 @@ enum bpf_enum_value_kind {
#define __CORE_RELO(src, field, info) \
__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
bpf_probe_read_kernel( \
(void *)dst, \
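
Note on the endianness-macro switch above (the same change recurs in btf.c and btf_dump.c below): __BYTE_ORDER and __LITTLE_ENDIAN come from <endian.h>, which BPF objects built with clang -target bpf often never include. In a #if, undefined identifiers evaluate to 0, so the old test silently became 0 == 0 and took the little-endian branch on every target. The compiler-predefined __BYTE_ORDER__ family needs no header:

    /* Failure mode the switch avoids: without <endian.h>, both macros
     * below are undefined and expand to 0 inside #if, so this branch is
     * taken even on big-endian targets: */
    #if __BYTE_ORDER == __LITTLE_ENDIAN        /* 0 == 0: always true */
    /* ... */
    #endif

    /* __BYTE_ORDER__ and __ORDER_LITTLE_ENDIAN__ are predefined by both
     * GCC and clang, so this test is reliable in freestanding builds: */
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    /* genuinely little-endian-only code */
    #endif
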
diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h
index 615400391e57..d26e5472fe50 100644
--- a/tools/lib/bpf/bpf_gen_internal.h
+++ b/tools/lib/bpf/bpf_gen_internal.h
@@ -7,6 +7,21 @@ struct ksym_relo_desc {
const char *name;
int kind;
int insn_idx;
+ bool is_weak;
+ bool is_typeless;
+};
+
+struct ksym_desc {
+ const char *name;
+ int ref;
+ int kind;
+ union {
+ /* used for kfunc */
+ int off;
+ /* used for typeless ksym */
+ bool typeless;
+ };
+ int insn;
};
struct bpf_gen {
@@ -24,18 +39,23 @@ struct bpf_gen {
int relo_cnt;
char attach_target[128];
int attach_kind;
+ struct ksym_desc *ksyms;
+ __u32 nr_ksyms;
+ int fd_array;
+ int nr_fd_array;
};
void bpf_gen__init(struct bpf_gen *gen, int log_level);
int bpf_gen__finish(struct bpf_gen *gen);
void bpf_gen__free(struct bpf_gen *gen);
void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
-void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_attr *map_attr, int map_idx);
+void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx);
struct bpf_prog_load_params;
void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_attr, int prog_idx);
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
-void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind, int insn_idx);
+void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
+ bool is_typeless, int kind, int insn_idx);
#endif
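
Usage note for the extended bpf_gen__record_extern() signature: callers now pass the extern's weak/typeless properties, so the generated loader can pick the right resolution strategy (BTF lookup vs. kallsyms lookup, see gen_loader.c below). A hypothetical call site, assuming an active light-skeleton generator; the symbol names and insn_idx are illustrative only:

    /* typed kfunc extern: resolved via bpf_btf_find_by_name_kind() */
    bpf_gen__record_extern(obj->gen_loader, "bpf_fentry_test1",
                           false /* is_weak */, false /* is_typeless */,
                           BTF_KIND_FUNC, insn_idx);

    /* weak typeless ksym: resolved via bpf_kallsyms_lookup_name(),
     * allowed to be missing at load time */
    bpf_gen__record_extern(obj->gen_loader, "jiffies",
                           true /* is_weak */, true /* is_typeless */,
                           BTF_KIND_VAR, insn_idx);
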
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index d6bfbe009296..db05a5937105 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -24,6 +24,9 @@
#elif defined(__TARGET_ARCH_sparc)
#define bpf_target_sparc
#define bpf_target_defined
+#elif defined(__TARGET_ARCH_riscv)
+ #define bpf_target_riscv
+ #define bpf_target_defined
#else
/* Fall back to what the compiler says */
@@ -48,6 +51,9 @@
#elif defined(__sparc__)
#define bpf_target_sparc
#define bpf_target_defined
+#elif defined(__riscv) && __riscv_xlen == 64
+ #define bpf_target_riscv
+ #define bpf_target_defined
#endif /* no compiler target */
#endif
@@ -288,6 +294,32 @@ struct pt_regs;
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), pc)
#endif
+#elif defined(bpf_target_riscv)
+
+struct pt_regs;
+#define PT_REGS_RV const volatile struct user_regs_struct
+#define PT_REGS_PARM1(x) (((PT_REGS_RV *)(x))->a0)
+#define PT_REGS_PARM2(x) (((PT_REGS_RV *)(x))->a1)
+#define PT_REGS_PARM3(x) (((PT_REGS_RV *)(x))->a2)
+#define PT_REGS_PARM4(x) (((PT_REGS_RV *)(x))->a3)
+#define PT_REGS_PARM5(x) (((PT_REGS_RV *)(x))->a4)
+#define PT_REGS_RET(x) (((PT_REGS_RV *)(x))->ra)
+#define PT_REGS_FP(x) (((PT_REGS_RV *)(x))->s5)
+#define PT_REGS_RC(x) (((PT_REGS_RV *)(x))->a5)
+#define PT_REGS_SP(x) (((PT_REGS_RV *)(x))->sp)
+#define PT_REGS_IP(x) (((PT_REGS_RV *)(x))->epc)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a0)
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a1)
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a2)
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a3)
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a4)
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), ra)
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), fp)
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a5)
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), sp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), epc)
+
#endif
#if defined(bpf_target_powerpc)
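
With the riscv64 PT_REGS_* definitions in place, arch-portable kprobe handlers work unchanged on RISC-V. A generic usage sketch; the probed function is illustrative, and vmlinux.h is assumed to provide the register struct definitions:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("kprobe/do_unlinkat")
    int handle_unlinkat(struct pt_regs *ctx)
    {
        /* on riscv64 this reads a0; other archs map accordingly */
        unsigned long dfd = PT_REGS_PARM1(ctx);

        bpf_printk("do_unlinkat dfd=%lu", dfd);
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
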
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 6ad63e4d418a..7e4c5586bd87 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -57,7 +57,7 @@ struct btf {
* representation is broken up into three independently allocated
* memory regions to be able to modify them independently.
* raw_data is nulled out at that point, but can be later allocated
- * and cached again if user calls btf__get_raw_data(), at which point
+ * and cached again if user calls btf__raw_data(), at which point
* raw_data will contain a contiguous copy of header, types, and
* strings:
*
@@ -189,12 +189,17 @@ int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_
return 0;
}
+static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
+{
+ return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
+ btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
+}
+
static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
{
__u32 *p;
- p = libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
- btf->nr_types, BTF_MAX_NR_TYPES, 1);
+ p = btf_add_type_offs_mem(btf, 1);
if (!p)
return -ENOMEM;
@@ -231,17 +236,23 @@ static int btf_parse_hdr(struct btf *btf)
}
btf_bswap_hdr(hdr);
} else if (hdr->magic != BTF_MAGIC) {
- pr_debug("Invalid BTF magic:%x\n", hdr->magic);
+ pr_debug("Invalid BTF magic: %x\n", hdr->magic);
+ return -EINVAL;
+ }
+
+ if (btf->raw_size < hdr->hdr_len) {
+ pr_debug("BTF header len %u larger than data size %u\n",
+ hdr->hdr_len, btf->raw_size);
return -EINVAL;
}
- meta_left = btf->raw_size - sizeof(*hdr);
- if (meta_left < hdr->str_off + hdr->str_len) {
- pr_debug("Invalid BTF total size:%u\n", btf->raw_size);
+ meta_left = btf->raw_size - hdr->hdr_len;
+ if (meta_left < (long long)hdr->str_off + hdr->str_len) {
+ pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
return -EINVAL;
}
- if (hdr->type_off + hdr->type_len > hdr->str_off) {
+ if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
return -EINVAL;
@@ -304,8 +315,8 @@ static int btf_type_size(const struct btf_type *t)
return base_size + sizeof(struct btf_var);
case BTF_KIND_DATASEC:
return base_size + vlen * sizeof(struct btf_var_secinfo);
- case BTF_KIND_TAG:
- return base_size + sizeof(struct btf_tag);
+ case BTF_KIND_DECL_TAG:
+ return base_size + sizeof(struct btf_decl_tag);
default:
pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
return -EINVAL;
@@ -378,8 +389,8 @@ static int btf_bswap_type_rest(struct btf_type *t)
v->size = bswap_32(v->size);
}
return 0;
- case BTF_KIND_TAG:
- btf_tag(t)->component_idx = bswap_32(btf_tag(t)->component_idx);
+ case BTF_KIND_DECL_TAG:
+ btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
return 0;
default:
pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
@@ -430,6 +441,11 @@ __u32 btf__get_nr_types(const struct btf *btf)
return btf->start_id + btf->nr_types - 1;
}
+__u32 btf__type_cnt(const struct btf *btf)
+{
+ return btf->start_id + btf->nr_types;
+}
+
const struct btf *btf__base_btf(const struct btf *btf)
{
return btf->base_btf;
@@ -461,8 +477,8 @@ static int determine_ptr_size(const struct btf *btf)
if (btf->base_btf && btf->base_btf->ptr_sz > 0)
return btf->base_btf->ptr_sz;
- n = btf__get_nr_types(btf);
- for (i = 1; i <= n; i++) {
+ n = btf__type_cnt(btf);
+ for (i = 1; i < n; i++) {
t = btf__type_by_id(btf, i);
if (!btf_is_int(t))
continue;
@@ -522,9 +538,9 @@ int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
static bool is_host_big_endian(void)
{
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
return false;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
return true;
#else
# error "Unrecognized __BYTE_ORDER__"
@@ -591,7 +607,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_VAR:
- case BTF_KIND_TAG:
+ case BTF_KIND_DECL_TAG:
type_id = t->type;
break;
case BTF_KIND_ARRAY:
@@ -679,12 +695,12 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id)
__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
{
- __u32 i, nr_types = btf__get_nr_types(btf);
+ __u32 i, nr_types = btf__type_cnt(btf);
if (!strcmp(type_name, "void"))
return 0;
- for (i = 1; i <= nr_types; i++) {
+ for (i = 1; i < nr_types; i++) {
const struct btf_type *t = btf__type_by_id(btf, i);
const char *name = btf__name_by_offset(btf, t->name_off);
@@ -695,15 +711,15 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
return libbpf_err(-ENOENT);
}
-__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
- __u32 kind)
+static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
+ const char *type_name, __u32 kind)
{
- __u32 i, nr_types = btf__get_nr_types(btf);
+ __u32 i, nr_types = btf__type_cnt(btf);
if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
return 0;
- for (i = 1; i <= nr_types; i++) {
+ for (i = start_id; i < nr_types; i++) {
const struct btf_type *t = btf__type_by_id(btf, i);
const char *name;
@@ -717,6 +733,18 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
return libbpf_err(-ENOENT);
}
+__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
+ __u32 kind)
+{
+ return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
+}
+
+__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
+ __u32 kind)
+{
+ return btf_find_by_name_kind(btf, 1, type_name, kind);
+}
+
static bool btf_is_modifiable(const struct btf *btf)
{
return (void *)btf->hdr != btf->raw_data;
@@ -764,7 +792,7 @@ static struct btf *btf_new_empty(struct btf *base_btf)
if (base_btf) {
btf->base_btf = base_btf;
- btf->start_id = btf__get_nr_types(base_btf) + 1;
+ btf->start_id = btf__type_cnt(base_btf);
btf->start_str_off = base_btf->hdr->str_len;
}
@@ -814,7 +842,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
if (base_btf) {
btf->base_btf = base_btf;
- btf->start_id = btf__get_nr_types(base_btf) + 1;
+ btf->start_id = btf__type_cnt(base_btf);
btf->start_str_off = base_btf->hdr->str_len;
}
@@ -869,7 +897,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
}
- fd = open(path, O_RDONLY);
+ fd = open(path, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
err = -errno;
pr_warn("failed to open %s: %s\n", path, strerror(errno));
@@ -1090,99 +1118,6 @@ struct btf *btf__parse_split(const char *path, struct btf *base_btf)
return libbpf_ptr(btf_parse(path, base_btf, NULL));
}
-static int compare_vsi_off(const void *_a, const void *_b)
-{
- const struct btf_var_secinfo *a = _a;
- const struct btf_var_secinfo *b = _b;
-
- return a->offset - b->offset;
-}
-
-static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
- struct btf_type *t)
-{
- __u32 size = 0, off = 0, i, vars = btf_vlen(t);
- const char *name = btf__name_by_offset(btf, t->name_off);
- const struct btf_type *t_var;
- struct btf_var_secinfo *vsi;
- const struct btf_var *var;
- int ret;
-
- if (!name) {
- pr_debug("No name found in string section for DATASEC kind.\n");
- return -ENOENT;
- }
-
- /* .extern datasec size and var offsets were set correctly during
- * extern collection step, so just skip straight to sorting variables
- */
- if (t->size)
- goto sort_vars;
-
- ret = bpf_object__section_size(obj, name, &size);
- if (ret || !size || (t->size && t->size != size)) {
- pr_debug("Invalid size for section %s: %u bytes\n", name, size);
- return -ENOENT;
- }
-
- t->size = size;
-
- for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
- t_var = btf__type_by_id(btf, vsi->type);
- var = btf_var(t_var);
-
- if (!btf_is_var(t_var)) {
- pr_debug("Non-VAR type seen in section %s\n", name);
- return -EINVAL;
- }
-
- if (var->linkage == BTF_VAR_STATIC)
- continue;
-
- name = btf__name_by_offset(btf, t_var->name_off);
- if (!name) {
- pr_debug("No name found in string section for VAR kind\n");
- return -ENOENT;
- }
-
- ret = bpf_object__variable_offset(obj, name, &off);
- if (ret) {
- pr_debug("No offset found in symbol table for VAR %s\n",
- name);
- return -ENOENT;
- }
-
- vsi->offset = off;
- }
-
-sort_vars:
- qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
- return 0;
-}
-
-int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
-{
- int err = 0;
- __u32 i;
-
- for (i = 1; i <= btf->nr_types; i++) {
- struct btf_type *t = btf_type_by_id(btf, i);
-
- /* Loader needs to fix up some of the things compiler
- * couldn't get its hands on while emitting BTF. This
- * is section size and global variable offset. We use
- * the info from the ELF itself for this purpose.
- */
- if (btf_is_datasec(t)) {
- err = btf_fixup_datasec(obj, btf, t);
- if (err)
- break;
- }
- }
-
- return libbpf_err(err);
-}
-
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
int btf__load_into_kernel(struct btf *btf)
@@ -1300,7 +1235,7 @@ err_out:
return NULL;
}
-const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size)
+const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
{
struct btf *btf = (struct btf *)btf_ro;
__u32 data_sz;
@@ -1308,7 +1243,7 @@ const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size)
data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
if (!data)
- return errno = -ENOMEM, NULL;
+ return errno = ENOMEM, NULL;
btf->raw_size = data_sz;
if (btf->swapped_endian)
@@ -1319,6 +1254,9 @@ const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size)
return data;
}
+__attribute__((alias("btf__raw_data")))
+const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
+
const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
{
if (offset < btf->start_str_off)
@@ -1691,6 +1629,111 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_t
return btf_commit_type(btf, sz);
}
+static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
+{
+ struct btf *btf = ctx;
+
+ if (!*type_id) /* nothing to do for VOID references */
+ return 0;
+
+ /* we haven't updated btf's type count yet, so
+ * btf->start_id + btf->nr_types - 1 is the type ID offset we should
+ * add to all newly added BTF types
+ */
+ *type_id += btf->start_id + btf->nr_types - 1;
+ return 0;
+}
+
+int btf__add_btf(struct btf *btf, const struct btf *src_btf)
+{
+ struct btf_pipe p = { .src = src_btf, .dst = btf };
+ int data_sz, sz, cnt, i, err, old_strs_len;
+ __u32 *off;
+ void *t;
+
+ /* appending split BTF isn't supported yet */
+ if (src_btf->base_btf)
+ return libbpf_err(-ENOTSUP);
+
+ /* deconstruct BTF, if necessary, and invalidate raw_data */
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ /* remember original strings section size if we have to roll back
+ * partial strings section changes
+ */
+ old_strs_len = btf->hdr->str_len;
+
+ data_sz = src_btf->hdr->type_len;
+ cnt = btf__type_cnt(src_btf) - 1;
+
+ /* pre-allocate enough memory for new types */
+ t = btf_add_type_mem(btf, data_sz);
+ if (!t)
+ return libbpf_err(-ENOMEM);
+
+ /* pre-allocate enough memory for type offset index for new types */
+ off = btf_add_type_offs_mem(btf, cnt);
+ if (!off)
+ return libbpf_err(-ENOMEM);
+
+ /* bulk copy types data for all types from src_btf */
+ memcpy(t, src_btf->types_data, data_sz);
+
+ for (i = 0; i < cnt; i++) {
+ sz = btf_type_size(t);
+ if (sz < 0) {
+ /* unlikely, has to be corrupted src_btf */
+ err = sz;
+ goto err_out;
+ }
+
+ /* fill out type ID to type offset mapping for lookups by type ID */
+ *off = t - btf->types_data;
+
+ /* add, dedup, and remap strings referenced by this BTF type */
+ err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
+ if (err)
+ goto err_out;
+
+ /* remap all type IDs referenced from this BTF type */
+ err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
+ if (err)
+ goto err_out;
+
+ /* go to next type data and type offset index entry */
+ t += sz;
+ off++;
+ }
+
+ /* Up until now any of the copied type data was effectively invisible,
+ * so if we exited early before this point due to error, BTF would be
+ * effectively unmodified. There would be extra internal memory
+ * pre-allocated, but it would not be available for querying. But now
+ * that we've copied and rewritten all the data successfully, we can
+ * update type count and various internal offsets and sizes to
+ * "commit" the changes and made them visible to the outside world.
+ */
+ btf->hdr->type_len += data_sz;
+ btf->hdr->str_off += data_sz;
+ btf->nr_types += cnt;
+
+ /* return type ID of the first added BTF type */
+ return btf->start_id + btf->nr_types - cnt;
+err_out:
+ /* zero out preallocated memory as if it was just allocated with
+ * libbpf_add_mem()
+ */
+ memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
+ memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);
+
+ /* and now restore original strings section size; types data size
+ * wasn't modified, so doesn't need restoring, see big comment above */
+ btf->hdr->str_len = old_strs_len;
+
+ return libbpf_err(err);
+}
+
/*
* Append new BTF_KIND_INT type with:
* - *name* - non-empty, non-NULL type name;
@@ -1939,7 +1982,7 @@ int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
static struct btf_type *btf_last_type(struct btf *btf)
{
- return btf_type_by_id(btf, btf__get_nr_types(btf));
+ return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
}
/*
@@ -2447,7 +2490,7 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __
}
/*
- * Append new BTF_KIND_TAG type with:
+ * Append new BTF_KIND_DECL_TAG type with:
* - *value* - non-empty/non-NULL string;
* - *ref_type_id* - referenced type ID, it might not exist yet;
* - *component_idx* - -1 for tagging reference type, otherwise struct/union
@@ -2456,7 +2499,7 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __
* - >0, type ID of newly added BTF type;
* - <0, on error.
*/
-int btf__add_tag(struct btf *btf, const char *value, int ref_type_id,
+int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
int component_idx)
{
struct btf_type *t;
@@ -2471,7 +2514,7 @@ int btf__add_tag(struct btf *btf, const char *value, int ref_type_id,
if (btf_ensure_modifiable(btf))
return libbpf_err(-ENOMEM);
- sz = sizeof(struct btf_type) + sizeof(struct btf_tag);
+ sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
t = btf_add_type_mem(btf, sz);
if (!t)
return libbpf_err(-ENOMEM);
@@ -2481,9 +2524,9 @@ int btf__add_tag(struct btf *btf, const char *value, int ref_type_id,
return value_off;
t->name_off = value_off;
- t->info = btf_type_info(BTF_KIND_TAG, 0, false);
+ t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
t->type = ref_type_id;
- btf_tag(t)->component_idx = component_idx;
+ btf_decl_tag(t)->component_idx = component_idx;
return btf_commit_type(btf, sz);
}
@@ -2962,8 +3005,10 @@ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
return libbpf_err(-EINVAL);
}
- if (btf_ensure_modifiable(btf))
- return libbpf_err(-ENOMEM);
+ if (btf_ensure_modifiable(btf)) {
+ err = -ENOMEM;
+ goto done;
+ }
err = btf_dedup_prep(d);
if (err) {
@@ -3143,7 +3188,7 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
goto done;
}
- type_cnt = btf__get_nr_types(btf) + 1;
+ type_cnt = btf__type_cnt(btf);
d->map = malloc(sizeof(__u32) * type_cnt);
if (!d->map) {
err = -ENOMEM;
@@ -3305,7 +3350,7 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
}
/* Calculate type signature hash of INT or TAG. */
-static long btf_hash_int_tag(struct btf_type *t)
+static long btf_hash_int_decl_tag(struct btf_type *t)
{
__u32 info = *(__u32 *)(t + 1);
long h;
@@ -3583,8 +3628,8 @@ static int btf_dedup_prep(struct btf_dedup *d)
h = btf_hash_common(t);
break;
case BTF_KIND_INT:
- case BTF_KIND_TAG:
- h = btf_hash_int_tag(t);
+ case BTF_KIND_DECL_TAG:
+ h = btf_hash_int_decl_tag(t);
break;
case BTF_KIND_ENUM:
h = btf_hash_enum(t);
@@ -3639,11 +3684,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_FUNC_PROTO:
case BTF_KIND_VAR:
case BTF_KIND_DATASEC:
- case BTF_KIND_TAG:
+ case BTF_KIND_DECL_TAG:
return 0;
case BTF_KIND_INT:
- h = btf_hash_int_tag(t);
+ h = btf_hash_int_decl_tag(t);
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
cand = btf_type_by_id(d->btf, cand_id);
@@ -4260,13 +4305,13 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
}
break;
- case BTF_KIND_TAG:
+ case BTF_KIND_DECL_TAG:
ref_type_id = btf_dedup_ref_type(d, t->type);
if (ref_type_id < 0)
return ref_type_id;
t->type = ref_type_id;
- h = btf_hash_int_tag(t);
+ h = btf_hash_int_decl_tag(t);
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
cand = btf_type_by_id(d->btf, cand_id);
@@ -4549,7 +4594,7 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
- case BTF_KIND_TAG:
+ case BTF_KIND_DECL_TAG:
return visit(&t->type, ctx);
case BTF_KIND_ARRAY: {
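
Note on the btf__get_nr_types() to btf__type_cnt() migration visible throughout this file: the old helper returned the last type ID (loops ran i <= n), while the new one returns one past it, i.e. the type count including the implicit "void" type 0 (loops run i < n). An iteration sketch with the new API:

    __u32 i, n = btf__type_cnt(btf);

    for (i = 1; i < n; i++) {           /* ID 0 is the implicit "void" */
        const struct btf_type *t = btf__type_by_id(btf, i);
        /* ... inspect t ... */
    }

For split BTF, starting the loop at btf__type_cnt(btf__base_btf(btf)) instead of 1 restricts it to the types owned by this object, which is what the new btf__find_by_name_kind_own() relies on via btf->start_id.
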
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 2cfe31327920..bc005ba3ceec 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -123,6 +123,7 @@ LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *b
LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_from_kernel_by_id instead")
LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
+LIBBPF_DEPRECATED_SINCE(0, 6, "intended for internal libbpf use only")
LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_into_kernel instead")
LIBBPF_API int btf__load(struct btf *btf);
@@ -131,7 +132,9 @@ LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
const char *type_name);
LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
const char *type_name, __u32 kind);
+LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__type_cnt() instead; note that btf__get_nr_types() == btf__type_cnt() - 1")
LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
+LIBBPF_API __u32 btf__type_cnt(const struct btf *btf);
LIBBPF_API const struct btf *btf__base_btf(const struct btf *btf);
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
__u32 id);
@@ -144,7 +147,9 @@ LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
LIBBPF_API int btf__fd(const struct btf *btf);
LIBBPF_API void btf__set_fd(struct btf *btf, int fd);
+LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__raw_data() instead")
LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
+LIBBPF_API const void *btf__raw_data(const struct btf *btf, __u32 *size);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
@@ -173,6 +178,28 @@ LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
LIBBPF_API int btf__add_type(struct btf *btf, const struct btf *src_btf,
const struct btf_type *src_type);
+/**
+ * @brief **btf__add_btf()** appends all the BTF types from *src_btf* into *btf*
+ * @param btf BTF object which all the BTF types and strings are added to
+ * @param src_btf BTF object which all BTF types and referenced strings are copied from
+ * @return BTF type ID of the first appended BTF type, or negative error code
+ *
+ * **btf__add_btf()** can be used to simply and efficiently append the entire
+ * contents of one BTF object to another one. All the BTF type data is copied
+ * over, all referenced type IDs are adjusted by adding a necessary ID offset.
+ * Only strings referenced from BTF types are copied over and deduplicated, so
+ * if there were some unused strings in *src_btf*, those won't be copied over,
+ * which is consistent with the general string deduplication semantics of BTF
+ * writing APIs.
+ *
+ * If any error is encountered during this process, the contents of *btf*
+ * are left intact, which means that **btf__add_btf()** follows
+ * transactional semantics and the operation as a whole is all-or-nothing.
+ *
+ * *src_btf* has to be non-split BTF, as of now copying types from split BTF
+ * is not supported and will result in -ENOTSUP error code returned.
+ */
+LIBBPF_API int btf__add_btf(struct btf *btf, const struct btf *src_btf);
LIBBPF_API int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding);
LIBBPF_API int btf__add_float(struct btf *btf, const char *name, size_t byte_sz);
@@ -214,7 +241,7 @@ LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id,
__u32 offset, __u32 byte_sz);
/* tag construction API */
-LIBBPF_API int btf__add_tag(struct btf *btf, const char *value, int ref_type_id,
+LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
int component_idx);
struct btf_dedup_opts {
@@ -404,9 +431,9 @@ static inline bool btf_is_float(const struct btf_type *t)
return btf_kind(t) == BTF_KIND_FLOAT;
}
-static inline bool btf_is_tag(const struct btf_type *t)
+static inline bool btf_is_decl_tag(const struct btf_type *t)
{
- return btf_kind(t) == BTF_KIND_TAG;
+ return btf_kind(t) == BTF_KIND_DECL_TAG;
}
static inline __u8 btf_int_encoding(const struct btf_type *t)
@@ -477,10 +504,10 @@ btf_var_secinfos(const struct btf_type *t)
return (struct btf_var_secinfo *)(t + 1);
}
-struct btf_tag;
-static inline struct btf_tag *btf_tag(const struct btf_type *t)
+struct btf_decl_tag;
+static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t)
{
- return (struct btf_tag *)(t + 1);
+ return (struct btf_decl_tag *)(t + 1);
}
#ifdef __cplusplus
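
A minimal usage sketch for the newly documented btf__add_btf(); the file name and error path are illustrative, the APIs are the ones declared above:

    struct btf *dst = btf__new_empty();
    struct btf *src = btf__parse("one.bpf.o", NULL);
    int first_id;

    first_id = btf__add_btf(dst, src);    /* all-or-nothing append */
    if (first_id < 0)
        goto cleanup;                     /* dst is unmodified on error */

    /* appended types now occupy IDs [first_id, btf__type_cnt(dst)) */
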
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index ad6df97295ae..17db62b5002e 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -188,7 +188,7 @@ err:
static int btf_dump_resize(struct btf_dump *d)
{
- int err, last_id = btf__get_nr_types(d->btf);
+ int err, last_id = btf__type_cnt(d->btf) - 1;
if (last_id <= d->last_id)
return 0;
@@ -262,7 +262,7 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
{
int err, i;
- if (id > btf__get_nr_types(d->btf))
+ if (id >= btf__type_cnt(d->btf))
return libbpf_err(-EINVAL);
err = btf_dump_resize(d);
@@ -294,11 +294,11 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
*/
static int btf_dump_mark_referenced(struct btf_dump *d)
{
- int i, j, n = btf__get_nr_types(d->btf);
+ int i, j, n = btf__type_cnt(d->btf);
const struct btf_type *t;
__u16 vlen;
- for (i = d->last_id + 1; i <= n; i++) {
+ for (i = d->last_id + 1; i < n; i++) {
t = btf__type_by_id(d->btf, i);
vlen = btf_vlen(t);
@@ -316,7 +316,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
- case BTF_KIND_TAG:
+ case BTF_KIND_DECL_TAG:
d->type_states[t->type].referenced = 1;
break;
@@ -584,7 +584,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DATASEC:
- case BTF_KIND_TAG:
+ case BTF_KIND_DECL_TAG:
d->type_states[id].order_state = ORDERED;
return 0;
@@ -1562,29 +1562,28 @@ static int btf_dump_get_bitfield_value(struct btf_dump *d,
__u64 *value)
{
__u16 left_shift_bits, right_shift_bits;
- __u8 nr_copy_bits, nr_copy_bytes;
const __u8 *bytes = data;
- int sz = t->size;
+ __u8 nr_copy_bits;
__u64 num = 0;
int i;
/* Maximum supported bitfield size is 64 bits */
- if (sz > 8) {
- pr_warn("unexpected bitfield size %d\n", sz);
+ if (t->size > 8) {
+ pr_warn("unexpected bitfield size %d\n", t->size);
return -EINVAL;
}
/* Bitfield value retrieval is done in two steps; first relevant bytes are
* stored in num, then we left/right shift num to eliminate irrelevant bits.
*/
- nr_copy_bits = bit_sz + bits_offset;
- nr_copy_bytes = t->size;
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- for (i = nr_copy_bytes - 1; i >= 0; i--)
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ for (i = t->size - 1; i >= 0; i--)
num = num * 256 + bytes[i];
-#elif __BYTE_ORDER == __BIG_ENDIAN
- for (i = 0; i < nr_copy_bytes; i++)
+ nr_copy_bits = bit_sz + bits_offset;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ for (i = 0; i < t->size; i++)
num = num * 256 + bytes[i];
+ nr_copy_bits = t->size * 8 - bits_offset;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
@@ -1658,9 +1657,15 @@ static int btf_dump_base_type_check_zero(struct btf_dump *d,
return 0;
}
-static bool ptr_is_aligned(const void *data, int data_sz)
+static bool ptr_is_aligned(const struct btf *btf, __u32 type_id,
+ const void *data)
{
- return ((uintptr_t)data) % data_sz == 0;
+ int alignment = btf__align_of(btf, type_id);
+
+ if (alignment == 0)
+ return false;
+
+ return ((uintptr_t)data) % alignment == 0;
}
static int btf_dump_int_data(struct btf_dump *d,
@@ -1671,9 +1676,10 @@ static int btf_dump_int_data(struct btf_dump *d,
{
__u8 encoding = btf_int_encoding(t);
bool sign = encoding & BTF_INT_SIGNED;
+ char buf[16] __attribute__((aligned(16)));
int sz = t->size;
- if (sz == 0) {
+ if (sz == 0 || sz > sizeof(buf)) {
pr_warn("unexpected size %d for id [%u]\n", sz, type_id);
return -EINVAL;
}
@@ -1681,8 +1687,10 @@ static int btf_dump_int_data(struct btf_dump *d,
/* handle packed int data - accesses of integers not aligned on
* int boundaries can cause problems on some platforms.
*/
- if (!ptr_is_aligned(data, sz))
- return btf_dump_bitfield_data(d, t, data, 0, 0);
+ if (!ptr_is_aligned(d->btf, type_id, data)) {
+ memcpy(buf, data, sz);
+ data = buf;
+ }
switch (sz) {
case 16: {
@@ -1692,10 +1700,10 @@ static int btf_dump_int_data(struct btf_dump *d,
/* avoid use of __int128 as some 32-bit platforms do not
* support it.
*/
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
lsi = ints[0];
msi = ints[1];
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
lsi = ints[1];
msi = ints[0];
#else
@@ -1768,7 +1776,7 @@ static int btf_dump_float_data(struct btf_dump *d,
int sz = t->size;
/* handle unaligned data; copy to local union */
- if (!ptr_is_aligned(data, sz)) {
+ if (!ptr_is_aligned(d->btf, type_id, data)) {
memcpy(&fl, data, sz);
flp = &fl;
}
@@ -1931,7 +1939,7 @@ static int btf_dump_ptr_data(struct btf_dump *d,
__u32 id,
const void *data)
{
- if (ptr_is_aligned(data, d->ptr_sz) && d->ptr_sz == sizeof(void *)) {
+ if (ptr_is_aligned(d->btf, id, data) && d->ptr_sz == sizeof(void *)) {
btf_dump_type_values(d, "%p", *(void **)data);
} else {
union ptr_data pt;
@@ -1951,10 +1959,8 @@ static int btf_dump_get_enum_value(struct btf_dump *d,
__u32 id,
__s64 *value)
{
- int sz = t->size;
-
/* handle unaligned enum value */
- if (!ptr_is_aligned(data, sz)) {
+ if (!ptr_is_aligned(d->btf, id, data)) {
__u64 val;
int err;
@@ -2217,7 +2223,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d,
case BTF_KIND_FWD:
case BTF_KIND_FUNC:
case BTF_KIND_FUNC_PROTO:
- case BTF_KIND_TAG:
+ case BTF_KIND_DECL_TAG:
err = btf_dump_unsupported_data(d, t, id);
break;
case BTF_KIND_INT:
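
Note on the ptr_is_aligned() rework above: a type's size is the wrong alignment proxy, because alignment derives from the type's members, not its total size. A sketch of the difference, using the BTF-declared alignment the new code consults:

    struct s { __u32 a, b, c; };          /* size 12, alignment 4 */

    /* old check: (uintptr_t)data % 12 == 0, which rejects perfectly
     * valid pointers (and accepts some misaligned ones); the new check
     * asks BTF instead: */
    int align = btf__align_of(btf, type_id);       /* 0 on error */
    bool ok = align > 0 && ((uintptr_t)data % align) == 0;

The copy-to-aligned-buffer fallback added to btf_dump_int_data() then handles genuinely packed integers instead of misrendering them through the bitfield path.
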
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
index 80087b13877f..502dea53a742 100644
--- a/tools/lib/bpf/gen_loader.c
+++ b/tools/lib/bpf/gen_loader.c
@@ -13,9 +13,12 @@
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
+#include <asm/byteorder.h>
-#define MAX_USED_MAPS 64
-#define MAX_USED_PROGS 32
+#define MAX_USED_MAPS 64
+#define MAX_USED_PROGS 32
+#define MAX_KFUNC_DESCS 256
+#define MAX_FD_ARRAY_SZ (MAX_USED_PROGS + MAX_KFUNC_DESCS)
/* The following structure describes the stack layout of the loader program.
* In addition R6 contains the pointer to context.
@@ -30,7 +33,6 @@
*/
struct loader_stack {
__u32 btf_fd;
- __u32 map_fd[MAX_USED_MAPS];
__u32 prog_fd[MAX_USED_PROGS];
__u32 inner_map_fd;
};
@@ -143,13 +145,49 @@ static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
if (realloc_data_buf(gen, size8))
return 0;
prev = gen->data_cur;
- memcpy(gen->data_cur, data, size);
- gen->data_cur += size;
- memcpy(gen->data_cur, &zero, size8 - size);
- gen->data_cur += size8 - size;
+ if (data) {
+ memcpy(gen->data_cur, data, size);
+ memcpy(gen->data_cur + size, &zero, size8 - size);
+ } else {
+ memset(gen->data_cur, 0, size8);
+ }
+ gen->data_cur += size8;
return prev - gen->data_start;
}
+/* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative
+ * to start of fd_array. Caller can decide if it is usable or not.
+ */
+static int add_map_fd(struct bpf_gen *gen)
+{
+ if (!gen->fd_array)
+ gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
+ if (gen->nr_maps == MAX_USED_MAPS) {
+ pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
+ gen->error = -E2BIG;
+ return 0;
+ }
+ return gen->nr_maps++;
+}
+
+static int add_kfunc_btf_fd(struct bpf_gen *gen)
+{
+ int cur;
+
+ if (!gen->fd_array)
+ gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
+ if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
+ cur = add_data(gen, NULL, sizeof(int));
+ return (cur - gen->fd_array) / sizeof(int);
+ }
+ return MAX_USED_MAPS + gen->nr_fd_array++;
+}
+
+static int blob_fd_array_off(struct bpf_gen *gen, int index)
+{
+ return gen->fd_array + index * sizeof(int);
+}
+
static int insn_bytes_to_bpf_size(__u32 sz)
{
switch (sz) {
@@ -171,14 +209,22 @@ static void emit_rel_store(struct bpf_gen *gen, int off, int data)
emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}
-/* *(u64 *)(blob + off) = (u64)(void *)(%sp + stack_off) */
-static void emit_rel_store_sp(struct bpf_gen *gen, int off, int stack_off)
+static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
{
- emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_10));
- emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, stack_off));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, blob_off));
+ emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, off));
- emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
+ emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
+}
+
+static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
+{
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, blob_off));
+ emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
+ emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}
static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
@@ -326,11 +372,11 @@ int bpf_gen__finish(struct bpf_gen *gen)
offsetof(struct bpf_prog_desc, prog_fd), 4,
stack_off(prog_fd[i]));
for (i = 0; i < gen->nr_maps; i++)
- move_stack2ctx(gen,
- sizeof(struct bpf_loader_ctx) +
- sizeof(struct bpf_map_desc) * i +
- offsetof(struct bpf_map_desc, map_fd), 4,
- stack_off(map_fd[i]));
+ move_blob2ctx(gen,
+ sizeof(struct bpf_loader_ctx) +
+ sizeof(struct bpf_map_desc) * i +
+ offsetof(struct bpf_map_desc, map_fd), 4,
+ blob_fd_array_off(gen, i));
emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
emit(gen, BPF_EXIT_INSN());
pr_debug("gen: finish %d\n", gen->error);
@@ -386,11 +432,11 @@ void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
}
void bpf_gen__map_create(struct bpf_gen *gen,
- struct bpf_create_map_attr *map_attr, int map_idx)
+ struct bpf_create_map_params *map_attr, int map_idx)
{
int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id);
bool close_inner_map_fd = false;
- int map_create_attr;
+ int map_create_attr, idx;
union bpf_attr attr;
memset(&attr, 0, attr_size);
@@ -398,6 +444,7 @@ void bpf_gen__map_create(struct bpf_gen *gen,
attr.key_size = map_attr->key_size;
attr.value_size = map_attr->value_size;
attr.map_flags = map_attr->map_flags;
+ attr.map_extra = map_attr->map_extra;
memcpy(attr.map_name, map_attr->name,
min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1));
attr.numa_node = map_attr->numa_node;
@@ -467,9 +514,11 @@ void bpf_gen__map_create(struct bpf_gen *gen,
gen->error = -EDOM; /* internal bug */
return;
} else {
- emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
- stack_off(map_fd[map_idx])));
- gen->nr_maps++;
+ /* add_map_fd does gen->nr_maps++ */
+ idx = add_map_fd(gen);
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, blob_fd_array_off(gen, idx)));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
}
if (close_inner_map_fd)
emit_sys_close_stack(gen, stack_off(inner_map_fd));
@@ -511,8 +560,8 @@ static void emit_find_attach_target(struct bpf_gen *gen)
*/
}
-void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind,
- int insn_idx)
+void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
+ bool is_typeless, int kind, int insn_idx)
{
struct ksym_relo_desc *relo;
@@ -524,38 +573,292 @@ void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind,
gen->relos = relo;
relo += gen->relo_cnt;
relo->name = name;
+ relo->is_weak = is_weak;
+ relo->is_typeless = is_typeless;
relo->kind = kind;
relo->insn_idx = insn_idx;
gen->relo_cnt++;
}
-static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
+/* returns existing ksym_desc with ref incremented, or inserts a new one */
+static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
- int name, insn, len = strlen(relo->name) + 1;
+ struct ksym_desc *kdesc;
- pr_debug("gen: emit_relo: %s at %d\n", relo->name, relo->insn_idx);
- name = add_data(gen, relo->name, len);
+ for (int i = 0; i < gen->nr_ksyms; i++) {
+ if (!strcmp(gen->ksyms[i].name, relo->name)) {
+ gen->ksyms[i].ref++;
+ return &gen->ksyms[i];
+ }
+ }
+ kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
+ if (!kdesc) {
+ gen->error = -ENOMEM;
+ return NULL;
+ }
+ gen->ksyms = kdesc;
+ kdesc = &gen->ksyms[gen->nr_ksyms++];
+ kdesc->name = relo->name;
+ kdesc->kind = relo->kind;
+ kdesc->ref = 1;
+ kdesc->off = 0;
+ kdesc->insn = 0;
+ return kdesc;
+}
+
+/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
+ * Returns result in BPF_REG_7
+ */
+static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
+{
+ int name_off, len = strlen(relo->name) + 1;
+ name_off = add_data(gen, relo->name, len);
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
- 0, 0, 0, name));
+ 0, 0, 0, name_off));
emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
- emit_check_err(gen);
+}
+
+/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
+ * Returns result in BPF_REG_7
+ * Returns u64 symbol addr in BPF_REG_9
+ */
+static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
+{
+ int name_off, len = strlen(relo->name) + 1, res_off;
+
+ name_off = add_data(gen, relo->name, len);
+ res_off = add_data(gen, NULL, 8); /* res is u64 */
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, name_off));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, res_off));
+ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
+ emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
+ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+ debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
+}
+
+/* Expects:
+ * BPF_REG_8 - pointer to instruction
+ *
+ * We need to reuse the BTF fd for the same symbol, otherwise each relocation
+ * takes a new index, while the kernel limits total kfunc BTFs to 256. For
+ * duplicate symbols this would mean a new BTF fd index for each entry. By
+ * pairing the symbol name with the index, we get the insn->imm, insn->off
+ * pairing that the kernel uses for kfunc_tab, which becomes the effective
+ * limit even though all of them may share the same index in fd_array (such
+ * that kfunc_btf_tab has 1 element).
+ */
+static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
+{
+ struct ksym_desc *kdesc;
+ int btf_fd_idx;
+
+ kdesc = get_ksym_desc(gen, relo);
+ if (!kdesc)
+ return;
+ /* try to copy from existing bpf_insn */
+ if (kdesc->ref > 1) {
+ move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
+ kdesc->insn + offsetof(struct bpf_insn, imm));
+ move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
+ kdesc->insn + offsetof(struct bpf_insn, off));
+ goto log;
+ }
+ /* remember insn offset, so we can copy BTF ID and FD later */
+ kdesc->insn = insn;
+ emit_bpf_find_by_name_kind(gen, relo);
+ if (!relo->is_weak)
+ emit_check_err(gen);
+ /* get index in fd_array to store BTF FD at */
+ btf_fd_idx = add_kfunc_btf_fd(gen);
+ if (btf_fd_idx > INT16_MAX) {
+ pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
+ btf_fd_idx, relo->name);
+ gen->error = -E2BIG;
+ return;
+ }
+ kdesc->off = btf_fd_idx;
+ /* set a default value for imm */
+ emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
+ /* skip success case store if ret < 0 */
+ emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 1));
/* store btf_id into insn[insn_idx].imm */
- insn = insns + sizeof(struct bpf_insn) * relo->insn_idx +
- offsetof(struct bpf_insn, imm);
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
+ /* load fd_array slot pointer */
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
- 0, 0, 0, insn));
- emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, 0));
- if (relo->kind == BTF_KIND_VAR) {
- /* store btf_obj_fd into insn[insn_idx + 1].imm */
- emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
- emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
- sizeof(struct bpf_insn)));
+ 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
+ /* skip store of BTF fd if ret < 0 */
+ emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 3));
+ /* store BTF fd in slot */
+ emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
+ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
+ /* set a default value for off */
+ emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
+ /* skip insn->off store if ret < 0 */
+ emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 2));
+ /* skip if vmlinux BTF */
+ emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1));
+ /* store index into insn[insn_idx].off */
+ emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
+log:
+ if (!gen->log_level)
+ return;
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
+ offsetof(struct bpf_insn, imm)));
+ emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
+ offsetof(struct bpf_insn, off)));
+ debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
+ relo->name, kdesc->ref);
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
+ debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
+ relo->name, kdesc->ref);
+}
+
+static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
+ int ref)
+{
+ if (!gen->log_level)
+ return;
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
+ offsetof(struct bpf_insn, imm)));
+ emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
+ offsetof(struct bpf_insn, imm)));
+ debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
+ relo->is_typeless, relo->is_weak, relo->name, ref);
+ emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
+ debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
+ relo->is_typeless, relo->is_weak, relo->name, ref);
+}
+
+/* Expects:
+ * BPF_REG_8 - pointer to instruction
+ */
+static void emit_relo_ksym_typeless(struct bpf_gen *gen,
+ struct ksym_relo_desc *relo, int insn)
+{
+ struct ksym_desc *kdesc;
+
+ kdesc = get_ksym_desc(gen, relo);
+ if (!kdesc)
+ return;
+ /* try to copy from existing ldimm64 insn */
+ if (kdesc->ref > 1) {
+ move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
+ kdesc->insn + offsetof(struct bpf_insn, imm));
+ move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
+ kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
+ goto log;
+ }
+ /* remember insn offset, so we can copy ksym addr later */
+ kdesc->insn = insn;
+ /* skip typeless ksym_desc in fd closing loop in cleanup_relos */
+ kdesc->typeless = true;
+ emit_bpf_kallsyms_lookup_name(gen, relo);
+ emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
+ emit_check_err(gen);
+ /* store lower half of addr into insn[insn_idx].imm */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
+ /* store upper half of addr into insn[insn_idx + 1].imm */
+ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
+ sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
+log:
+ emit_ksym_relo_log(gen, relo, kdesc->ref);
+}
+
+static __u32 src_reg_mask(void)
+{
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ return 0x0f; /* src_reg,dst_reg,... */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ return 0xf0; /* dst_reg,src_reg,... */
+#else
+#error "Unsupported bit endianness, cannot proceed"
+#endif
+}
+
+/* Expects:
+ * BPF_REG_8 - pointer to instruction
+ */
+static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
+{
+ struct ksym_desc *kdesc;
+ __u32 reg_mask;
+
+ kdesc = get_ksym_desc(gen, relo);
+ if (!kdesc)
+ return;
+ /* try to copy from existing ldimm64 insn */
+ if (kdesc->ref > 1) {
+ move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
+ kdesc->insn + offsetof(struct bpf_insn, imm));
+ move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
+ kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_8, offsetof(struct bpf_insn, imm)));
+ /* jump over src_reg adjustment if imm is not 0 */
+ emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 3));
+ goto clear_src_reg;
+ }
+ /* remember insn offset, so we can copy BTF ID and FD later */
+ kdesc->insn = insn;
+ emit_bpf_find_by_name_kind(gen, relo);
+ if (!relo->is_weak)
+ emit_check_err(gen);
+ /* set default values as 0 */
+ emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
+ emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
+ /* skip success case stores if ret < 0 */
+ emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 4));
+ /* store btf_id into insn[insn_idx].imm */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
+ /* store btf_obj_fd into insn[insn_idx + 1].imm */
+ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
+ sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
+ emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
+clear_src_reg:
+ /* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
+ reg_mask = src_reg_mask();
+ emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
+ emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
+ emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));
+
+ emit_ksym_relo_log(gen, relo, kdesc->ref);
+}
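bpf_btf_find_by_name_kind() returns the BTF type ID in the lower 32 bits and the BTF object fd in the upper 32 bits (negative on error), so the generated stores amount to this C sketch (illustrative only):

    static void patch_ldimm64_btf(struct bpf_insn insn[2], __s64 ret)
    {
    	if (ret < 0)	/* unresolved weak ksym: keep the zeroed imms */
    		return;
    	insn[0].imm = (__u32)ret;         /* btf_type_id */
    	insn[1].imm = (__u32)(ret >> 32); /* btf_obj_fd */
    }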
+
+static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
+{
+ int insn;
+
+ pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx);
+ insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
+ switch (relo->kind) {
+ case BTF_KIND_VAR:
+ if (relo->is_typeless)
+ emit_relo_ksym_typeless(gen, relo, insn);
+ else
+ emit_relo_ksym_btf(gen, relo, insn);
+ break;
+ case BTF_KIND_FUNC:
+ emit_relo_kfunc_btf(gen, relo, insn);
+ break;
+ default:
+ pr_warn("Unknown relocation kind '%d'\n", relo->kind);
+ gen->error = -EDOM;
+ return;
}
}
@@ -571,14 +874,23 @@ static void cleanup_relos(struct bpf_gen *gen, int insns)
{
int i, insn;
- for (i = 0; i < gen->relo_cnt; i++) {
- if (gen->relos[i].kind != BTF_KIND_VAR)
- continue;
- /* close fd recorded in insn[insn_idx + 1].imm */
- insn = insns +
- sizeof(struct bpf_insn) * (gen->relos[i].insn_idx + 1) +
- offsetof(struct bpf_insn, imm);
- emit_sys_close_blob(gen, insn);
+ for (i = 0; i < gen->nr_ksyms; i++) {
+ /* only close fds for typed ksyms and kfuncs */
+ if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) {
+ /* close fd recorded in insn[insn_idx + 1].imm */
+ insn = gen->ksyms[i].insn;
+ insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
+ emit_sys_close_blob(gen, insn);
+ } else if (gen->ksyms[i].kind == BTF_KIND_FUNC) {
+ emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off));
+ if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ)
+ gen->nr_fd_array--;
+ }
+ }
+ if (gen->nr_ksyms) {
+ free(gen->ksyms);
+ gen->nr_ksyms = 0;
+ gen->ksyms = NULL;
}
if (gen->relo_cnt) {
free(gen->relos);
@@ -637,9 +949,8 @@ void bpf_gen__prog_load(struct bpf_gen *gen,
/* populate union bpf_attr with a pointer to line_info */
emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);
- /* populate union bpf_attr fd_array with a pointer to stack where map_fds are saved */
- emit_rel_store_sp(gen, attr_field(prog_load_attr, fd_array),
- stack_off(map_fd[0]));
+ /* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
+ emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);
/* populate union bpf_attr with user provided log details */
move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
@@ -706,8 +1017,8 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
map_update_attr = add_data(gen, &attr, attr_size);
- move_stack2blob(gen, attr_field(map_update_attr, map_fd), 4,
- stack_off(map_fd[map_idx]));
+ move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
+ blob_fd_array_off(gen, map_idx));
emit_rel_store(gen, attr_field(map_update_attr, key), key);
emit_rel_store(gen, attr_field(map_update_attr, value), value);
/* emit MAP_UPDATE_ELEM command */
@@ -725,8 +1036,8 @@ void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
memset(&attr, 0, attr_size);
pr_debug("gen: map_freeze: idx %d\n", map_idx);
map_freeze_attr = add_data(gen, &attr, attr_size);
- move_stack2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
- stack_off(map_fd[map_idx]));
+ move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
+ blob_fd_array_off(gen, map_idx));
/* emit MAP_FREEZE command */
emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
debug_ret(gen, "map_freeze");
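Both call sites now read map fds from fixed slots in the loader's data blob instead of the generated program's stack. The helper's body isn't shown in this hunk; a plausible shape, assuming gen->fd_array records the blob offset of fd_array[0]:

    static int blob_fd_array_off(struct bpf_gen *gen, int index)
    {
    	return gen->fd_array + index * sizeof(int);
    }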
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 8892f2f1bbcc..a1bea1953df6 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -195,8 +195,8 @@ enum kern_feature_id {
FEAT_BTF_FLOAT,
/* BPF perf link support */
FEAT_PERF_LINK,
- /* BTF_KIND_TAG support */
- FEAT_BTF_TAG,
+ /* BTF_KIND_DECL_TAG support */
+ FEAT_BTF_DECL_TAG,
__FEAT_CNT,
};
@@ -285,7 +285,7 @@ struct bpf_program {
size_t sub_insn_off;
char *name;
- /* sec_name with / replaced by _; makes recursive pinning
+ /* name with / replaced by _; makes recursive pinning
* in bpf_object__pin_programs easier
*/
char *pin_name;
@@ -370,15 +370,14 @@ enum libbpf_map_type {
LIBBPF_MAP_KCONFIG,
};
-static const char * const libbpf_type_to_btf_name[] = {
- [LIBBPF_MAP_DATA] = DATA_SEC,
- [LIBBPF_MAP_BSS] = BSS_SEC,
- [LIBBPF_MAP_RODATA] = RODATA_SEC,
- [LIBBPF_MAP_KCONFIG] = KCONFIG_SEC,
-};
-
struct bpf_map {
char *name;
+ /* real_name is defined for special internal maps (.rodata*,
+ * .data*, .bss, .kconfig) and preserves their original ELF section
+	 * name. This is important to be able to find corresponding BTF
+ * DATASEC information.
+ */
+ char *real_name;
int fd;
int sec_idx;
size_t sec_offset;
@@ -401,6 +400,7 @@ struct bpf_map {
char *pin_path;
bool pinned;
bool reused;
+ __u64 map_extra;
};
enum extern_type {
@@ -443,6 +443,11 @@ struct extern_desc {
/* local btf_id of the ksym extern's type. */
__u32 type_id;
+ /* BTF fd index to be patched in for insn->off, this is
+ * 0 for vmlinux BTF, index in obj->fd_array for module
+ * BTF
+ */
+ __s16 btf_fd_idx;
} ksym;
};
};
@@ -454,6 +459,41 @@ struct module_btf {
char *name;
__u32 id;
int fd;
+ int fd_array_idx;
+};
+
+enum sec_type {
+ SEC_UNUSED = 0,
+ SEC_RELO,
+ SEC_BSS,
+ SEC_DATA,
+ SEC_RODATA,
+};
+
+struct elf_sec_desc {
+ enum sec_type sec_type;
+ Elf64_Shdr *shdr;
+ Elf_Data *data;
+};
+
+struct elf_state {
+ int fd;
+ const void *obj_buf;
+ size_t obj_buf_sz;
+ Elf *elf;
+ Elf64_Ehdr *ehdr;
+ Elf_Data *symbols;
+ Elf_Data *st_ops_data;
+ size_t shstrndx; /* section index for section name strings */
+ size_t strtabidx;
+ struct elf_sec_desc *secs;
+ int sec_cnt;
+ int maps_shndx;
+ int btf_maps_shndx;
+ __u32 btf_maps_sec_btf_id;
+ int text_shndx;
+ int symbols_shndx;
+ int st_ops_shndx;
};
struct bpf_object {
@@ -471,47 +511,17 @@ struct bpf_object {
struct extern_desc *externs;
int nr_extern;
int kconfig_map_idx;
- int rodata_map_idx;
bool loaded;
bool has_subcalls;
+ bool has_rodata;
struct bpf_gen *gen_loader;
+ /* Information when doing ELF related work. Only valid if efile.elf is not NULL */
+ struct elf_state efile;
/*
- * Information when doing elf related work. Only valid if fd
- * is valid.
- */
- struct {
- int fd;
- const void *obj_buf;
- size_t obj_buf_sz;
- Elf *elf;
- GElf_Ehdr ehdr;
- Elf_Data *symbols;
- Elf_Data *data;
- Elf_Data *rodata;
- Elf_Data *bss;
- Elf_Data *st_ops_data;
- size_t shstrndx; /* section index for section name strings */
- size_t strtabidx;
- struct {
- GElf_Shdr shdr;
- Elf_Data *data;
- } *reloc_sects;
- int nr_reloc_sects;
- int maps_shndx;
- int btf_maps_shndx;
- __u32 btf_maps_sec_btf_id;
- int text_shndx;
- int symbols_shndx;
- int data_shndx;
- int rodata_shndx;
- int bss_shndx;
- int st_ops_shndx;
- } efile;
- /*
- * All loaded bpf_object is linked in a list, which is
+ * All loaded bpf_object are linked in a list, which is
* hidden to caller. bpf_objects__<func> handlers deal with
* all objects.
*/
@@ -539,17 +549,22 @@ struct bpf_object {
void *priv;
bpf_object_clear_priv_t clear_priv;
+ int *fd_array;
+ size_t fd_array_cap;
+ size_t fd_array_cnt;
+
char path[];
};
-#define obj_elf_valid(o) ((o)->efile.elf)
static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
-static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
+static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
+static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
+static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);
void bpf_program__unload(struct bpf_program *prog)
{
@@ -604,7 +619,16 @@ static char *__bpf_program__pin_name(struct bpf_program *prog)
{
char *name, *p;
- name = p = strdup(prog->sec_name);
+ if (libbpf_mode & LIBBPF_STRICT_SEC_NAME)
+ name = strdup(prog->name);
+ else
+ name = strdup(prog->sec_name);
+
+ if (!name)
+ return NULL;
+
+ p = name;
+
while ((p = strchr(p, '/')))
*p = '_';
@@ -691,25 +715,25 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
int nr_progs, err, i;
const char *name;
- GElf_Sym sym;
+ Elf64_Sym *sym;
progs = obj->programs;
nr_progs = obj->nr_programs;
- nr_syms = symbols->d_size / sizeof(GElf_Sym);
+ nr_syms = symbols->d_size / sizeof(Elf64_Sym);
sec_off = 0;
for (i = 0; i < nr_syms; i++) {
- if (!gelf_getsym(symbols, i, &sym))
- continue;
- if (sym.st_shndx != sec_idx)
+ sym = elf_sym_by_idx(obj, i);
+
+ if (sym->st_shndx != sec_idx)
continue;
- if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
+ if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
continue;
- prog_sz = sym.st_size;
- sec_off = sym.st_value;
+ prog_sz = sym->st_size;
+ sec_off = sym->st_value;
- name = elf_sym_str(obj, sym.st_name);
+ name = elf_sym_str(obj, sym->st_name);
if (!name) {
pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
sec_name, sec_off);
@@ -722,7 +746,7 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
return -LIBBPF_ERRNO__FORMAT;
}
- if (sec_idx != obj->efile.text_shndx && GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+ if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
return -ENOTSUP;
}
@@ -755,9 +779,9 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
* as static to enable more permissive BPF verification mode
* with more outside context available to BPF verifier
*/
- if (GELF_ST_BIND(sym.st_info) != STB_LOCAL
- && (GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN
- || GELF_ST_VISIBILITY(sym.st_other) == STV_INTERNAL))
+ if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL
+ && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
+ || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
prog->mark_btf_static = true;
nr_progs++;
@@ -1125,6 +1149,7 @@ static struct bpf_object *bpf_object__new(const char *path,
size_t obj_buf_sz,
const char *obj_name)
{
+ bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST);
struct bpf_object *obj;
char *end;
@@ -1158,24 +1183,21 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.maps_shndx = -1;
obj->efile.btf_maps_shndx = -1;
- obj->efile.data_shndx = -1;
- obj->efile.rodata_shndx = -1;
- obj->efile.bss_shndx = -1;
obj->efile.st_ops_shndx = -1;
obj->kconfig_map_idx = -1;
- obj->rodata_map_idx = -1;
obj->kern_version = get_kernel_version();
obj->loaded = false;
INIT_LIST_HEAD(&obj->list);
- list_add(&obj->list, &bpf_objects_list);
+ if (!strict)
+ list_add(&obj->list, &bpf_objects_list);
return obj;
}
static void bpf_object__elf_finish(struct bpf_object *obj)
{
- if (!obj_elf_valid(obj))
+ if (!obj->efile.elf)
return;
if (obj->efile.elf) {
@@ -1183,13 +1205,10 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
obj->efile.elf = NULL;
}
obj->efile.symbols = NULL;
- obj->efile.data = NULL;
- obj->efile.rodata = NULL;
- obj->efile.bss = NULL;
obj->efile.st_ops_data = NULL;
- zfree(&obj->efile.reloc_sects);
- obj->efile.nr_reloc_sects = 0;
+ zfree(&obj->efile.secs);
+ obj->efile.sec_cnt = 0;
zclose(obj->efile.fd);
obj->efile.obj_buf = NULL;
obj->efile.obj_buf_sz = 0;
@@ -1197,10 +1216,11 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
static int bpf_object__elf_init(struct bpf_object *obj)
{
+ Elf64_Ehdr *ehdr;
int err = 0;
- GElf_Ehdr *ep;
+ Elf *elf;
- if (obj_elf_valid(obj)) {
+ if (obj->efile.elf) {
pr_warn("elf: init internal error\n");
return -LIBBPF_ERRNO__LIBELF;
}
@@ -1210,10 +1230,9 @@ static int bpf_object__elf_init(struct bpf_object *obj)
* obj_buf should have been validated by
* bpf_object__open_buffer().
*/
- obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
- obj->efile.obj_buf_sz);
+ elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
} else {
- obj->efile.fd = open(obj->path, O_RDONLY);
+ obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
if (obj->efile.fd < 0) {
char errmsg[STRERR_BUFSIZE], *cp;
@@ -1223,23 +1242,37 @@ static int bpf_object__elf_init(struct bpf_object *obj)
return err;
}
- obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
+ elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
}
- if (!obj->efile.elf) {
+ if (!elf) {
pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__LIBELF;
goto errout;
}
- if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
+ obj->efile.elf = elf;
+
+ if (elf_kind(elf) != ELF_K_ELF) {
+ err = -LIBBPF_ERRNO__FORMAT;
+ pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
+ goto errout;
+ }
+
+ if (gelf_getclass(elf) != ELFCLASS64) {
+ err = -LIBBPF_ERRNO__FORMAT;
+ pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
+ goto errout;
+ }
+
+ obj->efile.ehdr = ehdr = elf64_getehdr(elf);
+ if (!obj->efile.ehdr) {
pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
- ep = &obj->efile.ehdr;
- if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
+ if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
pr_warn("elf: failed to get section names section index for %s: %s\n",
obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__FORMAT;
@@ -1247,7 +1280,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
}
/* Elf is corrupted/truncated, avoid calling elf_strptr. */
- if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
+ if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
pr_warn("elf: failed to get section names strings from %s: %s\n",
obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__FORMAT;
@@ -1255,8 +1288,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
}
/* Old LLVM set e_machine to EM_NONE */
- if (ep->e_type != ET_REL ||
- (ep->e_machine && ep->e_machine != EM_BPF)) {
+ if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
@@ -1270,11 +1302,11 @@ errout:
static int bpf_object__check_endianness(struct bpf_object *obj)
{
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
return 0;
-#elif __BYTE_ORDER == __BIG_ENDIAN
- if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
@@ -1314,41 +1346,27 @@ static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
return false;
}
-int bpf_object__section_size(const struct bpf_object *obj, const char *name,
- __u32 *size)
+static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
{
int ret = -ENOENT;
+ Elf_Data *data;
+ Elf_Scn *scn;
*size = 0;
- if (!name) {
+ if (!name)
return -EINVAL;
- } else if (!strcmp(name, DATA_SEC)) {
- if (obj->efile.data)
- *size = obj->efile.data->d_size;
- } else if (!strcmp(name, BSS_SEC)) {
- if (obj->efile.bss)
- *size = obj->efile.bss->d_size;
- } else if (!strcmp(name, RODATA_SEC)) {
- if (obj->efile.rodata)
- *size = obj->efile.rodata->d_size;
- } else if (!strcmp(name, STRUCT_OPS_SEC)) {
- if (obj->efile.st_ops_data)
- *size = obj->efile.st_ops_data->d_size;
- } else {
- Elf_Scn *scn = elf_sec_by_name(obj, name);
- Elf_Data *data = elf_sec_data(obj, scn);
- if (data) {
- ret = 0; /* found it */
- *size = data->d_size;
- }
+ scn = elf_sec_by_name(obj, name);
+ data = elf_sec_data(obj, scn);
+ if (data) {
+ ret = 0; /* found it */
+ *size = data->d_size;
}
return *size ? 0 : ret;
}
-int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
- __u32 *off)
+static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off)
{
Elf_Data *symbols = obj->efile.symbols;
const char *sname;
@@ -1357,23 +1375,20 @@ int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
if (!name || !off)
return -EINVAL;
- for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
- GElf_Sym sym;
+ for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
+ Elf64_Sym *sym = elf_sym_by_idx(obj, si);
- if (!gelf_getsym(symbols, si, &sym))
- continue;
- if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
- GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
+ if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL ||
+ ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
continue;
- sname = elf_sym_str(obj, sym.st_name);
+ sname = elf_sym_str(obj, sym->st_name);
if (!sname) {
- pr_warn("failed to get sym name string for var %s\n",
- name);
+ pr_warn("failed to get sym name string for var %s\n", name);
return -EIO;
}
if (strcmp(name, sname) == 0) {
- *off = sym.st_value;
+ *off = sym->st_value;
return 0;
}
}
@@ -1425,17 +1440,55 @@ static size_t bpf_map_mmap_sz(const struct bpf_map *map)
return map_sz;
}
-static char *internal_map_name(struct bpf_object *obj,
- enum libbpf_map_type type)
+static char *internal_map_name(struct bpf_object *obj, const char *real_name)
{
char map_name[BPF_OBJ_NAME_LEN], *p;
- const char *sfx = libbpf_type_to_btf_name[type];
- int sfx_len = max((size_t)7, strlen(sfx));
- int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
- strlen(obj->name));
+ int pfx_len, sfx_len = max((size_t)7, strlen(real_name));
+
+ /* This is one of the more confusing parts of libbpf for various
+ * reasons, some of which are historical. The original idea for naming
+ * internal names was to include as much of BPF object name prefix as
+ * possible, so that it can be distinguished from similar internal
+ * maps of a different BPF object.
+ * As an example, let's say we have bpf_object named 'my_object_name'
+ * and internal map corresponding to '.rodata' ELF section. The final
+ * map name advertised to user and to the kernel will be
+ * 'my_objec.rodata', taking first 8 characters of object name and
+ * entire 7 characters of '.rodata'.
+ * Somewhat confusingly, if internal map ELF section name is shorter
+ * than 7 characters, e.g., '.bss', we still reserve 7 characters
+ * for the suffix, even though we only have 4 actual characters, and
+ * resulting map will be called 'my_objec.bss', not even using all 15
+ * characters allowed by the kernel. Oh well, at least the truncated
+ * object name is somewhat consistent in this case. But if the map
+ * name is '.kconfig', we'll still have entirety of '.kconfig' added
+ * (8 chars) and thus will be left with only first 7 characters of the
+ * object name ('my_obje'). Happy guessing, user, that the final map
+ * name will be "my_obje.kconfig".
+ * Now, with libbpf starting to support arbitrarily named .rodata.*
+ * and .data.* data sections, it's possible that ELF section name is
+ * longer than allowed 15 chars, so we now need to be careful to take
+ * only up to 15 first characters of ELF name, taking no BPF object
+ * name characters at all. So '.rodata.abracadabra' will result in
+ * '.rodata.abracad' kernel and user-visible name.
+ * We need to keep this convoluted logic intact for .data, .bss and
+ * .rodata maps, but for new custom .data.custom and .rodata.custom
+ * maps we use their ELF names as is, not prepending bpf_object name
+ * in front. We still need to truncate them to 15 characters for the
+ * kernel. Full name can be recovered for such maps by using DATASEC
+ * BTF type associated with such map's value type, though.
+ */
+ if (sfx_len >= BPF_OBJ_NAME_LEN)
+ sfx_len = BPF_OBJ_NAME_LEN - 1;
+
+ /* if there are two or more dots in map name, it's a custom dot map */
+ if (strchr(real_name + 1, '.') != NULL)
+ pfx_len = 0;
+ else
+ pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
- sfx_len, libbpf_type_to_btf_name[type]);
+ sfx_len, real_name);
/* sanitise map name to characters allowed by kernel */
for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
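Worked examples of the rules described above, assuming BPF_OBJ_NAME_LEN == 16 (standalone sketch, not patch code):

    #include <stdio.h>
    #include <string.h>

    #define BPF_OBJ_NAME_LEN 16

    static void demo(const char *obj_name, const char *sec_name)
    {
    	int sfx_len = strlen(sec_name) < 7 ? 7 : (int)strlen(sec_name);
    	int pfx_len = 0;

    	if (sfx_len >= BPF_OBJ_NAME_LEN)
    		sfx_len = BPF_OBJ_NAME_LEN - 1;
    	/* single-dot sections get an object-name prefix, custom dot maps don't */
    	if (!strchr(sec_name + 1, '.')) {
    		pfx_len = BPF_OBJ_NAME_LEN - sfx_len - 1;
    		if (pfx_len > (int)strlen(obj_name))
    			pfx_len = (int)strlen(obj_name);
    	}
    	printf("%.*s%.*s\n", pfx_len, obj_name, sfx_len, sec_name);
    }

    int main(void)
    {
    	demo("my_object_name", ".rodata");             /* my_objec.rodata */
    	demo("my_object_name", ".rodata.abracadabra"); /* .rodata.abracad */
    	return 0;
    }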
@@ -1447,7 +1500,7 @@ static char *internal_map_name(struct bpf_object *obj,
static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
- int sec_idx, void *data, size_t data_sz)
+ const char *real_name, int sec_idx, void *data, size_t data_sz)
{
struct bpf_map_def *def;
struct bpf_map *map;
@@ -1460,9 +1513,11 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
map->libbpf_type = type;
map->sec_idx = sec_idx;
map->sec_offset = 0;
- map->name = internal_map_name(obj, type);
- if (!map->name) {
- pr_warn("failed to alloc map name\n");
+ map->real_name = strdup(real_name);
+ map->name = internal_map_name(obj, real_name);
+ if (!map->real_name || !map->name) {
+ zfree(&map->real_name);
+ zfree(&map->name);
return -ENOMEM;
}
@@ -1485,6 +1540,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
map->mmaped = NULL;
pr_warn("failed to alloc map '%s' content buffer: %d\n",
map->name, err);
+ zfree(&map->real_name);
zfree(&map->name);
return err;
}
@@ -1498,34 +1554,43 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
- int err;
+ struct elf_sec_desc *sec_desc;
+ const char *sec_name;
+ int err = 0, sec_idx;
/*
* Populate obj->maps with libbpf internal maps.
*/
- if (obj->efile.data_shndx >= 0) {
- err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
- obj->efile.data_shndx,
- obj->efile.data->d_buf,
- obj->efile.data->d_size);
- if (err)
- return err;
- }
- if (obj->efile.rodata_shndx >= 0) {
- err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
- obj->efile.rodata_shndx,
- obj->efile.rodata->d_buf,
- obj->efile.rodata->d_size);
- if (err)
- return err;
-
- obj->rodata_map_idx = obj->nr_maps - 1;
- }
- if (obj->efile.bss_shndx >= 0) {
- err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
- obj->efile.bss_shndx,
- NULL,
- obj->efile.bss->d_size);
+ for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
+ sec_desc = &obj->efile.secs[sec_idx];
+
+ switch (sec_desc->sec_type) {
+ case SEC_DATA:
+ sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
+ sec_name, sec_idx,
+ sec_desc->data->d_buf,
+ sec_desc->data->d_size);
+ break;
+ case SEC_RODATA:
+ obj->has_rodata = true;
+ sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
+ sec_name, sec_idx,
+ sec_desc->data->d_buf,
+ sec_desc->data->d_size);
+ break;
+ case SEC_BSS:
+ sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
+ sec_name, sec_idx,
+ NULL,
+ sec_desc->data->d_size);
+ break;
+ default:
+ /* skip */
+ break;
+ }
if (err)
return err;
}
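With per-section descriptors, any number of .data.* and .rodata.* sections each become their own internal map. Illustrative BPF-side declarations (hypothetical section names):

    /* each named section becomes its own map, e.g. ".data.counters" */
    int counters[64] SEC(".data.counters");
    const char banner[16] SEC(".rodata.banner") = "hello";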
@@ -1822,7 +1887,7 @@ static int bpf_object__init_kconfig_map(struct bpf_object *obj)
map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
- obj->efile.symbols_shndx,
+ ".kconfig", obj->efile.symbols_shndx,
NULL, map_sz);
if (err)
return err;
@@ -1860,15 +1925,13 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
*
* TODO: Detect array of map and report error.
*/
- nr_syms = symbols->d_size / sizeof(GElf_Sym);
+ nr_syms = symbols->d_size / sizeof(Elf64_Sym);
for (i = 0; i < nr_syms; i++) {
- GElf_Sym sym;
+ Elf64_Sym *sym = elf_sym_by_idx(obj, i);
- if (!gelf_getsym(symbols, i, &sym))
+ if (sym->st_shndx != obj->efile.maps_shndx)
continue;
- if (sym.st_shndx != obj->efile.maps_shndx)
- continue;
- if (GELF_ST_TYPE(sym.st_info) == STT_SECTION)
+ if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
continue;
nr_maps++;
}
@@ -1885,40 +1948,38 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
/* Fill obj->maps using data in "maps" section. */
for (i = 0; i < nr_syms; i++) {
- GElf_Sym sym;
+ Elf64_Sym *sym = elf_sym_by_idx(obj, i);
const char *map_name;
struct bpf_map_def *def;
struct bpf_map *map;
- if (!gelf_getsym(symbols, i, &sym))
- continue;
- if (sym.st_shndx != obj->efile.maps_shndx)
+ if (sym->st_shndx != obj->efile.maps_shndx)
continue;
- if (GELF_ST_TYPE(sym.st_info) == STT_SECTION)
+ if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
continue;
map = bpf_object__add_map(obj);
if (IS_ERR(map))
return PTR_ERR(map);
- map_name = elf_sym_str(obj, sym.st_name);
+ map_name = elf_sym_str(obj, sym->st_name);
if (!map_name) {
pr_warn("failed to get map #%d name sym string for obj %s\n",
i, obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
- if (GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+ if (ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
return -ENOTSUP;
}
map->libbpf_type = LIBBPF_MAP_UNSPEC;
- map->sec_idx = sym.st_shndx;
- map->sec_offset = sym.st_value;
+ map->sec_idx = sym->st_shndx;
+ map->sec_offset = sym->st_value;
pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
map_name, map->sec_idx, map->sec_offset);
- if (sym.st_value + map_def_sz > data->d_size) {
+ if (sym->st_value + map_def_sz > data->d_size) {
pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
obj->path, map_name);
return -EINVAL;
@@ -1926,11 +1987,11 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
map->name = strdup(map_name);
if (!map->name) {
- pr_warn("failed to alloc map name\n");
+ pr_warn("map '%s': failed to alloc map name\n", map_name);
return -ENOMEM;
}
pr_debug("map %d is \"%s\"\n", i, map->name);
- def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
+ def = (struct bpf_map_def *)(data->d_buf + sym->st_value);
/*
* If the definition of the map in the object file fits in
* bpf_map_def, copy it. Any extra fields in our version
@@ -2014,7 +2075,7 @@ static const char *__btf_kind_str(__u16 kind)
case BTF_KIND_VAR: return "var";
case BTF_KIND_DATASEC: return "datasec";
case BTF_KIND_FLOAT: return "float";
- case BTF_KIND_TAG: return "tag";
+ case BTF_KIND_DECL_TAG: return "decl_tag";
default: return "unknown";
}
}
@@ -2264,6 +2325,13 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
}
map_def->pinning = val;
map_def->parts |= MAP_DEF_PINNING;
+ } else if (strcmp(name, "map_extra") == 0) {
+ __u32 map_extra;
+
+ if (!get_map_field_int(map_name, btf, m, &map_extra))
+ return -EINVAL;
+ map_def->map_extra = map_extra;
+ map_def->parts |= MAP_DEF_MAP_EXTRA;
} else {
if (strict) {
pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
@@ -2288,6 +2356,7 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
map->def.value_size = def->value_size;
map->def.max_entries = def->max_entries;
map->def.map_flags = def->map_flags;
+ map->map_extra = def->map_extra;
map->numa_node = def->numa_node;
map->btf_key_type_id = def->key_type_id;
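On the BPF side the new attribute is declared like any other BTF map field; map_extra's meaning is map-type specific (e.g. the bloom filter map uses its low bits for the number of hash functions). A hypothetical definition:

    struct {
    	__uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
    	__uint(max_entries, 1000);
    	__type(value, __u32);
    	__uint(map_extra, 3);	/* nr_hash_funcs */
    } bloom SEC(".maps");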
@@ -2311,7 +2380,10 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
if (def->parts & MAP_DEF_MAX_ENTRIES)
pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
if (def->parts & MAP_DEF_MAP_FLAGS)
- pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags);
+ pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
+ if (def->parts & MAP_DEF_MAP_EXTRA)
+ pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
+ (unsigned long long)def->map_extra);
if (def->parts & MAP_DEF_PINNING)
pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
if (def->parts & MAP_DEF_NUMA_NODE)
@@ -2448,8 +2520,8 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
return -EINVAL;
}
- nr_types = btf__get_nr_types(obj->btf);
- for (i = 1; i <= nr_types; i++) {
+ nr_types = btf__type_cnt(obj->btf);
+ for (i = 1; i < nr_types; i++) {
t = btf__type_by_id(obj->btf, i);
if (!btf_is_datasec(t))
continue;
@@ -2500,12 +2572,13 @@ static int bpf_object__init_maps(struct bpf_object *obj,
static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
- GElf_Shdr sh;
+ Elf64_Shdr *sh;
- if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
+ sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
+ if (!sh)
return false;
- return sh.sh_flags & SHF_EXECINSTR;
+ return sh->sh_flags & SHF_EXECINSTR;
}
static bool btf_needs_sanitization(struct bpf_object *obj)
@@ -2514,9 +2587,9 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
- bool has_tag = kernel_supports(obj, FEAT_BTF_TAG);
+ bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
- return !has_func || !has_datasec || !has_func_global || !has_float || !has_tag;
+ return !has_func || !has_datasec || !has_func_global || !has_float || !has_decl_tag;
}
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
@@ -2525,15 +2598,15 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
- bool has_tag = kernel_supports(obj, FEAT_BTF_TAG);
+ bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
struct btf_type *t;
int i, j, vlen;
- for (i = 1; i <= btf__get_nr_types(btf); i++) {
+ for (i = 1; i < btf__type_cnt(btf); i++) {
t = (struct btf_type *)btf__type_by_id(btf, i);
- if ((!has_datasec && btf_is_var(t)) || (!has_tag && btf_is_tag(t))) {
- /* replace VAR/TAG with INT */
+ if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
+ /* replace VAR/DECL_TAG with INT */
t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
/*
* using size = 1 is the safest choice, 4 will be too
@@ -2640,6 +2713,104 @@ out:
return 0;
}
+static int compare_vsi_off(const void *_a, const void *_b)
+{
+ const struct btf_var_secinfo *a = _a;
+ const struct btf_var_secinfo *b = _b;
+
+ return a->offset - b->offset;
+}
+
+static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
+ struct btf_type *t)
+{
+ __u32 size = 0, off = 0, i, vars = btf_vlen(t);
+ const char *name = btf__name_by_offset(btf, t->name_off);
+ const struct btf_type *t_var;
+ struct btf_var_secinfo *vsi;
+ const struct btf_var *var;
+ int ret;
+
+ if (!name) {
+ pr_debug("No name found in string section for DATASEC kind.\n");
+ return -ENOENT;
+ }
+
+ /* .extern datasec size and var offsets were set correctly during
+ * extern collection step, so just skip straight to sorting variables
+ */
+ if (t->size)
+ goto sort_vars;
+
+ ret = find_elf_sec_sz(obj, name, &size);
+ if (ret || !size || (t->size && t->size != size)) {
+ pr_debug("Invalid size for section %s: %u bytes\n", name, size);
+ return -ENOENT;
+ }
+
+ t->size = size;
+
+ for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
+ t_var = btf__type_by_id(btf, vsi->type);
+ var = btf_var(t_var);
+
+ if (!btf_is_var(t_var)) {
+ pr_debug("Non-VAR type seen in section %s\n", name);
+ return -EINVAL;
+ }
+
+ if (var->linkage == BTF_VAR_STATIC)
+ continue;
+
+ name = btf__name_by_offset(btf, t_var->name_off);
+ if (!name) {
+ pr_debug("No name found in string section for VAR kind\n");
+ return -ENOENT;
+ }
+
+ ret = find_elf_var_offset(obj, name, &off);
+ if (ret) {
+ pr_debug("No offset found in symbol table for VAR %s\n",
+ name);
+ return -ENOENT;
+ }
+
+ vsi->offset = off;
+ }
+
+sort_vars:
+ qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
+ return 0;
+}
+
+static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
+{
+ int err = 0;
+ __u32 i, n = btf__type_cnt(btf);
+
+ for (i = 1; i < n; i++) {
+ struct btf_type *t = btf_type_by_id(btf, i);
+
+ /* Loader needs to fix up some of the things compiler
+ * couldn't get its hands on while emitting BTF. This
+ * is section size and global variable offset. We use
+ * the info from the ELF itself for this purpose.
+ */
+ if (btf_is_datasec(t)) {
+ err = btf_fixup_datasec(obj, btf, t);
+ if (err)
+ break;
+ }
+ }
+
+ return libbpf_err(err);
+}
+
+int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
+{
+ return btf_finalize_data(obj, btf);
+}
+
static int bpf_object__finalize_btf(struct bpf_object *obj)
{
int err;
@@ -2647,7 +2818,7 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
if (!obj->btf)
return 0;
- err = btf__finalize_data(obj, obj->btf);
+ err = btf_finalize_data(obj, obj->btf);
if (err) {
pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
return err;
@@ -2757,8 +2928,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
continue;
- n = btf__get_nr_types(obj->btf);
- for (j = 1; j <= n; j++) {
+ n = btf__type_cnt(obj->btf);
+ for (j = 1; j < n; j++) {
t = btf_type_by_id(obj->btf, j);
if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
continue;
@@ -2778,7 +2949,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
__u32 sz;
/* clone BTF to sanitize a copy and leave the original intact */
- raw_data = btf__get_raw_data(obj->btf, &sz);
+ raw_data = btf__raw_data(obj->btf, &sz);
kern_btf = btf__new(raw_data, sz);
err = libbpf_get_error(kern_btf);
if (err)
@@ -2791,7 +2962,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
if (obj->gen_loader) {
__u32 raw_size = 0;
- const void *raw_data = btf__get_raw_data(kern_btf, &raw_size);
+ const void *raw_data = btf__raw_data(kern_btf, &raw_size);
if (!raw_data)
return -ENOMEM;
@@ -2883,32 +3054,36 @@ static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
return NULL;
}
-static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
+static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
{
+ Elf64_Shdr *shdr;
+
if (!scn)
- return -EINVAL;
+ return NULL;
- if (gelf_getshdr(scn, hdr) != hdr) {
+ shdr = elf64_getshdr(scn);
+ if (!shdr) {
pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
elf_ndxscn(scn), obj->path, elf_errmsg(-1));
- return -EINVAL;
+ return NULL;
}
- return 0;
+ return shdr;
}
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
{
const char *name;
- GElf_Shdr sh;
+ Elf64_Shdr *sh;
if (!scn)
return NULL;
- if (elf_sec_hdr(obj, scn, &sh))
+ sh = elf_sec_hdr(obj, scn);
+ if (!sh)
return NULL;
- name = elf_sec_str(obj, sh.sh_name);
+ name = elf_sec_str(obj, sh->sh_name);
if (!name) {
pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
elf_ndxscn(scn), obj->path, elf_errmsg(-1));
@@ -2936,13 +3111,29 @@ static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
return data;
}
+static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
+{
+ if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
+ return NULL;
+
+ return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
+}
+
+static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
+{
+ if (idx >= data->d_size / sizeof(Elf64_Rel))
+ return NULL;
+
+ return (Elf64_Rel *)data->d_buf + idx;
+}
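These replace the gelf_getsym()/gelf_getrel() copy-out pattern with bounds-checked pointers straight into the section data. A typical caller now looks like (illustrative):

    size_t i, n = symbols->d_size / sizeof(Elf64_Sym);

    for (i = 0; i < n; i++) {
    	Elf64_Sym *sym = elf_sym_by_idx(obj, i);

    	if (!sym)	/* index out of range */
    		break;
    	/* use sym->st_info, sym->st_shndx, ... directly */
    }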
+
static bool is_sec_name_dwarf(const char *name)
{
/* approximation, but the actual list is too long */
return str_has_pfx(name, ".debug_");
}
-static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
+static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
{
/* no special handling of .strtab */
if (hdr->sh_type == SHT_STRTAB)
@@ -2990,6 +3181,7 @@ static int cmp_progs(const void *_a, const void *_b)
static int bpf_object__elf_collect(struct bpf_object *obj)
{
+ struct elf_sec_desc *sec_desc;
Elf *elf = obj->efile.elf;
Elf_Data *btf_ext_data = NULL;
Elf_Data *btf_data = NULL;
@@ -2997,17 +3189,27 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
const char *name;
Elf_Data *data;
Elf_Scn *scn;
- GElf_Shdr sh;
+ Elf64_Shdr *sh;
+
+ /* ELF section indices are 1-based, so allocate +1 element to keep
+ * indexing simple. Also include 0th invalid section into sec_cnt for
+ * simpler and more traditional iteration logic.
+ */
+ obj->efile.sec_cnt = 1 + obj->efile.ehdr->e_shnum;
+ obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
+ if (!obj->efile.secs)
+ return -ENOMEM;
/* a bunch of ELF parsing functionality depends on processing symbols,
* so do the first pass and find the symbol table
*/
scn = NULL;
while ((scn = elf_nextscn(elf, scn)) != NULL) {
- if (elf_sec_hdr(obj, scn, &sh))
+ sh = elf_sec_hdr(obj, scn);
+ if (!sh)
return -LIBBPF_ERRNO__FORMAT;
- if (sh.sh_type == SHT_SYMTAB) {
+ if (sh->sh_type == SHT_SYMTAB) {
if (obj->efile.symbols) {
pr_warn("elf: multiple symbol tables in %s\n", obj->path);
return -LIBBPF_ERRNO__FORMAT;
@@ -3017,9 +3219,11 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
if (!data)
return -LIBBPF_ERRNO__FORMAT;
+ idx = elf_ndxscn(scn);
+
obj->efile.symbols = data;
- obj->efile.symbols_shndx = elf_ndxscn(scn);
- obj->efile.strtabidx = sh.sh_link;
+ obj->efile.symbols_shndx = idx;
+ obj->efile.strtabidx = sh->sh_link;
}
}
@@ -3031,16 +3235,18 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
scn = NULL;
while ((scn = elf_nextscn(elf, scn)) != NULL) {
- idx++;
+ idx = elf_ndxscn(scn);
+ sec_desc = &obj->efile.secs[idx];
- if (elf_sec_hdr(obj, scn, &sh))
+ sh = elf_sec_hdr(obj, scn);
+ if (!sh)
return -LIBBPF_ERRNO__FORMAT;
- name = elf_sec_str(obj, sh.sh_name);
+ name = elf_sec_str(obj, sh->sh_name);
if (!name)
return -LIBBPF_ERRNO__FORMAT;
- if (ignore_elf_section(&sh, name))
+ if (ignore_elf_section(sh, name))
continue;
data = elf_sec_data(obj, scn);
@@ -3049,8 +3255,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
idx, name, (unsigned long)data->d_size,
- (int)sh.sh_link, (unsigned long)sh.sh_flags,
- (int)sh.sh_type);
+ (int)sh->sh_link, (unsigned long)sh->sh_flags,
+ (int)sh->sh_type);
if (strcmp(name, "license") == 0) {
err = bpf_object__init_license(obj, data->d_buf, data->d_size);
@@ -3068,21 +3274,25 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
btf_data = data;
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
btf_ext_data = data;
- } else if (sh.sh_type == SHT_SYMTAB) {
+ } else if (sh->sh_type == SHT_SYMTAB) {
/* already processed during the first pass above */
- } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
- if (sh.sh_flags & SHF_EXECINSTR) {
+ } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
+ if (sh->sh_flags & SHF_EXECINSTR) {
if (strcmp(name, ".text") == 0)
obj->efile.text_shndx = idx;
err = bpf_object__add_programs(obj, data, name, idx);
if (err)
return err;
- } else if (strcmp(name, DATA_SEC) == 0) {
- obj->efile.data = data;
- obj->efile.data_shndx = idx;
- } else if (strcmp(name, RODATA_SEC) == 0) {
- obj->efile.rodata = data;
- obj->efile.rodata_shndx = idx;
+ } else if (strcmp(name, DATA_SEC) == 0 ||
+ str_has_pfx(name, DATA_SEC ".")) {
+ sec_desc->sec_type = SEC_DATA;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
+ } else if (strcmp(name, RODATA_SEC) == 0 ||
+ str_has_pfx(name, RODATA_SEC ".")) {
+ sec_desc->sec_type = SEC_RODATA;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
} else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
obj->efile.st_ops_data = data;
obj->efile.st_ops_shndx = idx;
@@ -3090,37 +3300,29 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
pr_info("elf: skipping unrecognized data section(%d) %s\n",
idx, name);
}
- } else if (sh.sh_type == SHT_REL) {
- int nr_sects = obj->efile.nr_reloc_sects;
- void *sects = obj->efile.reloc_sects;
- int sec = sh.sh_info; /* points to other section */
+ } else if (sh->sh_type == SHT_REL) {
+ int targ_sec_idx = sh->sh_info; /* points to other section */
/* Only do relo for section with exec instructions */
- if (!section_have_execinstr(obj, sec) &&
+ if (!section_have_execinstr(obj, targ_sec_idx) &&
strcmp(name, ".rel" STRUCT_OPS_SEC) &&
strcmp(name, ".rel" MAPS_ELF_SEC)) {
pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
- idx, name, sec,
- elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
+ idx, name, targ_sec_idx,
+ elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
continue;
}
- sects = libbpf_reallocarray(sects, nr_sects + 1,
- sizeof(*obj->efile.reloc_sects));
- if (!sects)
- return -ENOMEM;
-
- obj->efile.reloc_sects = sects;
- obj->efile.nr_reloc_sects++;
-
- obj->efile.reloc_sects[nr_sects].shdr = sh;
- obj->efile.reloc_sects[nr_sects].data = data;
- } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
- obj->efile.bss = data;
- obj->efile.bss_shndx = idx;
+ sec_desc->sec_type = SEC_RELO;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
+ } else if (sh->sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
+ sec_desc->sec_type = SEC_BSS;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
} else {
pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
- (size_t)sh.sh_size);
+ (size_t)sh->sh_size);
}
}
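Relocation sections are no longer appended to a growing reloc_sects array; each descriptor lives at its own ELF section index in efile.secs, so later passes can look it up in O(1) (illustrative, process_relos is a placeholder):

    struct elf_sec_desc *desc = &obj->efile.secs[elf_ndxscn(scn)];

    if (desc->sec_type == SEC_RELO)
    	/* shdr and data were stashed during bpf_object__elf_collect() */
    	process_relos(obj, desc->shdr, desc->data);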
@@ -3136,19 +3338,19 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
return bpf_object__init_btf(obj, btf_data, btf_ext_data);
}
-static bool sym_is_extern(const GElf_Sym *sym)
+static bool sym_is_extern(const Elf64_Sym *sym)
{
- int bind = GELF_ST_BIND(sym->st_info);
+ int bind = ELF64_ST_BIND(sym->st_info);
/* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
return sym->st_shndx == SHN_UNDEF &&
(bind == STB_GLOBAL || bind == STB_WEAK) &&
- GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
+ ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
}
-static bool sym_is_subprog(const GElf_Sym *sym, int text_shndx)
+static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
{
- int bind = GELF_ST_BIND(sym->st_info);
- int type = GELF_ST_TYPE(sym->st_info);
+ int bind = ELF64_ST_BIND(sym->st_info);
+ int type = ELF64_ST_TYPE(sym->st_info);
/* in .text section */
if (sym->st_shndx != text_shndx)
@@ -3171,8 +3373,8 @@ static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
if (!btf)
return -ESRCH;
- n = btf__get_nr_types(btf);
- for (i = 1; i <= n; i++) {
+ n = btf__type_cnt(btf);
+ for (i = 1; i < n; i++) {
t = btf__type_by_id(btf, i);
if (!btf_is_var(t) && !btf_is_func(t))
@@ -3203,8 +3405,8 @@ static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) {
if (!btf)
return -ESRCH;
- n = btf__get_nr_types(btf);
- for (i = 1; i <= n; i++) {
+ n = btf__type_cnt(btf);
+ for (i = 1; i < n; i++) {
t = btf__type_by_id(btf, i);
if (!btf_is_datasec(t))
@@ -3288,8 +3490,8 @@ static int find_int_btf_id(const struct btf *btf)
const struct btf_type *t;
int i, n;
- n = btf__get_nr_types(btf);
- for (i = 1; i <= n; i++) {
+ n = btf__type_cnt(btf);
+ for (i = 1; i < n; i++) {
t = btf__type_by_id(btf, i);
if (btf_is_int(t) && btf_int_bits(t) == 32)
@@ -3346,30 +3548,31 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
int i, n, off, dummy_var_btf_id;
const char *ext_name, *sec_name;
Elf_Scn *scn;
- GElf_Shdr sh;
+ Elf64_Shdr *sh;
if (!obj->efile.symbols)
return 0;
scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
- if (elf_sec_hdr(obj, scn, &sh))
+ sh = elf_sec_hdr(obj, scn);
+ if (!sh)
return -LIBBPF_ERRNO__FORMAT;
dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
if (dummy_var_btf_id < 0)
return dummy_var_btf_id;
- n = sh.sh_size / sh.sh_entsize;
+ n = sh->sh_size / sh->sh_entsize;
pr_debug("looking for externs among %d symbols...\n", n);
for (i = 0; i < n; i++) {
- GElf_Sym sym;
+ Elf64_Sym *sym = elf_sym_by_idx(obj, i);
- if (!gelf_getsym(obj->efile.symbols, i, &sym))
+ if (!sym)
return -LIBBPF_ERRNO__FORMAT;
- if (!sym_is_extern(&sym))
+ if (!sym_is_extern(sym))
continue;
- ext_name = elf_sym_str(obj, sym.st_name);
+ ext_name = elf_sym_str(obj, sym->st_name);
if (!ext_name || !ext_name[0])
continue;
@@ -3391,7 +3594,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
t = btf__type_by_id(obj->btf, ext->btf_id);
ext->name = btf__name_by_offset(obj->btf, t->name_off);
ext->sym_idx = i;
- ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
+ ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
if (ext->sec_btf_id <= 0) {
@@ -3429,11 +3632,6 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return -ENOTSUP;
}
} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
- if (btf_is_func(t) && ext->is_weak) {
- pr_warn("extern weak function %s is unsupported\n",
- ext->name);
- return -ENOTSUP;
- }
ksym_sec = sec;
ext->type = EXT_KSYM;
skip_mods_and_typedefs(obj->btf, t->type,
@@ -3601,9 +3799,14 @@ bpf_object__find_program_by_name(const struct bpf_object *obj,
static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
int shndx)
{
- return shndx == obj->efile.data_shndx ||
- shndx == obj->efile.bss_shndx ||
- shndx == obj->efile.rodata_shndx;
+ switch (obj->efile.secs[shndx].sec_type) {
+ case SEC_BSS:
+ case SEC_DATA:
+ case SEC_RODATA:
+ return true;
+ default:
+ return false;
+ }
}
static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
@@ -3616,22 +3819,25 @@ static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
static enum libbpf_map_type
bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
{
- if (shndx == obj->efile.data_shndx)
- return LIBBPF_MAP_DATA;
- else if (shndx == obj->efile.bss_shndx)
+ if (shndx == obj->efile.symbols_shndx)
+ return LIBBPF_MAP_KCONFIG;
+
+ switch (obj->efile.secs[shndx].sec_type) {
+ case SEC_BSS:
return LIBBPF_MAP_BSS;
- else if (shndx == obj->efile.rodata_shndx)
+ case SEC_DATA:
+ return LIBBPF_MAP_DATA;
+ case SEC_RODATA:
return LIBBPF_MAP_RODATA;
- else if (shndx == obj->efile.symbols_shndx)
- return LIBBPF_MAP_KCONFIG;
- else
+ default:
return LIBBPF_MAP_UNSPEC;
+ }
}
static int bpf_program__record_reloc(struct bpf_program *prog,
struct reloc_desc *reloc_desc,
__u32 insn_idx, const char *sym_name,
- const GElf_Sym *sym, const GElf_Rel *rel)
+ const Elf64_Sym *sym, const Elf64_Rel *rel)
{
struct bpf_insn *insn = &prog->insns[insn_idx];
size_t map_idx, nr_maps = prog->obj->nr_maps;
@@ -3648,7 +3854,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
}
if (sym_is_extern(sym)) {
- int sym_idx = GELF_R_SYM(rel->r_info);
+ int sym_idx = ELF64_R_SYM(rel->r_info);
int i, n = obj->nr_extern;
struct extern_desc *ext;
@@ -3761,7 +3967,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
}
for (map_idx = 0; map_idx < nr_maps; map_idx++) {
map = &obj->maps[map_idx];
- if (map->libbpf_type != type)
+ if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
continue;
pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
prog->name, map_idx, map->name, map->sec_idx,
@@ -3813,9 +4019,8 @@ static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
}
static int
-bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
+bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
{
- Elf_Data *symbols = obj->efile.symbols;
const char *relo_sec_name, *sec_name;
size_t sec_idx = shdr->sh_info;
struct bpf_program *prog;
@@ -3825,8 +4030,8 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data
__u32 insn_idx;
Elf_Scn *scn;
Elf_Data *scn_data;
- GElf_Sym sym;
- GElf_Rel rel;
+ Elf64_Sym *sym;
+ Elf64_Rel *rel;
scn = elf_sec_by_idx(obj, sec_idx);
scn_data = elf_sec_data(obj, scn);
@@ -3841,33 +4046,36 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data
nrels = shdr->sh_size / shdr->sh_entsize;
for (i = 0; i < nrels; i++) {
- if (!gelf_getrel(data, i, &rel)) {
+ rel = elf_rel_by_idx(data, i);
+ if (!rel) {
pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
+
+ sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
+ if (!sym) {
pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
- relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
+ relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (rel.r_offset % BPF_INSN_SZ || rel.r_offset >= scn_data->d_size) {
+ if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
- relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
+ relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i);
return -LIBBPF_ERRNO__FORMAT;
}
- insn_idx = rel.r_offset / BPF_INSN_SZ;
+ insn_idx = rel->r_offset / BPF_INSN_SZ;
/* relocations against static functions are recorded as
* relocations against the section that contains a function;
* in such case, symbol will be STT_SECTION and sym.st_name
* will point to empty string (0), so fetch section name
* instead
*/
- if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
- sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
+ if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
+ sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
else
- sym_name = elf_sym_str(obj, sym.st_name);
+ sym_name = elf_sym_str(obj, sym->st_name);
sym_name = sym_name ?: "<?";
pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
@@ -3889,7 +4097,7 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data
/* adjust insn_idx to local BPF program frame of reference */
insn_idx -= prog->sec_insn_off;
err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
- insn_idx, sym_name, &sym, &rel);
+ insn_idx, sym_name, sym, rel);
if (err)
return err;
@@ -3921,8 +4129,7 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
* LLVM annotates global data differently in BTF, that is,
* only as '.data', '.bss' or '.rodata'.
*/
- ret = btf__find_by_name(obj->btf,
- libbpf_type_to_btf_name[map->libbpf_type]);
+ ret = btf__find_by_name(obj->btf, map->real_name);
}
if (ret < 0)
return ret;
@@ -4015,6 +4222,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
map->btf_key_type_id = info.btf_key_type_id;
map->btf_value_type_id = info.btf_value_type_id;
map->reused = true;
+ map->map_extra = info.map_extra;
return 0;
@@ -4243,7 +4451,7 @@ static int probe_kern_btf_float(void)
strs, sizeof(strs)));
}
-static int probe_kern_btf_tag(void)
+static int probe_kern_btf_decl_tag(void)
{
static const char strs[] = "\0tag";
__u32 types[] = {
@@ -4253,7 +4461,7 @@ static int probe_kern_btf_tag(void)
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
BTF_VAR_STATIC,
/* attr */
- BTF_TYPE_TAG_ENC(1, 2, -1),
+ BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
};
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
@@ -4476,8 +4684,8 @@ static struct kern_feature_desc {
[FEAT_PERF_LINK] = {
"BPF perf link support", probe_perf_link,
},
- [FEAT_BTF_TAG] = {
- "BTF_KIND_TAG support", probe_kern_btf_tag,
+ [FEAT_BTF_DECL_TAG] = {
+ "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
},
};
@@ -4529,7 +4737,8 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
map_info.key_size == map->def.key_size &&
map_info.value_size == map->def.value_size &&
map_info.max_entries == map->def.max_entries &&
- map_info.map_flags == map->def.map_flags);
+ map_info.map_flags == map->def.map_flags &&
+ map_info.map_extra == map->map_extra);
}
static int
@@ -4612,7 +4821,7 @@ static void bpf_map__destroy(struct bpf_map *map);
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
{
- struct bpf_create_map_attr create_attr;
+ struct bpf_create_map_params create_attr;
struct bpf_map_def *def = &map->def;
int err = 0;
@@ -4626,6 +4835,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
create_attr.key_size = def->key_size;
create_attr.value_size = def->value_size;
create_attr.numa_node = map->numa_node;
+ create_attr.map_extra = map->map_extra;
if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
int nr_cpus;
@@ -4700,7 +4910,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
*/
map->fd = 0;
} else {
- map->fd = bpf_create_map_xattr(&create_attr);
+ map->fd = libbpf__bpf_create_map_xattr(&create_attr);
}
if (map->fd < 0 && (create_attr.btf_key_type_id ||
create_attr.btf_value_type_id)) {
@@ -4715,7 +4925,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
create_attr.btf_value_type_id = 0;
map->btf_key_type_id = 0;
map->btf_value_type_id = 0;
- map->fd = bpf_create_map_xattr(&create_attr);
+ map->fd = libbpf__bpf_create_map_xattr(&create_attr);
}
err = map->fd < 0 ? -errno : 0;
@@ -4892,8 +5102,8 @@ static int bpf_core_add_cands(struct bpf_core_cand *local_cand,
size_t targ_essent_len;
int n, i;
- n = btf__get_nr_types(targ_btf);
- for (i = targ_start_id; i <= n; i++) {
+ n = btf__type_cnt(targ_btf);
+ for (i = targ_start_id; i < n; i++) {
t = btf__type_by_id(targ_btf, i);
if (btf_kind(t) != btf_kind(local_cand->t))
continue;
@@ -5068,7 +5278,7 @@ bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 l
err = bpf_core_add_cands(&local_cand, local_essent_len,
obj->btf_modules[i].btf,
obj->btf_modules[i].name,
- btf__get_nr_types(obj->btf_vmlinux) + 1,
+ btf__type_cnt(obj->btf_vmlinux),
cands);
if (err)
goto err_out;
@@ -5212,7 +5422,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
* relocated, so it's enough to just subtract in-section offset
*/
insn_idx = insn_idx - prog->sec_insn_off;
- if (insn_idx > prog->insns_cnt)
+ if (insn_idx >= prog->insns_cnt)
return -EINVAL;
insn = &prog->insns[insn_idx];
@@ -5406,7 +5616,13 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
case RELO_EXTERN_FUNC:
ext = &obj->externs[relo->sym_off];
insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
- insn[0].imm = ext->ksym.kernel_btf_id;
+ if (ext->is_set) {
+ insn[0].imm = ext->ksym.kernel_btf_id;
+ insn[0].off = ext->ksym.btf_fd_idx;
+ } else { /* unresolved weak kfunc */
+ insn[0].imm = 0;
+ insn[0].off = 0;
+ }
break;
case RELO_SUBPROG_ADDR:
if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
@@ -5931,10 +6147,10 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
}
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
- GElf_Shdr *shdr, Elf_Data *data);
+ Elf64_Shdr *shdr, Elf_Data *data);
static int bpf_object__collect_map_relos(struct bpf_object *obj,
- GElf_Shdr *shdr, Elf_Data *data)
+ Elf64_Shdr *shdr, Elf_Data *data)
{
const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
int i, j, nrels, new_sz;
@@ -5943,10 +6159,9 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
struct bpf_map *map = NULL, *targ_map;
const struct btf_member *member;
const char *name, *mname;
- Elf_Data *symbols;
unsigned int moff;
- GElf_Sym sym;
- GElf_Rel rel;
+ Elf64_Sym *sym;
+ Elf64_Rel *rel;
void *tmp;
if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
@@ -5955,28 +6170,30 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
if (!sec)
return -EINVAL;
- symbols = obj->efile.symbols;
nrels = shdr->sh_size / shdr->sh_entsize;
for (i = 0; i < nrels; i++) {
- if (!gelf_getrel(data, i, &rel)) {
+ rel = elf_rel_by_idx(data, i);
+ if (!rel) {
pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
+
+ sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
+ if (!sym) {
pr_warn(".maps relo #%d: symbol %zx not found\n",
- i, (size_t)GELF_R_SYM(rel.r_info));
+ i, (size_t)ELF64_R_SYM(rel->r_info));
return -LIBBPF_ERRNO__FORMAT;
}
- name = elf_sym_str(obj, sym.st_name) ?: "<?>";
- if (sym.st_shndx != obj->efile.btf_maps_shndx) {
+ name = elf_sym_str(obj, sym->st_name) ?: "<?>";
+ if (sym->st_shndx != obj->efile.btf_maps_shndx) {
pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
i, name);
return -LIBBPF_ERRNO__RELOC;
}
- pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
- i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
- (size_t)rel.r_offset, sym.st_name, name);
+ pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
+ i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
+ (size_t)rel->r_offset, sym->st_name, name);
for (j = 0; j < obj->nr_maps; j++) {
map = &obj->maps[j];
@@ -5984,13 +6201,13 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
continue;
vi = btf_var_secinfos(sec) + map->btf_var_idx;
- if (vi->offset <= rel.r_offset &&
- rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
+ if (vi->offset <= rel->r_offset &&
+ rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
break;
}
if (j == obj->nr_maps) {
- pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
- i, name, (size_t)rel.r_offset);
+ pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
+ i, name, (size_t)rel->r_offset);
return -EINVAL;
}
@@ -6017,10 +6234,10 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
return -EINVAL;
moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
- if (rel.r_offset - vi->offset < moff)
+ if (rel->r_offset - vi->offset < moff)
return -EINVAL;
- moff = rel.r_offset - vi->offset - moff;
+ moff = rel->r_offset - vi->offset - moff;
/* here we use BPF pointer size, which is always 64 bit, as we
* are parsing an ELF file that was built for the BPF target
*/
@@ -6065,10 +6282,18 @@ static int bpf_object__collect_relos(struct bpf_object *obj)
{
int i, err;
- for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
- GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
- Elf_Data *data = obj->efile.reloc_sects[i].data;
- int idx = shdr->sh_info;
+ for (i = 0; i < obj->efile.sec_cnt; i++) {
+ struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
+ Elf64_Shdr *shdr;
+ Elf_Data *data;
+ int idx;
+
+ if (sec_desc->sec_type != SEC_RELO)
+ continue;
+
+ shdr = sec_desc->shdr;
+ data = sec_desc->data;
+ idx = shdr->sh_info;
if (shdr->sh_type != SHT_REL) {
pr_warn("internal error at %d\n", __LINE__);
@@ -6191,6 +6416,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
char *license, __u32 kern_version, int *pfd)
{
struct bpf_prog_load_params load_attr = {};
+ struct bpf_object *obj = prog->obj;
char *cp, errmsg[STRERR_BUFSIZE];
size_t log_buf_size = 0;
char *log_buf = NULL;
@@ -6211,7 +6437,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.prog_type = prog->type;
load_attr.expected_attach_type = prog->expected_attach_type;
- if (kernel_supports(prog->obj, FEAT_PROG_NAME))
+ if (kernel_supports(obj, FEAT_PROG_NAME))
load_attr.name = prog->name;
load_attr.insns = insns;
load_attr.insn_cnt = insns_cnt;
@@ -6224,8 +6450,8 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.prog_ifindex = prog->prog_ifindex;
/* specify func_info/line_info only if kernel supports them */
- btf_fd = bpf_object__btf_fd(prog->obj);
- if (btf_fd >= 0 && kernel_supports(prog->obj, FEAT_BTF_FUNC)) {
+ btf_fd = bpf_object__btf_fd(obj);
+ if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
load_attr.prog_btf_fd = btf_fd;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
@@ -6236,6 +6462,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
}
load_attr.log_level = prog->log_level;
load_attr.prog_flags = prog->prog_flags;
+ load_attr.fd_array = obj->fd_array;
/* adjust load_attr if sec_def provides custom preload callback */
if (prog->sec_def && prog->sec_def->preload_fn) {
@@ -6247,9 +6474,9 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
}
}
- if (prog->obj->gen_loader) {
- bpf_gen__prog_load(prog->obj->gen_loader, &load_attr,
- prog - prog->obj->programs);
+ if (obj->gen_loader) {
+ bpf_gen__prog_load(obj->gen_loader, &load_attr,
+ prog - obj->programs);
*pfd = -1;
return 0;
}
@@ -6270,16 +6497,21 @@ retry_load:
if (log_buf && load_attr.log_level)
pr_debug("verifier log:\n%s", log_buf);
- if (prog->obj->rodata_map_idx >= 0 &&
- kernel_supports(prog->obj, FEAT_PROG_BIND_MAP)) {
- struct bpf_map *rodata_map =
- &prog->obj->maps[prog->obj->rodata_map_idx];
+ if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
+ struct bpf_map *map;
+ int i;
+
+ for (i = 0; i < obj->nr_maps; i++) {
+ map = &prog->obj->maps[i];
+ if (map->libbpf_type != LIBBPF_MAP_RODATA)
+ continue;
- if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warn("prog '%s': failed to bind .rodata map: %s\n",
- prog->name, cp);
- /* Don't fail hard if can't bind rodata. */
+ if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) {
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ pr_warn("prog '%s': failed to bind .rodata map: %s\n",
+ prog->name, cp);
+ /* Don't fail hard if can't bind rodata. */
+ }
}
}
@@ -6343,16 +6575,13 @@ static int bpf_program__record_externs(struct bpf_program *prog)
case RELO_EXTERN_VAR:
if (ext->type != EXT_KSYM)
continue;
- if (!ext->ksym.type_id) {
- pr_warn("typeless ksym %s is not supported yet\n",
- ext->name);
- return -ENOTSUP;
- }
- bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_VAR,
- relo->insn_idx);
+ bpf_gen__record_extern(obj->gen_loader, ext->name,
+ ext->is_weak, !ext->ksym.type_id,
+ BTF_KIND_VAR, relo->insn_idx);
break;
case RELO_EXTERN_FUNC:
- bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_FUNC,
+ bpf_gen__record_extern(obj->gen_loader, ext->name,
+ ext->is_weak, false, BTF_KIND_FUNC,
relo->insn_idx);
break;
default:
@@ -6438,8 +6667,6 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
out:
if (err)
pr_warn("failed to load program '%s'\n", prog->name);
- zfree(&prog->insns);
- prog->insns_cnt = 0;
return libbpf_err(err);
}
@@ -6660,7 +6887,7 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, &opts));
}
-int bpf_object__unload(struct bpf_object *obj)
+static int bpf_object_unload(struct bpf_object *obj)
{
size_t i;
@@ -6679,6 +6906,8 @@ int bpf_object__unload(struct bpf_object *obj)
return 0;
}
+int bpf_object__unload(struct bpf_object *obj) __attribute__((alias("bpf_object_unload")));
+
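The alias lets internal call sites use the undeprecated bpf_object_unload() without tripping deprecation warnings, while still exporting the public symbol. The same pattern in isolation (names hypothetical):

	/* internal implementation; library code calls this name directly */
	static int impl_unload(void *obj)
	{
		return 0;
	}
	/* the exported, deprecated-in-headers symbol resolves to the same code */
	int public_unload(void *obj) __attribute__((alias("impl_unload")));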
static int bpf_object__sanitize_maps(struct bpf_object *obj)
{
struct bpf_map *m;
@@ -6752,13 +6981,14 @@ out:
static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
__u16 kind, struct btf **res_btf,
- int *res_btf_fd)
+ struct module_btf **res_mod_btf)
{
- int i, id, btf_fd, err;
+ struct module_btf *mod_btf;
struct btf *btf;
+ int i, id, err;
btf = obj->btf_vmlinux;
- btf_fd = 0;
+ mod_btf = NULL;
id = btf__find_by_name_kind(btf, ksym_name, kind);
if (id == -ENOENT) {
@@ -6767,10 +6997,10 @@ static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
return err;
for (i = 0; i < obj->btf_module_cnt; i++) {
- btf = obj->btf_modules[i].btf;
- /* we assume module BTF FD is always >0 */
- btf_fd = obj->btf_modules[i].fd;
- id = btf__find_by_name_kind(btf, ksym_name, kind);
+ /* we assume module_btf's BTF FD is always >0 */
+ mod_btf = &obj->btf_modules[i];
+ btf = mod_btf->btf;
+ id = btf__find_by_name_kind_own(btf, ksym_name, kind);
if (id != -ENOENT)
break;
}
@@ -6779,7 +7009,7 @@ static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
return -ESRCH;
*res_btf = btf;
- *res_btf_fd = btf_fd;
+ *res_mod_btf = mod_btf;
return id;
}
@@ -6788,14 +7018,15 @@ static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
{
const struct btf_type *targ_var, *targ_type;
__u32 targ_type_id, local_type_id;
+ struct module_btf *mod_btf = NULL;
const char *targ_var_name;
- int id, btf_fd = 0, err;
struct btf *btf = NULL;
+ int id, err;
- id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &btf_fd);
- if (id == -ESRCH && ext->is_weak) {
- return 0;
- } else if (id < 0) {
+ id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
+ if (id < 0) {
+ if (id == -ESRCH && ext->is_weak)
+ return 0;
pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
ext->name);
return id;
@@ -6827,7 +7058,7 @@ static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
}
ext->is_set = true;
- ext->ksym.kernel_btf_obj_fd = btf_fd;
+ ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
ext->ksym.kernel_btf_id = id;
pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
ext->name, id, btf_kind_str(targ_var), targ_var_name);
@@ -6839,26 +7070,22 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
struct extern_desc *ext)
{
int local_func_proto_id, kfunc_proto_id, kfunc_id;
+ struct module_btf *mod_btf = NULL;
const struct btf_type *kern_func;
struct btf *kern_btf = NULL;
- int ret, kern_btf_fd = 0;
+ int ret;
local_func_proto_id = ext->ksym.type_id;
- kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC,
- &kern_btf, &kern_btf_fd);
+ kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf);
if (kfunc_id < 0) {
- pr_warn("extern (func ksym) '%s': not found in kernel BTF\n",
+ if (kfunc_id == -ESRCH && ext->is_weak)
+ return 0;
+ pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
ext->name);
return kfunc_id;
}
- if (kern_btf != obj->btf_vmlinux) {
- pr_warn("extern (func ksym) '%s': function in kernel module is not supported\n",
- ext->name);
- return -ENOTSUP;
- }
-
kern_func = btf__type_by_id(kern_btf, kfunc_id);
kfunc_proto_id = kern_func->type;
@@ -6870,9 +7097,30 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
return -EINVAL;
}
+ /* set index for module BTF fd in fd_array, if unset */
+ if (mod_btf && !mod_btf->fd_array_idx) {
+ /* insn->off is s16 */
+ if (obj->fd_array_cnt == INT16_MAX) {
+ pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
+ ext->name, mod_btf->fd_array_idx);
+ return -E2BIG;
+ }
+ /* Cannot use index 0 for module BTF fd */
+ if (!obj->fd_array_cnt)
+ obj->fd_array_cnt = 1;
+
+ ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
+ obj->fd_array_cnt + 1);
+ if (ret)
+ return ret;
+ mod_btf->fd_array_idx = obj->fd_array_cnt;
+ /* we assume module BTF FD is always >0 */
+ obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
+ }
+
ext->is_set = true;
- ext->ksym.kernel_btf_obj_fd = kern_btf_fd;
ext->ksym.kernel_btf_id = kfunc_id;
+ ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
ext->name, kfunc_id);
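After resolution, the relocated kfunc call carries both pieces of state: insn->imm is the kfunc's BTF type ID and insn->off indexes obj->fd_array, where slot 0 is reserved for vmlinux BTF and slots >0 hold module BTF FDs. Roughly, the patched instruction ends up as (illustrative only; see the RELO_EXTERN_FUNC handling in bpf_object__relocate_data() above):

	struct bpf_insn call = {
		.code    = BPF_JMP | BPF_CALL,
		.src_reg = BPF_PSEUDO_KFUNC_CALL,
		.imm     = ext->ksym.kernel_btf_id,	/* type ID in (module) BTF */
		.off     = ext->ksym.btf_fd_idx,	/* 0 => vmlinux, >0 => fd_array slot */
	};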
@@ -7032,6 +7280,9 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
err = bpf_gen__finish(obj->gen_loader);
}
+ /* clean up fd_array */
+ zfree(&obj->fd_array);
+
/* clean up module BTFs */
for (i = 0; i < obj->btf_module_cnt; i++) {
close(obj->btf_modules[i].fd);
@@ -7056,7 +7307,7 @@ out:
if (obj->maps[i].pinned && !obj->maps[i].reused)
bpf_map__unpin(&obj->maps[i], NULL);
- bpf_object__unload(obj);
+ bpf_object_unload(obj);
pr_warn("failed to load object '%s'\n", obj->path);
return libbpf_err(err);
}
@@ -7122,8 +7373,7 @@ static int check_path(const char *path)
return err;
}
-int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
- int instance)
+static int bpf_program_pin_instance(struct bpf_program *prog, const char *path, int instance)
{
char *cp, errmsg[STRERR_BUFSIZE];
int err;
@@ -7158,8 +7408,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
return 0;
}
-int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
- int instance)
+static int bpf_program_unpin_instance(struct bpf_program *prog, const char *path, int instance)
{
int err;
@@ -7187,6 +7436,12 @@ int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
return 0;
}
+__attribute__((alias("bpf_program_pin_instance")))
+int bpf_program__pin_instance(struct bpf_program *prog, const char *path, int instance);
+
+__attribute__((alias("bpf_program_unpin_instance")))
+int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, int instance);
+
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
int i, err;
@@ -7211,7 +7466,7 @@ int bpf_program__pin(struct bpf_program *prog, const char *path)
if (prog->instances.nr == 1) {
/* don't create subdirs when pinning single instance */
- return bpf_program__pin_instance(prog, path, 0);
+ return bpf_program_pin_instance(prog, path, 0);
}
for (i = 0; i < prog->instances.nr; i++) {
@@ -7227,7 +7482,7 @@ int bpf_program__pin(struct bpf_program *prog, const char *path)
goto err_unpin;
}
- err = bpf_program__pin_instance(prog, buf, i);
+ err = bpf_program_pin_instance(prog, buf, i);
if (err)
goto err_unpin;
}
@@ -7245,7 +7500,7 @@ err_unpin:
else if (len >= PATH_MAX)
continue;
- bpf_program__unpin_instance(prog, buf, i);
+ bpf_program_unpin_instance(prog, buf, i);
}
rmdir(path);
@@ -7273,7 +7528,7 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path)
if (prog->instances.nr == 1) {
/* don't create subdirs when pinning single instance */
- return bpf_program__unpin_instance(prog, path, 0);
+ return bpf_program_unpin_instance(prog, path, 0);
}
for (i = 0; i < prog->instances.nr; i++) {
@@ -7286,7 +7541,7 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path)
else if (len >= PATH_MAX)
return libbpf_err(-ENAMETOOLONG);
- err = bpf_program__unpin_instance(prog, buf, i);
+ err = bpf_program_unpin_instance(prog, buf, i);
if (err)
return err;
}
@@ -7647,6 +7902,7 @@ static void bpf_map__destroy(struct bpf_map *map)
}
zfree(&map->name);
+ zfree(&map->real_name);
zfree(&map->pin_path);
if (map->fd >= 0)
@@ -7665,7 +7921,7 @@ void bpf_object__close(struct bpf_object *obj)
bpf_gen__free(obj->gen_loader);
bpf_object__elf_finish(obj);
- bpf_object__unload(obj);
+ bpf_object_unload(obj);
btf__free(obj->btf);
btf_ext__free(obj->btf_ext);
@@ -7694,6 +7950,10 @@ struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
struct bpf_object *next;
+ bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST);
+
+ if (strict)
+ return NULL;
if (!prev)
next = list_first_entry(&bpf_objects_list,
@@ -7800,6 +8060,12 @@ __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
struct bpf_program *
bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
{
+ return bpf_object__next_program(obj, prev);
+}
+
+struct bpf_program *
+bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
+{
struct bpf_program *prog = prev;
do {
@@ -7812,6 +8078,12 @@ bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
struct bpf_program *
bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
{
+ return bpf_object__prev_program(obj, next);
+}
+
+struct bpf_program *
+bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
+{
struct bpf_program *prog = next;
do {
@@ -7892,6 +8164,16 @@ size_t bpf_program__size(const struct bpf_program *prog)
return prog->insns_cnt * BPF_INSN_SZ;
}
+const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
+{
+ return prog->insns;
+}
+
+size_t bpf_program__insn_cnt(const struct bpf_program *prog)
+{
+ return prog->insns_cnt;
+}
+
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
bpf_program_prep_t prep)
{
@@ -8030,6 +8312,8 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("tp/", TRACEPOINT, 0, SEC_NONE, attach_tp),
SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
SEC_DEF("raw_tp/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("raw_tracepoint.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("raw_tp.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
SEC_DEF("tp_btf/", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
SEC_DEF("fentry/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
SEC_DEF("fmod_ret/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
@@ -8219,7 +8503,7 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
/* Collect the reloc from ELF and populate the st_ops->progs[] */
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
- GElf_Shdr *shdr, Elf_Data *data)
+ Elf64_Shdr *shdr, Elf_Data *data)
{
const struct btf_member *member;
struct bpf_struct_ops *st_ops;
@@ -8227,58 +8511,58 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
unsigned int shdr_idx;
const struct btf *btf;
struct bpf_map *map;
- Elf_Data *symbols;
unsigned int moff, insn_idx;
const char *name;
__u32 member_idx;
- GElf_Sym sym;
- GElf_Rel rel;
+ Elf64_Sym *sym;
+ Elf64_Rel *rel;
int i, nrels;
- symbols = obj->efile.symbols;
btf = obj->btf;
nrels = shdr->sh_size / shdr->sh_entsize;
for (i = 0; i < nrels; i++) {
- if (!gelf_getrel(data, i, &rel)) {
+ rel = elf_rel_by_idx(data, i);
+ if (!rel) {
pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
+ sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
+ if (!sym) {
pr_warn("struct_ops reloc: symbol %zx not found\n",
- (size_t)GELF_R_SYM(rel.r_info));
+ (size_t)ELF64_R_SYM(rel->r_info));
return -LIBBPF_ERRNO__FORMAT;
}
- name = elf_sym_str(obj, sym.st_name) ?: "<?>";
- map = find_struct_ops_map_by_offset(obj, rel.r_offset);
+ name = elf_sym_str(obj, sym->st_name) ?: "<?>";
+ map = find_struct_ops_map_by_offset(obj, rel->r_offset);
if (!map) {
- pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
- (size_t)rel.r_offset);
+ pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
+ (size_t)rel->r_offset);
return -EINVAL;
}
- moff = rel.r_offset - map->sec_offset;
- shdr_idx = sym.st_shndx;
+ moff = rel->r_offset - map->sec_offset;
+ shdr_idx = sym->st_shndx;
st_ops = map->st_ops;
- pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
+ pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
map->name,
- (long long)(rel.r_info >> 32),
- (long long)sym.st_value,
- shdr_idx, (size_t)rel.r_offset,
- map->sec_offset, sym.st_name, name);
+ (long long)(rel->r_info >> 32),
+ (long long)sym->st_value,
+ shdr_idx, (size_t)rel->r_offset,
+ map->sec_offset, sym->st_name, name);
if (shdr_idx >= SHN_LORESERVE) {
- pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
- map->name, (size_t)rel.r_offset, shdr_idx);
+ pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
+ map->name, (size_t)rel->r_offset, shdr_idx);
return -LIBBPF_ERRNO__RELOC;
}
- if (sym.st_value % BPF_INSN_SZ) {
+ if (sym->st_value % BPF_INSN_SZ) {
pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
- map->name, (unsigned long long)sym.st_value);
+ map->name, (unsigned long long)sym->st_value);
return -LIBBPF_ERRNO__FORMAT;
}
- insn_idx = sym.st_value / BPF_INSN_SZ;
+ insn_idx = sym->st_value / BPF_INSN_SZ;
member = find_member_by_offset(st_ops->type, moff * 8);
if (!member) {
@@ -8412,28 +8696,27 @@ int libbpf_find_vmlinux_btf_id(const char *name,
static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
{
- struct bpf_prog_info_linear *info_linear;
- struct bpf_prog_info *info;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
struct btf *btf;
int err;
- info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
- err = libbpf_get_error(info_linear);
+ err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len);
if (err) {
- pr_warn("failed get_prog_info_linear for FD %d\n",
- attach_prog_fd);
+ pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n",
+ attach_prog_fd, err);
return err;
}
err = -EINVAL;
- info = &info_linear->info;
- if (!info->btf_id) {
+ if (!info.btf_id) {
pr_warn("The target program doesn't have BTF\n");
goto out;
}
- btf = btf__load_from_kernel_by_id(info->btf_id);
- if (libbpf_get_error(btf)) {
- pr_warn("Failed to get BTF of the program\n");
+ btf = btf__load_from_kernel_by_id(info.btf_id);
+ err = libbpf_get_error(btf);
+ if (err) {
+ pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
goto out;
}
err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
@@ -8443,7 +8726,6 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
goto out;
}
out:
- free(info_linear);
return err;
}
@@ -8559,9 +8841,30 @@ const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
return map ? &map->def : libbpf_err_ptr(-EINVAL);
}
+static bool map_uses_real_name(const struct bpf_map *map)
+{
+ /* Since libbpf started to support custom .data.* and .rodata.* maps,
+ * their user-visible name differs from the kernel-visible name. Users
+ * see such a map's corresponding ELF section name as the map name.
+ * This check distinguishes .data/.rodata from .data.* and .rodata.*
+ * maps to know which name has to be returned to the user.
+ */
+ if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
+ return true;
+ if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
+ return true;
+ return false;
+}
+
const char *bpf_map__name(const struct bpf_map *map)
{
- return map ? map->name : NULL;
+ if (!map)
+ return NULL;
+
+ if (map_uses_real_name(map))
+ return map->real_name;
+
+ return map->name;
}
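A usage sketch, assuming an object file with a custom SEC(".data.cfg") variable (object and section names are hypothetical):

	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
	struct bpf_map *map = bpf_object__find_map_by_name(obj, ".data.cfg");

	if (map)	/* custom datasec maps report their ELF section name */
		printf("map name: %s\n", bpf_map__name(map)); /* ".data.cfg" */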
enum bpf_map_type bpf_map__type(const struct bpf_map *map)
@@ -8590,6 +8893,19 @@ int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
return 0;
}
+__u64 bpf_map__map_extra(const struct bpf_map *map)
+{
+ return map->map_extra;
+}
+
+int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
+{
+ if (map->fd >= 0)
+ return libbpf_err(-EBUSY);
+ map->map_extra = map_extra;
+ return 0;
+}
+
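map_extra is an opaque __u64 passed through to the kernel; its meaning is map-type specific (the motivating case in this series is the Bloom filter map, where it carries the number of hash functions). A sketch with a hypothetical map named "bloom":

	struct bpf_map *map = bpf_object__find_map_by_name(obj, "bloom");

	/* must be set before bpf_object__load(); fails with -EBUSY afterwards */
	if (!bpf_map__set_map_extra(map, 3 /* nr of hash functions */))
		printf("map_extra: %llu\n",
		       (unsigned long long)bpf_map__map_extra(map));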
__u32 bpf_map__numa_node(const struct bpf_map *map)
{
return map->numa_node;
@@ -8744,6 +9060,12 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
struct bpf_map *
bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
{
+ return bpf_object__next_map(obj, prev);
+}
+
+struct bpf_map *
+bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
+{
if (prev == NULL)
return obj->maps;
@@ -8753,6 +9075,12 @@ bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
struct bpf_map *
bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
{
+ return bpf_object__prev_map(obj, next);
+}
+
+struct bpf_map *
+bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
+{
if (next == NULL) {
if (!obj->nr_maps)
return NULL;
@@ -8768,7 +9096,22 @@ bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
struct bpf_map *pos;
bpf_object__for_each_map(pos, obj) {
- if (pos->name && !strcmp(pos->name, name))
+ /* if it's a special internal map name (which always starts
+ * with a dot), then check whether that special name matches
+ * the real map name (ELF section name)
+ */
+ if (name[0] == '.') {
+ if (pos->real_name && strcmp(pos->real_name, name) == 0)
+ return pos;
+ continue;
+ }
+ /* otherwise map name has to be an exact match */
+ if (map_uses_real_name(pos)) {
+ if (strcmp(pos->real_name, name) == 0)
+ return pos;
+ continue;
+ }
+ if (strcmp(pos->name, name) == 0)
return pos;
}
return errno = ENOENT, NULL;
@@ -9273,7 +9616,7 @@ static int append_to_file(const char *file, const char *fmt, ...)
int fd, n, err = 0;
va_list ap;
- fd = open(file, O_WRONLY | O_APPEND, 0);
+ fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
if (fd < 0)
return -errno;
@@ -9787,12 +10130,26 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *pr
static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie)
{
- const char *tp_name;
+ static const char *const prefixes[] = {
+ "raw_tp/",
+ "raw_tracepoint/",
+ "raw_tp.w/",
+ "raw_tracepoint.w/",
+ };
+ size_t i;
+ const char *tp_name = NULL;
- if (str_has_pfx(prog->sec_name, "raw_tp/"))
- tp_name = prog->sec_name + sizeof("raw_tp/") - 1;
- else
- tp_name = prog->sec_name + sizeof("raw_tracepoint/") - 1;
+ for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
+ if (str_has_pfx(prog->sec_name, prefixes[i])) {
+ tp_name = prog->sec_name + strlen(prefixes[i]);
+ break;
+ }
+ }
+ if (!tp_name) {
+ pr_warn("prog '%s': invalid section name '%s'\n",
+ prog->name, prog->sec_name);
+ return libbpf_err_ptr(-EINVAL);
+ }
return bpf_program__attach_raw_tracepoint(prog, tp_name);
}
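With the two new section prefixes, a writable raw tracepoint program auto-attaches via the generic bpf_program__attach(). A minimal BPF-side sketch (tracepoint name hypothetical; assumes bpf_helpers.h is included):

	char LICENSE[] SEC("license") = "GPL";

	SEC("raw_tp.w/my_writable_tp")
	int handle_tp(struct bpf_raw_tracepoint_args *ctx)
	{
		return 0;
	}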
@@ -10904,7 +11261,7 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
int fd, err = 0, len;
char buf[128];
- fd = open(fcpu, O_RDONLY);
+ fd = open(fcpu, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
err = -errno;
pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index e35490c54eb3..9de0f299706b 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -150,6 +150,7 @@ struct bpf_object_load_attr {
/* Load/unload object into/from kernel */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);
LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
+LIBBPF_DEPRECATED_SINCE(0, 6, "bpf_object__unload() is deprecated, use bpf_object__close() instead")
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
@@ -167,7 +168,8 @@ LIBBPF_API struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
const char *name);
-LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "track bpf_objects in application code instead")
+struct bpf_object *bpf_object__next(struct bpf_object *prev);
#define bpf_object__for_each_safe(pos, tmp) \
for ((pos) = bpf_object__next(NULL), \
(tmp) = bpf_object__next(pos); \
@@ -189,16 +191,22 @@ LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,
/* Accessors of bpf_program */
struct bpf_program;
-LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog,
- const struct bpf_object *obj);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_program() instead")
+struct bpf_program *bpf_program__next(struct bpf_program *prog,
+ const struct bpf_object *obj);
+LIBBPF_API struct bpf_program *
+bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog);
-#define bpf_object__for_each_program(pos, obj) \
- for ((pos) = bpf_program__next(NULL, (obj)); \
- (pos) != NULL; \
- (pos) = bpf_program__next((pos), (obj)))
+#define bpf_object__for_each_program(pos, obj) \
+ for ((pos) = bpf_object__next_program((obj), NULL); \
+ (pos) != NULL; \
+ (pos) = bpf_object__next_program((obj), (pos)))
-LIBBPF_API struct bpf_program *bpf_program__prev(struct bpf_program *prog,
- const struct bpf_object *obj);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_program() instead")
+struct bpf_program *bpf_program__prev(struct bpf_program *prog,
+ const struct bpf_object *obj);
+LIBBPF_API struct bpf_program *
+bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog);
typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *);
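Migration from the deprecated iterators is mechanical: the object moves to the first argument and NULL still starts the iteration. E.g. (obj assumed opened earlier):

	struct bpf_program *prog = NULL;

	/* what bpf_object__for_each_program() now expands to */
	while ((prog = bpf_object__next_program(obj, prog)))
		printf("prog: %s\n", bpf_program__name(prog));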
@@ -217,14 +225,51 @@ LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
/* returns program size in bytes */
+LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insn_cnt() instead")
LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog);
+struct bpf_insn;
+
+/**
+ * @brief **bpf_program__insns()** gives read-only access to the BPF program's
+ * underlying BPF instructions.
+ * @param prog BPF program for which to return instructions
+ * @return a pointer to an array of BPF instructions that belong to the
+ * specified BPF program
+ *
+ * The returned pointer is always valid and not NULL. The number of `struct
+ * bpf_insn` entries pointed to can be fetched using the
+ * **bpf_program__insn_cnt()** API.
+ *
+ * Keep in mind that libbpf can modify, append, and delete a BPF program's
+ * instructions as it processes the BPF object file and prepares everything
+ * for uploading into the kernel. So, depending on the point in the BPF
+ * object's lifetime, **bpf_program__insns()** can return different sets of
+ * instructions. As an example, during the BPF object load phase the BPF
+ * program's instructions will be CO-RE-relocated, BPF subprogram instructions
+ * will be appended, ldimm64 instructions will have FDs embedded, etc. So the
+ * instructions returned before **bpf_object__load()** and after it might be
+ * quite different.
+ */
+LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog);
+/**
+ * @brief **bpf_program__insn_cnt()** returns the number of `struct bpf_insn`
+ * entries that form the specified BPF program.
+ * @param prog BPF program for which to return the number of BPF instructions
+ *
+ * See the **bpf_program__insns()** documentation for notes on how libbpf can
+ * change a program's instructions and their count during different phases of
+ * the **bpf_object** lifetime.
+ */
+LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog);
+
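A short usage sketch of the two accessors; as the comments above note, the snapshot differs before and after bpf_object__load():

	const struct bpf_insn *insns = bpf_program__insns(prog);
	size_t i, cnt = bpf_program__insn_cnt(prog);

	for (i = 0; i < cnt; i++)
		printf("insn %zu: code 0x%02x\n", i, insns[i].code);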
LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license,
__u32 kern_version);
LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
+LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
const char *path,
int instance);
+LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog,
const char *path,
int instance);
@@ -358,8 +403,6 @@ LIBBPF_API struct bpf_link *
bpf_program__attach_iter(const struct bpf_program *prog,
const struct bpf_iter_attach_opts *opts);
-struct bpf_insn;
-
/*
* Libbpf allows callers to adjust BPF programs before being loaded
* into kernel. One program in an object file can be transformed into
@@ -388,7 +431,7 @@ struct bpf_insn;
* one instance. In this case bpf_program__fd(prog) is equal to
* bpf_program__nth_fd(prog, 0).
*/
-
+LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions")
struct bpf_prog_prep_result {
/*
* If not NULL, load new instruction array.
@@ -417,9 +460,11 @@ typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
struct bpf_insn *insns, int insns_cnt,
struct bpf_prog_prep_result *res);
+LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions")
LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
bpf_program_prep_t prep);
+LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n);
/*
@@ -502,16 +547,21 @@ bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_map() instead")
+struct bpf_map *bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
LIBBPF_API struct bpf_map *
-bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
+bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);
+
#define bpf_object__for_each_map(pos, obj) \
- for ((pos) = bpf_map__next(NULL, (obj)); \
+ for ((pos) = bpf_object__next_map((obj), NULL); \
(pos) != NULL; \
- (pos) = bpf_map__next((pos), (obj)))
+ (pos) = bpf_object__next_map((obj), (pos)))
#define bpf_map__for_each bpf_object__for_each_map
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_map() instead")
+struct bpf_map *bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
LIBBPF_API struct bpf_map *
-bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
+bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
/**
* @brief **bpf_map__fd()** gets the file descriptor of the passed
@@ -550,6 +600,9 @@ LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
/* get/set map if_index */
LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
+/* get/set map map_extra flags */
+LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);
typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 9e649cf9e771..43580eb47740 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -389,5 +389,16 @@ LIBBPF_0.5.0 {
LIBBPF_0.6.0 {
global:
- btf__add_tag;
+ bpf_map__map_extra;
+ bpf_map__set_map_extra;
+ bpf_object__next_map;
+ bpf_object__next_program;
+ bpf_object__prev_map;
+ bpf_object__prev_program;
+ bpf_program__insn_cnt;
+ bpf_program__insns;
+ btf__add_btf;
+ btf__add_decl_tag;
+ btf__raw_data;
+ btf__type_cnt;
} LIBBPF_0.5.0;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index ec79400517d4..aeb79e3a8ff9 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -13,6 +13,8 @@
#include <limits.h>
#include <errno.h>
#include <linux/err.h>
+#include <fcntl.h>
+#include <unistd.h>
#include "libbpf_legacy.h"
#include "relo_core.h"
@@ -52,8 +54,8 @@
#endif
/* Older libelf versions all end up in this expression, for both 32 and 64 bit */
-#ifndef GELF_ST_VISIBILITY
-#define GELF_ST_VISIBILITY(o) ((o) & 0x03)
+#ifndef ELF64_ST_VISIBILITY
+#define ELF64_ST_VISIBILITY(o) ((o) & 0x03)
#endif
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
@@ -69,8 +71,8 @@
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
#define BTF_TYPE_FLOAT_ENC(name, sz) \
BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
-#define BTF_TYPE_TAG_ENC(value, type, component_idx) \
- BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TAG, 0, 0), type), (component_idx)
+#define BTF_TYPE_DECL_TAG_ENC(value, type, component_idx) \
+ BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx)
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
@@ -193,8 +195,9 @@ enum map_def_parts {
MAP_DEF_NUMA_NODE = 0x080,
MAP_DEF_PINNING = 0x100,
MAP_DEF_INNER_MAP = 0x200,
+ MAP_DEF_MAP_EXTRA = 0x400,
- MAP_DEF_ALL = 0x3ff, /* combination of all above */
+ MAP_DEF_ALL = 0x7ff, /* combination of all above */
};
struct btf_map_def {
@@ -208,6 +211,7 @@ struct btf_map_def {
__u32 map_flags;
__u32 numa_node;
__u32 pinning;
+ __u64 map_extra;
};
int parse_btf_map_def(const char *map_name, struct btf *btf,
@@ -298,14 +302,32 @@ struct bpf_prog_load_params {
__u32 log_level;
char *log_buf;
size_t log_buf_sz;
+ int *fd_array;
};
int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr);
-int bpf_object__section_size(const struct bpf_object *obj, const char *name,
- __u32 *size);
-int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
- __u32 *off);
+struct bpf_create_map_params {
+ const char *name;
+ enum bpf_map_type map_type;
+ __u32 map_flags;
+ __u32 key_size;
+ __u32 value_size;
+ __u32 max_entries;
+ __u32 numa_node;
+ __u32 btf_fd;
+ __u32 btf_key_type_id;
+ __u32 btf_value_type_id;
+ __u32 map_ifindex;
+ union {
+ __u32 inner_map_fd;
+ __u32 btf_vmlinux_value_type_id;
+ };
+ __u64 map_extra;
+};
+
+int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr);
+
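For reference, bpf_object__create_map() above populates this internal struct roughly as follows (BTF IDs, ifindex and the inner-map fd are filled in conditionally and omitted here):

	struct bpf_create_map_params create_attr = {
		.name        = map->name,
		.map_type    = def->type,
		.map_flags   = def->map_flags,
		.key_size    = def->key_size,
		.value_size  = def->value_size,
		.max_entries = def->max_entries,
		.numa_node   = map->numa_node,
		.map_extra   = map->map_extra,
	};
	map->fd = libbpf__bpf_create_map_xattr(&create_attr);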
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
const char **prefix, int *kind);
@@ -408,6 +430,8 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
+__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
+ __u32 kind);
extern enum libbpf_strict_mode libbpf_mode;
@@ -469,4 +493,26 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn)
return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}
+/* If fd is stdin, stdout, or stderr, dup it to an fd greater than 2.
+ * Takes ownership of the fd passed in: the original fd is closed after
+ * being duped via fcntl(fd, F_DUPFD_CLOEXEC, 3).
+ */
+static inline int ensure_good_fd(int fd)
+{
+ int old_fd = fd, saved_errno;
+
+ if (fd < 0)
+ return fd;
+ if (fd < 3) {
+ fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
+ saved_errno = errno;
+ close(old_fd);
+ if (fd < 0) {
+ pr_warn("failed to dup FD %d to FD > 2: %d\n", old_fd, -saved_errno);
+ errno = saved_errno;
+ }
+ }
+ return fd;
+}
+
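Call sites wrap any fd-producing syscall, e.g. (path illustrative):

	int fd = ensure_good_fd(open("/sys/kernel/btf/vmlinux", O_RDONLY | O_CLOEXEC));

	if (fd < 0)
		return -errno;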
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h
index 74e6f860f703..5ba5c9beccfa 100644
--- a/tools/lib/bpf/libbpf_legacy.h
+++ b/tools/lib/bpf/libbpf_legacy.h
@@ -52,8 +52,17 @@ enum libbpf_strict_mode {
* allowed, with LIBBPF_STRICT_SEC_NAME this will become
* unrecognized by libbpf and would have to be just SEC("xdp") and
* SEC("perf_event").
+ *
+ * Note, in this mode the program pin path will be based on the
+ * function name instead of section name.
*/
LIBBPF_STRICT_SEC_NAME = 0x04,
+ /*
+ * Disable the global 'bpf_objects_list'. Maintaining this list adds
+ * a race condition to bpf_object__open() and bpf_object__close().
+ * Clients can maintain it on their own if it is valuable for them.
+ */
+ LIBBPF_STRICT_NO_OBJECT_LIST = 0x08,
__LIBBPF_STRICT_LAST,
};
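Applications opt in through the existing libbpf_set_strict_mode() entry point; the flags OR together:

	/* disable the global object list (and its open/close race) */
	libbpf_set_strict_mode(LIBBPF_STRICT_NO_OBJECT_LIST);

	/* or opt into all strict behaviors at once */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);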
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index cd8c703dde71..68f2dbf364aa 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -33,7 +33,7 @@ static int get_vendor_id(int ifindex)
snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
- fd = open(path, O_RDONLY);
+ fd = open(path, O_RDONLY | O_CLOEXEC);
if (fd < 0)
return -1;
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 2df880cefdae..f677dccdeae4 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -15,7 +15,6 @@
#include <linux/btf.h>
#include <elf.h>
#include <libelf.h>
-#include <gelf.h>
#include <fcntl.h>
#include "libbpf.h"
#include "btf.h"
@@ -302,7 +301,7 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
if (!linker->filename)
return -ENOMEM;
- linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
if (linker->fd < 0) {
err = -errno;
pr_warn("failed to create '%s': %d\n", file, err);
@@ -324,12 +323,12 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
linker->elf_hdr->e_machine = EM_BPF;
linker->elf_hdr->e_type = ET_REL;
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB;
#else
-#error "Unknown __BYTE_ORDER"
+#error "Unknown __BYTE_ORDER__"
#endif
/* STRTAB */
@@ -539,12 +538,12 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
const struct bpf_linker_file_opts *opts,
struct src_obj *obj)
{
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
const int host_endianness = ELFDATA2LSB;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
const int host_endianness = ELFDATA2MSB;
#else
-#error "Unknown __BYTE_ORDER"
+#error "Unknown __BYTE_ORDER__"
#endif
int err = 0;
Elf_Scn *scn;
@@ -557,7 +556,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
obj->filename = filename;
- obj->fd = open(filename, O_RDONLY);
+ obj->fd = open(filename, O_RDONLY | O_CLOEXEC);
if (obj->fd < 0) {
err = -errno;
pr_warn("failed to open file '%s': %d\n", filename, err);
@@ -921,7 +920,7 @@ static int check_btf_type_id(__u32 *type_id, void *ctx)
{
struct btf *btf = ctx;
- if (*type_id > btf__get_nr_types(btf))
+ if (*type_id >= btf__type_cnt(btf))
return -EINVAL;
return 0;
@@ -948,8 +947,8 @@ static int linker_sanity_check_btf(struct src_obj *obj)
if (!obj->btf)
return 0;
- n = btf__get_nr_types(obj->btf);
- for (i = 1; i <= n; i++) {
+ n = btf__type_cnt(obj->btf);
+ for (i = 1; i < n; i++) {
t = btf_type_by_id(obj->btf, i);
err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf);
@@ -1659,8 +1658,8 @@ static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sy
return -EINVAL;
}
- n = btf__get_nr_types(obj->btf);
- for (i = 1; i <= n; i++) {
+ n = btf__type_cnt(obj->btf);
+ for (i = 1; i < n; i++) {
t = btf__type_by_id(obj->btf, i);
/* some global and extern FUNCs and VARs might not be associated with any
@@ -2131,8 +2130,8 @@ static int linker_fixup_btf(struct src_obj *obj)
if (!obj->btf)
return 0;
- n = btf__get_nr_types(obj->btf);
- for (i = 1; i <= n; i++) {
+ n = btf__type_cnt(obj->btf);
+ for (i = 1; i < n; i++) {
struct btf_var_secinfo *vi;
struct btf_type *t;
@@ -2235,14 +2234,14 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
if (!obj->btf)
return 0;
- start_id = btf__get_nr_types(linker->btf) + 1;
- n = btf__get_nr_types(obj->btf);
+ start_id = btf__type_cnt(linker->btf);
+ n = btf__type_cnt(obj->btf);
obj->btf_type_map = calloc(n + 1, sizeof(int));
if (!obj->btf_type_map)
return -ENOMEM;
- for (i = 1; i <= n; i++) {
+ for (i = 1; i < n; i++) {
struct glob_sym *glob_sym = NULL;
t = btf__type_by_id(obj->btf, i);
@@ -2297,8 +2296,8 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
}
/* remap all the types except DATASECs */
- n = btf__get_nr_types(linker->btf);
- for (i = start_id; i <= n; i++) {
+ n = btf__type_cnt(linker->btf);
+ for (i = start_id; i < n; i++) {
struct btf_type *dst_t = btf_type_by_id(linker->btf, i);
if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map))
@@ -2657,7 +2656,7 @@ static int finalize_btf(struct bpf_linker *linker)
__u32 raw_sz;
/* bail out if no BTF data was produced */
- if (btf__get_nr_types(linker->btf) == 0)
+ if (btf__type_cnt(linker->btf) == 1)
return 0;
for (i = 1; i < linker->sec_cnt; i++) {
@@ -2694,7 +2693,7 @@ static int finalize_btf(struct bpf_linker *linker)
}
/* Emit .BTF section */
- raw_data = btf__get_raw_data(linker->btf, &raw_sz);
+ raw_data = btf__raw_data(linker->btf, &raw_sz);
if (!raw_data)
return -ENOMEM;
diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
index 4016ed492d0c..b5b8956a1be8 100644
--- a/tools/lib/bpf/relo_core.c
+++ b/tools/lib/bpf/relo_core.c
@@ -662,7 +662,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
*validate = true; /* signedness is never ambiguous */
break;
case BPF_FIELD_LSHIFT_U64:
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
*val = 64 - (bit_off + bit_sz - byte_off * 8);
#else
*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index a2111696ba91..81f8fbc85e70 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -300,7 +300,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
if (!umem)
return -ENOMEM;
- umem->fd = socket(AF_XDP, SOCK_RAW, 0);
+ umem->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
if (umem->fd < 0) {
err = -errno;
goto out_umem_alloc;
@@ -549,7 +549,7 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
struct ifreq ifr = {};
int fd, err, ret;
- fd = socket(AF_LOCAL, SOCK_DGRAM, 0);
+ fd = socket(AF_LOCAL, SOCK_DGRAM | SOCK_CLOEXEC, 0);
if (fd < 0)
return -errno;
@@ -1046,7 +1046,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
}
if (umem->refcount++ > 0) {
- xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
+ xsk->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
if (xsk->fd < 0) {
err = -errno;
goto out_xsk_alloc;
diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h
index 01c12dca9c10..64e9c57fd792 100644
--- a/tools/lib/bpf/xsk.h
+++ b/tools/lib/bpf/xsk.h
@@ -23,6 +23,12 @@
extern "C" {
#endif
+/* This whole API has been deprecated and moved to libxdp, which can be found
+ * at https://github.com/xdp-project/xdp-tools. The APIs are exactly the same,
+ * so migrating should just be a matter of linking with libxdp instead of
+ * libbpf for this set of functionality. If not, please submit a bug report on
+ * the aforementioned page.
+ */
+
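In practice the migration is a build-system change; per the xdp-tools project, something like:

	#include <xdp/xsk.h>	/* instead of <bpf/xsk.h>; link with -lxdp */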
/* Load-Acquire Store-Release barriers used by the XDP socket
* library. The following macros should *NOT* be considered part of
* the xsk.h API, and are subject to change at any time.
@@ -245,8 +251,10 @@ static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
}
-LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
-LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
+int xsk_umem__fd(const struct xsk_umem *umem);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
+int xsk_socket__fd(const struct xsk_socket *xsk);
#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048
@@ -263,10 +271,10 @@ struct xsk_umem_config {
__u32 flags;
};
-LIBBPF_API int xsk_setup_xdp_prog(int ifindex,
- int *xsks_map_fd);
-LIBBPF_API int xsk_socket__update_xskmap(struct xsk_socket *xsk,
- int xsks_map_fd);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
+int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
+int xsk_socket__update_xskmap(struct xsk_socket *xsk, int xsks_map_fd);
/* Flags for the libbpf_flags field. */
#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)
@@ -280,40 +288,46 @@ struct xsk_socket_config {
};
/* Set config to NULL to get the default configuration. */
-LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
- void *umem_area, __u64 size,
- struct xsk_ring_prod *fill,
- struct xsk_ring_cons *comp,
- const struct xsk_umem_config *config);
-LIBBPF_API int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
- void *umem_area, __u64 size,
- struct xsk_ring_prod *fill,
- struct xsk_ring_cons *comp,
- const struct xsk_umem_config *config);
-LIBBPF_API int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
- void *umem_area, __u64 size,
- struct xsk_ring_prod *fill,
- struct xsk_ring_cons *comp,
- const struct xsk_umem_config *config);
-LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk,
- const char *ifname, __u32 queue_id,
- struct xsk_umem *umem,
- struct xsk_ring_cons *rx,
- struct xsk_ring_prod *tx,
- const struct xsk_socket_config *config);
-LIBBPF_API int
-xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
- const char *ifname,
- __u32 queue_id, struct xsk_umem *umem,
- struct xsk_ring_cons *rx,
- struct xsk_ring_prod *tx,
- struct xsk_ring_prod *fill,
- struct xsk_ring_cons *comp,
- const struct xsk_socket_config *config);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
+int xsk_umem__create(struct xsk_umem **umem,
+ void *umem_area, __u64 size,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *config);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
+int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
+ void *umem_area, __u64 size,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *config);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
+int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
+ void *umem_area, __u64 size,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *config);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
+int xsk_socket__create(struct xsk_socket **xsk,
+ const char *ifname, __u32 queue_id,
+ struct xsk_umem *umem,
+ struct xsk_ring_cons *rx,
+ struct xsk_ring_prod *tx,
+ const struct xsk_socket_config *config);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
+int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
+ const char *ifname,
+ __u32 queue_id, struct xsk_umem *umem,
+ struct xsk_ring_cons *rx,
+ struct xsk_ring_prod *tx,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_socket_config *config);
/* Returns 0 for success and -EBUSY if the umem is still in use. */
-LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem);
-LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
+int xsk_umem__delete(struct xsk_umem *umem);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
+void xsk_socket__delete(struct xsk_socket *xsk);
#ifdef __cplusplus
} /* extern "C" */