diff options
Diffstat (limited to 'tools/include/uapi/linux/bpf.h')
| -rw-r--r-- | tools/include/uapi/linux/bpf.h | 188 | 
1 file changed, 132 insertions, 56 deletions
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 77c6be96d676..dbbcf0b02970 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -173,6 +173,7 @@ enum bpf_prog_type {  	BPF_PROG_TYPE_CGROUP_SYSCTL,  	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,  	BPF_PROG_TYPE_CGROUP_SOCKOPT, +	BPF_PROG_TYPE_TRACING,  };  enum bpf_attach_type { @@ -199,6 +200,9 @@ enum bpf_attach_type {  	BPF_CGROUP_UDP6_RECVMSG,  	BPF_CGROUP_GETSOCKOPT,  	BPF_CGROUP_SETSOCKOPT, +	BPF_TRACE_RAW_TP, +	BPF_TRACE_FENTRY, +	BPF_TRACE_FEXIT,  	__MAX_BPF_ATTACH_TYPE  }; @@ -344,6 +348,9 @@ enum bpf_attach_type {  /* Clone map from listener for newly accepted socket */  #define BPF_F_CLONE		(1U << 9) +/* Enable memory-mapping BPF map */ +#define BPF_F_MMAPABLE		(1U << 10) +  /* flags for BPF_PROG_QUERY */  #define BPF_F_QUERY_EFFECTIVE	(1U << 0) @@ -420,6 +427,8 @@ union bpf_attr {  		__u32		line_info_rec_size;	/* userspace bpf_line_info size */  		__aligned_u64	line_info;	/* line info */  		__u32		line_info_cnt;	/* number of bpf_line_info records */ +		__u32		attach_btf_id;	/* in-kernel BTF type id to attach to */ +		__u32		attach_prog_fd; /* 0 to attach to vmlinux */  	};  	struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -560,10 +569,13 @@ union bpf_attr {   * 	Return   * 		0 on success, or a negative error in case of failure.   * - * int bpf_probe_read(void *dst, u32 size, const void *src) + * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)   * 	Description   * 		For tracing programs, safely attempt to read *size* bytes from - * 		address *src* and store the data in *dst*. + * 		kernel space address *unsafe_ptr* and store the data in *dst*. + * + * 		Generally, use bpf_probe_read_user() or bpf_probe_read_kernel() + * 		instead.   * 	Return   * 		0 on success, or a negative error in case of failure.   
* @@ -794,7 +806,7 @@ union bpf_attr {   * 		A 64-bit integer containing the current GID and UID, and   * 		created as such: *current_gid* **<< 32 \|** *current_uid*.   * - * int bpf_get_current_comm(char *buf, u32 size_of_buf) + * int bpf_get_current_comm(void *buf, u32 size_of_buf)   * 	Description   * 		Copy the **comm** attribute of the current task into *buf* of   * 		*size_of_buf*. The **comm** attribute contains the name of @@ -1023,7 +1035,7 @@ union bpf_attr {   * 		The realm of the route for the packet associated to *skb*, or 0   * 		if none was found.   * - * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)   * 	Description   * 		Write raw *data* blob into a special BPF perf event held by   * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -1068,7 +1080,7 @@ union bpf_attr {   * 	Return   * 		0 on success, or a negative error in case of failure.   * - * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len) + * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)   * 	Description   * 		This helper was provided as an easy way to load data from a   * 		packet. It can be used to load *len* bytes from *offset* from @@ -1085,7 +1097,7 @@ union bpf_attr {   * 	Return   * 		0 on success, or a negative error in case of failure.   * - * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags) + * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)   * 	Description   * 		Walk a user or a kernel stack and return its id. To achieve   * 		this, the helper needs *ctx*, which is a pointer to the context @@ -1154,7 +1166,7 @@ union bpf_attr {   * 		The checksum result, or a negative error code in case of   * 		failure.   
* - * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size) + * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)   * 	Description   * 		Retrieve tunnel options metadata for the packet associated to   * 		*skb*, and store the raw tunnel option data to the buffer *opt* @@ -1172,7 +1184,7 @@ union bpf_attr {   * 	Return   * 		The size of the option data retrieved.   * - * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size) + * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)   * 	Description   * 		Set tunnel options metadata for the packet associated to *skb*   * 		to the option data contained in the raw buffer *opt* of *size*. @@ -1425,45 +1437,14 @@ union bpf_attr {   * 	Return   * 		0 on success, or a negative error in case of failure.   * - * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) + * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)   * 	Description - * 		Copy a NUL terminated string from an unsafe address - * 		*unsafe_ptr* to *dst*. The *size* should include the - * 		terminating NUL byte. In case the string length is smaller than - * 		*size*, the target is not padded with further NUL bytes. If the - * 		string length is larger than *size*, just *size*-1 bytes are - * 		copied and the last byte is set to NUL. - * - * 		On success, the length of the copied string is returned. This - * 		makes this helper useful in tracing programs for reading - * 		strings, and more importantly to get its length at runtime. 
See - * 		the following snippet: - * - * 		:: - * - * 			SEC("kprobe/sys_open") - * 			void bpf_sys_open(struct pt_regs *ctx) - * 			{ - * 			        char buf[PATHLEN]; // PATHLEN is defined to 256 - * 			        int res = bpf_probe_read_str(buf, sizeof(buf), - * 				                             ctx->di); - * - * 				// Consume buf, for example push it to - * 				// userspace via bpf_perf_event_output(); we - * 				// can use res (the string length) as event - * 				// size, after checking its boundaries. - * 			} + * 		Copy a NUL terminated string from an unsafe kernel address + * 		*unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for + * 		more details.   * - * 		In comparison, using **bpf_probe_read()** helper here instead - * 		to read the string would require to estimate the length at - * 		compile time, and would often result in copying more memory - * 		than necessary. - * - * 		Another useful use case is when parsing individual process - * 		arguments or individual environment variables navigating - * 		*current*\ **->mm->arg_start** and *current*\ - * 		**->mm->env_start**: using this helper and the return value, - * 		one can quickly iterate at the right offset of the memory area. + * 		Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str() + * 		instead.   * 	Return   * 		On success, the strictly positive length of the string,   * 		including the trailing NUL character. On error, a negative @@ -1511,7 +1492,7 @@ union bpf_attr {   * 	Return   * 		0   * - * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen) + * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)   * 	Description   * 		Emulate a call to **setsockopt()** on the socket associated to   * 		*bpf_socket*, which must be a full socket. The *level* at @@ -1595,7 +1576,7 @@ union bpf_attr {   * 	Return   * 		**XDP_REDIRECT** on success, or **XDP_ABORTED** on error.   
* - * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags) + * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)   * 	Description   * 		Redirect the packet to the socket referenced by *map* (of type   * 		**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and @@ -1715,7 +1696,7 @@ union bpf_attr {   * 	Return   * 		0 on success, or a negative error in case of failure.   * - * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen) + * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)   * 	Description   * 		Emulate a call to **getsockopt()** on the socket associated to   * 		*bpf_socket*, which must be a full socket. The *level* at @@ -1947,7 +1928,7 @@ union bpf_attr {   * 	Return   * 		0 on success, or a negative error in case of failure.   * - * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags) + * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)   * 	Description   * 		Return a user or a kernel stack in bpf program provided buffer.   * 		To achieve this, the helper needs *ctx*, which is a pointer @@ -1980,7 +1961,7 @@ union bpf_attr {   * 		A non-negative value equal to or less than *size* on success,   * 		or a negative error in case of failure.   
* - * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) + * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)   * 	Description   * 		This helper is similar to **bpf_skb_load_bytes**\ () in that   * 		it provides an easy way to load *len* bytes from *offset* @@ -2033,7 +2014,7 @@ union bpf_attr {   *		* > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the   *		  packet is not forwarded or needs assist from full stack   * - * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) + * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)   *	Description   *		Add an entry to, or update a sockhash *map* referencing sockets.   *		The *skops* is used as a new value for the entry associated to @@ -2392,7 +2373,7 @@ union bpf_attr {   * 	Return   * 		0 on success, or a negative error in case of failure.   * - * int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags) + * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)   *	Description   *		For socket policies, insert *len* bytes into *msg* at offset   *		*start*. @@ -2408,9 +2389,9 @@ union bpf_attr {   *	Return   *		0 on success, or a negative error in case of failure.   * - * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags) + * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)   *	Description - *		Will remove *pop* bytes from a *msg* starting at byte *start*. + *		Will remove *len* bytes from a *msg* starting at byte *start*.   *		This may result in **ENOMEM** errors under certain situations if   *		an allocation and copy are required due to a full ring buffer.   
*		However, the helper will try to avoid doing the allocation @@ -2505,7 +2486,7 @@ union bpf_attr {   *		A **struct bpf_tcp_sock** pointer on success, or **NULL** in   *		case of failure.   * - * int bpf_skb_ecn_set_ce(struct sk_buf *skb) + * int bpf_skb_ecn_set_ce(struct sk_buff *skb)   *	Description   *		Set ECN (Explicit Congestion Notification) field of IP header   *		to **CE** (Congestion Encountered) if current value is **ECT** @@ -2750,6 +2731,96 @@ union bpf_attr {   *		**-EOPNOTSUPP** kernel configuration does not enable SYN cookies   *   *		**-EPROTONOSUPPORT** IP packet version is not 4 or 6 + * + * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * 	Description + * 		Write raw *data* blob into a special BPF perf event held by + * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf + * 		event must have the following attributes: **PERF_SAMPLE_RAW** + * 		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and + * 		**PERF_COUNT_SW_BPF_OUTPUT** as **config**. + * + * 		The *flags* are used to indicate the index in *map* for which + * 		the value must be put, masked with **BPF_F_INDEX_MASK**. + * 		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** + * 		to indicate that the index of the current CPU core should be + * 		used. + * + * 		The value to write, of *size*, is passed through eBPF stack and + * 		pointed by *data*. + * + * 		*ctx* is a pointer to in-kernel struct sk_buff. + * + * 		This helper is similar to **bpf_perf_event_output**\ () but + * 		restricted to raw_tracepoint bpf programs. + * 	Return + * 		0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) + * 	Description + * 		Safely attempt to read *size* bytes from user space address + * 		*unsafe_ptr* and store the data in *dst*. + * 	Return + * 		0 on success, or a negative error in case of failure. 
+ * + * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) + * 	Description + * 		Safely attempt to read *size* bytes from kernel space address + * 		*unsafe_ptr* and store the data in *dst*. + * 	Return + * 		0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) + * 	Description + * 		Copy a NUL terminated string from an unsafe user address + * 		*unsafe_ptr* to *dst*. The *size* should include the + * 		terminating NUL byte. In case the string length is smaller than + * 		*size*, the target is not padded with further NUL bytes. If the + * 		string length is larger than *size*, just *size*-1 bytes are + * 		copied and the last byte is set to NUL. + * + * 		On success, the length of the copied string is returned. This + * 		makes this helper useful in tracing programs for reading + * 		strings, and more importantly to get its length at runtime. See + * 		the following snippet: + * + * 		:: + * + * 			SEC("kprobe/sys_open") + * 			void bpf_sys_open(struct pt_regs *ctx) + * 			{ + * 			        char buf[PATHLEN]; // PATHLEN is defined to 256 + * 			        int res = bpf_probe_read_user_str(buf, sizeof(buf), + * 				                                  ctx->di); + * + * 				// Consume buf, for example push it to + * 				// userspace via bpf_perf_event_output(); we + * 				// can use res (the string length) as event + * 				// size, after checking its boundaries. + * 			} + * + * 		In comparison, using **bpf_probe_read_user()** helper here + * 		instead to read the string would require to estimate the length + * 		at compile time, and would often result in copying more memory + * 		than necessary. 
+ * + * 		Another useful use case is when parsing individual process + * 		arguments or individual environment variables navigating + * 		*current*\ **->mm->arg_start** and *current*\ + * 		**->mm->env_start**: using this helper and the return value, + * 		one can quickly iterate at the right offset of the memory area. + * 	Return + * 		On success, the strictly positive length of the string, + * 		including the trailing NUL character. On error, a negative + * 		value. + * + * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) + * 	Description + * 		Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* + * 		to *dst*. Same semantics as with bpf_probe_read_user_str() apply. + * 	Return + * 		On success, the strictly positive length of the string,	including + * 		the trailing NUL character. On error, a negative value.   */  #define __BPF_FUNC_MAPPER(FN)		\  	FN(unspec),			\ @@ -2862,7 +2933,12 @@ union bpf_attr {  	FN(sk_storage_get),		\  	FN(sk_storage_delete),		\  	FN(send_signal),		\ -	FN(tcp_gen_syncookie), +	FN(tcp_gen_syncookie),		\ +	FN(skb_output),			\ +	FN(probe_read_user),		\ +	FN(probe_read_kernel),		\ +	FN(probe_read_user_str),	\ +	FN(probe_read_kernel_str),  /* integer value in 'imm' field of BPF_CALL instruction selects which helper   * function eBPF program intends to call  | 
