Diffstat (limited to 'include/linux/bpf.h')
-rw-r--r--	include/linux/bpf.h	111
1 files changed, 108 insertions, 3 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f58895830ada..eced6400f778 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -228,6 +228,18 @@ struct btf_record {
 	struct btf_field fields[];
 };
 
+/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
+struct bpf_rb_node_kern {
+	struct rb_node rb_node;
+	void *owner;
+} __attribute__((aligned(8)));
+
+/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
+struct bpf_list_node_kern {
+	struct list_head list_head;
+	void *owner;
+} __attribute__((aligned(8)));
+
 struct bpf_map {
 	/* The first two cachelines with read-mostly members of which some
 	 * are also accessed in fast-path (e.g. ops, max_entries).
@@ -275,6 +287,7 @@ struct bpf_map {
 	} owner;
 	bool bypass_spec_v1;
 	bool frozen; /* write-once; write-protected by freeze_mutex */
+	s64 __percpu *elem_count;
 };
 
 static inline const char *btf_field_type_name(enum btf_field_type type)
@@ -1537,6 +1550,53 @@ struct bpf_struct_ops_value;
 struct btf_member;
 
 #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
+/**
+ * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
+ *			   define a BPF_MAP_TYPE_STRUCT_OPS map type composed
+ *			   of BPF_PROG_TYPE_STRUCT_OPS progs.
+ * @verifier_ops: A structure of callbacks that are invoked by the verifier
+ *		  when determining whether the struct_ops progs in the
+ *		  struct_ops map are valid.
+ * @init: A callback that is invoked a single time, and before any other
+ *	  callback, to initialize the structure. A nonzero return value means
+ *	  the subsystem could not be initialized.
+ * @check_member: When defined, a callback invoked by the verifier to allow
+ *		  the subsystem to determine if an entry in the struct_ops map
+ *		  is valid. A nonzero return value means that the map is
+ *		  invalid and should be rejected by the verifier.
+ * @init_member: A callback that is invoked for each member of the struct_ops
+ *		 map to allow the subsystem to initialize the member. A nonzero
+ *		 value means the member could not be initialized. This callback
+ *		 is exclusive with the @type, @type_id, @value_type, and
+ *		 @value_id fields.
+ * @reg: A callback that is invoked when the struct_ops map has been
+ *	 initialized and is being attached to. Zero means the struct_ops map
+ *	 has been successfully registered and is live. A nonzero return value
+ *	 means the struct_ops map could not be registered.
+ * @unreg: A callback that is invoked when the struct_ops map should be
+ *	   unregistered.
+ * @update: A callback that is invoked when the live struct_ops map is being
+ *	    updated to contain new values. This callback is only invoked when
+ *	    the struct_ops map is loaded with BPF_F_LINK. If not defined, it is
+ *	    assumed that the struct_ops map cannot be updated.
+ * @validate: A callback that is invoked after all of the members have been
+ *	      initialized. This callback should perform static checks on the
+ *	      map, meaning that it should either fail or succeed
+ *	      deterministically. A struct_ops map that has been validated may
+ *	      not necessarily succeed in being registered if the call to @reg
+ *	      fails. For example, a valid struct_ops map may be loaded, but
+ *	      then fail to be registered because another struct_ops map is
+ *	      already active in the subsystem. For this reason, if this
+ *	      callback is not defined, the check is skipped as the
+ *	      struct_ops map will have final verification performed in
+ *	      @reg.
+ * @type: BTF type.
+ * @value_type: Value type.
+ * @name: The name of the struct bpf_struct_ops object.
+ * @func_models: Func models
+ * @type_id: BTF type id.
+ * @value_id: BTF value id.
+ */
 struct bpf_struct_ops {
 	const struct bpf_verifier_ops *verifier_ops;
 	int (*init)(struct btf *btf);
@@ -1806,6 +1866,7 @@ struct bpf_cg_run_ctx {
 struct bpf_trace_run_ctx {
 	struct bpf_run_ctx run_ctx;
 	u64 bpf_cookie;
+	bool is_uprobe;
 };
 
 struct bpf_tramp_run_ctx {
@@ -1854,6 +1915,8 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
 	if (unlikely(!array))
 		return ret;
 
+	run_ctx.is_uprobe = false;
+
 	migrate_disable();
 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 	item = &array->items[0];
@@ -1878,8 +1941,8 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
  * rcu-protected dynamically sized maps.
  */
 static __always_inline u32
-bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
-			     const void *ctx, bpf_prog_run_fn run_prog)
+bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
+			  const void *ctx, bpf_prog_run_fn run_prog)
 {
 	const struct bpf_prog_array_item *item;
 	const struct bpf_prog *prog;
@@ -1893,6 +1956,8 @@ bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
 	rcu_read_lock_trace();
 	migrate_disable();
 
+	run_ctx.is_uprobe = true;
+
 	array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
 	if (unlikely(!array))
 		goto out;
@@ -2040,6 +2105,35 @@ bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
 }
 #endif
 
+static inline int
+bpf_map_init_elem_count(struct bpf_map *map)
+{
+	size_t size = sizeof(*map->elem_count), align = size;
+	gfp_t flags = GFP_USER | __GFP_NOWARN;
+
+	map->elem_count = bpf_map_alloc_percpu(map, size, align, flags);
+	if (!map->elem_count)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static inline void
+bpf_map_free_elem_count(struct bpf_map *map)
+{
+	free_percpu(map->elem_count);
+}
+
+static inline void bpf_map_inc_elem_count(struct bpf_map *map)
+{
+	this_cpu_inc(*map->elem_count);
+}
+
+static inline void bpf_map_dec_elem_count(struct bpf_map *map)
+{
+	this_cpu_dec(*map->elem_count);
+}
+
 extern int sysctl_unprivileged_bpf_disabled;
 
 static inline bool bpf_allow_ptr_leaks(void)
@@ -2073,7 +2167,6 @@ void bpf_link_cleanup(struct bpf_link_primer *primer);
 void bpf_link_inc(struct bpf_link *link);
 void bpf_link_put(struct bpf_link *link);
 int bpf_link_new_fd(struct bpf_link *link);
-struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
 struct bpf_link *bpf_link_get_from_fd(u32 ufd);
 struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
 
@@ -2619,6 +2712,18 @@ static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
 }
 #endif /* CONFIG_BPF_SYSCALL */
 
+static __always_inline int
+bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
+{
+	int ret = -EFAULT;
+
+	if (IS_ENABLED(CONFIG_BPF_EVENTS))
+		ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
+	if (unlikely(ret < 0))
+		memset(dst, 0, size);
+	return ret;
+}
+
 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
 			  struct btf_mod_pair *used_btfs, u32 len);
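The kerneldoc block above spells out the struct_ops callback contract. As a rough illustration (not part of this patch), a subsystem wanting to expose a BPF_MAP_TYPE_STRUCT_OPS map might fill the structure in roughly as follows; every "my_ops" name here is hypothetical, and on kernels of this vintage the type would likely also need an entry in kernel/bpf/bpf_struct_ops_types.h:

/* Hypothetical subsystem-side sketch; not part of the diff above. */
#include <linux/bpf.h>
#include <linux/btf.h>

/* The kernel-internal ops table that BPF programs will implement. */
struct my_ops {
	int (*do_thing)(int arg);
};

static int my_ops_init(struct btf *btf)
{
	/* One-time setup; a nonzero return rejects the subsystem (see @init). */
	return 0;
}

static int my_ops_init_member(const struct btf_type *t,
			      const struct btf_member *member,
			      void *kdata, const void *udata)
{
	/* Copy/validate a single member of the map value (see @init_member). */
	return 0;
}

static int my_ops_reg(void *kdata)
{
	/* Make the ops live; nonzero means registration failed (see @reg). */
	return 0;
}

static void my_ops_unreg(void *kdata)
{
	/* Tear down whatever @reg set up (see @unreg). */
}

static const struct bpf_func_proto *
my_ops_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return bpf_base_func_proto(func_id);
}

static const struct bpf_verifier_ops my_ops_verifier_ops = {
	.get_func_proto	= my_ops_get_func_proto,
	/* .is_valid_access / .btf_struct_access would normally be set too. */
};

/* The object the BPF_MAP_TYPE_STRUCT_OPS machinery looks up by name. */
struct bpf_struct_ops bpf_my_ops = {
	.verifier_ops	= &my_ops_verifier_ops,
	.init		= my_ops_init,
	.init_member	= my_ops_init_member,
	.reg		= my_ops_reg,
	.unreg		= my_ops_unreg,
	.name		= "my_ops",
};

Since @update and @validate are left unset in this sketch, such a map could not be updated through BPF_F_LINK and would only get its final checks in @reg, exactly as the kerneldoc describes.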

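The other sizeable addition is the per-cpu elem_count field in struct bpf_map together with its init/free/inc/dec helpers. A minimal sketch of how a map implementation might wire these up follows; everything prefixed example_ is hypothetical and only meant to show the pairing of the helpers:

/* Hypothetical map-implementation sketch; not part of the diff above. */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/numa.h>

struct example_map {
	struct bpf_map map;
	/* backing storage would live here */
};

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	struct example_map *emap;

	emap = bpf_map_area_alloc(sizeof(*emap), NUMA_NO_NODE);
	if (!emap)
		return ERR_PTR(-ENOMEM);

	/* Allocate the per-cpu s64 counter added to struct bpf_map. */
	if (bpf_map_init_elem_count(&emap->map)) {
		bpf_map_area_free(emap);
		return ERR_PTR(-ENOMEM);
	}
	return &emap->map;
}

static long example_map_update_elem(struct bpf_map *map, void *key,
				    void *value, u64 flags)
{
	/* ... insert the element, then account for it ... */
	bpf_map_inc_elem_count(map);	/* just a this_cpu_inc() */
	return 0;
}

static long example_map_delete_elem(struct bpf_map *map, void *key)
{
	/* ... remove the element ... */
	bpf_map_dec_elem_count(map);
	return 0;
}

static void example_map_free(struct bpf_map *map)
{
	struct example_map *emap = container_of(map, struct example_map, map);

	bpf_map_free_elem_count(map);	/* pairs with bpf_map_init_elem_count() */
	bpf_map_area_free(emap);
}

Note that the helpers only touch the local CPU's counter; obtaining a total element count would require summing the per-CPU values, which this header leaves to the map implementation.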