Diffstat (limited to 'kernel/bpf/devmap.c')
-rw-r--r--  kernel/bpf/devmap.c  112
1 file changed, 52 insertions, 60 deletions
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 40e86a7e0ef0..d83cf8ccc872 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -17,9 +17,8 @@
  * datapath always has a valid copy. However, the datapath does a "flush"
  * operation that pushes any pending packets in the driver outside the RCU
  * critical section. Each bpf_dtab_netdev tracks these pending operations using
- * an atomic per-cpu bitmap. The bpf_dtab_netdev object will not be destroyed
- * until all bits are cleared indicating outstanding flush operations have
- * completed.
+ * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
+ * this list is empty, indicating outstanding flush operations have completed.
  *
  * BPF syscalls may race with BPF program calls on any of the update, delete
  * or lookup operations. As noted above the xchg() operation also keep the
@@ -48,9 +47,13 @@
 	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
 
 #define DEV_MAP_BULK_SIZE 16
+struct bpf_dtab_netdev;
+
 struct xdp_bulk_queue {
 	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
+	struct list_head flush_node;
 	struct net_device *dev_rx;
+	struct bpf_dtab_netdev *obj;
 	unsigned int count;
 };
 
@@ -65,23 +68,18 @@ struct bpf_dtab_netdev {
 struct bpf_dtab {
 	struct bpf_map map;
 	struct bpf_dtab_netdev **netdev_map;
-	unsigned long __percpu *flush_needed;
+	struct list_head __percpu *flush_list;
 	struct list_head list;
 };
 
 static DEFINE_SPINLOCK(dev_map_lock);
 static LIST_HEAD(dev_map_list);
 
-static u64 dev_map_bitmap_size(const union bpf_attr *attr)
-{
-	return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
-}
-
 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_dtab *dtab;
+	int err, cpu;
 	u64 cost;
-	int err;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -91,6 +89,11 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
 		return ERR_PTR(-EINVAL);
 
+	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
+	 * verifier prevents writes from the BPF side
+	 */
+	attr->map_flags |= BPF_F_RDONLY_PROG;
+
 	dtab = kzalloc(sizeof(*dtab), GFP_USER);
 	if (!dtab)
 		return ERR_PTR(-ENOMEM);
@@ -99,7 +102,7 @@
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
-	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
+	cost += sizeof(struct list_head) * num_possible_cpus();
 
 	/* if map size is larger than memlock limit, reject it */
 	err = bpf_map_charge_init(&dtab->map.memory, cost);
@@ -108,28 +111,30 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 
 	err = -ENOMEM;
 
-	/* A per cpu bitfield with a bit per possible net device */
-	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
-						__alignof__(unsigned long),
-						GFP_KERNEL | __GFP_NOWARN);
-	if (!dtab->flush_needed)
+	dtab->flush_list = alloc_percpu(struct list_head);
+	if (!dtab->flush_list)
 		goto free_charge;
 
+	for_each_possible_cpu(cpu)
+		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
+
 	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
 					      sizeof(struct bpf_dtab_netdev *),
 					      dtab->map.numa_node);
 	if (!dtab->netdev_map)
-		goto free_charge;
+		goto free_percpu;
 
 	spin_lock(&dev_map_lock);
 	list_add_tail_rcu(&dtab->list, &dev_map_list);
 	spin_unlock(&dev_map_lock);
 
 	return &dtab->map;
+
+free_percpu:
+	free_percpu(dtab->flush_list);
 free_charge:
 	bpf_map_charge_finish(&dtab->map.memory);
 free_dtab:
-	free_percpu(dtab->flush_needed);
 	kfree(dtab);
 	return ERR_PTR(err);
 }
@@ -158,14 +163,14 @@ static void dev_map_free(struct bpf_map *map)
 	rcu_barrier();
 
 	/* To ensure all pending flush operations have completed wait for flush
-	 * bitmap to indicate all flush_needed bits to be zero on _all_ cpus.
+	 * list to empty on _all_ cpus.
 	 * Because the above synchronize_rcu() ensures the map is disconnected
-	 * from the program we can assume no new bits will be set.
+	 * from the program we can assume no new items will be added.
 	 */
 	for_each_online_cpu(cpu) {
-		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);
+		struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);
 
-		while (!bitmap_empty(bitmap, dtab->map.max_entries))
+		while (!list_empty(flush_list))
 			cond_resched();
 	}
 
@@ -181,7 +186,7 @@ static void dev_map_free(struct bpf_map *map)
 		kfree(dev);
 	}
 
-	free_percpu(dtab->flush_needed);
+	free_percpu(dtab->flush_list);
 	bpf_map_area_free(dtab->netdev_map);
 	kfree(dtab);
 }
@@ -203,18 +208,10 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	return 0;
 }
 
-void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
-{
-	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
-
-	__set_bit(bit, bitmap);
-}
-
-static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-		       struct xdp_bulk_queue *bq, u32 flags,
+static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
 		       bool in_napi_ctx)
 {
+	struct bpf_dtab_netdev *obj = bq->obj;
 	struct net_device *dev = obj->dev;
 	int sent = 0, drops = 0, err = 0;
 	int i;
@@ -241,6 +238,7 @@ out:
 	trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
 			      sent, drops, bq->dev_rx, dev, err);
 	bq->dev_rx = NULL;
+	__list_del_clearprev(&bq->flush_node);
 	return 0;
 error:
 	/* If ndo_xdp_xmit fails with an errno, no frames have been
@@ -263,31 +261,18 @@ error:
  * from the driver before returning from its napi->poll() routine. The poll()
  * routine is called either from busy_poll context or net_rx_action signaled
  * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
- * net device can be torn down. On devmap tear down we ensure the ctx bitmap
- * is zeroed before completing to ensure all flush operations have completed.
+ * net device can be torn down. On devmap tear down we ensure the flush list
+ * is empty before completing to ensure all flush operations have completed.
  */
 void __dev_map_flush(struct bpf_map *map)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
-	u32 bit;
+	struct list_head *flush_list = this_cpu_ptr(dtab->flush_list);
+	struct xdp_bulk_queue *bq, *tmp;
 
 	rcu_read_lock();
-	for_each_set_bit(bit, bitmap, map->max_entries) {
-		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
-		struct xdp_bulk_queue *bq;
-
-		/* This is possible if the dev entry is removed by user space
-		 * between xdp redirect and flush op.
-		 */
-		if (unlikely(!dev))
-			continue;
-
-		bq = this_cpu_ptr(dev->bulkq);
-		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
-
-		__clear_bit(bit, bitmap);
-	}
+	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
+		bq_xmit_all(bq, XDP_XMIT_FLUSH, true);
 
 	rcu_read_unlock();
 }
@@ -314,10 +299,11 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 		      struct net_device *dev_rx)
 
 {
+	struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list);
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(obj, bq, 0, true);
+		bq_xmit_all(bq, 0, true);
 
 	/* Ingress dev_rx will be the same for all xdp_frame's in
 	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -327,6 +313,10 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 		bq->dev_rx = dev_rx;
 
 	bq->q[bq->count++] = xdpf;
+
+	if (!bq->flush_node.prev)
+		list_add(&bq->flush_node, flush_list);
+
 	return 0;
 }
 
@@ -377,17 +367,12 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 {
 	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
 		struct xdp_bulk_queue *bq;
-		unsigned long *bitmap;
-
 		int cpu;
 
 		rcu_read_lock();
 		for_each_online_cpu(cpu) {
-			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
-			__clear_bit(dev->bit, bitmap);
-
 			bq = per_cpu_ptr(dev->bulkq, cpu);
-			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
+			bq_xmit_all(bq, XDP_XMIT_FLUSH, false);
 		}
 		rcu_read_unlock();
 	}
@@ -434,8 +419,10 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 	struct net *net = current->nsproxy->net_ns;
 	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
 	struct bpf_dtab_netdev *dev, *old_dev;
-	u32 i = *(u32 *)key;
 	u32 ifindex = *(u32 *)value;
+	struct xdp_bulk_queue *bq;
+	u32 i = *(u32 *)key;
+	int cpu;
 
 	if (unlikely(map_flags > BPF_EXIST))
 		return -EINVAL;
@@ -458,6 +445,11 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 			return -ENOMEM;
 		}
 
+		for_each_possible_cpu(cpu) {
+			bq = per_cpu_ptr(dev->bulkq, cpu);
+			bq->obj = dev;
+		}
+
 		dev->dev = dev_get_by_index(net, ifindex);
 		if (!dev->dev) {
 			free_percpu(dev->bulkq);
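The heart of the patch is how a queued xdp_bulk_queue now advertises that it needs flushing: instead of setting a bit in a per-CPU bitmap sized by max_entries, bq_enqueue() links the queue's flush_node onto a per-CPU list the first time a frame is added, using a NULL flush_node.prev as the "not yet on the list" test, and bq_xmit_all() unlinks it again with __list_del_clearprev(). The flush and teardown paths then only walk or wait on a list that contains exactly the queues with pending frames, rather than scanning a bitmap proportional to the map size. The standalone userspace sketch below illustrates that pattern; it is not the kernel code from the diff, and its names (bulk_q, enqueue, flush, list_del_clearprev) are invented for the example.

/* Illustrative userspace sketch (not the kernel code above) of the flush-list
 * pattern this patch introduces: a bulk queue is linked onto a flush list on
 * the first enqueued frame, ->prev == NULL doubles as the "not queued" test,
 * and flushing unlinks the node while clearing ->prev again (mirroring what
 * __list_del_clearprev() does in the kernel). */
#include <stdio.h>
#include <stddef.h>

struct list_node {
	struct list_node *next, *prev;
};

static void list_init(struct list_node *head)
{
	head->next = head->prev = head;
}

static void list_add_front(struct list_node *n, struct list_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

/* Unlink and reset ->prev so a later enqueue sees the node as "not queued" */
static void list_del_clearprev(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = NULL;
}

struct bulk_q {
	struct list_node flush_node;	/* membership in the flush list */
	int count;			/* frames pending in this queue */
};

static void enqueue(struct bulk_q *bq, struct list_node *flush_list)
{
	bq->count++;
	/* Link the queue onto the flush list only on the first frame;
	 * a NULL ->prev means it is not on any list yet. */
	if (!bq->flush_node.prev)
		list_add_front(&bq->flush_node, flush_list);
}

static void flush(struct list_node *flush_list)
{
	/* Walk only the queues that actually have pending frames. */
	while (flush_list->next != flush_list) {
		struct bulk_q *bq = (struct bulk_q *)((char *)flush_list->next -
					offsetof(struct bulk_q, flush_node));
		printf("flushing %d frame(s)\n", bq->count);
		bq->count = 0;
		list_del_clearprev(&bq->flush_node);
	}
}

int main(void)
{
	struct list_node flush_list;
	struct bulk_q bq = { .flush_node = { NULL, NULL }, .count = 0 };

	list_init(&flush_list);
	enqueue(&bq, &flush_list);	/* first frame: queue gets linked */
	enqueue(&bq, &flush_list);	/* second frame: already linked */
	flush(&flush_list);		/* prints "flushing 2 frame(s)"; list is empty again */
	return 0;
}

This is also why dev_map_free() in the patch can simply spin on list_empty() per CPU: once the map is disconnected from any program, no new entries are added, and the list draining to empty means every outstanding flush has completed.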
