Diffstat (limited to 'net/core')
 net/core/Makefile          |   1
 net/core/bpf_sk_storage.c  | 804
 net/core/devlink.c         | 242
 net/core/drop_monitor.c    |   3
 net/core/fib_rules.c       |   6
 net/core/filter.c          | 133
 net/core/flow_dissector.c  | 129
 net/core/lwt_bpf.c         |   9
 net/core/lwtunnel.c        |   9
 net/core/neighbour.c       |  27
 net/core/net_namespace.c   |  19
 net/core/netprio_cgroup.c  |   2
 net/core/pktgen.c          |   2
 net/core/rtnetlink.c       | 155
 net/core/skbuff.c          |  71
 net/core/sock.c            |  56
 16 files changed, 1385 insertions(+), 283 deletions(-)
diff --git a/net/core/Makefile b/net/core/Makefile
index f97d6254e564..a104dc8faafc 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_HWBM) += hwbm.o
obj-$(CONFIG_NET_DEVLINK) += devlink.o
obj-$(CONFIG_GRO_CELLS) += gro_cells.o
obj-$(CONFIG_FAILOVER) += failover.o
+obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
new file mode 100644
index 000000000000..a8e9ac71b22d
--- /dev/null
+++ b/net/core/bpf_sk_storage.c
@@ -0,0 +1,804 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/bpf.h>
+#include <net/bpf_sk_storage.h>
+#include <net/sock.h>
+#include <uapi/linux/btf.h>
+
+static atomic_t cache_idx;
+
+struct bucket {
+ struct hlist_head list;
+ raw_spinlock_t lock;
+};
+
+/* The map is not the primary owner of a bpf_sk_storage_elem.
+ * Instead, the sk->sk_bpf_storage is.
+ *
+ * The map (bpf_sk_storage_map) serves two purposes:
+ * 1. Define the size of the "sk local storage". It is
+ * the map's value_size.
+ *
+ * 2. Maintain a list to keep track of all elems such
+ * that they can be cleaned up during the map destruction.
+ *
+ * When a bpf local storage is being looked up for a
+ * particular sk, the "bpf_map" pointer is actually used
+ * as the "key" to search in the list of elems in
+ * sk->sk_bpf_storage.
+ *
+ * Hence, sk->sk_bpf_storage can be thought of as a mini-map
+ * with the "bpf_map" pointer as the searching key.
+ */
+struct bpf_sk_storage_map {
+ struct bpf_map map;
+ /* Lookup elem does not require accessing the map.
+ *
+ * Updating/Deleting requires a bucket lock to
+ * link/unlink the elem from the map. Multiple
+ * buckets are used to reduce contention.
+ */
+ struct bucket *buckets;
+ u32 bucket_log;
+ u16 elem_size;
+ u16 cache_idx;
+};
+
+struct bpf_sk_storage_data {
+ /* smap is used as the searching key when looking up
+ * from sk->sk_bpf_storage.
+ *
+ * Put it in the same cacheline as the data to minimize
+ * the number of cacheline accesses in the cache-hit case.
+ */
+ struct bpf_sk_storage_map __rcu *smap;
+ u8 data[0] __aligned(8);
+};
+
+/* Linked to bpf_sk_storage and bpf_sk_storage_map */
+struct bpf_sk_storage_elem {
+ struct hlist_node map_node; /* Linked to bpf_sk_storage_map */
+ struct hlist_node snode; /* Linked to bpf_sk_storage */
+ struct bpf_sk_storage __rcu *sk_storage;
+ struct rcu_head rcu;
+ /* 8 bytes hole */
+ /* The data is stored in another cacheline to minimize
+ * the number of cacheline accesses during a cache hit.
+ */
+ struct bpf_sk_storage_data sdata ____cacheline_aligned;
+};
+
+#define SELEM(_SDATA) container_of((_SDATA), struct bpf_sk_storage_elem, sdata)
+#define SDATA(_SELEM) (&(_SELEM)->sdata)
+#define BPF_SK_STORAGE_CACHE_SIZE 16
+
+struct bpf_sk_storage {
+ struct bpf_sk_storage_data __rcu *cache[BPF_SK_STORAGE_CACHE_SIZE];
+ struct hlist_head list; /* List of bpf_sk_storage_elem */
+ struct sock *sk; /* The sk that owns the above "list" of
+ * bpf_sk_storage_elem.
+ */
+ struct rcu_head rcu;
+ raw_spinlock_t lock; /* Protect adding/removing from the "list" */
+};
+
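To keep the pointer graph straight while reading the functions below, here is an
editorial sketch of how these structures link together (not part of the patch);
each socket owns one bpf_sk_storage, and each elem is threaded onto both its
socket's list and its owning map's bucket list:

    sk->sk_bpf_storage --> struct bpf_sk_storage
                             |-- cache[16]   fast-path slot, indexed by the
                             |               owning map's cache_idx
                             `-- list --> selem --> selem --> ...
                                            |  (snode)
                                            `-- map_node, hashed into the
                                                owning bpf_sk_storage_map's
                                                bucket list
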
+static struct bucket *select_bucket(struct bpf_sk_storage_map *smap,
+ struct bpf_sk_storage_elem *selem)
+{
+ return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
+}
+
+static int omem_charge(struct sock *sk, unsigned int size)
+{
+ /* same check as in sock_kmalloc() */
+ if (size <= sysctl_optmem_max &&
+ atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+ atomic_add(size, &sk->sk_omem_alloc);
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+static bool selem_linked_to_sk(const struct bpf_sk_storage_elem *selem)
+{
+ return !hlist_unhashed(&selem->snode);
+}
+
+static bool selem_linked_to_map(const struct bpf_sk_storage_elem *selem)
+{
+ return !hlist_unhashed(&selem->map_node);
+}
+
+static struct bpf_sk_storage_elem *selem_alloc(struct bpf_sk_storage_map *smap,
+ struct sock *sk, void *value,
+ bool charge_omem)
+{
+ struct bpf_sk_storage_elem *selem;
+
+ if (charge_omem && omem_charge(sk, smap->elem_size))
+ return NULL;
+
+ selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN);
+ if (selem) {
+ if (value)
+ memcpy(SDATA(selem)->data, value, smap->map.value_size);
+ return selem;
+ }
+
+ if (charge_omem)
+ atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
+
+ return NULL;
+}
+
+/* sk_storage->lock must be held and selem->sk_storage == sk_storage.
+ * The caller must ensure selem->smap is still valid to be
+ * dereferenced for its smap->elem_size and smap->cache_idx.
+ */
+static bool __selem_unlink_sk(struct bpf_sk_storage *sk_storage,
+ struct bpf_sk_storage_elem *selem,
+ bool uncharge_omem)
+{
+ struct bpf_sk_storage_map *smap;
+ bool free_sk_storage;
+ struct sock *sk;
+
+ smap = rcu_dereference(SDATA(selem)->smap);
+ sk = sk_storage->sk;
+
+ /* All uncharging on sk->sk_omem_alloc must be done first.
+ * sk may be freed once the last selem is unlinked from sk_storage.
+ */
+ if (uncharge_omem)
+ atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
+
+ free_sk_storage = hlist_is_singular_node(&selem->snode,
+ &sk_storage->list);
+ if (free_sk_storage) {
+ atomic_sub(sizeof(struct bpf_sk_storage), &sk->sk_omem_alloc);
+ sk_storage->sk = NULL;
+ /* After this RCU_INIT, sk may be freed and cannot be used */
+ RCU_INIT_POINTER(sk->sk_bpf_storage, NULL);
+
+ /* sk_storage is not freed now. sk_storage->lock is
+ * still held and raw_spin_unlock_bh(&sk_storage->lock)
+ * will be done by the caller.
+ *
+ * Although the unlock will be done under
+ * rcu_read_lock(), it is more intuitive to
+ * read if kfree_rcu(sk_storage, rcu) is done
+ * after the raw_spin_unlock_bh(&sk_storage->lock).
+ *
+ * Hence, a "bool free_sk_storage" is returned
+ * to the caller which then calls the kfree_rcu()
+ * after unlock.
+ */
+ }
+ hlist_del_init_rcu(&selem->snode);
+ if (rcu_access_pointer(sk_storage->cache[smap->cache_idx]) ==
+ SDATA(selem))
+ RCU_INIT_POINTER(sk_storage->cache[smap->cache_idx], NULL);
+
+ kfree_rcu(selem, rcu);
+
+ return free_sk_storage;
+}
+
+static void selem_unlink_sk(struct bpf_sk_storage_elem *selem)
+{
+ struct bpf_sk_storage *sk_storage;
+ bool free_sk_storage = false;
+
+ if (unlikely(!selem_linked_to_sk(selem)))
+ /* selem has already been unlinked from sk */
+ return;
+
+ sk_storage = rcu_dereference(selem->sk_storage);
+ raw_spin_lock_bh(&sk_storage->lock);
+ if (likely(selem_linked_to_sk(selem)))
+ free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
+ raw_spin_unlock_bh(&sk_storage->lock);
+
+ if (free_sk_storage)
+ kfree_rcu(sk_storage, rcu);
+}
+
+/* sk_storage->lock must be held and sk_storage->list cannot be empty */
+static void __selem_link_sk(struct bpf_sk_storage *sk_storage,
+ struct bpf_sk_storage_elem *selem)
+{
+ RCU_INIT_POINTER(selem->sk_storage, sk_storage);
+ hlist_add_head(&selem->snode, &sk_storage->list);
+}
+
+static void selem_unlink_map(struct bpf_sk_storage_elem *selem)
+{
+ struct bpf_sk_storage_map *smap;
+ struct bucket *b;
+
+ if (unlikely(!selem_linked_to_map(selem)))
+ /* selem has already been unlinked from smap */
+ return;
+
+ smap = rcu_dereference(SDATA(selem)->smap);
+ b = select_bucket(smap, selem);
+ raw_spin_lock_bh(&b->lock);
+ if (likely(selem_linked_to_map(selem)))
+ hlist_del_init_rcu(&selem->map_node);
+ raw_spin_unlock_bh(&b->lock);
+}
+
+static void selem_link_map(struct bpf_sk_storage_map *smap,
+ struct bpf_sk_storage_elem *selem)
+{
+ struct bucket *b = select_bucket(smap, selem);
+
+ raw_spin_lock_bh(&b->lock);
+ RCU_INIT_POINTER(SDATA(selem)->smap, smap);
+ hlist_add_head_rcu(&selem->map_node, &b->list);
+ raw_spin_unlock_bh(&b->lock);
+}
+
+static void selem_unlink(struct bpf_sk_storage_elem *selem)
+{
+ /* Always unlink from map before unlinking from sk_storage
+ * because selem will be freed once it is unlinked from
+ * the sk_storage.
+ */
+ selem_unlink_map(selem);
+ selem_unlink_sk(selem);
+}
+
+static struct bpf_sk_storage_data *
+__sk_storage_lookup(struct bpf_sk_storage *sk_storage,
+ struct bpf_sk_storage_map *smap,
+ bool cacheit_lockit)
+{
+ struct bpf_sk_storage_data *sdata;
+ struct bpf_sk_storage_elem *selem;
+
+ /* Fast path (cache hit) */
+ sdata = rcu_dereference(sk_storage->cache[smap->cache_idx]);
+ if (sdata && rcu_access_pointer(sdata->smap) == smap)
+ return sdata;
+
+ /* Slow path (cache miss) */
+ hlist_for_each_entry_rcu(selem, &sk_storage->list, snode)
+ if (rcu_access_pointer(SDATA(selem)->smap) == smap)
+ break;
+
+ if (!selem)
+ return NULL;
+
+ sdata = SDATA(selem);
+ if (cacheit_lockit) {
+ /* The spinlock is needed to avoid racing with a
+ * parallel delete. Otherwise, publishing an already
+ * deleted sdata to the cache will become a use-after-free
+ * problem in the next __sk_storage_lookup().
+ */
+ raw_spin_lock_bh(&sk_storage->lock);
+ if (selem_linked_to_sk(selem))
+ rcu_assign_pointer(sk_storage->cache[smap->cache_idx],
+ sdata);
+ raw_spin_unlock_bh(&sk_storage->lock);
+ }
+
+ return sdata;
+}
+
+static struct bpf_sk_storage_data *
+sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
+{
+ struct bpf_sk_storage *sk_storage;
+ struct bpf_sk_storage_map *smap;
+
+ sk_storage = rcu_dereference(sk->sk_bpf_storage);
+ if (!sk_storage)
+ return NULL;
+
+ smap = (struct bpf_sk_storage_map *)map;
+ return __sk_storage_lookup(sk_storage, smap, cacheit_lockit);
+}
+
+static int check_flags(const struct bpf_sk_storage_data *old_sdata,
+ u64 map_flags)
+{
+ if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
+ /* elem already exists */
+ return -EEXIST;
+
+ if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
+ /* elem doesn't exist, cannot update it */
+ return -ENOENT;
+
+ return 0;
+}
+
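These are the usual bpf map update flags, evaluated per socket. A hedged
user-space sketch of what they mean here (libbpf's bpf_map_update_elem();
map_fd, sock_fd and val are placeholders):

    /* The key of a sk_storage map is a socket fd. */
    bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
    /* fails with EEXIST if this socket already has storage in the map */

    bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_EXIST);
    /* fails with ENOENT if this socket has no storage in the map yet */

    bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_ANY);
    /* creates or overwrites unconditionally */
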
+static int sk_storage_alloc(struct sock *sk,
+ struct bpf_sk_storage_map *smap,
+ struct bpf_sk_storage_elem *first_selem)
+{
+ struct bpf_sk_storage *prev_sk_storage, *sk_storage;
+ int err;
+
+ err = omem_charge(sk, sizeof(*sk_storage));
+ if (err)
+ return err;
+
+ sk_storage = kzalloc(sizeof(*sk_storage), GFP_ATOMIC | __GFP_NOWARN);
+ if (!sk_storage) {
+ err = -ENOMEM;
+ goto uncharge;
+ }
+ INIT_HLIST_HEAD(&sk_storage->list);
+ raw_spin_lock_init(&sk_storage->lock);
+ sk_storage->sk = sk;
+
+ __selem_link_sk(sk_storage, first_selem);
+ selem_link_map(smap, first_selem);
+ /* Publish sk_storage to sk. sk->sk_lock cannot be acquired.
+ * Hence, an atomic op is used to set sk->sk_bpf_storage
+ * from NULL to the newly allocated sk_storage ptr.
+ *
+ * From now on, the sk->sk_bpf_storage pointer is protected
+ * by the sk_storage->lock. Hence, when freeing
+ * the sk->sk_bpf_storage, the sk_storage->lock must
+ * be held before setting sk->sk_bpf_storage to NULL.
+ */
+ prev_sk_storage = cmpxchg((struct bpf_sk_storage **)&sk->sk_bpf_storage,
+ NULL, sk_storage);
+ if (unlikely(prev_sk_storage)) {
+ selem_unlink_map(first_selem);
+ err = -EAGAIN;
+ goto uncharge;
+
+ /* Note that even though first_selem was linked to smap's
+ * bucket->list, first_selem can be freed immediately
+ * (instead of kfree_rcu) because
+ * bpf_sk_storage_map_free() does a
+ * synchronize_rcu() before walking the bucket->list.
+ * Hence, no one is accessing selem from the
+ * bucket->list under rcu_read_lock().
+ */
+ }
+
+ return 0;
+
+uncharge:
+ kfree(sk_storage);
+ atomic_sub(sizeof(*sk_storage), &sk->sk_omem_alloc);
+ return err;
+}
+
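The cmpxchg() publish above is a first-writer-wins pattern: whoever installs
the pointer first owns it, and losers unwind. A minimal user-space analogue in
C11 atomics (editorial sketch only; the kernel code instead returns -EAGAIN
and leaves any retry to its caller):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct storage { int data; };

    static _Atomic(struct storage *) slot;

    static struct storage *publish(struct storage *ours)
    {
        struct storage *expected = NULL;

        /* succeeds only if slot is still NULL */
        if (atomic_compare_exchange_strong(&slot, &expected, ours))
            return ours;        /* we won the race */
        free(ours);             /* somebody else published first */
        return expected;        /* use the winner's storage */
    }
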
+/* sk cannot be going away while a new elem is being linked
+ * to sk->sk_bpf_storage (i.e. sk->sk_refcnt cannot be 0).
+ * Otherwise, the new elem would become a leak (and cause other
+ * memory issues during map destruction).
+ */
+static struct bpf_sk_storage_data *sk_storage_update(struct sock *sk,
+ struct bpf_map *map,
+ void *value,
+ u64 map_flags)
+{
+ struct bpf_sk_storage_data *old_sdata = NULL;
+ struct bpf_sk_storage_elem *selem;
+ struct bpf_sk_storage *sk_storage;
+ struct bpf_sk_storage_map *smap;
+ int err;
+
+ /* BPF_EXIST and BPF_NOEXIST cannot both be set */
+ if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
+ /* BPF_F_LOCK can only be used in a value with spin_lock */
+ unlikely((map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
+ return ERR_PTR(-EINVAL);
+
+ smap = (struct bpf_sk_storage_map *)map;
+ sk_storage = rcu_dereference(sk->sk_bpf_storage);
+ if (!sk_storage || hlist_empty(&sk_storage->list)) {
+ /* Very first elem for this sk */
+ err = check_flags(NULL, map_flags);
+ if (err)
+ return ERR_PTR(err);
+
+ selem = selem_alloc(smap, sk, value, true);
+ if (!selem)
+ return ERR_PTR(-ENOMEM);
+
+ err = sk_storage_alloc(sk, smap, selem);
+ if (err) {
+ kfree(selem);
+ atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
+ return ERR_PTR(err);
+ }
+
+ return SDATA(selem);
+ }
+
+ if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
+ /* Try to find an old_sdata and do an inline update,
+ * which avoids taking the sk_storage->lock and
+ * changing the lists.
+ */
+ old_sdata = __sk_storage_lookup(sk_storage, smap, false);
+ err = check_flags(old_sdata, map_flags);
+ if (err)
+ return ERR_PTR(err);
+ if (old_sdata && selem_linked_to_sk(SELEM(old_sdata))) {
+ copy_map_value_locked(map, old_sdata->data,
+ value, false);
+ return old_sdata;
+ }
+ }
+
+ raw_spin_lock_bh(&sk_storage->lock);
+
+ /* Recheck sk_storage->list under sk_storage->lock */
+ if (unlikely(hlist_empty(&sk_storage->list))) {
+ /* A parallel del is happening and sk_storage is going
+ * away. It was checked just above, so this is very
+ * unlikely. Return instead of retrying to keep things
+ * simple.
+ */
+ err = -EAGAIN;
+ goto unlock_err;
+ }
+
+ old_sdata = __sk_storage_lookup(sk_storage, smap, false);
+ err = check_flags(old_sdata, map_flags);
+ if (err)
+ goto unlock_err;
+
+ if (old_sdata && (map_flags & BPF_F_LOCK)) {
+ copy_map_value_locked(map, old_sdata->data, value, false);
+ selem = SELEM(old_sdata);
+ goto unlock;
+ }
+
+ /* sk_storage->lock is held, so the old_sdata (if any) can
+ * be unlinked and uncharged for sure later. Instead of
+ * charging the new selem now and uncharging the old selem
+ * later (which could cause an unnecessary charge failure),
+ * take no charge at all here when old_sdata exists (the
+ * "!old_sdata" check) and skip the uncharge of old_sdata
+ * later in __selem_unlink_sk().
+ */
+ selem = selem_alloc(smap, sk, value, !old_sdata);
+ if (!selem) {
+ err = -ENOMEM;
+ goto unlock_err;
+ }
+
+ /* First, link the new selem to the map */
+ selem_link_map(smap, selem);
+
+ /* Second, link (and publish) the new selem to sk_storage */
+ __selem_link_sk(sk_storage, selem);
+
+ /* Third, remove old selem, SELEM(old_sdata) */
+ if (old_sdata) {
+ selem_unlink_map(SELEM(old_sdata));
+ __selem_unlink_sk(sk_storage, SELEM(old_sdata), false);
+ }
+
+unlock:
+ raw_spin_unlock_bh(&sk_storage->lock);
+ return SDATA(selem);
+
+unlock_err:
+ raw_spin_unlock_bh(&sk_storage->lock);
+ return ERR_PTR(err);
+}
+
+static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
+{
+ struct bpf_sk_storage_data *sdata;
+
+ sdata = sk_storage_lookup(sk, map, false);
+ if (!sdata)
+ return -ENOENT;
+
+ selem_unlink(SELEM(sdata));
+
+ return 0;
+}
+
+/* Called by __sk_destruct() */
+void bpf_sk_storage_free(struct sock *sk)
+{
+ struct bpf_sk_storage_elem *selem;
+ struct bpf_sk_storage *sk_storage;
+ bool free_sk_storage = false;
+ struct hlist_node *n;
+
+ rcu_read_lock();
+ sk_storage = rcu_dereference(sk->sk_bpf_storage);
+ if (!sk_storage) {
+ rcu_read_unlock();
+ return;
+ }
+
+ /* Neither the bpf_prog nor the bpf-map's syscall
+ * could be modifying the sk_storage->list now.
+ * Thus, no elem can be added-to or deleted-from the
+ * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
+ *
+ * The only possible race is with bpf_sk_storage_map_free(),
+ * which also unlinks elems from the sk_storage->list and
+ * the map's bucket->list.
+ */
+ raw_spin_lock_bh(&sk_storage->lock);
+ hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
+ /* Always unlink from map before unlinking from
+ * sk_storage.
+ */
+ selem_unlink_map(selem);
+ free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
+ }
+ raw_spin_unlock_bh(&sk_storage->lock);
+ rcu_read_unlock();
+
+ if (free_sk_storage)
+ kfree_rcu(sk_storage, rcu);
+}
+
+static void bpf_sk_storage_map_free(struct bpf_map *map)
+{
+ struct bpf_sk_storage_elem *selem;
+ struct bpf_sk_storage_map *smap;
+ struct bucket *b;
+ unsigned int i;
+
+ smap = (struct bpf_sk_storage_map *)map;
+
+ synchronize_rcu();
+
+ /* bpf prog and the userspace can no longer access this map
+ * now. No new selem (of this map) can be added
+ * to the sk->sk_bpf_storage or to the map bucket's list.
+ *
+ * The elem of this map can be cleaned up here
+ * or
+ * by bpf_sk_storage_free() during __sk_destruct().
+ */
+ for (i = 0; i < (1U << smap->bucket_log); i++) {
+ b = &smap->buckets[i];
+
+ rcu_read_lock();
+ /* No one is adding to b->list now */
+ while ((selem = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(&b->list)),
+ struct bpf_sk_storage_elem,
+ map_node))) {
+ selem_unlink(selem);
+ cond_resched_rcu();
+ }
+ rcu_read_unlock();
+ }
+
+ /* bpf_sk_storage_free() may still need to access the map.
+ * e.g. bpf_sk_storage_free() has unlinked selem from the map
+ * which then made the above while((selem = ...)) loop
+ * exit immediately.
+ *
+ * However, the bpf_sk_storage_free() still needs to access
+ * the smap->elem_size to do the uncharging in
+ * __selem_unlink_sk().
+ *
+ * Hence, wait another rcu grace period for the
+ * bpf_sk_storage_free() to finish.
+ */
+ synchronize_rcu();
+
+ kvfree(smap->buckets);
+ kfree(map);
+}
+
+static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr)
+{
+ if (attr->map_flags != BPF_F_NO_PREALLOC || attr->max_entries ||
+ attr->key_size != sizeof(int) || !attr->value_size ||
+ /* Enforce BTF for userspace sk dumping */
+ !attr->btf_key_type_id || !attr->btf_value_type_id)
+ return -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (attr->value_size >= KMALLOC_MAX_SIZE -
+ MAX_BPF_STACK - sizeof(struct bpf_sk_storage_elem) ||
+ /* U16_MAX is much more than enough for sk local storage
+ * considering a tcp_sock is ~2k.
+ */
+ attr->value_size > U16_MAX - sizeof(struct bpf_sk_storage_elem))
+ return -E2BIG;
+
+ return 0;
+}
+
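In short: the key must be an int (a socket fd on the syscall side),
max_entries must be 0, BPF_F_NO_PREALLOC is mandatory, and BTF must describe
both key and value. With libbpf's BTF-defined map syntax, a conforming map
declaration looks roughly like this (editorial sketch; the struct and map
names are made up):

    /* BPF program side, built with clang -target bpf */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct sk_stg {
        __u64 pkts;
        __u64 bytes;
    };

    struct {
        __uint(type, BPF_MAP_TYPE_SK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, struct sk_stg);
    } sk_stg_map SEC(".maps");
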
+static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
+{
+ struct bpf_sk_storage_map *smap;
+ unsigned int i;
+ u32 nbuckets;
+ u64 cost;
+
+ smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN);
+ if (!smap)
+ return ERR_PTR(-ENOMEM);
+ bpf_map_init_from_attr(&smap->map, attr);
+
+ smap->bucket_log = ilog2(roundup_pow_of_two(num_possible_cpus()));
+ nbuckets = 1U << smap->bucket_log;
+ smap->buckets = kvcalloc(nbuckets, sizeof(*smap->buckets),
+ GFP_USER | __GFP_NOWARN);
+ if (!smap->buckets) {
+ kfree(smap);
+ return ERR_PTR(-ENOMEM);
+ }
+ cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
+
+ for (i = 0; i < nbuckets; i++) {
+ INIT_HLIST_HEAD(&smap->buckets[i].list);
+ raw_spin_lock_init(&smap->buckets[i].lock);
+ }
+
+ smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
+ smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) %
+ BPF_SK_STORAGE_CACHE_SIZE;
+ smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+ return &smap->map;
+}
+
+static int notsupp_get_next_key(struct bpf_map *map, void *key,
+ void *next_key)
+{
+ return -ENOTSUPP;
+}
+
+static int bpf_sk_storage_map_check_btf(const struct bpf_map *map,
+ const struct btf *btf,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type)
+{
+ u32 int_data;
+
+ if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
+ return -EINVAL;
+
+ int_data = *(u32 *)(key_type + 1);
+ if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_sk_storage_data *sdata;
+ struct socket *sock;
+ int fd, err;
+
+ fd = *(int *)key;
+ sock = sockfd_lookup(fd, &err);
+ if (sock) {
+ sdata = sk_storage_lookup(sock->sk, map, true);
+ sockfd_put(sock);
+ return sdata ? sdata->data : NULL;
+ }
+
+ return ERR_PTR(err);
+}
+
+static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags)
+{
+ struct bpf_sk_storage_data *sdata;
+ struct socket *sock;
+ int fd, err;
+
+ fd = *(int *)key;
+ sock = sockfd_lookup(fd, &err);
+ if (sock) {
+ sdata = sk_storage_update(sock->sk, map, value, map_flags);
+ sockfd_put(sock);
+ return IS_ERR(sdata) ? PTR_ERR(sdata) : 0;
+ }
+
+ return err;
+}
+
+static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
+{
+ struct socket *sock;
+ int fd, err;
+
+ fd = *(int *)key;
+ sock = sockfd_lookup(fd, &err);
+ if (sock) {
+ err = sk_storage_delete(sock->sk, map);
+ sockfd_put(sock);
+ return err;
+ }
+
+ return err;
+}
+
+BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
+ void *, value, u64, flags)
+{
+ struct bpf_sk_storage_data *sdata;
+
+ if (flags > BPF_SK_STORAGE_GET_F_CREATE)
+ return (unsigned long)NULL;
+
+ sdata = sk_storage_lookup(sk, map, true);
+ if (sdata)
+ return (unsigned long)sdata->data;
+
+ if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
+ /* Cannot add a new elem to a sk that is going away.
+ * Otherwise, the new elem may become a leak
+ * (and also other memory issues during map
+ * destruction).
+ */
+ refcount_inc_not_zero(&sk->sk_refcnt)) {
+ sdata = sk_storage_update(sk, map, value, BPF_NOEXIST);
+ /* sk must be a fullsock (guaranteed by verifier),
+ * so sock_gen_put() is unnecessary.
+ */
+ sock_put(sk);
+ return IS_ERR(sdata) ?
+ (unsigned long)NULL : (unsigned long)sdata->data;
+ }
+
+ return (unsigned long)NULL;
+}
+
+BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
+{
+ if (refcount_inc_not_zero(&sk->sk_refcnt)) {
+ int err;
+
+ err = sk_storage_delete(sk, map);
+ sock_put(sk);
+ return err;
+ }
+
+ return -ENOENT;
+}
+
+const struct bpf_map_ops sk_storage_map_ops = {
+ .map_alloc_check = bpf_sk_storage_map_alloc_check,
+ .map_alloc = bpf_sk_storage_map_alloc,
+ .map_free = bpf_sk_storage_map_free,
+ .map_get_next_key = notsupp_get_next_key,
+ .map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
+ .map_update_elem = bpf_fd_sk_storage_update_elem,
+ .map_delete_elem = bpf_fd_sk_storage_delete_elem,
+ .map_check_btf = bpf_sk_storage_map_check_btf,
+};
+
+const struct bpf_func_proto bpf_sk_storage_get_proto = {
+ .func = bpf_sk_storage_get,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_SOCKET,
+ .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
+ .arg4_type = ARG_ANYTHING,
+};
+
+const struct bpf_func_proto bpf_sk_storage_delete_proto = {
+ .func = bpf_sk_storage_delete,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_SOCKET,
+};
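Taken together, the new helpers let a program attach per-socket state the
first time it sees a socket and find it again on every later packet. A hedged
sketch of a cgroup egress program using the map declared earlier (editorial
only; error paths trimmed, names illustrative):

    SEC("cgroup_skb/egress")
    int count_egress(struct __sk_buff *skb)
    {
        struct bpf_sock *sk = skb->sk;
        struct sk_stg *stg;

        if (!sk)
            return 1;
        sk = bpf_sk_fullsock(sk);   /* helper requires a fullsock */
        if (!sk)
            return 1;

        stg = bpf_sk_storage_get(&sk_stg_map, sk, NULL,
                                 BPF_SK_STORAGE_GET_F_CREATE);
        if (!stg)
            return 1;

        __sync_fetch_and_add(&stg->pkts, 1);
        __sync_fetch_and_add(&stg->bytes, skb->len);
        return 1;                   /* 1 == allow in cgroup/skb */
    }
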
diff --git a/net/core/devlink.c b/net/core/devlink.c
index b2715a187a11..d43bc52b8840 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -20,6 +20,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
+#include <linux/refcount.h>
#include <rdma/ib_verbs.h>
#include <net/netlink.h>
#include <net/genetlink.h>
@@ -1047,14 +1048,15 @@ out:
static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
u16 pool_index, u32 size,
- enum devlink_sb_threshold_type threshold_type)
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
{
const struct devlink_ops *ops = devlink->ops;
if (ops->sb_pool_set)
return ops->sb_pool_set(devlink, sb_index, pool_index,
- size, threshold_type);
+ size, threshold_type, extack);
return -EOPNOTSUPP;
}
@@ -1082,7 +1084,8 @@ static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb,
size = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]);
return devlink_sb_pool_set(devlink, devlink_sb->index,
- pool_index, size, threshold_type);
+ pool_index, size, threshold_type,
+ info->extack);
}
static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
@@ -1243,14 +1246,15 @@ out:
static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
- u32 threshold)
+ u32 threshold,
+ struct netlink_ext_ack *extack)
{
const struct devlink_ops *ops = devlink_port->devlink->ops;
if (ops->sb_port_pool_set)
return ops->sb_port_pool_set(devlink_port, sb_index,
- pool_index, threshold);
+ pool_index, threshold, extack);
return -EOPNOTSUPP;
}
@@ -1273,7 +1277,7 @@ static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
return devlink_sb_port_pool_set(devlink_port, devlink_sb->index,
- pool_index, threshold);
+ pool_index, threshold, info->extack);
}
static int
@@ -1472,7 +1476,8 @@ out:
static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
- u16 pool_index, u32 threshold)
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
{
const struct devlink_ops *ops = devlink_port->devlink->ops;
@@ -1480,7 +1485,7 @@ static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
if (ops->sb_tc_pool_bind_set)
return ops->sb_tc_pool_bind_set(devlink_port, sb_index,
tc_index, pool_type,
- pool_index, threshold);
+ pool_index, threshold, extack);
return -EOPNOTSUPP;
}
@@ -1515,7 +1520,7 @@ static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
return devlink_sb_tc_pool_bind_set(devlink_port, devlink_sb->index,
tc_index, pool_type,
- pool_index, threshold);
+ pool_index, threshold, info->extack);
}
static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
@@ -1667,7 +1672,7 @@ int devlink_dpipe_match_put(struct sk_buff *skb,
struct devlink_dpipe_field *field = &header->fields[match->field_id];
struct nlattr *match_attr;
- match_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_MATCH);
+ match_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_MATCH);
if (!match_attr)
return -EMSGSIZE;
@@ -1692,7 +1697,8 @@ static int devlink_dpipe_matches_put(struct devlink_dpipe_table *table,
{
struct nlattr *matches_attr;
- matches_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_TABLE_MATCHES);
+ matches_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_TABLE_MATCHES);
if (!matches_attr)
return -EMSGSIZE;
@@ -1714,7 +1720,7 @@ int devlink_dpipe_action_put(struct sk_buff *skb,
struct devlink_dpipe_field *field = &header->fields[action->field_id];
struct nlattr *action_attr;
- action_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_ACTION);
+ action_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ACTION);
if (!action_attr)
return -EMSGSIZE;
@@ -1739,7 +1745,8 @@ static int devlink_dpipe_actions_put(struct devlink_dpipe_table *table,
{
struct nlattr *actions_attr;
- actions_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_TABLE_ACTIONS);
+ actions_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_TABLE_ACTIONS);
if (!actions_attr)
return -EMSGSIZE;
@@ -1761,7 +1768,7 @@ static int devlink_dpipe_table_put(struct sk_buff *skb,
u64 table_size;
table_size = table->table_ops->size_get(table->priv);
- table_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_TABLE);
+ table_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLE);
if (!table_attr)
return -EMSGSIZE;
@@ -1841,7 +1848,7 @@ start_again:
if (devlink_nl_put_handle(skb, devlink))
goto nla_put_failure;
- tables_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_TABLES);
+ tables_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLES);
if (!tables_attr)
goto nla_put_failure;
@@ -1942,8 +1949,8 @@ static int devlink_dpipe_action_values_put(struct sk_buff *skb,
int err;
for (i = 0; i < values_count; i++) {
- action_attr = nla_nest_start(skb,
- DEVLINK_ATTR_DPIPE_ACTION_VALUE);
+ action_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_ACTION_VALUE);
if (!action_attr)
return -EMSGSIZE;
err = devlink_dpipe_action_value_put(skb, &values[i]);
@@ -1979,8 +1986,8 @@ static int devlink_dpipe_match_values_put(struct sk_buff *skb,
int err;
for (i = 0; i < values_count; i++) {
- match_attr = nla_nest_start(skb,
- DEVLINK_ATTR_DPIPE_MATCH_VALUE);
+ match_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_MATCH_VALUE);
if (!match_attr)
return -EMSGSIZE;
err = devlink_dpipe_match_value_put(skb, &values[i]);
@@ -2001,7 +2008,7 @@ static int devlink_dpipe_entry_put(struct sk_buff *skb,
struct nlattr *entry_attr, *matches_attr, *actions_attr;
int err;
- entry_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_ENTRY);
+ entry_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ENTRY);
if (!entry_attr)
return -EMSGSIZE;
@@ -2013,8 +2020,8 @@ static int devlink_dpipe_entry_put(struct sk_buff *skb,
entry->counter, DEVLINK_ATTR_PAD))
goto nla_put_failure;
- matches_attr = nla_nest_start(skb,
- DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES);
+ matches_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES);
if (!matches_attr)
goto nla_put_failure;
@@ -2026,8 +2033,8 @@ static int devlink_dpipe_entry_put(struct sk_buff *skb,
}
nla_nest_end(skb, matches_attr);
- actions_attr = nla_nest_start(skb,
- DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES);
+ actions_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES);
if (!actions_attr)
goto nla_put_failure;
@@ -2084,8 +2091,8 @@ int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx)
devlink = dump_ctx->info->user_ptr[0];
if (devlink_nl_put_handle(dump_ctx->skb, devlink))
goto nla_put_failure;
- dump_ctx->nest = nla_nest_start(dump_ctx->skb,
- DEVLINK_ATTR_DPIPE_ENTRIES);
+ dump_ctx->nest = nla_nest_start_noflag(dump_ctx->skb,
+ DEVLINK_ATTR_DPIPE_ENTRIES);
if (!dump_ctx->nest)
goto nla_put_failure;
return 0;
@@ -2195,7 +2202,8 @@ static int devlink_dpipe_fields_put(struct sk_buff *skb,
for (i = 0; i < header->fields_count; i++) {
field = &header->fields[i];
- field_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_FIELD);
+ field_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_FIELD);
if (!field_attr)
return -EMSGSIZE;
if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_FIELD_NAME, field->name) ||
@@ -2218,7 +2226,7 @@ static int devlink_dpipe_header_put(struct sk_buff *skb,
struct nlattr *fields_attr, *header_attr;
int err;
- header_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_HEADER);
+ header_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADER);
if (!header_attr)
return -EMSGSIZE;
@@ -2227,7 +2235,8 @@ static int devlink_dpipe_header_put(struct sk_buff *skb,
nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
goto nla_put_failure;
- fields_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_HEADER_FIELDS);
+ fields_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_HEADER_FIELDS);
if (!fields_attr)
goto nla_put_failure;
@@ -2274,7 +2283,7 @@ start_again:
if (devlink_nl_put_handle(skb, devlink))
goto nla_put_failure;
- headers_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_HEADERS);
+ headers_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADERS);
if (!headers_attr)
goto nla_put_failure;
@@ -2498,7 +2507,7 @@ static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
struct nlattr *child_resource_attr;
struct nlattr *resource_attr;
- resource_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE);
+ resource_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_RESOURCE);
if (!resource_attr)
return -EMSGSIZE;
@@ -2522,7 +2531,8 @@ static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
resource->size_valid))
goto nla_put_failure;
- child_resource_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE_LIST);
+ child_resource_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_RESOURCE_LIST);
if (!child_resource_attr)
goto nla_put_failure;
@@ -2573,7 +2583,8 @@ start_again:
if (devlink_nl_put_handle(skb, devlink))
goto nla_put_failure;
- resources_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE_LIST);
+ resources_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_RESOURCE_LIST);
if (!resources_attr)
goto nla_put_failure;
@@ -2827,7 +2838,8 @@ devlink_nl_param_value_fill_one(struct sk_buff *msg,
{
struct nlattr *param_value_attr;
- param_value_attr = nla_nest_start(msg, DEVLINK_ATTR_PARAM_VALUE);
+ param_value_attr = nla_nest_start_noflag(msg,
+ DEVLINK_ATTR_PARAM_VALUE);
if (!param_value_attr)
goto nla_put_failure;
@@ -2918,7 +2930,7 @@ static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, port_index))
goto genlmsg_cancel;
- param_attr = nla_nest_start(msg, DEVLINK_ATTR_PARAM);
+ param_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PARAM);
if (!param_attr)
goto genlmsg_cancel;
if (nla_put_string(msg, DEVLINK_ATTR_PARAM_NAME, param->name))
@@ -2932,7 +2944,8 @@ static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_TYPE, nla_type))
goto param_nest_cancel;
- param_values_list = nla_nest_start(msg, DEVLINK_ATTR_PARAM_VALUES_LIST);
+ param_values_list = nla_nest_start_noflag(msg,
+ DEVLINK_ATTR_PARAM_VALUES_LIST);
if (!param_values_list)
goto param_nest_cancel;
@@ -3332,7 +3345,7 @@ static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg,
struct nlattr *snap_attr;
int err;
- snap_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_SNAPSHOT);
+ snap_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_REGION_SNAPSHOT);
if (!snap_attr)
return -EINVAL;
@@ -3356,7 +3369,8 @@ static int devlink_nl_region_snapshots_id_put(struct sk_buff *msg,
struct nlattr *snapshots_attr;
int err;
- snapshots_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_SNAPSHOTS);
+ snapshots_attr = nla_nest_start_noflag(msg,
+ DEVLINK_ATTR_REGION_SNAPSHOTS);
if (!snapshots_attr)
return -EINVAL;
@@ -3572,7 +3586,7 @@ static int devlink_nl_cmd_region_read_chunk_fill(struct sk_buff *msg,
struct nlattr *chunk_attr;
int err;
- chunk_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_CHUNK);
+ chunk_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_REGION_CHUNK);
if (!chunk_attr)
return -EINVAL;
@@ -3661,9 +3675,10 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
if (!attrs)
return -ENOMEM;
- err = nlmsg_parse(cb->nlh, GENL_HDRLEN + devlink_nl_family.hdrsize,
- attrs, DEVLINK_ATTR_MAX, devlink_nl_family.policy,
- cb->extack);
+ err = nlmsg_parse_deprecated(cb->nlh,
+ GENL_HDRLEN + devlink_nl_family.hdrsize,
+ attrs, DEVLINK_ATTR_MAX,
+ devlink_nl_family.policy, cb->extack);
if (err)
goto out_free;
@@ -3705,7 +3720,7 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
if (err)
goto nla_put_failure;
- chunks_attr = nla_nest_start(skb, DEVLINK_ATTR_REGION_CHUNKS);
+ chunks_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_REGION_CHUNKS);
if (!chunks_attr) {
err = -EMSGSIZE;
goto nla_put_failure;
@@ -3781,7 +3796,7 @@ static int devlink_info_version_put(struct devlink_info_req *req, int attr,
struct nlattr *nest;
int err;
- nest = nla_nest_start(req->msg, attr);
+ nest = nla_nest_start_noflag(req->msg, attr);
if (!nest)
return -EMSGSIZE;
@@ -4309,7 +4324,7 @@ devlink_fmsg_prepare_skb(struct devlink_fmsg *fmsg, struct sk_buff *skb,
int i = 0;
int err;
- fmsg_nlattr = nla_nest_start(skb, DEVLINK_ATTR_FMSG);
+ fmsg_nlattr = nla_nest_start_noflag(skb, DEVLINK_ATTR_FMSG);
if (!fmsg_nlattr)
return -EMSGSIZE;
@@ -4418,6 +4433,7 @@ struct devlink_health_reporter {
u64 error_count;
u64 recovery_count;
u64 last_recovery_ts;
+ refcount_t refcount;
};
void *
@@ -4433,6 +4449,7 @@ devlink_health_reporter_find_by_name(struct devlink *devlink,
{
struct devlink_health_reporter *reporter;
+ lockdep_assert_held(&devlink->reporters_lock);
list_for_each_entry(reporter, &devlink->reporter_list, list)
if (!strcmp(reporter->ops->name, reporter_name))
return reporter;
@@ -4456,7 +4473,7 @@ devlink_health_reporter_create(struct devlink *devlink,
{
struct devlink_health_reporter *reporter;
- mutex_lock(&devlink->lock);
+ mutex_lock(&devlink->reporters_lock);
if (devlink_health_reporter_find_by_name(devlink, ops->name)) {
reporter = ERR_PTR(-EEXIST);
goto unlock;
@@ -4480,9 +4497,10 @@ devlink_health_reporter_create(struct devlink *devlink,
reporter->graceful_period = graceful_period;
reporter->auto_recover = auto_recover;
mutex_init(&reporter->dump_lock);
+ refcount_set(&reporter->refcount, 1);
list_add_tail(&reporter->list, &devlink->reporter_list);
unlock:
- mutex_unlock(&devlink->lock);
+ mutex_unlock(&devlink->reporters_lock);
return reporter;
}
EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
@@ -4495,10 +4513,12 @@ EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
void
devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
{
- mutex_lock(&reporter->devlink->lock);
+ mutex_lock(&reporter->devlink->reporters_lock);
list_del(&reporter->list);
+ mutex_unlock(&reporter->devlink->reporters_lock);
+ while (refcount_read(&reporter->refcount) > 1)
+ msleep(100);
mutex_destroy(&reporter->dump_lock);
- mutex_unlock(&reporter->devlink->lock);
if (reporter->dump_fmsg)
devlink_fmsg_free(reporter->dump_fmsg);
kfree(reporter);
@@ -4634,6 +4654,7 @@ static struct devlink_health_reporter *
devlink_health_reporter_get_from_info(struct devlink *devlink,
struct genl_info *info)
{
+ struct devlink_health_reporter *reporter;
char *reporter_name;
if (!info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME])
@@ -4641,7 +4662,18 @@ devlink_health_reporter_get_from_info(struct devlink *devlink,
reporter_name =
nla_data(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]);
- return devlink_health_reporter_find_by_name(devlink, reporter_name);
+ mutex_lock(&devlink->reporters_lock);
+ reporter = devlink_health_reporter_find_by_name(devlink, reporter_name);
+ if (reporter)
+ refcount_inc(&reporter->refcount);
+ mutex_unlock(&devlink->reporters_lock);
+ return reporter;
+}
+
+static void
+devlink_health_reporter_put(struct devlink_health_reporter *reporter)
+{
+ refcount_dec(&reporter->refcount);
}
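With the refcount in place, every doit handler that resolves a reporter must
pair the lookup with a put on every exit path, because
devlink_health_reporter_destroy() now spins until the count drops back to 1.
The handler shape this imposes (editorial sketch; do_the_command() is a
placeholder for the real work):

    reporter = devlink_health_reporter_get_from_info(devlink, info);
    if (!reporter)
        return -EINVAL;

    err = do_the_command(reporter);

    devlink_health_reporter_put(reporter);
    return err;
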
static int
@@ -4661,7 +4693,8 @@ devlink_nl_health_reporter_fill(struct sk_buff *msg,
if (devlink_nl_put_handle(msg, devlink))
goto genlmsg_cancel;
- reporter_attr = nla_nest_start(msg, DEVLINK_ATTR_HEALTH_REPORTER);
+ reporter_attr = nla_nest_start_noflag(msg,
+ DEVLINK_ATTR_HEALTH_REPORTER);
if (!reporter_attr)
goto genlmsg_cancel;
if (nla_put_string(msg, DEVLINK_ATTR_HEALTH_REPORTER_NAME,
@@ -4715,8 +4748,10 @@ static int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
return -EINVAL;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
+ if (!msg) {
+ err = -ENOMEM;
+ goto out;
+ }
err = devlink_nl_health_reporter_fill(msg, devlink, reporter,
DEVLINK_CMD_HEALTH_REPORTER_GET,
@@ -4724,10 +4759,13 @@ static int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
0);
if (err) {
nlmsg_free(msg);
- return err;
+ goto out;
}
- return genlmsg_reply(msg, info);
+ err = genlmsg_reply(msg, info);
+out:
+ devlink_health_reporter_put(reporter);
+ return err;
}
static int
@@ -4744,7 +4782,7 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
continue;
- mutex_lock(&devlink->lock);
+ mutex_lock(&devlink->reporters_lock);
list_for_each_entry(reporter, &devlink->reporter_list,
list) {
if (idx < start) {
@@ -4758,12 +4796,12 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
if (err) {
- mutex_unlock(&devlink->lock);
+ mutex_unlock(&devlink->reporters_lock);
goto out;
}
idx++;
}
- mutex_unlock(&devlink->lock);
+ mutex_unlock(&devlink->reporters_lock);
}
out:
mutex_unlock(&devlink_mutex);
@@ -4778,6 +4816,7 @@ devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_health_reporter *reporter;
+ int err;
reporter = devlink_health_reporter_get_from_info(devlink, info);
if (!reporter)
@@ -4785,8 +4824,10 @@ devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
if (!reporter->ops->recover &&
(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] ||
- info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]))
- return -EOPNOTSUPP;
+ info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD])
reporter->graceful_period =
@@ -4796,7 +4837,11 @@ devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
reporter->auto_recover =
nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]);
+ devlink_health_reporter_put(reporter);
return 0;
+out:
+ devlink_health_reporter_put(reporter);
+ return err;
}
static int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
@@ -4804,12 +4849,16 @@ static int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_health_reporter *reporter;
+ int err;
reporter = devlink_health_reporter_get_from_info(devlink, info);
if (!reporter)
return -EINVAL;
- return devlink_health_reporter_recover(reporter, NULL);
+ err = devlink_health_reporter_recover(reporter, NULL);
+
+ devlink_health_reporter_put(reporter);
+ return err;
}
static int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
@@ -4824,12 +4873,16 @@ static int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
if (!reporter)
return -EINVAL;
- if (!reporter->ops->diagnose)
+ if (!reporter->ops->diagnose) {
+ devlink_health_reporter_put(reporter);
return -EOPNOTSUPP;
+ }
fmsg = devlink_fmsg_alloc();
- if (!fmsg)
+ if (!fmsg) {
+ devlink_health_reporter_put(reporter);
return -ENOMEM;
+ }
err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
@@ -4848,6 +4901,7 @@ static int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
out:
devlink_fmsg_free(fmsg);
+ devlink_health_reporter_put(reporter);
return err;
}
@@ -4862,8 +4916,10 @@ static int devlink_nl_cmd_health_reporter_dump_get_doit(struct sk_buff *skb,
if (!reporter)
return -EINVAL;
- if (!reporter->ops->dump)
+ if (!reporter->ops->dump) {
+ devlink_health_reporter_put(reporter);
return -EOPNOTSUPP;
+ }
mutex_lock(&reporter->dump_lock);
err = devlink_health_do_dump(reporter, NULL);
@@ -4875,6 +4931,7 @@ static int devlink_nl_cmd_health_reporter_dump_get_doit(struct sk_buff *skb,
out:
mutex_unlock(&reporter->dump_lock);
+ devlink_health_reporter_put(reporter);
return err;
}
@@ -4889,12 +4946,15 @@ devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
if (!reporter)
return -EINVAL;
- if (!reporter->ops->dump)
+ if (!reporter->ops->dump) {
+ devlink_health_reporter_put(reporter);
return -EOPNOTSUPP;
+ }
mutex_lock(&reporter->dump_lock);
devlink_health_dump_clear(reporter);
mutex_unlock(&reporter->dump_lock);
+ devlink_health_reporter_put(reporter);
return 0;
}
@@ -4933,6 +4993,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
static const struct genl_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_get_doit,
.dumpit = devlink_nl_cmd_get_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
@@ -4940,6 +5001,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_PORT_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_port_get_doit,
.dumpit = devlink_nl_cmd_port_get_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
@@ -4947,12 +5009,14 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_PORT_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_port_set_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
},
{
.cmd = DEVLINK_CMD_PORT_SPLIT,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_port_split_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
@@ -4960,6 +5024,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_PORT_UNSPLIT,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_port_unsplit_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
@@ -4967,6 +5032,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_SB_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_get_doit,
.dumpit = devlink_nl_cmd_sb_get_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
@@ -4975,6 +5041,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_SB_POOL_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_pool_get_doit,
.dumpit = devlink_nl_cmd_sb_pool_get_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
@@ -4983,6 +5050,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_SB_POOL_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_pool_set_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
@@ -4990,6 +5058,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_port_pool_get_doit,
.dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
@@ -4998,6 +5067,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_port_pool_set_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
@@ -5005,6 +5075,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit,
.dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
@@ -5013,6 +5084,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
@@ -5020,6 +5092,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_occ_snapshot_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
@@ -5027,6 +5100,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_occ_max_clear_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
@@ -5034,12 +5108,14 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_ESWITCH_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_eswitch_get_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_ESWITCH_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_eswitch_set_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
@@ -5047,42 +5123,49 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_dpipe_table_get,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_dpipe_entries_get,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_DPIPE_HEADERS_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_dpipe_headers_get,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_dpipe_table_counters_set,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_RESOURCE_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_resource_set,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_RESOURCE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_resource_dump,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_RELOAD,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_reload,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
@@ -5090,6 +5173,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_PARAM_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_param_get_doit,
.dumpit = devlink_nl_cmd_param_get_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
@@ -5097,12 +5181,14 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_PARAM_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_param_set_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_PORT_PARAM_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_port_param_get_doit,
.dumpit = devlink_nl_cmd_port_param_get_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
@@ -5110,12 +5196,14 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_PORT_PARAM_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_port_param_set_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
},
{
.cmd = DEVLINK_CMD_REGION_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_region_get_doit,
.dumpit = devlink_nl_cmd_region_get_dumpit,
.flags = GENL_ADMIN_PERM,
@@ -5123,18 +5211,21 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_REGION_DEL,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_region_del,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_REGION_READ,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.dumpit = devlink_nl_cmd_region_read_dumpit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_INFO_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_info_get_doit,
.dumpit = devlink_nl_cmd_info_get_dumpit,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
@@ -5142,31 +5233,40 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_get_doit,
.dumpit = devlink_nl_cmd_health_reporter_get_dumpit,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+ DEVLINK_NL_FLAG_NO_LOCK,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_set_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+ DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_recover_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+ DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_diagnose_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+ DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_dump_get_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
@@ -5174,6 +5274,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_dump_clear_doit,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
@@ -5181,6 +5282,7 @@ static const struct genl_ops devlink_nl_ops[] = {
},
{
.cmd = DEVLINK_CMD_FLASH_UPDATE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_flash_update,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
@@ -5231,6 +5333,7 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
INIT_LIST_HEAD(&devlink->region_list);
INIT_LIST_HEAD(&devlink->reporter_list);
mutex_init(&devlink->lock);
+ mutex_init(&devlink->reporters_lock);
return devlink;
}
EXPORT_SYMBOL_GPL(devlink_alloc);
@@ -5273,6 +5376,7 @@ EXPORT_SYMBOL_GPL(devlink_unregister);
*/
void devlink_free(struct devlink *devlink)
{
+ mutex_destroy(&devlink->reporters_lock);
mutex_destroy(&devlink->lock);
WARN_ON(!list_empty(&devlink->reporter_list));
WARN_ON(!list_empty(&devlink->region_list));
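The .validate additions sprinkled through the ops table above opt every
pre-existing devlink command out of the strict netlink attribute validation
that generic netlink now applies by default; a command added in the future
would leave .validate at 0 and get full strict checking. The two cases side
by side (editorial sketch; the second op is hypothetical):

    {
        .cmd      = DEVLINK_CMD_GET,   /* legacy: permissive parsing */
        .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
        .doit     = devlink_nl_cmd_get_doit,
    },
    {
        .cmd  = DEVLINK_CMD_SOME_NEW_CMD,   /* hypothetical new command */
        .doit = devlink_nl_cmd_new_doit,    /* .validate == 0 => strict */
    },
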
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index c7785efeea57..d4ce0542acfa 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -355,14 +355,17 @@ out:
static const struct genl_ops dropmon_ops[] = {
{
.cmd = NET_DM_CMD_CONFIG,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = net_dm_cmd_config,
},
{
.cmd = NET_DM_CMD_START,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = net_dm_cmd_trace,
},
{
.cmd = NET_DM_CMD_STOP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = net_dm_cmd_trace,
},
};
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index ffbb827723a2..18f8dd8329ed 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -746,7 +746,8 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout;
}
- err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
+ ops->policy, extack);
if (err < 0) {
NL_SET_ERR_MSG(extack, "Error parsing msg");
goto errout;
@@ -853,7 +854,8 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout;
}
- err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
+ ops->policy, extack);
if (err < 0) {
NL_SET_ERR_MSG(extack, "Error parsing msg");
goto errout;
diff --git a/net/core/filter.c b/net/core/filter.c
index 1644a16afcec..55bfc941d17a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -75,6 +75,7 @@
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
+#include <net/bpf_sk_storage.h>
/**
* sk_filter_trim_cap - run a packet through a socket filter
@@ -1730,6 +1731,40 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
.arg4_type = ARG_CONST_SIZE,
};
+BPF_CALL_4(bpf_flow_dissector_load_bytes,
+ const struct bpf_flow_dissector *, ctx, u32, offset,
+ void *, to, u32, len)
+{
+ void *ptr;
+
+ if (unlikely(offset > 0xffff))
+ goto err_clear;
+
+ if (unlikely(!ctx->skb))
+ goto err_clear;
+
+ ptr = skb_header_pointer(ctx->skb, offset, len, to);
+ if (unlikely(!ptr))
+ goto err_clear;
+ if (ptr != to)
+ memcpy(to, ptr, len);
+
+ return 0;
+err_clear:
+ memset(to, 0, len);
+ return -EFAULT;
+}
+
+static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
+ .func = bpf_flow_dissector_load_bytes,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg4_type = ARG_CONST_SIZE,
+};
+
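From the program side nothing changes: a flow dissector still calls bpf_skb_load_bytes() on its context, and for this program type the verifier now wires that call to the helper above, which fails cleanly when no skb is present. A hedged BPF-C sketch (section name per the usual libbpf convention):

	SEC("flow_dissector")
	int dissect(struct __sk_buff *skb)
	{
		struct bpf_flow_keys *keys = skb->flow_keys;
		struct iphdr iph;

		/* reads via ctx->skb; returns -EFAULT in the no-skb case */
		if (bpf_skb_load_bytes(skb, keys->nhoff, &iph, sizeof(iph)) < 0)
			return BPF_DROP;

		keys->addr_proto = ETH_P_IP;
		keys->ipv4_src	 = iph.saddr;
		keys->ipv4_dst	 = iph.daddr;
		return BPF_OK;
	}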
BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
u32, offset, void *, to, u32, len, u32, start_header)
{
@@ -3047,6 +3082,14 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
skb_set_transport_header(skb, mac_len + nh_len);
}
+
+ /* Match skb->protocol to new outer l3 protocol */
+ if (skb->protocol == htons(ETH_P_IP) &&
+ flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
+ skb->protocol = htons(ETH_P_IPV6);
+ else if (skb->protocol == htons(ETH_P_IPV6) &&
+ flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
+ skb->protocol = htons(ETH_P_IP);
}
if (skb_is_gso(skb)) {
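With the hunk above, a TC program that grows room for an outer header of the other address family sees skb->protocol updated to match. A hedged caller-side sketch:

	/* illustrative: push an outer IPv6 header around an IPv4 packet */
	if (bpf_skb_adjust_room(skb, sizeof(struct ipv6hdr), BPF_ADJ_ROOM_MAC,
				BPF_F_ADJ_ROOM_ENCAP_L3_IPV6))
		return TC_ACT_SHOT;
	/* skb->protocol is now htons(ETH_P_IPV6) */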
@@ -3069,6 +3112,9 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
{
int ret;
+ if (flags & ~BPF_F_ADJ_ROOM_FIXED_GSO)
+ return -EINVAL;
+
if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
/* udp gso_size delineates datagrams, only allow if fixed */
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
@@ -4434,8 +4480,7 @@ BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
return -EINVAL;
- if (val)
- tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
+ tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
}
@@ -4678,8 +4723,8 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
{
struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
+ struct fib6_result res = {};
struct neighbour *neigh;
- struct fib6_result res;
struct net_device *dev;
struct inet6_dev *idev;
struct flowi6 fl6;
@@ -5859,6 +5904,9 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
}
}
+const struct bpf_func_proto bpf_sk_storage_get_proto __weak;
+const struct bpf_func_proto bpf_sk_storage_delete_proto __weak;
+
static const struct bpf_func_proto *
cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -5867,6 +5915,10 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_local_storage_proto;
case BPF_FUNC_sk_fullsock:
return &bpf_sk_fullsock_proto;
+ case BPF_FUNC_sk_storage_get:
+ return &bpf_sk_storage_get_proto;
+ case BPF_FUNC_sk_storage_delete:
+ return &bpf_sk_storage_delete_proto;
#ifdef CONFIG_INET
case BPF_FUNC_tcp_sock:
return &bpf_tcp_sock_proto;
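This exposes the new socket-local storage helpers to cgroup skb programs. A hedged BPF-C sketch of per-socket byte accounting (map and section names are illustrative):

	struct {
		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
		__uint(map_flags, BPF_F_NO_PREALLOC);	/* required for sk storage */
		__type(key, int);
		__type(value, __u64);
	} sk_bytes SEC(".maps");

	SEC("cgroup_skb/egress")
	int count_bytes(struct __sk_buff *skb)
	{
		__u64 *val;

		if (!skb->sk)
			return 1;
		val = bpf_sk_storage_get(&sk_bytes, skb->sk, NULL,
					 BPF_SK_STORAGE_GET_F_CREATE);
		if (val)
			*val += skb->len;
		return 1;
	}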
@@ -5948,6 +6000,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skb_fib_lookup_proto;
case BPF_FUNC_sk_fullsock:
return &bpf_sk_fullsock_proto;
+ case BPF_FUNC_sk_storage_get:
+ return &bpf_sk_storage_get_proto;
+ case BPF_FUNC_sk_storage_delete:
+ return &bpf_sk_storage_delete_proto;
#ifdef CONFIG_XFRM
case BPF_FUNC_skb_get_xfrm_state:
return &bpf_skb_get_xfrm_state_proto;
@@ -6119,7 +6175,7 @@ flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_skb_load_bytes:
- return &bpf_skb_load_bytes_proto;
+ return &bpf_flow_dissector_load_bytes_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -6246,9 +6302,7 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
return false;
break;
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
- if (size != sizeof(__u64))
- return false;
- break;
+ return false;
case bpf_ctx_range(struct __sk_buff, tstamp):
if (size != sizeof(__u64))
return false;
@@ -6283,7 +6337,6 @@ static bool sk_filter_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range(struct __sk_buff, data_end):
- case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
case bpf_ctx_range(struct __sk_buff, tstamp):
case bpf_ctx_range(struct __sk_buff, wire_len):
@@ -6310,7 +6363,6 @@ static bool cg_skb_is_valid_access(int off, int size,
switch (off) {
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range(struct __sk_buff, data_meta):
- case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
case bpf_ctx_range(struct __sk_buff, data):
@@ -6356,7 +6408,6 @@ static bool lwt_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
case bpf_ctx_range(struct __sk_buff, data_meta):
- case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range(struct __sk_buff, tstamp):
case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
@@ -6599,7 +6650,6 @@ static bool tc_cls_act_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data_end):
info->reg_type = PTR_TO_PACKET_END;
break;
- case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
return false;
}
@@ -6801,7 +6851,6 @@ static bool sk_skb_is_valid_access(int off, int size,
switch (off) {
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range(struct __sk_buff, data_meta):
- case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range(struct __sk_buff, tstamp):
case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
@@ -6875,24 +6924,65 @@ static bool flow_dissector_is_valid_access(int off, int size,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
+ const int size_default = sizeof(__u32);
+
+ if (off < 0 || off >= sizeof(struct __sk_buff))
+ return false;
+
if (type == BPF_WRITE)
return false;
switch (off) {
case bpf_ctx_range(struct __sk_buff, data):
+ if (size != size_default)
+ return false;
info->reg_type = PTR_TO_PACKET;
- break;
+ return true;
case bpf_ctx_range(struct __sk_buff, data_end):
+ if (size != size_default)
+ return false;
info->reg_type = PTR_TO_PACKET_END;
- break;
+ return true;
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+ if (size != sizeof(__u64))
+ return false;
info->reg_type = PTR_TO_FLOW_KEYS;
- break;
+ return true;
default:
return false;
}
+}
- return bpf_skb_is_valid_access(off, size, type, prog, info);
+static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ switch (si->off) {
+ case offsetof(struct __sk_buff, data):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_flow_dissector, data));
+ break;
+
+ case offsetof(struct __sk_buff, data_end):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_flow_dissector, data_end));
+ break;
+
+ case offsetof(struct __sk_buff, flow_keys):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_flow_dissector, flow_keys));
+ break;
+ }
+
+ return insn - insn_buf;
}
static u32 bpf_convert_ctx_access(enum bpf_access_type type,
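The dedicated converter means dissector programs keep the familiar __sk_buff view while at run time the context is really a struct bpf_flow_dissector. Roughly, a source-level access such as:

	void *data     = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;

is rewritten by the verifier into loads from bpf_flow_dissector's data and data_end members rather than from the sk_buff control block.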
@@ -7199,15 +7289,6 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
skc_num, 2, target_size));
break;
- case offsetof(struct __sk_buff, flow_keys):
- off = si->off;
- off -= offsetof(struct __sk_buff, flow_keys);
- off += offsetof(struct sk_buff, cb);
- off += offsetof(struct qdisc_skb_cb, flow_keys);
- *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
- si->src_reg, off);
- break;
-
case offsetof(struct __sk_buff, tstamp):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8);
@@ -8212,7 +8293,7 @@ const struct bpf_prog_ops sk_msg_prog_ops = {
const struct bpf_verifier_ops flow_dissector_verifier_ops = {
.get_func_proto = flow_dissector_func_proto,
.is_valid_access = flow_dissector_is_valid_access,
- .convert_ctx_access = bpf_convert_ctx_access,
+ .convert_ctx_access = flow_dissector_convert_ctx_access,
};
const struct bpf_prog_ops flow_dissector_prog_ops = {
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 795449713ba4..9ca784c592ac 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -65,6 +65,45 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
}
EXPORT_SYMBOL(skb_flow_dissector_init);
+int skb_flow_dissector_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
+ u32 prog_id, prog_cnt = 0, flags = 0;
+ struct bpf_prog *attached;
+ struct net *net;
+
+ if (attr->query.query_flags)
+ return -EINVAL;
+
+ net = get_net_ns_by_fd(attr->query.target_fd);
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+
+ rcu_read_lock();
+ attached = rcu_dereference(net->flow_dissector_prog);
+ if (attached) {
+ prog_cnt = 1;
+ prog_id = attached->aux->id;
+ }
+ rcu_read_unlock();
+
+ put_net(net);
+
+ if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
+ return -EFAULT;
+ if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
+ return -EFAULT;
+
+ if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
+ return 0;
+
+ if (copy_to_user(prog_ids, &prog_id, sizeof(u32)))
+ return -EFAULT;
+
+ return 0;
+}
+
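Userspace reaches this via BPF_PROG_QUERY with a network-namespace fd as the target. A hedged sketch using libbpf's wrapper:

	__u32 prog_ids[1] = {};
	__u32 prog_cnt = 1;
	int netns_fd = open("/proc/self/ns/net", O_RDONLY);

	/* on success, prog_ids[0] holds the attached dissector's id (if any) */
	if (!bpf_prog_query(netns_fd, BPF_FLOW_DISSECTOR, 0, NULL,
			    prog_ids, &prog_cnt) && prog_cnt)
		printf("flow dissector prog id: %u\n", prog_ids[0]);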
int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
struct bpf_prog *prog)
{
@@ -683,50 +722,30 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
}
}
-bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
- const struct sk_buff *skb,
- struct flow_dissector *flow_dissector,
- struct bpf_flow_keys *flow_keys)
+bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
+ __be16 proto, int nhoff, int hlen)
{
- struct bpf_skb_data_end cb_saved;
- struct bpf_skb_data_end *cb;
+ struct bpf_flow_keys *flow_keys = ctx->flow_keys;
u32 result;
- /* Note that even though the const qualifier is discarded
- * throughout the execution of the BPF program, all changes(the
- * control block) are reverted after the BPF program returns.
- * Therefore, __skb_flow_dissect does not alter the skb.
- */
-
- cb = (struct bpf_skb_data_end *)skb->cb;
-
- /* Save Control Block */
- memcpy(&cb_saved, cb, sizeof(cb_saved));
- memset(cb, 0, sizeof(*cb));
-
/* Pass parameters to the BPF program */
memset(flow_keys, 0, sizeof(*flow_keys));
- cb->qdisc_cb.flow_keys = flow_keys;
- flow_keys->n_proto = skb->protocol;
- flow_keys->nhoff = skb_network_offset(skb);
+ flow_keys->n_proto = proto;
+ flow_keys->nhoff = nhoff;
flow_keys->thoff = flow_keys->nhoff;
- bpf_compute_data_pointers((struct sk_buff *)skb);
- result = BPF_PROG_RUN(prog, skb);
+ result = BPF_PROG_RUN(prog, ctx);
- /* Restore state */
- memcpy(cb, &cb_saved, sizeof(cb_saved));
-
- flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff,
- skb_network_offset(skb), skb->len);
+ flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
- flow_keys->nhoff, skb->len);
+ flow_keys->nhoff, hlen);
return result == BPF_OK;
}
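Taking the skb out of the interface is what permits dissecting a bare buffer. A hedged sketch of a no-skb caller:

	struct bpf_flow_keys flow_keys;
	struct bpf_flow_dissector ctx = {
		.flow_keys = &flow_keys,
		.data	   = data,		/* raw network-header bytes */
		.data_end  = data + size,
	};

	/* ctx.skb stays NULL, so bpf_skb_load_bytes() in the prog fails cleanly */
	bool ok = bpf_flow_dissect(prog, &ctx, htons(ETH_P_IP), 0, size);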
/**
* __skb_flow_dissect - extract the flow_keys struct and return it
+ * @net: associated network namespace, derived from @skb if NULL
* @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
* @flow_dissector: list of keys to dissect
* @target_container: target structure to put dissected values into
@@ -743,7 +762,8 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
*
* Caller must take care of zeroing target container memory.
*/
-bool __skb_flow_dissect(const struct sk_buff *skb,
+bool __skb_flow_dissect(const struct net *net,
+ const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
void *data, __be16 proto, int nhoff, int hlen,
@@ -756,6 +776,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector_key_icmp *key_icmp;
struct flow_dissector_key_tags *key_tags;
struct flow_dissector_key_vlan *key_vlan;
+ struct bpf_prog *attached = NULL;
enum flow_dissect_ret fdret;
enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
int num_hdrs = 0;
@@ -798,22 +819,39 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
target_container);
if (skb) {
- struct bpf_flow_keys flow_keys;
- struct bpf_prog *attached = NULL;
+ if (!net) {
+ if (skb->dev)
+ net = dev_net(skb->dev);
+ else if (skb->sk)
+ net = sock_net(skb->sk);
+ }
+ }
+ WARN_ON_ONCE(!net);
+ if (net) {
rcu_read_lock();
-
- if (skb->dev)
- attached = rcu_dereference(dev_net(skb->dev)->flow_dissector_prog);
- else if (skb->sk)
- attached = rcu_dereference(sock_net(skb->sk)->flow_dissector_prog);
- else
- WARN_ON_ONCE(1);
+ attached = rcu_dereference(net->flow_dissector_prog);
if (attached) {
- ret = __skb_flow_bpf_dissect(attached, skb,
- flow_dissector,
- &flow_keys);
+ struct bpf_flow_keys flow_keys;
+ struct bpf_flow_dissector ctx = {
+ .flow_keys = &flow_keys,
+ .data = data,
+ .data_end = data + hlen,
+ };
+ __be16 n_proto = proto;
+
+ if (skb) {
+ ctx.skb = skb;
+ /* we can't use 'proto' in the skb case
+ * because it might be set to skb->vlan_proto
+ * which has been pulled from the data
+ */
+ n_proto = skb->protocol;
+ }
+
+ ret = bpf_flow_dissect(attached, &ctx, n_proto, nhoff,
+ hlen);
__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
target_container);
rcu_read_unlock();
@@ -1410,8 +1448,8 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
__flow_hash_secret_init();
memset(&keys, 0, sizeof(keys));
- __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
- NULL, 0, 0, 0,
+ __skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
+ &keys, NULL, 0, 0, 0,
FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
return __flow_hash_from_keys(&keys, hashrnd);
@@ -1512,7 +1550,8 @@ u32 skb_get_poff(const struct sk_buff *skb)
{
struct flow_keys_basic keys;
- if (!skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
+ if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
+ NULL, 0, 0, 0, 0))
return 0;
return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 3c5c24a5d9f5..1c94f529f4a1 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -343,8 +343,8 @@ static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
int ret;
u32 fd;
- ret = nla_parse_nested(tb, LWT_BPF_PROG_MAX, attr, bpf_prog_policy,
- NULL);
+ ret = nla_parse_nested_deprecated(tb, LWT_BPF_PROG_MAX, attr,
+ bpf_prog_policy, NULL);
if (ret < 0)
return ret;
@@ -385,7 +385,8 @@ static int bpf_build_state(struct nlattr *nla,
if (family != AF_INET && family != AF_INET6)
return -EAFNOSUPPORT;
- ret = nla_parse_nested(tb, LWT_BPF_MAX, nla, bpf_nl_policy, extack);
+ ret = nla_parse_nested_deprecated(tb, LWT_BPF_MAX, nla, bpf_nl_policy,
+ extack);
if (ret < 0)
return ret;
@@ -453,7 +454,7 @@ static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr,
if (!prog->prog)
return 0;
- nest = nla_nest_start(skb, attr);
+ nest = nla_nest_start_noflag(skb, attr);
if (!nest)
return -EMSGSIZE;
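nla_nest_start_noflag() is the old nla_nest_start() behaviour under an explicit name: the container attribute is emitted without NLA_F_NESTED, preserving the legacy wire format. A sketch in the shape of bpf_fill_lwt_prog() above:

	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, attr);	/* no NLA_F_NESTED bit */
	if (!nest)
		return -EMSGSIZE;
	if (nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name))
		goto nla_put_failure;
	nla_nest_end(skb, nest);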
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index 19b557bd294b..69e249fbc02f 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -26,7 +26,7 @@
#include <net/lwtunnel.h>
#include <net/rtnetlink.h>
#include <net/ip6_fib.h>
-#include <net/nexthop.h>
+#include <net/rtnh.h>
#ifdef CONFIG_MODULES
@@ -223,7 +223,8 @@ void lwtstate_free(struct lwtunnel_state *lws)
}
EXPORT_SYMBOL_GPL(lwtstate_free);
-int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
+int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate,
+ int encap_attr, int encap_type_attr)
{
const struct lwtunnel_encap_ops *ops;
struct nlattr *nest;
@@ -236,7 +237,7 @@ int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
lwtstate->type > LWTUNNEL_ENCAP_MAX)
return 0;
- nest = nla_nest_start(skb, RTA_ENCAP);
+ nest = nla_nest_start_noflag(skb, encap_attr);
if (!nest)
return -EMSGSIZE;
@@ -250,7 +251,7 @@ int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
if (ret)
goto nla_put_failure;
nla_nest_end(skb, nest);
- ret = nla_put_u16(skb, RTA_ENCAP_TYPE, lwtstate->type);
+ ret = nla_put_u16(skb, encap_type_attr, lwtstate->type);
if (ret)
goto nla_put_failure;
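Parameterising the attribute types lets future callers that don't use RTA_* attributes reuse the same fill helper; existing callers simply pass the classic pair, roughly:

	err = lwtunnel_fill_encap(skb, lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE);
	if (err)
		goto nla_put_failure;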
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 997cfa8f99ba..e73bfc63e473 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1862,7 +1862,8 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
ASSERT_RTNL();
- err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, nda_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
+ nda_policy, extack);
if (err < 0)
goto out;
@@ -1979,7 +1980,7 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
struct nlattr *nest;
- nest = nla_nest_start(skb, NDTA_PARMS);
+ nest = nla_nest_start_noflag(skb, NDTA_PARMS);
if (nest == NULL)
return -ENOBUFS;
@@ -2181,8 +2182,8 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
bool found = false;
int err, tidx;
- err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
- nl_neightbl_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
+ nl_neightbl_policy, extack);
if (err < 0)
goto errout;
@@ -2219,8 +2220,9 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
struct neigh_parms *p;
int i, ifindex = 0;
- err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
- nl_ntbl_parm_policy, extack);
+ err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
+ tb[NDTA_PARMS],
+ nl_ntbl_parm_policy, extack);
if (err < 0)
goto errout_tbl_lock;
@@ -2660,11 +2662,12 @@ static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
return -EINVAL;
}
- err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
- nda_policy, extack);
+ err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
+ tb, NDA_MAX, nda_policy,
+ extack);
} else {
- err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
- nda_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
+ NDA_MAX, nda_policy, extack);
}
if (err < 0)
return err;
@@ -2764,8 +2767,8 @@ static int neigh_valid_get_req(const struct nlmsghdr *nlh,
return -EINVAL;
}
- err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
- nda_policy, extack);
+ err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
+ NDA_MAX, nda_policy, extack);
if (err < 0)
return err;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index ebb5b6d21a13..711b161505ac 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -682,8 +682,8 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net *peer;
int nsid, err;
- err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
- rtnl_net_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
+ NETNSA_MAX, rtnl_net_policy, extack);
if (err < 0)
return err;
if (!tb[NETNSA_NSID]) {
@@ -787,11 +787,13 @@ static int rtnl_net_valid_getid_req(struct sk_buff *skb,
int i, err;
if (!netlink_strict_get_check(skb))
- return nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
- rtnl_net_policy, extack);
+ return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
+ tb, NETNSA_MAX, rtnl_net_policy,
+ extack);
- err = nlmsg_parse_strict(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
- rtnl_net_policy, extack);
+ err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
+ NETNSA_MAX, rtnl_net_policy,
+ extack);
if (err)
return err;
@@ -929,8 +931,9 @@ static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
struct nlattr *tb[NETNSA_MAX + 1];
int err, i;
- err = nlmsg_parse_strict(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
- rtnl_net_policy, extack);
+ err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
+ NETNSA_MAX, rtnl_net_policy,
+ extack);
if (err < 0)
return err;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index b9057478d69c..7e3d0d99dfae 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -301,6 +301,4 @@ static int __init init_cgroup_netprio(void)
register_netdevice_notifier(&netprio_device_notifier);
return 0;
}
-
subsys_initcall(init_cgroup_netprio);
-MODULE_LICENSE("GPL v2");
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index f3f5a78cd062..319ad5490fb3 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2521,7 +2521,7 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
skb->_skb_refdst = (unsigned long)&pkt_dev->xdst.u.dst | SKB_DST_NOREF;
rcu_read_lock_bh();
- err = x->outer_mode->output(x, skb);
+ err = pktgen_xfrm_outer_mode_output(x, skb);
rcu_read_unlock_bh();
if (err) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5fa5bf3e9945..2bd12afb9297 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -634,7 +634,7 @@ static int rtnl_link_slave_info_fill(struct sk_buff *skb,
if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
return -EMSGSIZE;
if (ops->fill_slave_info) {
- slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
+ slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
if (!slave_data)
return -EMSGSIZE;
err = ops->fill_slave_info(skb, master_dev, dev);
@@ -666,7 +666,7 @@ static int rtnl_link_info_fill(struct sk_buff *skb,
return err;
}
if (ops->fill_info) {
- data = nla_nest_start(skb, IFLA_INFO_DATA);
+ data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
if (data == NULL)
return -EMSGSIZE;
err = ops->fill_info(skb, dev);
@@ -686,7 +686,7 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
struct nlattr *linkinfo;
int err = -EMSGSIZE;
- linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
+ linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
if (linkinfo == NULL)
goto out;
@@ -755,7 +755,7 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
struct nlattr *mx;
int i, valid = 0;
- mx = nla_nest_start(skb, RTA_METRICS);
+ mx = nla_nest_start_noflag(skb, RTA_METRICS);
if (mx == NULL)
return -ENOBUFS;
@@ -1036,12 +1036,12 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
int vf;
int err;
- vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
+ vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
if (!vf_ports)
return -EMSGSIZE;
for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
- vf_port = nla_nest_start(skb, IFLA_VF_PORT);
+ vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
if (!vf_port)
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_PORT_VF, vf))
@@ -1070,7 +1070,7 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
struct nlattr *port_self;
int err;
- port_self = nla_nest_start(skb, IFLA_PORT_SELF);
+ port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
if (!port_self)
return -EMSGSIZE;
@@ -1247,7 +1247,7 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
vf_linkstate.link_state = ivi.linkstate;
vf_rss_query_en.setting = ivi.rss_query_en;
vf_trust.setting = ivi.trusted;
- vf = nla_nest_start(skb, IFLA_VF_INFO);
+ vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
if (!vf)
goto nla_put_vfinfo_failure;
if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
@@ -1266,7 +1266,7 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
nla_put(skb, IFLA_VF_TRUST,
sizeof(vf_trust), &vf_trust))
goto nla_put_vf_failure;
- vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
+ vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
if (!vfvlanlist)
goto nla_put_vf_failure;
if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
@@ -1279,7 +1279,7 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
if (dev->netdev_ops->ndo_get_vf_stats)
dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
&vf_stats);
- vfstats = nla_nest_start(skb, IFLA_VF_STATS);
+ vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
if (!vfstats)
goto nla_put_vf_failure;
if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
@@ -1329,7 +1329,7 @@ static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
if (!dev->netdev_ops->ndo_get_vf_config)
return 0;
- vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
+ vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
if (!vfinfo)
return -EMSGSIZE;
@@ -1414,7 +1414,7 @@ static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
int err;
u8 mode;
- xdp = nla_nest_start(skb, IFLA_XDP);
+ xdp = nla_nest_start_noflag(skb, IFLA_XDP);
if (!xdp)
return -EMSGSIZE;
@@ -1541,7 +1541,7 @@ static int rtnl_fill_link_af(struct sk_buff *skb,
const struct rtnl_af_ops *af_ops;
struct nlattr *af_spec;
- af_spec = nla_nest_start(skb, IFLA_AF_SPEC);
+ af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
if (!af_spec)
return -EMSGSIZE;
@@ -1552,7 +1552,7 @@ static int rtnl_fill_link_af(struct sk_buff *skb,
if (!af_ops->fill_link_af)
continue;
- af = nla_nest_start(skb, af_ops->family);
+ af = nla_nest_start_noflag(skb, af_ops->family);
if (!af)
return -EMSGSIZE;
@@ -1797,8 +1797,7 @@ static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla
const struct rtnl_link_ops *ops = NULL;
struct nlattr *linfo[IFLA_INFO_MAX + 1];
- if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla,
- ifla_info_policy, NULL) < 0)
+ if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
return NULL;
if (linfo[IFLA_INFO_KIND]) {
@@ -1897,8 +1896,9 @@ static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
return -EINVAL;
}
- return nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
- ifla_policy, extack);
+ return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
+ IFLA_MAX, ifla_policy,
+ extack);
}
/* A hack to preserve kernel<->userspace interface.
@@ -1911,7 +1911,8 @@ static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
- return nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, extack);
+ return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
+ extack);
}
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
@@ -2019,7 +2020,8 @@ out_err:
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
struct netlink_ext_ack *exterr)
{
- return nla_parse(tb, IFLA_MAX, head, len, ifla_policy, exterr);
+ return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
+ exterr);
}
EXPORT_SYMBOL(rtnl_nla_parse_ifla);
@@ -2564,8 +2566,10 @@ static int do_setlink(const struct sk_buff *skb,
err = -EINVAL;
goto errout;
}
- err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
- ifla_vf_policy, NULL);
+ err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
+ attr,
+ ifla_vf_policy,
+ NULL);
if (err < 0)
goto errout;
err = do_setvfinfo(dev, vfinfo);
@@ -2592,8 +2596,10 @@ static int do_setlink(const struct sk_buff *skb,
err = -EINVAL;
goto errout;
}
- err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
- ifla_port_policy, NULL);
+ err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
+ attr,
+ ifla_port_policy,
+ NULL);
if (err < 0)
goto errout;
if (!port[IFLA_PORT_VF]) {
@@ -2612,9 +2618,9 @@ static int do_setlink(const struct sk_buff *skb,
if (tb[IFLA_PORT_SELF]) {
struct nlattr *port[IFLA_PORT_MAX+1];
- err = nla_parse_nested(port, IFLA_PORT_MAX,
- tb[IFLA_PORT_SELF], ifla_port_policy,
- NULL);
+ err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
+ tb[IFLA_PORT_SELF],
+ ifla_port_policy, NULL);
if (err < 0)
goto errout;
@@ -2661,8 +2667,9 @@ static int do_setlink(const struct sk_buff *skb,
struct nlattr *xdp[IFLA_XDP_MAX + 1];
u32 xdp_flags = 0;
- err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
- ifla_xdp_policy, NULL);
+ err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
+ tb[IFLA_XDP],
+ ifla_xdp_policy, NULL);
if (err < 0)
goto errout;
@@ -2716,8 +2723,8 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr *tb[IFLA_MAX+1];
char ifname[IFNAMSIZ];
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
- extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
+ ifla_policy, extack);
if (err < 0)
goto errout;
@@ -2813,7 +2820,8 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
int netnsid = -1;
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
+ ifla_policy, extack);
if (err < 0)
return err;
@@ -2990,7 +2998,8 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
#ifdef CONFIG_MODULES
replay:
#endif
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
+ ifla_policy, extack);
if (err < 0)
return err;
@@ -3024,9 +3033,9 @@ replay:
return err;
if (tb[IFLA_LINKINFO]) {
- err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
- tb[IFLA_LINKINFO], ifla_info_policy,
- NULL);
+ err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
+ tb[IFLA_LINKINFO],
+ ifla_info_policy, NULL);
if (err < 0)
return err;
} else
@@ -3046,9 +3055,9 @@ replay:
return -EINVAL;
if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
- err = nla_parse_nested(attr, ops->maxtype,
- linkinfo[IFLA_INFO_DATA],
- ops->policy, extack);
+ err = nla_parse_nested_deprecated(attr, ops->maxtype,
+ linkinfo[IFLA_INFO_DATA],
+ ops->policy, extack);
if (err < 0)
return err;
data = attr;
@@ -3067,9 +3076,11 @@ replay:
if (m_ops->slave_maxtype &&
linkinfo[IFLA_INFO_SLAVE_DATA]) {
- err = nla_parse_nested(slave_attr, m_ops->slave_maxtype,
- linkinfo[IFLA_INFO_SLAVE_DATA],
- m_ops->slave_policy, extack);
+ err = nla_parse_nested_deprecated(slave_attr,
+ m_ops->slave_maxtype,
+ linkinfo[IFLA_INFO_SLAVE_DATA],
+ m_ops->slave_policy,
+ extack);
if (err < 0)
return err;
slave_data = slave_attr;
@@ -3250,8 +3261,8 @@ static int rtnl_valid_getlink_req(struct sk_buff *skb,
}
if (!netlink_strict_get_check(skb))
- return nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
- extack);
+ return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
+ ifla_policy, extack);
ifm = nlmsg_data(nlh);
if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
@@ -3260,8 +3271,8 @@ static int rtnl_valid_getlink_req(struct sk_buff *skb,
return -EINVAL;
}
- err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
- extack);
+ err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
+ ifla_policy, extack);
if (err)
return err;
@@ -3366,7 +3377,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
- if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
+ if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
}
@@ -3639,7 +3650,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
u16 vid;
int err;
- err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
+ extack);
if (err < 0)
return err;
@@ -3749,7 +3761,8 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
- err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
+ extack);
if (err < 0)
return err;
@@ -3898,8 +3911,8 @@ static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
return -EINVAL;
}
- err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
- NULL, extack);
+ err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
+ NDA_MAX, NULL, extack);
if (err < 0)
return err;
@@ -3951,8 +3964,9 @@ static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
nla_attr_size(sizeof(u32)))) {
struct ifinfomsg *ifm;
- err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
- ifla_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
+ tb, IFLA_MAX, ifla_policy,
+ extack);
if (err < 0) {
return -EINVAL;
} else if (err == 0) {
@@ -4091,8 +4105,8 @@ static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
return -EINVAL;
}
- err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
- nda_policy, extack);
+ err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
+ NDA_MAX, nda_policy, extack);
if (err < 0)
return err;
@@ -4273,7 +4287,7 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
goto nla_put_failure;
- br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
+ br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
if (!br_afspec)
goto nla_put_failure;
@@ -4297,7 +4311,7 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
}
nla_nest_end(skb, br_afspec);
- protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
+ protinfo = nla_nest_start(skb, IFLA_PROTINFO);
if (!protinfo)
goto nla_put_failure;
@@ -4354,11 +4368,14 @@ static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
return -EINVAL;
}
- err = nlmsg_parse_strict(nlh, sizeof(struct ifinfomsg), tb,
- IFLA_MAX, ifla_policy, extack);
+ err = nlmsg_parse_deprecated_strict(nlh,
+ sizeof(struct ifinfomsg),
+ tb, IFLA_MAX, ifla_policy,
+ extack);
} else {
- err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb,
- IFLA_MAX, ifla_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
+ tb, IFLA_MAX, ifla_policy,
+ extack);
}
if (err < 0)
return err;
@@ -4776,8 +4793,8 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
if (ops && ops->fill_linkxstats) {
*idxattr = IFLA_STATS_LINK_XSTATS;
- attr = nla_nest_start(skb,
- IFLA_STATS_LINK_XSTATS);
+ attr = nla_nest_start_noflag(skb,
+ IFLA_STATS_LINK_XSTATS);
if (!attr)
goto nla_put_failure;
@@ -4799,8 +4816,8 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
ops = master->rtnl_link_ops;
if (ops && ops->fill_linkxstats) {
*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
- attr = nla_nest_start(skb,
- IFLA_STATS_LINK_XSTATS_SLAVE);
+ attr = nla_nest_start_noflag(skb,
+ IFLA_STATS_LINK_XSTATS_SLAVE);
if (!attr)
goto nla_put_failure;
@@ -4815,7 +4832,8 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
*idxattr)) {
*idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
- attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
+ attr = nla_nest_start_noflag(skb,
+ IFLA_STATS_LINK_OFFLOAD_XSTATS);
if (!attr)
goto nla_put_failure;
@@ -4834,7 +4852,7 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
struct rtnl_af_ops *af_ops;
*idxattr = IFLA_STATS_AF_SPEC;
- attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
+ attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
if (!attr)
goto nla_put_failure;
@@ -4844,7 +4862,8 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
struct nlattr *af;
int err;
- af = nla_nest_start(skb, af_ops->family);
+ af = nla_nest_start_noflag(skb,
+ af_ops->family);
if (!af) {
rcu_read_unlock();
goto nla_put_failure;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a083e188374f..e89be6282693 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -258,6 +258,33 @@ nodata:
}
EXPORT_SYMBOL(__alloc_skb);
+/* Caller must provide SKB that is memset cleared */
+static struct sk_buff *__build_skb_around(struct sk_buff *skb,
+ void *data, unsigned int frag_size)
+{
+ struct skb_shared_info *shinfo;
+ unsigned int size = frag_size ? : ksize(data);
+
+ size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ /* Assumes caller memset cleared SKB */
+ skb->truesize = SKB_TRUESIZE(size);
+ refcount_set(&skb->users, 1);
+ skb->head = data;
+ skb->data = data;
+ skb_reset_tail_pointer(skb);
+ skb->end = skb->tail + size;
+ skb->mac_header = (typeof(skb->mac_header))~0U;
+ skb->transport_header = (typeof(skb->transport_header))~0U;
+
+ /* make sure we initialize shinfo sequentially */
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ return skb;
+}
+
/**
* __build_skb - build a network buffer
* @data: data buffer provided by caller
@@ -279,32 +306,15 @@ EXPORT_SYMBOL(__alloc_skb);
*/
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
- struct skb_shared_info *shinfo;
struct sk_buff *skb;
- unsigned int size = frag_size ? : ksize(data);
skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
- if (!skb)
+ if (unlikely(!skb))
return NULL;
- size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
memset(skb, 0, offsetof(struct sk_buff, tail));
- skb->truesize = SKB_TRUESIZE(size);
- refcount_set(&skb->users, 1);
- skb->head = data;
- skb->data = data;
- skb_reset_tail_pointer(skb);
- skb->end = skb->tail + size;
- skb->mac_header = (typeof(skb->mac_header))~0U;
- skb->transport_header = (typeof(skb->transport_header))~0U;
- /* make sure we initialize shinfo sequentially */
- shinfo = skb_shinfo(skb);
- memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
- atomic_set(&shinfo->dataref, 1);
-
- return skb;
+ return __build_skb_around(skb, data, frag_size);
}
/* build_skb() is wrapper over __build_skb(), that specifically
@@ -325,6 +335,29 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
}
EXPORT_SYMBOL(build_skb);
+/**
+ * build_skb_around - build a network buffer around provided skb
+ * @skb: sk_buff provided by caller, must be memset cleared
+ * @data: data buffer provided by caller
+ * @frag_size: size of data, or 0 if head was kmalloced
+ */
+struct sk_buff *build_skb_around(struct sk_buff *skb,
+ void *data, unsigned int frag_size)
+{
+ if (unlikely(!skb))
+ return NULL;
+
+ skb = __build_skb_around(skb, data, frag_size);
+
+ if (skb && frag_size) {
+ skb->head_frag = 1;
+ if (page_is_pfmemalloc(virt_to_head_page(data)))
+ skb->pfmemalloc = 1;
+ }
+ return skb;
+}
+EXPORT_SYMBOL(build_skb_around);
+
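The refactor lets a caller that already holds an skb head, e.g. one taken from a preallocated pool, attach a data buffer without going through kmem_cache_alloc() again. A hedged sketch, assuming the caller zeroed the head itself:

	struct sk_buff *skb = my_skb_pool[i];	/* hypothetical bulk-allocated head */

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb = build_skb_around(skb, data, frag_size);
	if (unlikely(!skb))
		goto drop;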
#define NAPI_SKB_CACHE_SIZE 64
struct napi_alloc_cache {
diff --git a/net/core/sock.c b/net/core/sock.c
index 067878a1e4c5..75b1c950b49f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -137,6 +137,7 @@
#include <linux/filter.h>
#include <net/sock_reuseport.h>
+#include <net/bpf_sk_storage.h>
#include <trace/events/sock.h>
@@ -1709,6 +1710,10 @@ static void __sk_destruct(struct rcu_head *head)
sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
+#ifdef CONFIG_BPF_SYSCALL
+ bpf_sk_storage_free(sk);
+#endif
+
if (atomic_read(&sk->sk_omem_alloc))
pr_debug("%s: optmem leakage (%d bytes) detected\n",
__func__, atomic_read(&sk->sk_omem_alloc));
@@ -2977,39 +2982,44 @@ bool lock_sock_fast(struct sock *sk)
}
EXPORT_SYMBOL(lock_sock_fast);
-int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
+int sock_gettstamp(struct socket *sock, void __user *userstamp,
+ bool timeval, bool time32)
{
- struct timeval tv;
+ struct sock *sk = sock->sk;
+ struct timespec64 ts;
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- tv = ktime_to_timeval(sock_read_timestamp(sk));
- if (tv.tv_sec == -1)
+ ts = ktime_to_timespec64(sock_read_timestamp(sk));
+ if (ts.tv_sec == -1)
return -ENOENT;
- if (tv.tv_sec == 0) {
+ if (ts.tv_sec == 0) {
ktime_t kt = ktime_get_real();
- sock_write_timestamp(sk, kt);
- tv = ktime_to_timeval(kt);
+ sock_write_timestamp(sk, kt);
+ ts = ktime_to_timespec64(kt);
}
- return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
-}
-EXPORT_SYMBOL(sock_get_timestamp);
-int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
-{
- struct timespec ts;
+ if (timeval)
+ ts.tv_nsec /= 1000;
- sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- ts = ktime_to_timespec(sock_read_timestamp(sk));
- if (ts.tv_sec == -1)
- return -ENOENT;
- if (ts.tv_sec == 0) {
- ktime_t kt = ktime_get_real();
- sock_write_timestamp(sk, kt);
- ts = ktime_to_timespec(sk->sk_stamp);
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ if (time32)
+ return put_old_timespec32(&ts, userstamp);
+#endif
+#ifdef CONFIG_SPARC64
+ /* beware of padding in sparc64 timeval */
+ if (timeval && !in_compat_syscall()) {
+ struct __kernel_old_timeval __user tv = {
+ .tv_sec = ts.tv_sec,
+ .tv_usec = ts.tv_nsec,
+ };
+ if (copy_to_user(userstamp, &tv, sizeof(tv)))
+ return -EFAULT;
+ return 0;
}
- return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
+#endif
+ return put_timespec64(&ts, userstamp);
}
-EXPORT_SYMBOL(sock_get_timestampns);
+EXPORT_SYMBOL(sock_gettstamp);
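Protocol ioctl handlers can now funnel their timestamp ioctls through this single helper; the two bools select timeval vs. timespec layout and 32-bit vs. native width. A hedged sketch of a dispatch site:

	/* illustrative: inside a protocol's ioctl handler */
	case SIOCGSTAMP:
		return sock_gettstamp(sock, (void __user *)arg,
				      true /* timeval */, false /* native */);
	case SIOCGSTAMPNS:
		return sock_gettstamp(sock, (void __user *)arg,
				      false /* timespec */, false /* native */);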
void sock_enable_timestamp(struct sock *sk, int flag)
{