author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2020-09-21 09:23:55 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2020-09-21 09:23:55 +0200
commit     33f16b25a091687388152d4b29593a39d819aa22 (patch)
tree       34a8ef8acf3f14ca0e4f514756222876d00ff202 /include/linux/skbuff.h
parent     23f87274f0ad4a9167de23a330ed88966634fdf6 (diff)
parent     ba4f184e126b751d1bffad5897f263108befc780 (diff)
Merge 5.9.0-rc6 into tty-next
We need the tty/serial fixes in here and this resolves a merge issue in the 8250 driver.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h  13
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ab57cf787c1f..ed9bea924dc3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -71,7 +71,7 @@
* NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
* TCP or UDP packets over IPv6. These are specifically
* unencapsulated packets of the form IPv6|TCP or
- * IPv4|UDP where the Next Header field in the IPv6
+ * IPv6|UDP where the Next Header field in the IPv6
* header is either TCP or UDP. IPv6 extension headers
* are not supported with this feature. This feature
* cannot be set in features for a device with
@@ -1056,7 +1056,16 @@ void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);
+
+#ifdef CONFIG_TRACEPOINTS
void consume_skb(struct sk_buff *skb);
+#else
+static inline void consume_skb(struct sk_buff *skb)
+{
+ return kfree_skb(skb);
+}
+#endif
+
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;
@@ -2658,7 +2667,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
*
* Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
* to reduce average number of cache lines per packet.
- * get_rps_cpus() for example only access one 64 bytes aligned block :
+ * get_rps_cpu() for example only access one 64 bytes aligned block :
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
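
Note on the skbuff.h change above: the new #ifdef CONFIG_TRACEPOINTS block keeps consume_skb() as an out-of-line function when tracepoints are enabled, so the consume_skb tracepoint in net/core/skbuff.c can still fire, and turns it into an inline wrapper around kfree_skb() when they are not. The sketch below is illustrative only and not part of this commit; example_tx_complete() and its driver context are hypothetical names used to show the usual caller-side convention of consume_skb() for successfully processed buffers versus kfree_skb() for drops.

/*
 * Hypothetical sketch (not from this commit): a driver TX-completion
 * handler. With CONFIG_TRACEPOINTS=y the two calls map to different
 * tracepoints; with tracepoints disabled, the inline stub added above
 * makes consume_skb() behave exactly like kfree_skb(), so callers do
 * not need to care which configuration is in effect.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static void example_tx_complete(struct net_device *dev, struct sk_buff *skb,
				bool tx_ok)
{
	if (tx_ok) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		/* Normal consumption: the frame was handled successfully. */
		consume_skb(skb);
	} else {
		dev->stats.tx_errors++;
		/* Error path: free the buffer as a drop. */
		kfree_skb(skb);
	}
}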