Diffstat (limited to 'net')
 net/core/skbuff.c | 10 ++++++----
 net/smc/smc_ib.c  |  2 +-
 2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 94cc40a6f797..7ee648829849 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -337,7 +337,7 @@ static struct sk_buff *napi_skb_cache_get(void)
 	}
 
 	skb = nc->skb_cache[--nc->skb_count];
-	kasan_unpoison_object_data(skbuff_cache, skb);
+	kasan_mempool_unpoison_object(skb, kmem_cache_size(skbuff_cache));
 
 	return skb;
 }
@@ -1309,13 +1309,15 @@ static void napi_skb_cache_put(struct sk_buff *skb)
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 	u32 i;
 
-	kasan_poison_object_data(skbuff_cache, skb);
+	if (!kasan_mempool_poison_object(skb))
+		return;
+
 	nc->skb_cache[nc->skb_count++] = skb;
 
 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
 		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
-			kasan_unpoison_object_data(skbuff_cache,
-						   nc->skb_cache[i]);
+			kasan_mempool_unpoison_object(nc->skb_cache[i],
+						      kmem_cache_size(skbuff_cache));
 
 		kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
 				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 89981dbe46c9..97704a9e84c7 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -844,7 +844,7 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
 		goto out;
 	/* the calculated number of cq entries fits to mlx5 cq allocation */
 	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
-	smc_order = MAX_ORDER - cqe_size_order;
+	smc_order = MAX_PAGE_ORDER - cqe_size_order;
 	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
 		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
 	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
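
Note on the pattern above: the skbuff.c hunks move the NAPI per-CPU skb cache from the raw kasan_poison_object_data()/kasan_unpoison_object_data() helpers to the mempool-oriented KASAN API. kasan_mempool_poison_object() poisons an object that is being stashed for later reuse and returns false if KASAN flags the operation (e.g. a double-free), in which case the object must not be cached; kasan_mempool_unpoison_object() makes it accessible again when it is handed back out or released to the slab allocator. A minimal sketch of the same pattern for a hypothetical single-slot stash follows; the names obj_cachep, obj_stash and the helpers are illustrative only and not part of the patch, and locking is omitted for brevity:

	#include <linux/kasan.h>
	#include <linux/slab.h>

	static struct kmem_cache *obj_cachep;	/* assumed to be created elsewhere */
	static void *obj_stash;			/* one cached object, NULL if empty */

	static void obj_stash_put(void *obj)
	{
		/*
		 * Poison the object while it sits in the stash; a false return
		 * means KASAN reported the free as invalid and the object must
		 * not be reused.
		 */
		if (!kasan_mempool_poison_object(obj))
			return;

		if (!obj_stash) {
			obj_stash = obj;
			return;
		}

		/* Stash full: unpoison again before returning it to the slab. */
		kasan_mempool_unpoison_object(obj, kmem_cache_size(obj_cachep));
		kmem_cache_free(obj_cachep, obj);
	}

	static void *obj_stash_get(void)
	{
		void *obj = obj_stash;

		if (!obj)
			return kmem_cache_alloc(obj_cachep, GFP_ATOMIC);

		obj_stash = NULL;
		/* Make the cached object accessible again before handing it out. */
		kasan_mempool_unpoison_object(obj, kmem_cache_size(obj_cachep));
		return obj;
	}

The smc_ib.c hunk is a follow-up to the MAX_ORDER -> MAX_PAGE_ORDER rename; the constant still names the largest buddy-allocator order. As a rough worked example, assuming the default MAX_PAGE_ORDER of 10 and a 64-byte cache line, smc_order = 10 - 6 = 4, so the CQE count is capped at (1 << 4) * PAGE_SIZE - 2 entries.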