| author | Hyeonggon Yoo <42.hyeyoo@gmail.com> | 2022-08-17 19:18:23 +0900 |
|---|---|---|
| committer | Vlastimil Babka <vbabka@suse.cz> | 2022-09-01 10:40:27 +0200 |
| commit | 11e9734bcb6a7361943f993eba4e97f5812120d8 (patch) | |
| tree | b1749d7a8ec5a2c805d727df95223c1c6bf5cc62 /mm/slob.c | |
| parent | 26a40990ba052e6f553256f9d0f112452b992a38 (diff) | |
mm/slab_common: unify NUMA and UMA version of tracepoints
Drop kmem_alloc event class, rename kmem_alloc_node to kmem_alloc, and
remove _node postfix for NUMA version of tracepoints.
This will break some tools that depend on {kmem_cache_alloc,kmalloc}_node,
but at this point maintaining both kmem_alloc and kmem_alloc_node
event classes does not make sense at all.
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
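The include/trace/events/kmem.h side of the change is not shown here, since the diffstat below is limited to 'mm/slob.c'. As a rough sketch only, assuming the seven-argument tracepoint signature used by the calls in the mm/slob.c diff, the unified event class would be declared along these lines (field layout and format string are illustrative, not copied from the actual header):

```c
/* Illustrative sketch of the unified event class in include/trace/events/kmem.h. */
DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(unsigned long,	call_site)
		__field(const void *,	ptr)
		__field(size_t,		bytes_req)
		__field(size_t,		bytes_alloc)
		__field(unsigned long,	gfp_flags)
		__field(int,		node)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		  (void *)__entry->call_site, __entry->ptr,
		  __entry->bytes_req, __entry->bytes_alloc,
		  show_gfp_flags(__entry->gfp_flags), __entry->node)
);

/* kmalloc and kmem_cache_alloc now share this single class; the *_node events are gone. */
DEFINE_EVENT(kmem_alloc, kmalloc,
	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node),
	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node),
	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);
```

Tools that enable kmem:kmalloc_node or kmem:kmem_cache_alloc_node by name would need to switch to the unified event names, which is the breakage the commit message warns about.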
Diffstat (limited to 'mm/slob.c')
-rw-r--r-- | mm/slob.c | 20 |
1 file changed, 10 insertions, 10 deletions
```diff
diff --git a/mm/slob.c b/mm/slob.c
index 96b08acd72ce..3208c56d8f82 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -507,8 +507,8 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		*m = size;
 		ret = (void *)m + minalign;
 
-		trace_kmalloc_node(caller, ret, NULL,
-				   size, size + minalign, gfp, node);
+		trace_kmalloc(caller, ret, NULL, size,
+			      size + minalign, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
 
@@ -516,8 +516,8 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 			gfp |= __GFP_COMP;
 		ret = slob_new_pages(gfp, order, node);
 
-		trace_kmalloc_node(caller, ret, NULL,
-				   size, PAGE_SIZE << order, gfp, node);
+		trace_kmalloc(caller, ret, NULL, size,
+			      PAGE_SIZE << order, gfp, node);
 	}
 
 	kmemleak_alloc(ret, size, 1, gfp);
@@ -608,14 +608,14 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node, 0);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
-					    SLOB_UNITS(c->size) * SLOB_UNIT,
-					    flags, node);
+		trace_kmem_cache_alloc(_RET_IP_, b, NULL, c->object_size,
+				       SLOB_UNITS(c->size) * SLOB_UNIT,
+				       flags, node);
 	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
-					    PAGE_SIZE << get_order(c->size),
-					    flags, node);
+		trace_kmem_cache_alloc(_RET_IP_, b, NULL, c->object_size,
+				       PAGE_SIZE << get_order(c->size),
+				       flags, node);
 	}
 
 	if (b && c->ctor) {
```