Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c          20
-rw-r--r--  mm/slab.h          12
-rw-r--r--  mm/slab_common.c   75
-rw-r--r--  mm/slob.c           6
-rw-r--r--  mm/slub.c          40
-rw-r--r--  mm/util.c          24
6 files changed, 177 insertions, 0 deletions
diff --git a/mm/slab.c b/mm/slab.c
index d7c8da9319c7..dcc55e78f353 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3635,6 +3635,26 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#endif /* CONFIG_NUMA */
+void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
+{
+ struct kmem_cache *cachep;
+ unsigned int objnr;
+ void *objp;
+
+ kpp->kp_ptr = object;
+ kpp->kp_page = page;
+ cachep = page->slab_cache;
+ kpp->kp_slab_cache = cachep;
+ objp = object - obj_offset(cachep);
+ kpp->kp_data_offset = obj_offset(cachep);
+ page = virt_to_head_page(objp);
+ objnr = obj_to_index(cachep, page, objp);
+ objp = index_to_obj(cachep, page, objnr);
+ kpp->kp_objp = objp;
+ if (DEBUG && cachep->flags & SLAB_STORE_USER)
+ kpp->kp_ret = *dbg_userword(cachep, objp);
+}
+
/**
* __do_kmalloc - allocate memory
* @size: how many bytes of memory are required.
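[Editor's note] The SLAB back end above only fills in kp_ret when allocation-caller tracking is available, i.e. when SLAB debugging is compiled in and the cache was created with SLAB_STORE_USER. A hedged sketch of creating such a cache (the cache name, object size, and init function are invented for illustration, assuming the usual <linux/slab.h> declarations):

/*
 * Illustrative only, not part of this patch: a cache created with
 * SLAB_STORE_USER so that dbg_userword() holds a caller address for
 * kmem_obj_info() to report via kp_ret.
 */
static struct kmem_cache *example_cache;

static int __init example_cache_init(void)
{
	example_cache = kmem_cache_create("example_cache", 128, 0,
					  SLAB_STORE_USER, NULL);
	return example_cache ? 0 : -ENOMEM;
}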
diff --git a/mm/slab.h b/mm/slab.h
index 1a756a359fa8..ecad9b57bc44 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -615,4 +615,16 @@ static inline bool slab_want_init_on_free(struct kmem_cache *c)
return false;
}
+#define KS_ADDRS_COUNT 16
+struct kmem_obj_info {
+ void *kp_ptr;
+ struct page *kp_page;
+ void *kp_objp;
+ unsigned long kp_data_offset;
+ struct kmem_cache *kp_slab_cache;
+ void *kp_ret;
+ void *kp_stack[KS_ADDRS_COUNT];
+};
+void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
+
#endif /* MM_SLAB_H */
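[Editor's note] struct kmem_obj_info is the carrier between kmem_dump_obj() in mm/slab_common.c and the per-allocator kmem_obj_info() back ends: kp_objp is the start of the enclosing object, kp_data_offset is how far the usable data area sits past that start, and kp_ptr is the queried pointer somewhere within the data. A sketch of that relationship (the helper name is invented; it mirrors the "pointer offset" arithmetic that kmem_dump_obj() performs):

/*
 * Illustrative helper, not part of the patch: how far past the start of
 * the data area does the queried pointer point?
 */
static unsigned long kp_pointer_offset(const struct kmem_obj_info *kpp)
{
	return (unsigned long)((char *)kpp->kp_ptr - (char *)kpp->kp_objp) -
	       kpp->kp_data_offset;
}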
diff --git a/mm/slab_common.c b/mm/slab_common.c
index e981c80d216c..adbace4256ef 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -537,6 +537,81 @@ bool slab_is_available(void)
return slab_state >= UP;
}
+/**
+ * kmem_valid_obj - does the pointer reference a valid slab object?
+ * @object: pointer to query.
+ *
+ * Return: %true if the pointer is to a not-yet-freed object from
+ * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
+ * is to an already-freed object (the underlying page might or might not
+ * still be a slab page), and %false otherwise.
+ */
+bool kmem_valid_obj(void *object)
+{
+ struct page *page;
+
+ /* Some arches consider ZERO_SIZE_PTR to be a valid address. */
+ if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
+ return false;
+ page = virt_to_head_page(object);
+ return PageSlab(page);
+}
+
+/**
+ * kmem_dump_obj - Print available slab provenance information
+ * @object: slab object for which to find provenance information.
+ *
+ * This function uses pr_cont(), so the caller is expected to have
+ * printed out whatever preamble is appropriate. The provenance information
+ * depends on the type of object and on how much debugging is enabled.
+ * For a slab-cache object, the fact that it is a slab object is printed,
+ * and, if available, the slab name, return address, and stack trace from
+ * the allocation of that object.
+ *
+ * This function will splat if passed a pointer to a non-slab object.
+ * If you are not sure what type of object you have, you should instead
+ * use mem_dump_obj().
+ */
+void kmem_dump_obj(void *object)
+{
+ char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
+ int i;
+ struct page *page;
+ unsigned long ptroffset;
+ struct kmem_obj_info kp = { };
+
+ if (WARN_ON_ONCE(!virt_addr_valid(object)))
+ return;
+ page = virt_to_head_page(object);
+ if (WARN_ON_ONCE(!PageSlab(page))) {
+ pr_cont(" non-slab memory.\n");
+ return;
+ }
+ kmem_obj_info(&kp, object, page);
+ if (kp.kp_slab_cache)
+ pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
+ else
+ pr_cont(" slab%s", cp);
+ if (kp.kp_objp)
+ pr_cont(" start %px", kp.kp_objp);
+ if (kp.kp_data_offset)
+ pr_cont(" data offset %lu", kp.kp_data_offset);
+ if (kp.kp_objp) {
+ ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
+ pr_cont(" pointer offset %lu", ptroffset);
+ }
+ if (kp.kp_slab_cache && kp.kp_slab_cache->usersize)
+ pr_cont(" size %u", kp.kp_slab_cache->usersize);
+ if (kp.kp_ret)
+ pr_cont(" allocated at %pS\n", kp.kp_ret);
+ else
+ pr_cont("\n");
+ for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
+ if (!kp.kp_stack[i])
+ break;
+ pr_info(" %pS\n", kp.kp_stack[i]);
+ }
+}
+
#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
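[Editor's note] Because kmem_dump_obj() prints with pr_cont(), the intended calling pattern is: print a preamble without a trailing newline, then let kmem_dump_obj() append whatever provenance it finds; kmem_valid_obj() screens out pointers that would otherwise trigger the splat. A hypothetical caller, not part of this patch:

static void report_suspect_pointer(void *p)
{
	if (!kmem_valid_obj(p)) {
		pr_err("Pointer %px is not a slab object\n", p);
		return;
	}
	pr_err("Suspect slab pointer:");
	kmem_dump_obj(p);	/* appends " slab <name> start ... allocated at ..." */
}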
diff --git a/mm/slob.c b/mm/slob.c
index 8d4bfa46247f..ef87ada8705d 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -461,6 +461,12 @@ out:
spin_unlock_irqrestore(&slob_lock, flags);
}
+void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
+{
+ kpp->kp_ptr = object;
+ kpp->kp_page = page;
+}
+
/*
* End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
*/
diff --git a/mm/slub.c b/mm/slub.c
index 0c8b43a5b3b0..3c1a84316fd7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3919,6 +3919,46 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
return 0;
}
+void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
+{
+ void *base;
+ int __maybe_unused i;
+ unsigned int objnr;
+ void *objp;
+ void *objp0;
+ struct kmem_cache *s = page->slab_cache;
+ struct track __maybe_unused *trackp;
+
+ kpp->kp_ptr = object;
+ kpp->kp_page = page;
+ kpp->kp_slab_cache = s;
+ base = page_address(page);
+ objp0 = kasan_reset_tag(object);
+#ifdef CONFIG_SLUB_DEBUG
+ objp = restore_red_left(s, objp0);
+#else
+ objp = objp0;
+#endif
+ objnr = obj_to_index(s, page, objp);
+ kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
+ objp = base + s->size * objnr;
+ kpp->kp_objp = objp;
+ if (WARN_ON_ONCE(objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size) ||
+ !(s->flags & SLAB_STORE_USER))
+ return;
+#ifdef CONFIG_SLUB_DEBUG
+ trackp = get_track(s, objp, TRACK_ALLOC);
+ kpp->kp_ret = (void *)trackp->addr;
+#ifdef CONFIG_STACKTRACE
+ for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
+ kpp->kp_stack[i] = (void *)trackp->addrs[i];
+ if (!kpp->kp_stack[i])
+ break;
+ }
+#endif
+#endif
+}
+
/********************************************************************
* Kmalloc subsystem
*******************************************************************/
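[Editor's note] The SLUB arithmetic above is easiest to follow with concrete numbers. Below is a userspace toy model (made-up geometry, not kernel code) of the restore_red_left()/obj_to_index() steps taken by kmem_obj_info(); with SLAB_RED_ZONE debugging the reported data offset is s->red_left_pad, otherwise it is zero:

#include <stdio.h>

int main(void)
{
	/* Invented cache geometry: s->size includes red zones and metadata. */
	unsigned long size = 512;		/* s->size		*/
	unsigned long red_left_pad = 64;	/* s->red_left_pad	*/
	unsigned long base = 0x4000;		/* page_address(page)	*/
	/* A pointer 24 bytes into the data area of object #3. */
	unsigned long ptr = base + 3 * size + red_left_pad + 24;

	unsigned long objp = ptr - red_left_pad;	/* restore_red_left() */
	unsigned long objnr = (objp - base) / size;	/* obj_to_index()     */
	unsigned long start = base + objnr * size;	/* kp_objp	      */

	printf("object #%lu start %#lx data offset %lu pointer offset %lu\n",
	       objnr, start, red_left_pad, ptr - start - red_left_pad);
	return 0;
}

This prints "object #3 start 0x4600 data offset 64 pointer offset 24", matching the fields kmem_dump_obj() would report for such a pointer.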
diff --git a/mm/util.c b/mm/util.c
index 8c9b7d1e7c49..da46f9d6c382 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -982,3 +982,27 @@ int __weak memcmp_pages(struct page *page1, struct page *page2)
kunmap_atomic(addr1);
return ret;
}
+
+/**
+ * mem_dump_obj - Print available provenance information
+ * @object: object for which to find provenance information.
+ *
+ * This function uses pr_cont(), so the caller is expected to have
+ * printed out whatever preamble is appropriate. The provenance information
+ * depends on the type of object and on how much debugging is enabled.
+ * For example, for a slab-cache object, the slab name is printed, and,
+ * if available, the return address and stack trace from the allocation
+ * of that object.
+ */
+void mem_dump_obj(void *object)
+{
+ if (!virt_addr_valid(object)) {
+ pr_cont(" non-paged (local) memory.\n");
+ return;
+ }
+ if (kmem_valid_obj(object)) {
+ kmem_dump_obj(object);
+ return;
+ }
+ pr_cont(" non-slab memory.\n");
+}
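[Editor's note] mem_dump_obj() is the type-agnostic entry point, so a caller need not know what kind of memory a suspicious pointer references: slab objects get the full kmem_dump_obj() report, other valid pages print " non-slab memory.", and addresses that fail virt_addr_valid() (vmalloc() memory, for example) fall back to " non-paged (local) memory.". A hypothetical debug-path caller, not part of this patch:

static void debug_report_object(const char *why, void *p)
{
	pr_err("%s %px:", why, p);	/* preamble, no newline */
	mem_dump_obj(p);		/* appends whatever provenance exists */
}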