author		Vlastimil Babka <vbabka@suse.cz>	2023-10-03 14:52:47 +0200
committer	Vlastimil Babka <vbabka@suse.cz>	2023-12-06 11:57:21 +0100
commit		0bedcc66d2a43a50ab660273842f4737a293dd8a (patch)
tree		be03d8e4166db1fcc2a62962b243fb67e61d8abd /mm/slab.h
parent		6011be59910fb12b757f9d37793d21763268b4a1 (diff)
mm/slab: move memcg related functions from slab.h to slub.c
We don't share those between SLAB and SLUB anymore, so most
memcg-related functions can be moved to slub.c proper.
Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm/slab.h')
-rw-r--r--	mm/slab.h	206
1 file changed, 0 insertions, 206 deletions
diff --git a/mm/slab.h b/mm/slab.h
index 65ebf86b3fe9..a81ef7c9282d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -486,12 +486,6 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos);
 
-static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
-{
-	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
-}
-
 #ifdef CONFIG_SLUB_DEBUG
 #ifdef CONFIG_SLUB_DEBUG_ON
 DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
@@ -551,220 +545,20 @@ int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
 				 gfp_t gfp, bool new_slab);
 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 		     enum node_stat_item idx, int nr);
-
-static inline void memcg_free_slab_cgroups(struct slab *slab)
-{
-	kfree(slab_objcgs(slab));
-	slab->memcg_data = 0;
-}
-
-static inline size_t obj_full_size(struct kmem_cache *s)
-{
-	/*
-	 * For each accounted object there is an extra space which is used
-	 * to store obj_cgroup membership. Charge it too.
-	 */
-	return s->size + sizeof(struct obj_cgroup *);
-}
-
-/*
- * Returns false if the allocation should fail.
- */
-static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
-					     struct list_lru *lru,
-					     struct obj_cgroup **objcgp,
-					     size_t objects, gfp_t flags)
-{
-	struct obj_cgroup *objcg;
-
-	if (!memcg_kmem_online())
-		return true;
-
-	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
-		return true;
-
-	/*
-	 * The obtained objcg pointer is safe to use within the current scope,
-	 * defined by current task or set_active_memcg() pair.
-	 * obj_cgroup_get() is used to get a permanent reference.
-	 */
-	objcg = current_obj_cgroup();
-	if (!objcg)
-		return true;
-
-	if (lru) {
-		int ret;
-		struct mem_cgroup *memcg;
-
-		memcg = get_mem_cgroup_from_objcg(objcg);
-		ret = memcg_list_lru_alloc(memcg, lru, flags);
-		css_put(&memcg->css);
-
-		if (ret)
-			return false;
-	}
-
-	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
-		return false;
-
-	*objcgp = objcg;
-	return true;
-}
-
-static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
-					      struct obj_cgroup *objcg,
-					      gfp_t flags, size_t size,
-					      void **p)
-{
-	struct slab *slab;
-	unsigned long off;
-	size_t i;
-
-	if (!memcg_kmem_online() || !objcg)
-		return;
-
-	for (i = 0; i < size; i++) {
-		if (likely(p[i])) {
-			slab = virt_to_slab(p[i]);
-
-			if (!slab_objcgs(slab) &&
-			    memcg_alloc_slab_cgroups(slab, s, flags,
-						     false)) {
-				obj_cgroup_uncharge(objcg, obj_full_size(s));
-				continue;
-			}
-
-			off = obj_to_index(s, slab, p[i]);
-			obj_cgroup_get(objcg);
-			slab_objcgs(slab)[off] = objcg;
-			mod_objcg_state(objcg, slab_pgdat(slab),
-					cache_vmstat_idx(s), obj_full_size(s));
-		} else {
-			obj_cgroup_uncharge(objcg, obj_full_size(s));
-		}
-	}
-}
-
-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
-					void **p, int objects)
-{
-	struct obj_cgroup **objcgs;
-	int i;
-
-	if (!memcg_kmem_online())
-		return;
-
-	objcgs = slab_objcgs(slab);
-	if (!objcgs)
-		return;
-
-	for (i = 0; i < objects; i++) {
-		struct obj_cgroup *objcg;
-		unsigned int off;
-
-		off = obj_to_index(s, slab, p[i]);
-		objcg = objcgs[off];
-		if (!objcg)
-			continue;
-
-		objcgs[off] = NULL;
-		obj_cgroup_uncharge(objcg, obj_full_size(s));
-		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
-				-obj_full_size(s));
-		obj_cgroup_put(objcg);
-	}
-}
-
 #else /* CONFIG_MEMCG_KMEM */
 static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
 {
 	return NULL;
 }
 
-static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
-{
-	return NULL;
-}
-
 static inline int memcg_alloc_slab_cgroups(struct slab *slab,
 					   struct kmem_cache *s, gfp_t gfp,
 					   bool new_slab)
 {
 	return 0;
 }
-
-static inline void memcg_free_slab_cgroups(struct slab *slab)
-{
-}
-
-static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
-					     struct list_lru *lru,
-					     struct obj_cgroup **objcgp,
-					     size_t objects, gfp_t flags)
-{
-	return true;
-}
-
-static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
-					      struct obj_cgroup *objcg,
-					      gfp_t flags, size_t size,
-					      void **p)
-{
-}
-
-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
-					void **p, int objects)
-{
-}
 #endif /* CONFIG_MEMCG_KMEM */
 
-static inline struct kmem_cache *virt_to_cache(const void *obj)
-{
-	struct slab *slab;
-
-	slab = virt_to_slab(obj);
-	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
-					__func__))
-		return NULL;
-	return slab->slab_cache;
-}
-
-static __always_inline void account_slab(struct slab *slab, int order,
-					 struct kmem_cache *s, gfp_t gfp)
-{
-	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
-		memcg_alloc_slab_cgroups(slab, s, gfp, true);
-
-	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
-			    PAGE_SIZE << order);
-}
-
-static __always_inline void unaccount_slab(struct slab *slab, int order,
-					   struct kmem_cache *s)
-{
-	if (memcg_kmem_online())
-		memcg_free_slab_cgroups(slab);
-
-	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
-			    -(PAGE_SIZE << order));
-}
-
-static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
-{
-	struct kmem_cache *cachep;
-
-	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
-	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
-		return s;
-
-	cachep = virt_to_cache(x);
-	if (WARN(cachep && cachep != s,
-		 "%s: Wrong slab cache. %s but object is from %s\n",
-		 __func__, s->name, cachep->name))
-		print_tracking(cachep, x);
-	return cachep;
-}
-
 void free_large_kmalloc(struct folio *folio, void *object);
 
 size_t __ksize(const void *objp);
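
For readers following the move: the accounting rule these helpers implement is
unchanged, only their home is. Each accounted object is charged for its own
size plus one trailing obj_cgroup pointer recording memcg membership
(obj_full_size() in the removed code above), charged in bulk at pre-alloc time
and uncharged per object on free. A minimal userspace sketch of that
arithmetic and charge/uncharge pairing; this is illustrative only, and
struct kmem_cache_example plus the local counter are stand-ins, not kernel
code:

	#include <stdio.h>
	#include <stddef.h>

	struct obj_cgroup;	/* opaque stand-in for the kernel type */

	/* Stand-in for struct kmem_cache; only the object size matters here. */
	struct kmem_cache_example {
		size_t size;
	};

	/*
	 * Mirrors the moved obj_full_size(): each accounted object carries
	 * extra space for an obj_cgroup pointer, charged along with the
	 * object itself.
	 */
	static size_t obj_full_size(const struct kmem_cache_example *s)
	{
		return s->size + sizeof(struct obj_cgroup *);
	}

	int main(void)
	{
		struct kmem_cache_example s = { .size = 256 };
		size_t objects = 8;
		long charged = 0;

		/* memcg_slab_pre_alloc_hook() charges all objects up front ... */
		charged += (long)(objects * obj_full_size(&s));
		printf("charged after alloc: %ld bytes\n", charged);

		/* ... and memcg_slab_free_hook() uncharges per object on free. */
		for (size_t i = 0; i < objects; i++)
			charged -= (long)obj_full_size(&s);
		printf("charged after free:  %ld bytes\n", charged);

		return 0;
	}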