author     Matthew Wilcox (Oracle) <willy@infradead.org>  2024-05-31 13:29:02 +0100
committer  Vlastimil Babka <vbabka@suse.cz>               2024-05-31 15:51:10 +0200
commit     4d2bcefa965b06a1f2be6912456bcfa86a34f184
tree       e94748013e551b80d29658818554bf089c5ea66b
parent     a0a44d9175b349df2462089140fb7f292100bd7c
mm: Reduce the number of slab->folio casts
Mark a few more folio functions as taking a const folio pointer, which
allows us to remove a few places in slab which cast away the const.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
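The conversion itself is mechanical, but the pattern it removes is worth seeing in isolation. Below is a minimal standalone sketch (hypothetical helper names, not the kernel's definitions) of why a read-only accessor with a non-const parameter forces const callers to cast the qualifier away:

#include <assert.h>

struct folio { unsigned long flags; };

/* Read-only accessor without const: const callers must cast, as slab
 * did before this patch. */
static unsigned int order_nonconst(struct folio *folio)
{
	return (unsigned int)(folio->flags & 0xffUL);
}

/* The same accessor with a const-qualified parameter: const callers
 * pass their pointer straight through. */
static unsigned int order_const(const struct folio *folio)
{
	return (unsigned int)(folio->flags & 0xffUL);
}

static unsigned int caller(const struct folio *folio)
{
	/* Before: return order_nonconst((struct folio *)folio); */
	return order_const(folio);		/* after: no cast needed */
}

int main(void)
{
	struct folio f = { .flags = 2 };

	assert(caller(&f) == 2);
	return 0;
}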
 include/linux/mm.h | 6 +++---
 mm/slab.h          | 4 ++--
 mm/slub.c          | 6 ++----
 3 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9849dfda44d4..a983d371fafa 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1105,7 +1105,7 @@ static inline unsigned int compound_order(struct page *page)
*
* Return: The order of the folio.
*/
-static inline unsigned int folio_order(struct folio *folio)
+static inline unsigned int folio_order(const struct folio *folio)
{
if (!folio_test_large(folio))
return 0;
@@ -2145,7 +2145,7 @@ static inline struct folio *folio_next(struct folio *folio)
* it from being split. It is not necessary for the folio to be locked.
* Return: The base-2 logarithm of the size of this folio.
*/
-static inline unsigned int folio_shift(struct folio *folio)
+static inline unsigned int folio_shift(const struct folio *folio)
{
return PAGE_SHIFT + folio_order(folio);
}
@@ -2158,7 +2158,7 @@ static inline unsigned int folio_shift(struct folio *folio)
* it from being split. It is not necessary for the folio to be locked.
* Return: The number of bytes in this folio.
*/
-static inline size_t folio_size(struct folio *folio)
+static inline size_t folio_size(const struct folio *folio)
{
return PAGE_SIZE << folio_order(folio);
}
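Note that folio_shift() and folio_size() are both defined in terms of folio_order(), so const-qualifying folio_order() is the enabling change and the other two follow directly. A quick numeric check of the relationship, assuming 4 KiB base pages (PAGE_SHIFT == 12 is an assumption here, not something the patch specifies):

#include <stdio.h>

#define PAGE_SHIFT	12UL		/* assumed 4 KiB base pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned int order = 2;		/* e.g. a 4-page folio */

	/* Mirrors folio_shift() and folio_size() above. */
	printf("shift=%lu size=%lu\n",
	       PAGE_SHIFT + order, PAGE_SIZE << order);
	return 0;			/* prints: shift=14 size=16384 */
}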
diff --git a/mm/slab.h b/mm/slab.h
index 5f8f47c5bee0..b16e63191578 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -166,7 +166,7 @@ static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)
*/
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
- return folio_test_active((struct folio *)slab_folio(slab));
+ return folio_test_active(slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
@@ -211,7 +211,7 @@ static inline struct slab *virt_to_slab(const void *addr)

static inline int slab_order(const struct slab *slab)
{
- return folio_order((struct folio *)slab_folio(slab));
+ return folio_order(slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
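The casts deleted in this file existed because slab_folio() preserves constness: given a const struct slab *, it returns a const struct folio *, which the previously non-const folio helpers would not accept. A simplified sketch of that dispatch, modeled on the _Generic-based slab_folio() macro in mm/slab.h (not verbatim):

struct slab;
struct folio;

/* Const-preserving conversion: a const slab yields a const folio, so
 * the qualifier survives instead of being cast away by each caller. */
#define slab_folio(s)						\
	_Generic((s),						\
		const struct slab *: (const struct folio *)(s),	\
		struct slab *:	     (struct folio *)(s))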
diff --git a/mm/slub.c b/mm/slub.c
index 95e0a3332c44..b8ba068ca079 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -962,11 +962,9 @@ void print_tracking(struct kmem_cache *s, void *object)

static void print_slab_info(const struct slab *slab)
{
- struct folio *folio = (struct folio *)slab_folio(slab);
-
pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
slab, slab->objects, slab->inuse, slab->freelist,
- folio_flags(folio, 0));
+ &slab->__page_flags);
}
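The print_slab_info() hunk goes a step further: instead of converting to a folio at all, it prints the slab's own flags word. That works because struct slab is a typed overlay of struct page, so its first field aliases the flags word that folio_flags(folio, 0) would have returned a pointer to. A layout sketch of that assumption (simplified; the real struct carries many more fields):

/* Simplified overlay: struct slab reinterprets a struct page, so
 * &slab->__page_flags and folio_flags(folio, 0) refer to the same
 * word, which %pGp then decodes as page flags. */
struct slab {
	unsigned long __page_flags;	/* aliases page->flags */
	/* ... allocator-specific fields elided ... */
};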
/*
@@ -2532,7 +2530,7 @@ static void discard_slab(struct kmem_cache *s, struct slab *slab)
*/
static inline bool slab_test_node_partial(const struct slab *slab)
{
- return folio_test_workingset((struct folio *)slab_folio(slab));
+ return folio_test_workingset(slab_folio(slab));
}

static inline void slab_set_node_partial(struct slab *slab)
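For context on this last hunk: slub reuses the folio workingset flag as an "on the node partial list" marker for slabs, and the truncated line above is the signature of the paired setter. A hedged sketch of that setter, based on slub's existing pattern rather than anything changed by this diff:

static inline void slab_set_node_partial(struct slab *slab)
{
	set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
}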