author     Kefeng Wang <wangkefeng.wang@huawei.com>   2023-05-16 14:38:21 +0800
committer  Andrew Morton <akpm@linux-foundation.org>  2023-06-09 16:25:25 -0700
commit     ecbb490d8ee38fd84a0d682282589ff723dc62c0 (patch)
tree       e1ac8d8dbea064ca1bc4c0659b509046d10d0faa /mm
parent     e95d372c4cd46b6ec4eeacc07adcb7260ab4cfa0 (diff)
mm: page_alloc: move is_check_pages_enabled() into page_alloc.c
is_check_pages_enabled() is only used in page_alloc.c, so move it into
page_alloc.c and also use it in free_tail_page_prepare().

Link: https://lkml.kernel.org/r/20230516063821.121844-14-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Iurii Zaikin <yzaikin@google.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Len Brown <len.brown@intel.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
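For readers outside the kernel tree, the pattern being consolidated is a single inline predicate that wraps the static-key test, so callers such as free_tail_page_prepare() read as an ordinary boolean check rather than open-coding static_branch_unlikely(). The sketch below is a minimal userspace analogue of that pattern, not kernel code: a plain bool stands in for the check_pages_enabled static key, and prepare_tail_page() is a hypothetical caller shaped like free_tail_page_prepare().

/* Userspace sketch of the helper pattern; a plain bool stands in for the
 * kernel's check_pages_enabled static key (DECLARE_STATIC_KEY_MAYBE). */
#include <stdbool.h>
#include <stdio.h>

static bool check_pages_enabled;	/* stand-in for the static key */

/* One private predicate, kept next to its only users (as the commit does
 * by moving it into page_alloc.c). */
static inline bool is_check_pages_enabled(void)
{
	return check_pages_enabled;
}

/* Hypothetical caller shaped like free_tail_page_prepare(): bail out
 * early when page checking is disabled. */
static int prepare_tail_page(int page_id)
{
	if (!is_check_pages_enabled())
		return 0;	/* checks disabled: nothing to validate */

	printf("running sanity checks on tail page %d\n", page_id);
	return 0;
}

int main(void)
{
	prepare_tail_page(1);		/* silent: checks are off */
	check_pages_enabled = true;	/* e.g. CONFIG_DEBUG_VM or debug_pagealloc */
	prepare_tail_page(2);		/* runs the validation path */
	return 0;
}

In the real kernel, static_branch_unlikely() compiles down to a patched jump, so the disabled case costs little more than a no-op at each call site; the helper only centralizes that test so page_alloc.c has a single place spelling out what "checks enabled" means.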
Diffstat (limited to 'mm')
-rw-r--r--   mm/internal.h     5
-rw-r--r--   mm/page_alloc.c   7
2 files changed, 6 insertions, 6 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 66d7ddf7e211..ec55da813c13 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -208,11 +208,6 @@ extern char * const zone_names[MAX_NR_ZONES];
 /* perform sanity checks on struct pages being allocated or freed */
 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
 
-static inline bool is_check_pages_enabled(void)
-{
-	return static_branch_unlikely(&check_pages_enabled);
-}
-
 extern int min_free_kbytes;
 
 void setup_per_zone_wmarks(void);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d19a05264125..ee23ba9c0ca7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -983,6 +983,11 @@ static inline bool free_page_is_bad(struct page *page)
 	return true;
 }
 
+static inline bool is_check_pages_enabled(void)
+{
+	return static_branch_unlikely(&check_pages_enabled);
+}
+
 static int free_tail_page_prepare(struct page *head_page, struct page *page)
 {
 	struct folio *folio = (struct folio *)head_page;
@@ -994,7 +999,7 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
 	 */
 	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
 
-	if (!static_branch_unlikely(&check_pages_enabled)) {
+	if (!is_check_pages_enabled()) {
 		ret = 0;
 		goto out;
 	}