Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	32
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8c6b4ba62ac1..7bc43cf24e3d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1230,18 +1230,23 @@ void __free_pages_core(struct page *page, unsigned int order,
 	 * When initializing the memmap, __init_single_page() sets the refcount
 	 * of all pages to 1 ("allocated"/"not free"). We have to set the
 	 * refcount of all involved pages to 0.
+	 *
+	 * Note that hotplugged memory pages are initialized to PageOffline().
+	 * Pages freed from memblock might be marked as reserved.
 	 */
-	prefetchw(p);
-	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
-		prefetchw(p + 1);
-		__ClearPageReserved(p);
-		set_page_count(p, 0);
-	}
-	__ClearPageReserved(p);
-	set_page_count(p, 0);
-
 	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
 	    unlikely(context == MEMINIT_HOTPLUG)) {
+		prefetchw(p);
+		for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
+			prefetchw(p + 1);
+			VM_WARN_ON_ONCE(PageReserved(p));
+			__ClearPageOffline(p);
+			set_page_count(p, 0);
+		}
+		VM_WARN_ON_ONCE(PageReserved(p));
+		__ClearPageOffline(p);
+		set_page_count(p, 0);
+
 		/*
 		 * Freeing the page with debug_pagealloc enabled will try to
 		 * unmap it; some archs don't like double-unmappings, so
@@ -1250,6 +1255,15 @@ void __free_pages_core(struct page *page, unsigned int order,
 		debug_pagealloc_map_pages(page, nr_pages);
 		adjust_managed_page_count(page, nr_pages);
 	} else {
+		prefetchw(p);
+		for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
+			prefetchw(p + 1);
+			__ClearPageReserved(p);
+			set_page_count(p, 0);
+		}
+		__ClearPageReserved(p);
+		set_page_count(p, 0);
+
 		/* memblock adjusts totalram_pages() manually. */
 		atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
 	}
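
For reference, here is how the patched section of __free_pages_core() reads once both hunks are applied. This is a sketch reconstructed purely from the diff above; the declarations of p, loop and nr_pages are not part of the hunks and are assumed from the surrounding function context.

/*
 * Sketch reconstructed from the two hunks above. These declarations are
 * assumed from the surrounding function body, which the diff does not show:
 *
 *	unsigned int nr_pages = 1 << order;
 *	struct page *p = page;
 *	unsigned int loop;
 */
if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
    unlikely(context == MEMINIT_HOTPLUG)) {
	/* Hotplugged memory: pages were initialized to PageOffline(). */
	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		VM_WARN_ON_ONCE(PageReserved(p));
		__ClearPageOffline(p);
		set_page_count(p, 0);
	}
	VM_WARN_ON_ONCE(PageReserved(p));
	__ClearPageOffline(p);
	set_page_count(p, 0);

	/* Map first so debug_pagealloc's later unmap can't double-unmap. */
	debug_pagealloc_map_pages(page, nr_pages);
	adjust_managed_page_count(page, nr_pages);
} else {
	/* Boot-time memblock path: pages might be marked reserved. */
	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	/* memblock adjusts totalram_pages() manually. */
	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
}

Duplicating the prefetchw() loop into both branches lets each path touch only the page flag its pages can actually carry: the hotplug path clears PageOffline() and warns if it ever sees a reserved page, while the boot-time memblock path keeps clearing PageReserved(), rather than one shared loop clearing a flag that applies to only one of the two cases.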