author	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 16:55:46 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 16:55:46 -0800
commit	f346b0becb1bc62e45495f9cdbae3eef35d0b635 (patch)
tree	ae79f3dfb8e031da51d38f0f095f89d7d23f3643	/mm/page_isolation.c
parent	00d59fde8532b2d42e80909d2e58678755e04da9 (diff)
parent	0f4991e8fd48987ae476a92cdee6bfec4aff31b8 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - large KASAN update to use arm's "software tag-based mode"

 - a few misc things

 - sh updates

 - ocfs2 updates

 - just about all of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (167 commits)
  kernel/fork.c: mark 'stack_vm_area' with __maybe_unused
  memcg, oom: notify on oom killer invocation from the charge path
  mm, swap: fix swapoff with KSM pages
  include/linux/gfp.h: fix typo
  mm/hmm: fix memremap.h, move dev_page_fault_t callback to hmm
  hugetlbfs: Use i_mmap_rwsem to fix page fault/truncate race
  hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
  memory_hotplug: add missing newlines to debugging output
  mm: remove __hugepage_set_anon_rmap()
  include/linux/vmstat.h: remove unused page state adjustment macro
  mm/page_alloc.c: allow error injection
  mm: migrate: drop unused argument of migrate_page_move_mapping()
  blkdev: avoid migration stalls for blkdev pages
  mm: migrate: provide buffer_migrate_page_norefs()
  mm: migrate: move migrate_page_lock_buffers()
  mm: migrate: lock buffers before migrate_page_move_mapping()
  mm: migration: factor out code to compute expected number of page references
  mm, page_alloc: enable pcpu_drain with zone capability
  kmemleak: add config to select auto scan
  mm/page_alloc.c: don't call kasan_free_pages() at deferred mem init
  ...
Diffstat (limited to 'mm/page_isolation.c')
-rw-r--r--	mm/page_isolation.c	10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 43e085608846..ce323e56b34d 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -15,8 +15,7 @@
#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>
-static int set_migratetype_isolate(struct page *page, int migratetype,
- bool skip_hwpoisoned_pages)
+static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
struct zone *zone;
unsigned long flags, pfn;
@@ -60,8 +59,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype,
* FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
* We just check MOVABLE pages.
*/
- if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
- skip_hwpoisoned_pages))
+ if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags))
ret = 0;
/*
@@ -185,7 +183,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* prevents two threads from simultaneously working on overlapping ranges.
*/
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- unsigned migratetype, bool skip_hwpoisoned_pages)
+ unsigned migratetype, int flags)
{
unsigned long pfn;
unsigned long undo_pfn;
@@ -199,7 +197,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
pfn += pageblock_nr_pages) {
page = __first_valid_page(pfn, pageblock_nr_pages);
if (page &&
- set_migratetype_isolate(page, migratetype, skip_hwpoisoned_pages)) {
+ set_migratetype_isolate(page, migratetype, flags)) {
undo_pfn = pfn;
goto undo;
}
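For readers following the API change in the hunks above: the bool skip_hwpoisoned_pages parameter of set_migratetype_isolate() and start_isolate_page_range() is replaced by an int flags bitmask. The sketch below illustrates how callers are expected to use the new parameter; the flag names (SKIP_HWPOISON, REPORT_FAILURE) and the call sites shown are assumptions drawn from the surrounding patch series, not part of the diff limited to mm/page_isolation.c.

	/*
	 * Minimal sketch of the intended caller-side usage after this change.
	 * Flag values and call sites are assumptions, not taken from the
	 * hunks above.
	 */

	/* include/linux/page-isolation.h (assumed) */
	#define SKIP_HWPOISON	0x1	/* replaces the old skip_hwpoisoned_pages bool */
	#define REPORT_FAILURE	0x2	/* report why a pageblock could not be isolated */

	/* Memory offlining would ask for both behaviours ... */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				       SKIP_HWPOISON | REPORT_FAILURE);

	/* ... while a caller such as alloc_contig_range() would pass no flags
	 * (0) where it previously passed "false". */
	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype, 0);

Passing an int bitmask rather than a bool lets future callers opt in to additional isolation behaviours without another signature change.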