author    Linus Torvalds <torvalds@linux-foundation.org>  2021-08-20 13:08:56 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-08-20 13:08:56 -0700
commit    ed3bad2e4fd70047b729b64c78b97f88c4d33224 (patch)
tree      c79809d6d2de9e165f62c7a85c92032b75c0d21a /mm/vmscan.c
parent    8ba9fbe1e4b8a28050c283792344ee8b6bc3465c (diff)
parent    c7b1850dfb41d0b4154aca8dbc04777fbd75616f (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "10 patches.

  Subsystems affected by this patch series: MAINTAINERS and mm (shmem,
  pagealloc, tracing, memcg, memory-failure, vmscan, kfence, and hugetlb)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  hugetlb: don't pass page cache pages to restore_reserve_on_error
  kfence: fix is_kfence_address() for addresses below KFENCE_POOL_SIZE
  mm: vmscan: fix missing psi annotation for node_reclaim()
  mm/hwpoison: retry with shake_page() for unhandlable pages
  mm: memcontrol: fix occasional OOMs due to proportional memory.low reclaim
  MAINTAINERS: update ClangBuiltLinux IRC chat
  mmflags.h: add missing __GFP_ZEROTAGS and __GFP_SKIP_KASAN_POISON names
  mm/page_alloc: don't corrupt pcppage_migratetype
  Revert "mm: swap: check if swap backing device is congested or not"
  Revert "mm/shmem: fix shmem_swapin() race with swapoff"
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  30
1 file changed, 22 insertions, 8 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4620df62f0ff..eeae2f6bc532 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -100,9 +100,12 @@ struct scan_control {
unsigned int may_swap:1;
/*
- * Cgroups are not reclaimed below their configured memory.low,
- * unless we threaten to OOM. If any cgroups are skipped due to
- * memory.low and nothing was reclaimed, go back for memory.low.
+ * Cgroup memory below memory.low is protected as long as we
+ * don't threaten to OOM. If any cgroup is reclaimed at
+ * reduced force or passed over entirely due to its memory.low
+ * setting (memcg_low_skipped), and nothing is reclaimed as a
+ * result, then go back for one more cycle that reclaims the protected
+ * memory (memcg_low_reclaim) to avert OOM.
*/
unsigned int memcg_low_reclaim:1;
unsigned int memcg_low_skipped:1;
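[The rewritten comment describes a two-pass protocol: a first reclaim pass that honors memory.low, and, only if that pass reclaims nothing while some cgroup was skipped, a retry pass that is allowed to dip into the protected memory. A minimal userspace sketch of that protocol, with made-up cgroup numbers and an illustrative reclaim_pass() helper; nothing below is kernel API:]

#include <stdbool.h>
#include <stdio.h>

struct cgroup { unsigned long usage, low; };

struct scan_ctl {
	bool memcg_low_reclaim;	/* retry pass: may breach memory.low */
	bool memcg_low_skipped;	/* set when a cgroup was passed over */
};

/* One reclaim pass over all cgroups; returns pages "reclaimed". */
static unsigned long reclaim_pass(struct scan_ctl *sc,
				  struct cgroup *groups, int n)
{
	unsigned long reclaimed = 0;

	for (int i = 0; i < n; i++) {
		/* Below memory.low and not yet in the retry pass: skip. */
		if (!sc->memcg_low_reclaim && groups[i].usage <= groups[i].low) {
			sc->memcg_low_skipped = true;
			continue;
		}
		if (groups[i].usage) {	/* take one page */
			groups[i].usage--;
			reclaimed++;
		}
	}
	return reclaimed;
}

int main(void)
{
	struct cgroup groups[] = { { 8, 16 }, { 4, 8 } }; /* all protected */
	struct scan_ctl sc = { 0 };

	unsigned long got = reclaim_pass(&sc, groups, 2);
	if (!got && sc.memcg_low_skipped) {
		/* Nothing unprotected was reclaimable: retry before OOM. */
		sc.memcg_low_reclaim = true;
		got = reclaim_pass(&sc, groups, 2);
	}
	printf("reclaimed %lu pages\n", got);
	return 0;
}

[The point of the second pass is ordering: protected memory is touched only after everything unprotected has been tried, which is the invariant memcg_low_reclaim and memcg_low_skipped encode.]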
@@ -2537,15 +2540,14 @@ out:
for_each_evictable_lru(lru) {
int file = is_file_lru(lru);
unsigned long lruvec_size;
+ unsigned long low, min;
unsigned long scan;
- unsigned long protection;
lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
- protection = mem_cgroup_protection(sc->target_mem_cgroup,
- memcg,
- sc->memcg_low_reclaim);
+ mem_cgroup_protection(sc->target_mem_cgroup, memcg,
+ &min, &low);
- if (protection) {
+ if (min || low) {
/*
* Scale a cgroup's reclaim pressure by proportioning
* its current usage to its memory.low or memory.min
@@ -2576,6 +2578,15 @@ out:
* hard protection.
*/
unsigned long cgroup_size = mem_cgroup_size(memcg);
+ unsigned long protection;
+
+ /* memory.low scaling, make sure we retry before OOM */
+ if (!sc->memcg_low_reclaim && low > min) {
+ protection = low;
+ sc->memcg_low_skipped = 1;
+ } else {
+ protection = min;
+ }
/* Avoid TOCTOU with earlier protection check */
cgroup_size = max(cgroup_size, protection);
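[The choice between low and min above feeds the proportional scaling just below this hunk, where get_scan_count() computes roughly scan = lruvec_size - lruvec_size * protection / (cgroup_size + 1). A standalone worked example with made-up numbers, showing why picking low on the first pass applies gentler pressure than min on the retry:]

#include <stdio.h>

int main(void)
{
	unsigned long lruvec_size = 1000;	/* pages on this LRU */
	unsigned long cgroup_size = 4096;	/* total cgroup pages */
	unsigned long min = 1024, low = 3072;	/* memory.min / memory.low */

	/* First pass: protection = low, light pressure. */
	unsigned long scan_low = lruvec_size -
		lruvec_size * low / (cgroup_size + 1);
	/* Retry pass (memcg_low_reclaim): protection = min, more pressure. */
	unsigned long scan_min = lruvec_size -
		lruvec_size * min / (cgroup_size + 1);

	printf("scan with low: %lu, with min: %lu\n", scan_low, scan_min);
	return 0;
}

[With these numbers the first pass scans 251 of 1000 pages and the retry scans 751: usage is pushed gradually toward memory.low before the hard memory.min floor comes into play.]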
@@ -4413,11 +4424,13 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
.may_swap = 1,
.reclaim_idx = gfp_zone(gfp_mask),
};
+ unsigned long pflags;
trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
sc.gfp_mask);
cond_resched();
+ psi_memstall_enter(&pflags);
fs_reclaim_acquire(sc.gfp_mask);
/*
* We need to be able to allocate from the reserves for RECLAIM_UNMAP
@@ -4442,6 +4455,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
current->flags &= ~PF_SWAPWRITE;
memalloc_noreclaim_restore(noreclaim_flag);
fs_reclaim_release(sc.gfp_mask);
+ psi_memstall_leave(&pflags);
trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
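[The psi fix follows the standard memstall bracket: psi_memstall_enter() before the work that may stall on memory, psi_memstall_leave() after, both passed the same pflags, so time spent in __node_reclaim() is accounted in /proc/pressure/memory. A standalone sketch of the pattern with stubbed hooks; psi_memstall_enter/leave are the real kernel names, but the stub bodies and do_reclaim_work() are illustrative:]

#include <stdio.h>

/* Stand-ins for the kernel's PSI hooks, so the sketch compiles alone. */
static void psi_memstall_enter(unsigned long *pflags)
{
	*pflags = 0;	/* the real hook saves per-task flag state here */
	printf("memory stall begins\n");
}

static void psi_memstall_leave(unsigned long *pflags)
{
	(void)*pflags;	/* the real hook restores the saved state */
	printf("memory stall ends\n");
}

static unsigned long do_reclaim_work(void)
{
	return 42;	/* pretend we reclaimed some pages */
}

int main(void)
{
	unsigned long pflags;

	/* The bracket the fix adds around direct node reclaim. */
	psi_memstall_enter(&pflags);
	unsigned long reclaimed = do_reclaim_work();
	psi_memstall_leave(&pflags);

	printf("reclaimed %lu pages\n", reclaimed);
	return 0;
}

[The discipline that matters is pairing: every enter needs a matching leave on all exit paths, which is why the fix brackets the outermost boundary of the reclaim section.]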