author    | Johannes Weiner <hannes@cmpxchg.org> | 2024-01-29 20:36:56 -0500
committer | Andrew Morton <akpm@linux-foundation.org> | 2024-02-22 10:24:45 -0800
commit    | eb23ee4f96937ca47fe6c88151d998551642a772 (patch)
tree      | 90f96212e498a961613c51949de94e03d69317f0 /mm/zswap.c
parent    | 9986d35d4ceb53962f1c3bc378e317b5ca841f73 (diff)
mm: zswap: function ordering: shrink_memcg_cb
shrink_memcg_cb() is called by the shrinker and is based on
zswap_writeback_entry(). Move it in between the two, which saves one forward declaration.
Link: https://lkml.kernel.org/r/20240130014208.565554-21-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Cc: Chengming Zhou <zhouchengming@bytedance.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
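As an aside for readers less familiar with the pattern, the "save one forward declaration" point from the commit message works as sketched below. The names foo() and bar() are made up for illustration and are not part of the patch.

#if 0
/* Before: bar() is defined after its caller, so a forward declaration is needed. */
static int bar(void);			/* forward declaration */
static int foo(void) { return bar(); }
static int bar(void) { return 42; }
#else
/* After: defining bar() above its caller makes the forward declaration unnecessary. */
static int bar(void) { return 42; }
static int foo(void) { return bar(); }
#endif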
Diffstat (limited to 'mm/zswap.c')
-rw-r--r-- | mm/zswap.c | 125
1 file changed, 61 insertions, 64 deletions
diff --git a/mm/zswap.c b/mm/zswap.c
index 667ed3e19340..2bf4bf1d356c 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1254,7 +1254,67 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 * shrinker functions
 **********************************/
 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
-		spinlock_t *lock, void *arg);
+		spinlock_t *lock, void *arg)
+{
+	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
+	bool *encountered_page_in_swapcache = (bool *)arg;
+	swp_entry_t swpentry;
+	enum lru_status ret = LRU_REMOVED_RETRY;
+	int writeback_result;
+
+	/*
+	 * Rotate the entry to the tail before unlocking the LRU,
+	 * so that in case of an invalidation race concurrent
+	 * reclaimers don't waste their time on it.
+	 *
+	 * If writeback succeeds, or failure is due to the entry
+	 * being invalidated by the swap subsystem, the invalidation
+	 * will unlink and free it.
+	 *
+	 * Temporary failures, where the same entry should be tried
+	 * again immediately, almost never happen for this shrinker.
+	 * We don't do any trylocking; -ENOMEM comes closest,
+	 * but that's extremely rare and doesn't happen spuriously
+	 * either. Don't bother distinguishing this case.
+	 *
+	 * But since they do exist in theory, the entry cannot just
+	 * be unlinked, or we could leak it. Hence, rotate.
+	 */
+	list_move_tail(item, &l->list);
+
+	/*
+	 * Once the lru lock is dropped, the entry might get freed. The
+	 * swpentry is copied to the stack, and entry isn't deref'd again
+	 * until the entry is verified to still be alive in the tree.
+	 */
+	swpentry = entry->swpentry;
+
+	/*
+	 * It's safe to drop the lock here because we return either
+	 * LRU_REMOVED_RETRY or LRU_RETRY.
+	 */
+	spin_unlock(lock);
+
+	writeback_result = zswap_writeback_entry(entry, swpentry);
+
+	if (writeback_result) {
+		zswap_reject_reclaim_fail++;
+		ret = LRU_RETRY;
+
+		/*
+		 * Encountering a page already in swap cache is a sign that we are shrinking
+		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
+		 * shrinker context).
+		 */
+		if (writeback_result == -EEXIST && encountered_page_in_swapcache)
+			*encountered_page_in_swapcache = true;
+	} else {
+		zswap_written_back_pages++;
+	}
+
+	spin_lock(lock);
+	return ret;
+}
 
 static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
 		struct shrink_control *sc)
@@ -1354,69 +1414,6 @@ static void zswap_alloc_shrinker(struct zswap_pool *pool)
 	pool->shrinker->seeks = DEFAULT_SEEKS;
 }
 
-static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
-		spinlock_t *lock, void *arg)
-{
-	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
-	bool *encountered_page_in_swapcache = (bool *)arg;
-	swp_entry_t swpentry;
-	enum lru_status ret = LRU_REMOVED_RETRY;
-	int writeback_result;
-
-	/*
-	 * Rotate the entry to the tail before unlocking the LRU,
-	 * so that in case of an invalidation race concurrent
-	 * reclaimers don't waste their time on it.
-	 *
-	 * If writeback succeeds, or failure is due to the entry
-	 * being invalidated by the swap subsystem, the invalidation
-	 * will unlink and free it.
-	 *
-	 * Temporary failures, where the same entry should be tried
-	 * again immediately, almost never happen for this shrinker.
-	 * We don't do any trylocking; -ENOMEM comes closest,
-	 * but that's extremely rare and doesn't happen spuriously
-	 * either. Don't bother distinguishing this case.
-	 *
-	 * But since they do exist in theory, the entry cannot just
-	 * be unlinked, or we could leak it. Hence, rotate.
-	 */
-	list_move_tail(item, &l->list);
-
-	/*
-	 * Once the lru lock is dropped, the entry might get freed. The
-	 * swpentry is copied to the stack, and entry isn't deref'd again
-	 * until the entry is verified to still be alive in the tree.
-	 */
-	swpentry = entry->swpentry;
-
-	/*
-	 * It's safe to drop the lock here because we return either
-	 * LRU_REMOVED_RETRY or LRU_RETRY.
-	 */
-	spin_unlock(lock);
-
-	writeback_result = zswap_writeback_entry(entry, swpentry);
-
-	if (writeback_result) {
-		zswap_reject_reclaim_fail++;
-		ret = LRU_RETRY;
-
-		/*
-		 * Encountering a page already in swap cache is a sign that we are shrinking
-		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
-		 * shrinker context).
-		 */
-		if (writeback_result == -EEXIST && encountered_page_in_swapcache)
-			*encountered_page_in_swapcache = true;
-	} else {
-		zswap_written_back_pages++;
-	}
-
-	spin_lock(lock);
-	return ret;
-}
-
 static int shrink_memcg(struct mem_cgroup *memcg)
 {
 	struct zswap_pool *pool;
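For context on how this callback is invoked (not part of the patch): shrink_memcg_cb() matches the list_lru_walk_cb signature, and in the dynamic shrinker path it is handed to the list_lru walker from the shrinker's scan callback. Below is a simplified sketch of that calling pattern; my_scan() and the use of shrinker->private_data as a bare list_lru are illustrative stand-ins, not the actual zswap_shrinker_scan() code, and the sketch assumes it sits in the same file as shrink_memcg_cb().

#include <linux/list_lru.h>
#include <linux/shrinker.h>

/*
 * Sketch only: a shrinker ->scan_objects callback driving a
 * list_lru_walk_cb such as shrink_memcg_cb(). The walker invokes the
 * callback once per LRU entry with the LRU lock held; LRU_REMOVED_RETRY
 * counts the entry as reclaimed and restarts the walk (the lock was
 * dropped), while LRU_RETRY just restarts it.
 */
static unsigned long my_scan(struct shrinker *shrinker,
			     struct shrink_control *sc)
{
	struct list_lru *lru = shrinker->private_data;	/* assumption: set at shrinker_alloc() time */
	bool encountered_page_in_swapcache = false;
	unsigned long reclaimed;

	/* Walk up to sc->nr_to_scan entries on sc->nid / sc->memcg. */
	reclaimed = list_lru_shrink_walk(lru, sc, &shrink_memcg_cb,
					 &encountered_page_in_swapcache);

	/* Hitting the swap cache means we reached warmer entries; stop early. */
	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return reclaimed;
}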