author    Roman Gushchin <roman.gushchin@linux.dev>    2024-07-26 20:31:08 +0000
committer Andrew Morton <akpm@linux-foundation.org>    2024-09-01 20:25:50 -0700
commit    f77bd4b14ccfd38dfcfe67eecad517b8ec1b7f37 (patch)
tree      4f3cac611c360d63670c008977489ec4244e9669 /mm/page_counter.c
parent    6c469957cd172c1bcea8c5b77bc711a245b0934f (diff)
mm: memcg: don't call propagate_protected_usage() needlessly
Patch series "mm: memcg: page counters optimizations", v3.

This patchset contains 3 independent small optimizations of page
counters.

This patch (of 3):

Memory protection (min/low) requires a constant tracking of protected
memory usage.  propagate_protected_usage() is called on each page
counters update and does a number of operations even in cases when the
actual memory protection functionality is not supported (e.g. hugetlb
cgroups or memcg swap counters).

It's obviously inefficient and leads to a waste of CPU cycles.  It can
be addressed by calling propagate_protected_usage() only for the
counters which do support memory guarantees.  As of now it's only
memcg->memory - the unified memory memcg counter.

Link: https://lkml.kernel.org/r/20240726203110.1577216-2-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
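To make the idea concrete before the diff itself, here is a minimal userspace
sketch of the pattern.  struct counter, counter_init() and counter_charge()
are simplified stand-ins invented for illustration (not the kernel API); the
only assumption carried over from the patch is that a protection_support flag
is set once when a counter is initialized and read once per charge:

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-in for struct page_counter (illustration only). */
	struct counter {
		struct counter *parent;
		long usage;
		bool protection_support;	/* set once at init, never changes */
	};

	static void counter_init(struct counter *c, struct counter *parent,
				 bool protection_support)
	{
		c->parent = parent;
		c->usage = 0;
		c->protection_support = protection_support;
	}

	/* Stand-in for the work the patch wants to skip for counters
	 * that do not implement memory protection. */
	static void propagate_protected_usage(struct counter *c, long usage)
	{
		printf("propagating protected usage %ld (parent=%p)\n",
		       usage, (void *)c->parent);
	}

	static bool track_protection(struct counter *c)
	{
		return c->protection_support;
	}

	static void counter_charge(struct counter *counter, long nr_pages)
	{
		/* Read the flag once from the counter being charged. */
		bool protection = track_protection(counter);

		for (struct counter *c = counter; c; c = c->parent) {
			c->usage += nr_pages;
			if (protection)
				propagate_protected_usage(c, c->usage);
		}
	}

	int main(void)
	{
		struct counter memory, swap;

		counter_init(&memory, NULL, true);	/* e.g. memcg->memory */
		counter_init(&swap, NULL, false);	/* e.g. swap or hugetlb */

		counter_charge(&memory, 4);	/* protection tracked */
		counter_charge(&swap, 4);	/* propagation skipped entirely */
		return 0;
	}

Reading the flag once from the counter being charged, rather than testing
something at every level of the hierarchy walk, keeps the cost for counters
without protection support down to a single predictable branch per operation,
which is what the hunks below do with track_protection().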
Diffstat (limited to 'mm/page_counter.c')
-rw-r--r--    mm/page_counter.c    17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/mm/page_counter.c b/mm/page_counter.c
index 0153f5bb3161..dd242245bee2 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -13,6 +13,11 @@
 #include <linux/bug.h>
 #include <asm/page.h>
 
+static bool track_protection(struct page_counter *c)
+{
+	return c->protection_support;
+}
+
 static void propagate_protected_usage(struct page_counter *c,
 				      unsigned long usage)
 {
@@ -57,7 +62,8 @@ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
 		new = 0;
 		atomic_long_set(&counter->usage, new);
 	}
-	propagate_protected_usage(counter, new);
+	if (track_protection(counter))
+		propagate_protected_usage(counter, new);
 }
 
 /**
@@ -70,12 +76,14 @@ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
 {
 	struct page_counter *c;
+	bool protection = track_protection(counter);
 
 	for (c = counter; c; c = c->parent) {
 		long new;
 
 		new = atomic_long_add_return(nr_pages, &c->usage);
-		propagate_protected_usage(c, new);
+		if (protection)
+			propagate_protected_usage(c, new);
 		/*
 		 * This is indeed racy, but we can live with some
 		 * inaccuracy in the watermark.
@@ -99,6 +107,7 @@ bool page_counter_try_charge(struct page_counter *counter,
 			     struct page_counter **fail)
 {
 	struct page_counter *c;
+	bool protection = track_protection(counter);
 
 	for (c = counter; c; c = c->parent) {
 		long new;
@@ -128,7 +137,9 @@ bool page_counter_try_charge(struct page_counter *counter,
 			*fail = c;
 			goto failed;
 		}
-		propagate_protected_usage(c, new);
+		if (protection)
+			propagate_protected_usage(c, new);
+
 		/*
 		 * Just like with failcnt, we can live with some
 		 * inaccuracy in the watermark.