author | Longlong Xia <xialonglong1@huawei.com> | 2023-04-14 10:17:41 +0800
committer | Andrew Morton <akpm@linux-foundation.org> | 2023-04-18 16:53:52 -0700
commit | 4248d0083ec5817eebfb916c54950d100b3468ee
tree | b0f64bae03987593a20444657b0afd07471bf6b2 /mm
parent | 4f775086a6eee07c6ae4be4734d736e13b537351
mm: ksm: support hwpoison for ksm page
hwpoison_user_mappings() is updated to support ksm pages, and
collect_procs_ksm() is added to collect the affected processes when a
memory error hits a ksm page.  The difference from collect_procs_anon()
is that it also needs to traverse the rmap_item list on the stable node
of the ksm page.  add_to_kill_ksm() is added to handle ksm pages, and
task_in_to_kill_list() is added to avoid adding the same tsk to the
to_kill list more than once: while walking the rmap_item list, if all
the pages merged into the ksm page come from the same process, that
process would otherwise be added repeatedly.
Link: https://lkml.kernel.org/r/20230414021741.2597273-3-xialonglong1@huawei.com
Signed-off-by: Longlong Xia <xialonglong1@huawei.com>
Tested-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
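For context, here is a minimal userspace sketch (illustrative only, not part of this patch) of the duplicate-addition scenario the commit message describes: a single process maps several identical anonymous pages and marks them MADV_MERGEABLE, so ksmd may merge them into one ksm page mapped at multiple addresses in the same mm. Each merged page contributes an rmap_item on the stable node, so a poisoned ksm page would otherwise cause the same task to be queued for killing once per rmap_item, which is exactly what task_in_to_kill_list() prevents.

/*
 * Illustrative only -- not part of the kernel patch.  Two identical
 * anonymous pages in one process are marked MADV_MERGEABLE so that ksmd
 * (enabled via /sys/kernel/mm/ksm/run) may merge them into a single ksm
 * page mapped twice in this mm.  If that ksm page were later poisoned,
 * collect_procs_ksm() would visit one rmap_item per merged page, and
 * without the task_in_to_kill_list() check this task could be added to
 * the to_kill list twice.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        size_t len = 2 * page_size;
        char *buf;

        buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* Identical contents make both pages candidates for merging. */
        memset(buf, 0x5a, len);

        /* Ask KSM to consider this range for merging. */
        if (madvise(buf, len, MADV_MERGEABLE)) {
                perror("madvise(MADV_MERGEABLE)");
                return 1;
        }

        /* Keep the mapping alive so ksmd has time to scan and merge it. */
        pause();
        return 0;
}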
Diffstat (limited to 'mm')
-rw-r--r-- | mm/ksm.c | 45
-rw-r--r-- | mm/memory-failure.c | 34
2 files changed, 70 insertions, 9 deletions
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2738,6 +2738,51 @@ again:
                        goto again;
        }
 
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Collect processes when the error hit an ksm page.
+ */
+void collect_procs_ksm(struct page *page, struct list_head *to_kill,
+                       int force_early)
+{
+        struct ksm_stable_node *stable_node;
+        struct ksm_rmap_item *rmap_item;
+        struct folio *folio = page_folio(page);
+        struct vm_area_struct *vma;
+        struct task_struct *tsk;
+
+        stable_node = folio_stable_node(folio);
+        if (!stable_node)
+                return;
+        hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
+                struct anon_vma *av = rmap_item->anon_vma;
+
+                anon_vma_lock_read(av);
+                read_lock(&tasklist_lock);
+                for_each_process(tsk) {
+                        struct anon_vma_chain *vmac;
+                        unsigned long addr;
+                        struct task_struct *t =
+                                task_early_kill(tsk, force_early);
+                        if (!t)
+                                continue;
+                        anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0,
+                                                       ULONG_MAX)
+                        {
+                                vma = vmac->vma;
+                                if (vma->vm_mm == t->mm) {
+                                        addr = rmap_item->address & PAGE_MASK;
+                                        add_to_kill_ksm(t, page, vma, to_kill,
+                                                        addr);
+                                }
+                        }
+                }
+                read_unlock(&tasklist_lock);
+                anon_vma_unlock_read(av);
+        }
+}
+#endif
+
 #ifdef CONFIG_MIGRATION
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
 {
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index bb09999108ef..f69c410fb9f9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -455,6 +455,27 @@ static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
        __add_to_kill(tsk, p, vma, to_kill, 0, FSDAX_INVALID_PGOFF);
 }
 
+#ifdef CONFIG_KSM
+static bool task_in_to_kill_list(struct list_head *to_kill,
+                                 struct task_struct *tsk)
+{
+        struct to_kill *tk, *next;
+
+        list_for_each_entry_safe(tk, next, to_kill, nd) {
+                if (tk->tsk == tsk)
+                        return true;
+        }
+
+        return false;
+}
+void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
+                     struct vm_area_struct *vma, struct list_head *to_kill,
+                     unsigned long ksm_addr)
+{
+        if (!task_in_to_kill_list(to_kill, tsk))
+                __add_to_kill(tsk, p, vma, to_kill, ksm_addr, FSDAX_INVALID_PGOFF);
+}
+#endif
 /*
  * Kill the processes that have been collected earlier.
  *
@@ -534,8 +555,7 @@ static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
  * processes sharing the same error page,if the process is "early kill", the
  * task_struct of the dedicated thread will also be returned.
  */
-static struct task_struct *task_early_kill(struct task_struct *tsk,
-                                           int force_early)
+struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
 {
        if (!tsk->mm)
                return NULL;
@@ -666,8 +686,9 @@ static void collect_procs(struct page *page, struct list_head *tokill,
 {
        if (!page->mapping)
                return;
-
-       if (PageAnon(page))
+       if (unlikely(PageKsm(page)))
+               collect_procs_ksm(page, tokill, force_early);
+       else if (PageAnon(page))
                collect_procs_anon(page, tokill, force_early);
        else
                collect_procs_file(page, tokill, force_early);
@@ -1522,11 +1543,6 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
        if (!page_mapped(hpage))
                return true;
 
-       if (PageKsm(p)) {
-               pr_err("%#lx: can't handle KSM pages.\n", pfn);
-               return false;
-       }
-
        if (PageSwapCache(p)) {
                pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
                ttu &= ~TTU_HWPOISON;