author	Peter Xu <peterx@redhat.com>	2022-10-30 17:41:51 -0400
committer	Andrew Morton <akpm@linux-foundation.org>	2022-11-30 15:58:46 -0800
commit	15520a3f046998e3f57e695743e99b0875e2dae7 (patch)
tree	831e756367bc6484cba5fcafc6d782b153e1b4b3 /mm
parent	ca92ea3dc5a2b01f98e9f02b7a6bc03be06fe124 (diff)
mm: use pte markers for swap errors
PTE markers are an ideal mechanism for things like SWP_SWAPIN_ERROR. Using a whole swap entry type for this purpose is overkill, especially since we already have PTE markers. Define a new bit for swapin errors and use a pte marker instead. Then we can safely drop SWP_SWAPIN_ERROR and give one device slot back to swap.

SWP_SWAPIN_ERROR used to take the page pfn as part of the swap entry, but the pfn was never used. Nor is it clear how it could be useful, since a swapin failure is normally caused by a bad swap device rather than a bad page. Drop it as well.

Link: https://lkml.kernel.org/r/20221030214151.402274-3-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Huang Ying <ying.huang@intel.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
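The new marker bit and the reworked helpers live in include/linux/swapops.h, which falls outside this mm-limited diffstat. As a rough sketch of what that side of the change looks like (the exact bit value and helper bodies are assumptions inferred from the commit message, not part of the diff shown below):

/* include/linux/swapops.h -- sketch only, not part of the mm/ diff below */

/* Assumption: uffd-wp already uses BIT(0), so the swapin-error marker takes the next bit */
#define PTE_MARKER_SWAPIN_ERROR		BIT(1)

/* A swapin error no longer carries a pfn, so the entry is built from the marker alone */
static inline swp_entry_t make_swapin_error_entry(void)
{
	return make_pte_marker(PTE_MARKER_SWAPIN_ERROR);
}

/* Callers that still ask "is this a swapin error?" test the marker bit instead of a swap type */
static inline int is_swapin_error_entry(swp_entry_t entry)
{
	return is_pte_marker_entry(entry) &&
	       (pte_marker_get(entry) & PTE_MARKER_SWAPIN_ERROR);
}

With the marker in place, the fault path only needs the new PTE_MARKER_SWAPIN_ERROR check in handle_pte_marker() shown in the diff below, and the dedicated is_swapin_error_entry() branch in do_swap_page() can be dropped.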
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	6
-rw-r--r--	mm/shmem.c	2
-rw-r--r--	mm/swapfile.c	2
3 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index b79d27533722..142c4229549b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3668,6 +3668,10 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
 	if (WARN_ON_ONCE(!marker))
 		return VM_FAULT_SIGBUS;
 
+	/* Higher priority than uffd-wp when data corrupted */
+	if (marker & PTE_MARKER_SWAPIN_ERROR)
+		return VM_FAULT_SIGBUS;
+
 	if (pte_marker_entry_uffd_wp(entry))
 		return pte_marker_handle_uffd_wp(vmf);
 
@@ -3727,8 +3731,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 			put_page(vmf->page);
 		} else if (is_hwpoison_entry(entry)) {
 			ret = VM_FAULT_HWPOISON;
-		} else if (is_swapin_error_entry(entry)) {
-			ret = VM_FAULT_SIGBUS;
 		} else if (is_pte_marker_entry(entry)) {
 			ret = handle_pte_marker(vmf);
 		} else {
diff --git a/mm/shmem.c b/mm/shmem.c
index 0a7c4a748811..7428ae3fa4b9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1682,7 +1682,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
 	swp_entry_t swapin_error;
 	void *old;
 
-	swapin_error = make_swapin_error_entry(&folio->page);
+	swapin_error = make_swapin_error_entry();
 	old = xa_cmpxchg_irq(&mapping->i_pages, index,
 			     swp_to_radix_entry(swap),
 			     swp_to_radix_entry(swapin_error), 0);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 72e481aacd5d..03fe0949f6b2 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1781,7 +1781,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		pte_t pteval;
 
 		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
-		pteval = swp_entry_to_pte(make_swapin_error_entry(page));
+		pteval = swp_entry_to_pte(make_swapin_error_entry());
 		set_pte_at(vma->vm_mm, addr, pte, pteval);
 		swap_free(entry);
 		ret = 0;