From ac541f2503722943a9f13e0c92ed07632ba7fd38 Mon Sep 17 00:00:00 2001
From: Ralph Campbell
Date: Wed, 23 Oct 2019 12:55:14 -0700
Subject: mm/hmm: allow snapshot of the special zero page
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If a device driver like nouveau tries to use hmm_range_fault() to access
the special shared zero page in system memory, hmm_range_fault() will
return -EFAULT and kill the process.

Allow hmm_range_fault() to return success (0) when the CPU pagetable
entry points to the special shared zero page.

page_to_pfn() and pfn_to_page() are defined on the zero page so just
handle it like any other page.

Link: https://lore.kernel.org/r/20191023195515.13168-3-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell
Reviewed-by: "Jérôme Glisse"
Acked-by: David Hildenbrand
Signed-off-by: Jason Gunthorpe
---
 mm/hmm.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index 902f5fa6bf93..6b0136665407 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -532,8 +532,14 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 		if (unlikely(!hmm_vma_walk->pgmap))
 			return -EBUSY;
 	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
-		*pfn = range->values[HMM_PFN_SPECIAL];
-		return -EFAULT;
+		if (!is_zero_pfn(pte_pfn(pte))) {
+			*pfn = range->values[HMM_PFN_SPECIAL];
+			return -EFAULT;
+		}
+		/*
+		 * Since each architecture defines a struct page for the zero
+		 * page, just fall through and treat it like a normal page.
+		 */
 	}
 
 	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
--
cgit v1.2.3-70-g09d2
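
For context, here is a minimal driver-side sketch (not part of the patch) of
what the change means for a hmm_range_fault() caller: before it, snapshotting
a read-only anonymous mapping backed by the shared zero page failed with
-EFAULT; after it, the entry is reported like any other valid page. The
function name example_snapshot_one_page() is made up, and the sketch assumes
the struct hmm_range layout, the HMM_FAULT_SNAPSHOT flag and the
hmm_device_entry_to_page() helper as they existed around this kernel version.

#include <linux/hmm.h>
#include <linux/mm.h>

/* Hypothetical illustration only; not part of this patch. */
static int example_snapshot_one_page(struct hmm_range *range)
{
	long ret;

	/* Take a read-only snapshot of the CPU page table for the range. */
	ret = hmm_range_fault(range, HMM_FAULT_SNAPSHOT);
	if (ret < 0)
		return ret;

	/* With this patch, the zero page shows up as a normal valid entry. */
	if (range->pfns[0] & range->flags[HMM_PFN_VALID]) {
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[0]);
		if (page && is_zero_pfn(page_to_pfn(page)))
			;	/* shared zero page: map it read-only on the device */
	}
	return 0;
}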