author		Linus Torvalds <torvalds@linux-foundation.org>	2020-06-09 09:54:46 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-09 09:54:46 -0700
commit		a5ad5742f671de906adbf29fbedf0a04705cebad
tree		88d1a4c18e2025a5a8335dbbc9dea8bebeba5789 /arch/powerpc/mm
parent		013b2deba9a6b80ca02f4fafd7dedf875e9b4450
parent		4fa7252338a56fbc90220e6330f136a379175a7a
Merge branch 'akpm' (patches from Andrew)
Merge even more updates from Andrew Morton:
- a kernel-wide sweep of show_stack()
- pagetable cleanups
- abstract out accesses to mmap_sem - prep for mmap_sem scalability work (see the lock API sketch below)
- hch's user access work
Subsystems affected by this patch series: debug, mm/pagemap, mm/maccess,
mm/documentation.
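The mmap_sem work above shows up directly in the powerpc hunks further down: open-coded rwsem calls on mm->mmap_sem become calls into the new mmap locking wrapper API from <linux/mmap_lock.h>. A minimal sketch of the pattern follows; the helper is hypothetical and written only to illustrate the API mapping. Before this series the same critical section would have used down_read(&mm->mmap_sem)/up_read(&mm->mmap_sem), and the write side maps to mmap_write_lock()/mmap_write_unlock() in the same way.

#include <linux/mm.h>		/* find_vma(), struct mm_struct */
#include <linux/mmap_lock.h>	/* mmap_read_lock() and friends */

/* Hypothetical helper, for illustration only: test whether an address
 * falls inside a VMA while holding the mmap lock for read. */
static bool addr_has_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool found;

	mmap_read_lock(mm);			/* was: down_read(&mm->mmap_sem) */
	vma = find_vma(mm, addr);
	found = vma && vma->vm_start <= addr;
	mmap_read_unlock(mm);			/* was: up_read(&mm->mmap_sem) */

	return found;
}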
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (93 commits)
include/linux/cache.h: expand documentation over __read_mostly
maccess: return -ERANGE when probe_kernel_read() fails
x86: use non-set_fs based maccess routines
maccess: allow architectures to provide kernel probing directly
maccess: move user access routines together
maccess: always use strict semantics for probe_kernel_read
maccess: remove strncpy_from_unsafe
tracing/kprobes: handle mixed kernel/userspace probes better
bpf: rework the compat kernel probe handling
bpf: bpf_seq_printf(): handle potentially unsafe format string better
bpf: handle the compat string in bpf_trace_copy_string better
bpf: factor out a bpf_trace_copy_string helper
maccess: unify the probe kernel arch hooks
maccess: remove probe_read_common and probe_write_common
maccess: rename strnlen_unsafe_user to strnlen_user_nofault
maccess: rename strncpy_from_unsafe_strict to strncpy_from_kernel_nofault
maccess: rename strncpy_from_unsafe_user to strncpy_from_user_nofault
maccess: update the top of file comment
maccess: clarify kerneldoc comments
maccess: remove duplicate kerneldoc comments
...
Diffstat (limited to 'arch/powerpc/mm')
35 files changed, 52 insertions, 67 deletions
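Besides the locking conversion, many hunks below switch from the powerpc-local pmd_ptr()/pmd_ptr_k() helpers to the generic pmd_off()/pmd_off_k() helpers added elsewhere in this series, and from <asm/pgtable.h> to <linux/pgtable.h>. As a rough sketch of what the kernel-address variant amounts to (assuming the generic helper does the conventional page-table walk; the function name below is illustrative, not copied from the tree):

#include <linux/pgtable.h>

/* Walk the kernel page tables down to the PMD entry covering virtual
 * address va - roughly what pmd_off_k(va) resolves to. */
static pmd_t *pmd_off_k_sketch(unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}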
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 2702e8762c0d..923ad8f374eb 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -14,9 +14,9 @@
  * hash table, so this file is not used on them.)
  */
 
+#include <linux/pgtable.h>
 #include <asm/reg.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/cputable.h>
 #include <asm/ppc_asm.h>
 #include <asm/thread_info.h>
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index a6dcc708eee3..03b6ba54460e 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -320,7 +320,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea)
 
 	if (!Hash)
 		return;
-	pmd = pmd_ptr(mm, ea);
+	pmd = pmd_off(mm, ea);
 	if (!pmd_none(*pmd))
 		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
 }
diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c
index dc9039a170aa..b6c7427daa6f 100644
--- a/arch/powerpc/mm/book3s32/tlb.c
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -90,7 +90,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
 	if (start >= end)
 		return;
 	end = (end - 1) | ~PAGE_MASK;
-	pmd = pmd_ptr(mm, start);
+	pmd = pmd_off(mm, start);
 	for (;;) {
 		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
 		if (pmd_end > end)
@@ -129,7 +129,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 
 	/*
 	 * It is safe to go down the mm's list of vmas when called
-	 * from dup_mmap, holding mmap_sem. It would also be safe from
+	 * from dup_mmap, holding mmap_lock. It would also be safe from
 	 * unmap_region or exit_mmap, but not from vmtruncate on SMP -
 	 * but it seems dup_mmap is the only SMP case which gets here.
 	 */
@@ -148,7 +148,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 		return;
 	}
 	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
-	pmd = pmd_ptr(mm, vmaddr);
+	pmd = pmd_off(mm, vmaddr);
 	if (!pmd_none(*pmd))
 		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
 }
diff --git a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
index eefa89c6117b..25acb9c5ee1b 100644
--- a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
@@ -10,7 +10,6 @@
 
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/machdep.h>
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index d2d8237ea9d5..cf20e5229ce1 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -14,11 +14,11 @@
 #include <linux/processor.h>
 #include <linux/threads.h>
 #include <linux/smp.h>
+#include <linux/pgtable.h>
 
 #include <asm/machdep.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
 #include <asm/trace.h>
 #include <asm/tlb.h>
 #include <asm/cputable.h>
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index 8b4b0a602158..2a99167afbaf 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -10,7 +10,6 @@
 #include <linux/mm.h>
 
 #include <asm/pgalloc.h>
-#include <asm/pgtable.h>
 #include <asm/sections.h>
 #include <asm/mmu.h>
 #include <asm/tlb.h>
@@ -238,7 +237,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
 	 * to hugepage, we first clear the pmd, then invalidate all
 	 * the PTE entries. The assumption here is that any low level
 	 * page fault will see a none pmd and take the slow path that
-	 * will wait on mmap_sem. But we could very well be in a
+	 * will wait on mmap_lock. But we could very well be in a
 	 * hash_page with local ptep pointer value. Such a hash page
 	 * can result in adding new HPTE entries for normal subpages.
 	 * That means we could be modifying the page content as we
@@ -252,7 +251,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
 	 * Now invalidate the hpte entries in the range
 	 * covered by pmd. This make sure we take a
 	 * fault and will find the pmd as none, which will
-	 * result in a major fault which takes mmap_sem and
+	 * result in a major fault which takes mmap_lock and
 	 * hence wait for collapse to complete. Without this
 	 * the __collapse_huge_page_copy can result in copying
 	 * the old content.
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 0124003e60d0..468169e33c86 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -35,10 +35,10 @@
 #include <linux/pkeys.h>
 #include <linux/hugetlb.h>
 #include <linux/cpu.h>
+#include <linux/pgtable.h>
 
 #include <asm/debugfs.h>
 #include <asm/processor.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/page.h>
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index fa05bbd1f682..563faa10bb66 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -96,7 +96,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 		goto unlock_exit;
 	}
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
 			sizeof(struct vm_area_struct *);
 	chunk = min(chunk, entries);
@@ -114,7 +114,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 			pinned += ret;
 		break;
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (pinned != entries) {
 		if (!ret)
 			ret = -EFAULT;
diff --git a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
index cab06331c0c0..c812b401b66c 100644
--- a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
@@ -2,7 +2,6 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 #include <linux/security.h>
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/machdep.h>
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 8acb96de0e48..bb00e0cba119 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -17,7 +17,6 @@
 #include <linux/string_helpers.h>
 #include <linux/stop_machine.h>
 
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/dma.h>
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
index 8141e8b40ee5..156c38f89511 100644
--- a/arch/powerpc/mm/book3s64/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -10,7 +10,6 @@
  */
 
 #include <asm/asm-prototypes.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/paca.h>
@@ -21,6 +20,7 @@
 #include <linux/compiler.h>
 #include <linux/context_tracking.h>
 #include <linux/mm_types.h>
+#include <linux/pgtable.h>
 
 #include <asm/udbg.h>
 #include <asm/code-patching.h>
diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index 25a0c044bd93..60c6ea16a972 100644
--- a/arch/powerpc/mm/book3s64/subpage_prot.c
+++ b/arch/powerpc/mm/book3s64/subpage_prot.c
@@ -11,7 +11,7 @@
 #include <linux/hugetlb.h>
 #include <linux/syscalls.h>
 
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
 #include <linux/uaccess.h>
 
 /*
@@ -94,7 +94,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
 	size_t nw;
 	unsigned long next, limit;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 
 	spt = mm_ctx_subpage_prot(&mm->context);
 	if (!spt)
@@ -129,7 +129,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
 	}
 
 err_out:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -219,13 +219,13 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
 	if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
 		return -EFAULT;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 
 	spt = mm_ctx_subpage_prot(&mm->context);
 	if (!spt) {
 		/*
 		 * Allocate subpage prot table if not already done.
-		 * Do this with mmap_sem held
+		 * Do this with mmap_lock held
 		 */
 		spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
 		if (!spt) {
@@ -269,11 +269,11 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
 		if (addr + (nw << PAGE_SHIFT) > next)
 			nw = (next - addr) >> PAGE_SHIFT;
 
-		up_write(&mm->mmap_sem);
+		mmap_write_unlock(mm);
 		if (__copy_from_user(spp, map, nw * sizeof(u32)))
 			return -EFAULT;
 		map += nw;
-		down_write(&mm->mmap_sem);
+		mmap_write_lock(mm);
 
 		/* now flush any existing HPTEs for the range */
 		hpte_flush_range(mm, addr, nw);
@@ -282,6 +282,6 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
 		spt->maxaddr = limit;
 	err = 0;
 out:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return err;
 }
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index beb060b96632..b83abbead4a2 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -33,7 +33,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	if (mm->pgd == NULL)
 		return -EFAULT;
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	ret = -EFAULT;
 	vma = find_vma(mm, ea);
 	if (!vma)
@@ -82,7 +82,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 		current->min_flt++;
 
 out_unlock:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 2393ed9d84bb..641fc5f3d7dd 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -35,7 +35,6 @@
 
 #include <asm/firmware.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/siginfo.h>
@@ -109,7 +108,7 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
 	 * Something tried to access memory that isn't in our memory map..
 	 * Fix it, but check if it's kernel or user first..
 	 */
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	return __bad_area_nosemaphore(regs, address, si_code);
 }
@@ -139,13 +138,13 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
 	 * 2. T1 : set AMR to deny access to pkey=4, touches, page
 	 * 3. T1 : faults...
	 * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
-	 * 5. T1 : enters fault handler, takes mmap_sem, etc...
+	 * 5. T1 : enters fault handler, takes mmap_lock, etc...
 	 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
 	 *	   faulted on a pte with its pkey=4.
 	 */
 	pkey = vma_pkey(vma);
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * If we are in kernel mode, bail out with a SEGV, this will
@@ -526,9 +525,9 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 	/*
-	 * We want to do this outside mmap_sem, because reading code around nip
+	 * We want to do this outside mmap_lock, because reading code around nip
 	 * can result in fault, which will cause a deadlock when called with
-	 * mmap_sem held
+	 * mmap_lock held
 	 */
 	if (is_user)
 		flags |= FAULT_FLAG_USER;
@@ -540,7 +539,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
 	 * kernel and should generate an OOPS. Unfortunately, in the case of an
-	 * erroneous fault occurring in a code path which already holds mmap_sem
+	 * erroneous fault occurring in a code path which already holds mmap_lock
 	 * we will deadlock attempting to validate the fault against the
 	 * address space. Luckily the kernel only validly references user
 	 * space from well defined areas of code, which are listed in the
@@ -552,12 +551,12 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * source. If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
 	 */
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+	if (unlikely(!mmap_read_trylock(mm))) {
 		if (!is_user && !search_exception_tables(regs->nip))
 			return bad_area_nosemaphore(regs, address);
 
retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	} else {
 		/*
 		 * The above down_read_trylock() might have succeeded in
@@ -581,7 +580,7 @@ retry:
 			if (!must_retry)
 				return bad_area(regs, address);
 
-			up_read(&mm->mmap_sem);
+			mmap_read_unlock(mm);
 			if (fault_in_pages_readable((const char __user *)regs->nip,
 						    sizeof(unsigned int)))
 				return bad_area_nosemaphore(regs, address);
@@ -616,7 +615,7 @@ good_area:
 		return user_mode(regs) ? 0 : SIGBUS;
 
 	/*
-	 * Handle the retry right now, the mmap_sem has been released in that
+	 * Handle the retry right now, the mmap_lock has been released in that
 	 * case.
 	 */
 	if (unlikely(fault & VM_FAULT_RETRY)) {
@@ -626,7 +625,7 @@ good_area:
 		}
 	}
 
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 
 	if (unlikely(fault & VM_FAULT_ERROR))
 		return mm_fault_error(regs, address, fault);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5b3d01404266..e9bfbccd975d 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -19,7 +19,6 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/kmemleak.h>
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/setup.h>
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index 42ef7a6e6098..8e0d792ac296 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -17,8 +17,8 @@
 #undef DEBUG
 
 #include <linux/string.h>
+#include <linux/pgtable.h>
 #include <asm/pgalloc.h>
-#include <asm/pgtable.h>
 #include <asm/kup.h>
 
 phys_addr_t memstart_addr __ro_after_init = (phys_addr_t)~0ull;
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 36c39bd37256..5a5469eb3174 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -32,7 +32,6 @@
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
 #include <asm/io.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index c7ce4ec5060e..bc73abf0bc25 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -47,7 +47,6 @@
 #include <asm/rtas.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <linux/uaccess.h>
 #include <asm/smp.h>
diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c
index db4ef44af22f..569d98a41881 100644
--- a/arch/powerpc/mm/kasan/8xx.c
+++ b/arch/powerpc/mm/kasan/8xx.c
@@ -10,7 +10,7 @@
 static int __init
 kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
 {
-	pmd_t *pmd = pmd_ptr_k(k_start);
+	pmd_t *pmd = pmd_off_k(k_start);
 	unsigned long k_cur, k_next;
 
 	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
@@ -59,7 +59,7 @@ int __init kasan_init_region(void *start, size_t size)
 		return ret;
 
 	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
index 4bc491a4a1fd..a32b4640b9de 100644
--- a/arch/powerpc/mm/kasan/book3s_32.c
+++ b/arch/powerpc/mm/kasan/book3s_32.c
@@ -46,7 +46,7 @@ int __init kasan_init_region(void *start, size_t size)
 	kasan_update_early_region(k_start, k_cur, __pte(0));
 
 	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index c42085801c04..0760e1e754e4 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -33,7 +33,7 @@ int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_
 	pmd_t *pmd;
 	unsigned long k_cur, k_next;
 
-	pmd = pmd_ptr_k(k_start);
+	pmd = pmd_off_k(k_start);
 
 	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
 		pte_t *new;
@@ -69,7 +69,7 @@ int __init __weak kasan_init_region(void *start, size_t size)
 		return -ENOMEM;
 
 	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
@@ -86,7 +86,7 @@ kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte)
 	phys_addr_t pa = __pa(kasan_early_shadow_page);
 
 	for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		pte_t *ptep = pte_offset_kernel(pmd, k_cur);
 
 		if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
@@ -184,7 +184,7 @@ void __init kasan_early_init(void)
 	unsigned long addr = KASAN_SHADOW_START;
 	unsigned long end = KASAN_SHADOW_END;
 	unsigned long next;
-	pmd_t *pmd = pmd_ptr_k(addr);
+	pmd_t *pmd = pmd_off_k(addr);
 
 	BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index e2d6a6236aa7..c2c11eb8dcfc 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -38,7 +38,6 @@
 #include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c
index 4eaf462cda30..13e74bc39ba5 100644
--- a/arch/powerpc/mm/nohash/40x.c
+++ b/arch/powerpc/mm/nohash/40x.c
@@ -36,7 +36,6 @@
 #include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <linux/uaccess.h>
 #include <asm/smp.h>
@@ -104,7 +103,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_RW;
 
-		pmdp = pmd_ptr_k(v);
+		pmdp = pmd_off_k(v);
 		*pmdp++ = __pmd(val);
 		*pmdp++ = __pmd(val);
 		*pmdp++ = __pmd(val);
@@ -119,7 +118,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_RW;
 
-		pmdp = pmd_ptr_k(v);
+		pmdp = pmd_off_k(v);
 		*pmdp = __pmd(val);
 
 		v += LARGE_PAGE_SIZE_4M;
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 286441bbbe49..92e8929cbe3e 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -74,7 +74,7 @@ static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
 static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
 					     pgprot_t prot, int psize, bool new)
 {
-	pmd_t *pmdp = pmd_ptr_k(va);
+	pmd_t *pmdp = pmd_off_k(va);
 	pte_t *ptep;
 
 	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
diff --git a/arch/powerpc/mm/nohash/fsl_booke.c b/arch/powerpc/mm/nohash/fsl_booke.c
index b4eb06ceb189..c06dfbb771f4 100644
--- a/arch/powerpc/mm/nohash/fsl_booke.c
+++ b/arch/powerpc/mm/nohash/fsl_booke.c
@@ -41,7 +41,6 @@
 #include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <linux/uaccess.h>
 #include <asm/smp.h>
diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
index 1f110c3c48fb..d5e2704d0096 100644
--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
@@ -6,6 +6,7 @@
  *	Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
  */
 
+#include <linux/pgtable.h>
 #include <asm/processor.h>
 #include <asm/reg.h>
 #include <asm/page.h>
@@ -13,7 +14,6 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
-#include <asm/pgtable.h>
 #include <asm/exception-64e.h>
 #include <asm/ppc-opcode.h>
 #include <asm/kvm_asm.h>
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index cea5b4e25a24..45a0556089e8 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -306,7 +306,7 @@ void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 	pmd = pmd_offset(pud, addr);
 	/*
 	 * khugepaged to collapse normal pages to hugepage, first set
-	 * pmd to none to force page fault/gup to take mmap_sem. After
+	 * pmd to none to force page fault/gup to take mmap_lock. After
 	 * pmd is set to none, we do a pte_clear which does this assertion
 	 * so if we find pmd none, return.
 	 */
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 05902bbff8d6..6eb4eab79385 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -24,7 +24,6 @@
 #include <linux/memblock.h>
 #include <linux/slab.h>
 
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
 #include <asm/setup.h>
@@ -41,7 +40,7 @@ notrace void __init early_ioremap_init(void)
 {
 	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
 	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
-	pmd_t *pmdp = pmd_ptr_k(addr);
+	pmd_t *pmdp = pmd_off_k(addr);
 
 	for (; (s32)(FIXADDR_TOP - addr) > 0;
 	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
@@ -79,7 +78,7 @@ int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 	int err = -ENOMEM;
 
 	/* Use upper 10 bits of VA to index the first level map */
-	pd = pmd_ptr_k(va);
+	pd = pmd_off_k(va);
 	/* Use middle 10 bits of VA to index the second-level map */
 	if (likely(slab_is_available()))
 		pg = pte_alloc_kernel(pd, va);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 1f86a88fd4bb..bb43a8c04bee 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -35,7 +35,6 @@
 #include <asm/page.h>
 #include <asm/prom.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
diff --git a/arch/powerpc/mm/ptdump/8xx.c b/arch/powerpc/mm/ptdump/8xx.c
index 4bc350736c1d..8a797dcbf475 100644
--- a/arch/powerpc/mm/ptdump/8xx.c
+++ b/arch/powerpc/mm/ptdump/8xx.c
@@ -5,7 +5,7 @@
  *
  */
 #include <linux/kernel.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
 
 #include "ptdump.h"
 
diff --git a/arch/powerpc/mm/ptdump/bats.c b/arch/powerpc/mm/ptdump/bats.c
index cebb58c7e289..e29b338d499f 100644
--- a/arch/powerpc/mm/ptdump/bats.c
+++ b/arch/powerpc/mm/ptdump/bats.c
@@ -6,8 +6,8 @@
  * This dumps the content of BATS
  */
 
+#include <linux/pgtable.h>
 #include <asm/debugfs.h>
-#include <asm/pgtable.h>
 #include <asm/cpu_has_feature.h>
 
 #include "ptdump.h"
diff --git a/arch/powerpc/mm/ptdump/book3s64.c b/arch/powerpc/mm/ptdump/book3s64.c
index 0dfca72cb9bd..14f73868db66 100644
--- a/arch/powerpc/mm/ptdump/book3s64.c
+++ b/arch/powerpc/mm/ptdump/book3s64.c
@@ -5,7 +5,7 @@
  *
  */
 #include <linux/kernel.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
 
 #include "ptdump.h"
 
diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c
index 6aaeb1eb3b9c..a2c33efc7ce8 100644
--- a/arch/powerpc/mm/ptdump/hashpagetable.c
+++ b/arch/powerpc/mm/ptdump/hashpagetable.c
@@ -15,7 +15,6 @@
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
-#include <asm/pgtable.h>
 #include <linux/const.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 3209f78297ad..de6e05ef871c 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -19,7 +19,6 @@
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <asm/fixmap.h>
-#include <asm/pgtable.h>
 #include <linux/const.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c
index 784f8df17f73..c005fe041c18 100644
--- a/arch/powerpc/mm/ptdump/shared.c
+++ b/arch/powerpc/mm/ptdump/shared.c
@@ -5,7 +5,7 @@
  *
  */
 #include <linux/kernel.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
 
 #include "ptdump.h"