Diffstat (limited to 'arch/powerpc/mm/kasan/kasan_init_32.c')
-rw-r--r--	arch/powerpc/mm/kasan/kasan_init_32.c | 89
1 file changed, 43 insertions(+), 46 deletions(-)
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index 0e6ed4413eea..16dd95bd0749 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -12,7 +12,7 @@
 #include <asm/code-patching.h>
 #include <mm/mmu_decl.h>
 
-static pgprot_t kasan_prot_ro(void)
+static pgprot_t __init kasan_prot_ro(void)
 {
 	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
 		return PAGE_READONLY;
@@ -20,7 +20,7 @@ static pgprot_t kasan_prot_ro(void)
 	return PAGE_KERNEL_RO;
 }
 
-static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
+static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
 {
 	unsigned long va = (unsigned long)kasan_early_shadow_page;
 	phys_addr_t pa = __pa(kasan_early_shadow_page);
@@ -30,29 +30,25 @@ static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
 		__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
 }
 
-static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
+static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
 {
 	pmd_t *pmd;
 	unsigned long k_cur, k_next;
-	pgprot_t prot = slab_is_available() ? kasan_prot_ro() : PAGE_KERNEL;
+	pte_t *new = NULL;
 
 	pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
 
 	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
-		pte_t *new;
-
 		k_next = pgd_addr_end(k_cur, k_end);
 		if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
 			continue;
 
-		if (slab_is_available())
-			new = pte_alloc_one_kernel(&init_mm);
-		else
+		if (!new)
 			new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
 
 		if (!new)
 			return -ENOMEM;
-		kasan_populate_pte(new, prot);
+		kasan_populate_pte(new, PAGE_KERNEL);
 
 		smp_wmb(); /* See comment in __pte_alloc */
@@ -63,39 +59,27 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l
 			new = NULL;
 		}
 		spin_unlock(&init_mm.page_table_lock);
-
-		if (new && slab_is_available())
-			pte_free_kernel(&init_mm, new);
 	}
 	return 0;
 }
 
-static void __ref *kasan_get_one_page(void)
-{
-	if (slab_is_available())
-		return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-
-	return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-}
-
-static int __ref kasan_init_region(void *start, size_t size)
+static int __init kasan_init_region(void *start, size_t size)
 {
 	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
 	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
 	unsigned long k_cur;
 	int ret;
-	void *block = NULL;
+	void *block;
 
 	ret = kasan_init_shadow_page_tables(k_start, k_end);
 	if (ret)
 		return ret;
 
-	if (!slab_is_available())
-		block = memblock_alloc(k_end - k_start, PAGE_SIZE);
+	block = memblock_alloc(k_end - k_start, PAGE_SIZE);
 
 	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
 		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
-		void *va = block ? block + k_cur - k_start : kasan_get_one_page();
+		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
 		if (!va)
@@ -129,6 +113,31 @@ static void __init kasan_remap_early_shadow_ro(void)
 	flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
 }
 
+static void __init kasan_unmap_early_shadow_vmalloc(void)
+{
+	unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START);
+	unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END);
+	unsigned long k_cur;
+	phys_addr_t pa = __pa(kasan_early_shadow_page);
+
+	if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+		int ret = kasan_init_shadow_page_tables(k_start, k_end);
+
+		if (ret)
+			panic("kasan: kasan_init_shadow_page_tables() failed");
+	}
+	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
+		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+		pte_t *ptep = pte_offset_kernel(pmd, k_cur);
+
+		if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
+			continue;
+
+		__set_pte_at(&init_mm, k_cur, ptep, __pte(0), 0);
+	}
+	flush_tlb_kernel_range(k_start, k_end);
+}
+
 void __init kasan_mmu_init(void)
 {
 	int ret;
@@ -165,34 +174,22 @@ void __init kasan_init(void)
 	pr_info("KASAN init done\n");
 }
 
-#ifdef CONFIG_MODULES
-void *module_alloc(unsigned long size)
+void __init kasan_late_init(void)
 {
-	void *base;
-
-	base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END,
-				    GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
-				    NUMA_NO_NODE, __builtin_return_address(0));
-
-	if (!base)
-		return NULL;
-
-	if (!kasan_init_region(base, size))
-		return base;
-
-	vfree(base);
-
-	return NULL;
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+		kasan_unmap_early_shadow_vmalloc();
 }
-#endif
 
 #ifdef CONFIG_PPC_BOOK3S_32
 u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};
 
 static void __init kasan_early_hash_table(void)
 {
-	modify_instruction_site(&patch__hash_page_A0, 0xffff, __pa(early_hash) >> 16);
-	modify_instruction_site(&patch__flush_hash_A0, 0xffff, __pa(early_hash) >> 16);
+	unsigned int hash = IS_ENABLED(CONFIG_VMAP_STACK) ? (unsigned int)early_hash :
+							    __pa(early_hash);
+
+	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
+	modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
 
 	Hash = (struct hash_pte *)early_hash;
 }
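
The core of the new kasan_unmap_early_shadow_vmalloc() helper is a walk over the vmalloc shadow range that clears only the PTEs still pointing at the zero-filled kasan_early_shadow_page, then flushes the TLB for that range. The snippet below is a standalone, annotated sketch of that walk, not part of the patch: the helper name demo_clear_early_shadow() is hypothetical, and it assumes the same 32-bit powerpc page-table layout and the headers already included by kasan_init_32.c.

/*
 * Sketch only: restates the PTE-clearing walk from the new
 * kasan_unmap_early_shadow_vmalloc() above, under the assumptions
 * stated in the lead-in (hypothetical helper name, same file context).
 */
static void __init demo_clear_early_shadow(unsigned long k_start, unsigned long k_end)
{
	/* Physical address of the shared, zero-filled early shadow page. */
	phys_addr_t pa = __pa(kasan_early_shadow_page);
	unsigned long k_cur;

	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
		pte_t *ptep = pte_offset_kernel(pmd, k_cur);

		/* Skip PTEs already remapped to real shadow memory. */
		if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
			continue;

		/* Drop the early mapping so real shadow can be installed later. */
		__set_pte_at(&init_mm, k_cur, ptep, __pte(0), 0);
	}
	flush_tlb_kernel_range(k_start, k_end);
}

In the patch itself, this unmapping is driven from kasan_late_init() and only when CONFIG_KASAN_VMALLOC is enabled.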