Diffstat (limited to 'arch/riscv/mm/init.c')
-rw-r--r-- | arch/riscv/mm/init.c | 117
1 file changed, 109 insertions, 8 deletions
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 3cb4c1b6cee1..788eb222deac 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -31,6 +31,9 @@
 
 unsigned long kernel_virt_addr = KERNEL_LINK_ADDR;
 EXPORT_SYMBOL(kernel_virt_addr);
+#ifdef CONFIG_XIP_KERNEL
+#define kernel_virt_addr (*((unsigned long *)XIP_FIXUP(&kernel_virt_addr)))
+#endif
 
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
                                                         __page_aligned_bss;
@@ -38,8 +41,8 @@ EXPORT_SYMBOL(empty_zero_page);
 
 extern char _start[];
 #define DTB_EARLY_BASE_VA      PGDIR_SIZE
-void *dtb_early_va __initdata;
-uintptr_t dtb_early_pa __initdata;
+void *_dtb_early_va __initdata;
+uintptr_t _dtb_early_pa __initdata;
 
 struct pt_alloc_ops {
         pte_t *(*get_pte_virt)(phys_addr_t pa);
@@ -124,6 +127,10 @@ void __init setup_bootmem(void)
         phys_addr_t dram_end = memblock_end_of_DRAM();
         phys_addr_t max_mapped_addr = __pa(~(ulong)0);
 
+#ifdef CONFIG_XIP_KERNEL
+        vmlinux_start = __pa_symbol(&_sdata);
+#endif
+
         /* The maximal physical memory size is -PAGE_OFFSET. */
         memblock_enforce_memory_limit(-PAGE_OFFSET);
 
@@ -165,17 +172,41 @@ void __init setup_bootmem(void)
         memblock_allow_resize();
 }
 
+#ifdef CONFIG_XIP_KERNEL
+
+extern char _xiprom[], _exiprom[];
+extern char _sdata[], _edata[];
+
+#endif /* CONFIG_XIP_KERNEL */
+
 #ifdef CONFIG_MMU
-static struct pt_alloc_ops pt_ops __ro_after_init;
+static struct pt_alloc_ops _pt_ops __ro_after_init;
+
+#ifdef CONFIG_XIP_KERNEL
+#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops))
+#else
+#define pt_ops _pt_ops
+#endif
 
 /* Offset between linear mapping virtual address and kernel load address */
 unsigned long va_pa_offset __ro_after_init;
 EXPORT_SYMBOL(va_pa_offset);
-#ifdef CONFIG_64BIT
+#ifdef CONFIG_XIP_KERNEL
+#define va_pa_offset (*((unsigned long *)XIP_FIXUP(&va_pa_offset)))
+#endif
 /* Offset between kernel mapping virtual address and kernel load address */
+#ifdef CONFIG_64BIT
 unsigned long va_kernel_pa_offset;
 EXPORT_SYMBOL(va_kernel_pa_offset);
 #endif
+#ifdef CONFIG_XIP_KERNEL
+#define va_kernel_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_pa_offset)))
+#endif
+unsigned long va_kernel_xip_pa_offset;
+EXPORT_SYMBOL(va_kernel_xip_pa_offset);
+#ifdef CONFIG_XIP_KERNEL
+#define va_kernel_xip_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_xip_pa_offset)))
+#endif
 
 unsigned long pfn_base __ro_after_init;
 EXPORT_SYMBOL(pfn_base);
@@ -185,6 +216,12 @@ pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
 
 pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 
+#ifdef CONFIG_XIP_KERNEL
+#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
+#define fixmap_pte        ((pte_t *)XIP_FIXUP(fixmap_pte))
+#define early_pg_dir      ((pgd_t *)XIP_FIXUP(early_pg_dir))
+#endif /* CONFIG_XIP_KERNEL */
+
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 {
         unsigned long addr = __fix_to_virt(idx);
@@ -260,6 +297,12 @@ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
 pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
 pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
 
+#ifdef CONFIG_XIP_KERNEL
+#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
+#define fixmap_pmd     ((pmd_t *)XIP_FIXUP(fixmap_pmd))
+#define early_pmd      ((pmd_t *)XIP_FIXUP(early_pmd))
+#endif /* CONFIG_XIP_KERNEL */
+
 static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
 {
         /* Before MMU is enabled */
@@ -376,6 +419,19 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
         return PMD_SIZE;
 }
 
+#ifdef CONFIG_XIP_KERNEL
+/* called from head.S with MMU off */
+asmlinkage void __init __copy_data(void)
+{
+        void *from = (void *)(&_sdata);
+        void *end = (void *)(&_end);
+        void *to = (void *)CONFIG_PHYS_RAM_BASE;
+        size_t sz = (size_t)(end - from + 1);
+
+        memcpy(to, from, sz);
+}
+#endif
+
 /*
  * setup_vm() is called from head.S with MMU-off.
  *
@@ -395,7 +451,35 @@
 #endif
 
 uintptr_t load_pa, load_sz;
+#ifdef CONFIG_XIP_KERNEL
+#define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa)))
+#define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz)))
+#endif
+#ifdef CONFIG_XIP_KERNEL
+uintptr_t xiprom, xiprom_sz;
+#define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
+#define xiprom    (*((uintptr_t *)XIP_FIXUP(&xiprom)))
+
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+{
+        uintptr_t va, end_va;
+
+        /* Map the flash resident part */
+        end_va = kernel_virt_addr + xiprom_sz;
+        for (va = kernel_virt_addr; va < end_va; va += map_size)
+                create_pgd_mapping(pgdir, va,
+                                   xiprom + (va - kernel_virt_addr),
+                                   map_size, PAGE_KERNEL_EXEC);
+
+        /* Map the data in RAM */
+        end_va = kernel_virt_addr + XIP_OFFSET + load_sz;
+        for (va = kernel_virt_addr + XIP_OFFSET; va < end_va; va += map_size)
+                create_pgd_mapping(pgdir, va,
+                                   load_pa + (va - (kernel_virt_addr + XIP_OFFSET)),
+                                   map_size, PAGE_KERNEL);
+}
+#else
 static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
 {
         uintptr_t va, end_va;
@@ -406,16 +490,28 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
                                    load_pa + (va - kernel_virt_addr),
                                    map_size, PAGE_KERNEL_EXEC);
 }
+#endif
 
 asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 {
-        uintptr_t pa;
+        uintptr_t __maybe_unused pa;
         uintptr_t map_size;
 #ifndef __PAGETABLE_PMD_FOLDED
         pmd_t fix_bmap_spmd, fix_bmap_epmd;
 #endif
+
+#ifdef CONFIG_XIP_KERNEL
+        xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
+        xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
+
+        load_pa = (uintptr_t)CONFIG_PHYS_RAM_BASE;
+        load_sz = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
+
+        va_kernel_xip_pa_offset = kernel_virt_addr - xiprom;
+#else
         load_pa = (uintptr_t)(&_start);
         load_sz = (uintptr_t)(&_end) - load_pa;
+#endif
 
         va_pa_offset = PAGE_OFFSET - load_pa;
 #ifdef CONFIG_64BIT
@@ -451,8 +547,13 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
         /* Setup trampoline PGD and PMD */
         create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
                            (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
+#ifdef CONFIG_XIP_KERNEL
+        create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
+                           xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
+#else
         create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
                            load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
+#endif
 #else
         /* Setup trampoline PGD */
         create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
@@ -485,7 +586,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
          * whereas dtb_early_va will be used before setup_vm_final installs
          * the linear mapping.
          */
-        dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
+        dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
 #else
         dtb_early_va = __va(dtb_pa);
 #endif /* CONFIG_64BIT */
@@ -501,7 +602,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
         dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
 #else /* CONFIG_BUILTIN_DTB */
 #ifdef CONFIG_64BIT
-        dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
+        dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
 #else
         dtb_early_va = __va(dtb_pa);
 #endif /* CONFIG_64BIT */
@@ -540,7 +641,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 #endif
 }
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
 void protect_kernel_linear_mapping_text_rodata(void)
 {
         unsigned long text_start = (unsigned long)lm_alias(_start);
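
Note on the XIP_FIXUP() wrappers used throughout this patch: while the kernel still runs with the MMU off, a writable global that is linked at its execute-in-place (flash) address has to be accessed through the copy that __copy_data() placed in RAM, so the symbol's link-time address is rebased onto that RAM copy. XIP_FIXUP() itself is defined in the RISC-V headers, not in this file; the sketch below is only an illustration of that rebasing, with xip_fixup() and the XIP_* constants being made-up stand-ins for the values the real kernel takes from CONFIG_XIP_PHYS_ADDR, CONFIG_PHYS_RAM_BASE and XIP_OFFSET, and it mirrors the idea rather than the exact macro.

    #include <stdint.h>

    /* Illustrative stand-in values, not the kernel's real configuration. */
    #define XIP_FLASH_BASE  0x20000000UL  /* physical base of the XIP image in flash */
    #define XIP_FLASH_SIZE  0x01000000UL  /* window covered by the fixup */
    #define XIP_RAM_BASE    0x80000000UL  /* where the writable sections were copied */
    #define XIP_DATA_OFFSET 0x00800000UL  /* offset of _sdata within the XIP image */

    /*
     * Rebase a physical address that points into the flash image onto the RAM
     * copy of the writable sections; anything outside that window passes
     * through unchanged. An address at offset (addr - XIP_FLASH_BASE) in the
     * image lands at (offset - XIP_DATA_OFFSET) into the RAM copy, because the
     * copy starts at _sdata.
     */
    static inline uintptr_t xip_fixup(uintptr_t addr)
    {
            if (addr >= XIP_FLASH_BASE && addr < XIP_FLASH_BASE + XIP_FLASH_SIZE)
                    return addr - XIP_FLASH_BASE + XIP_RAM_BASE - XIP_DATA_OFFSET;
            return addr;
    }

A wrapper of this shape is what lets the patch keep call sites unchanged: a macro such as #define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops)) redirects every pre-MMU access to the RAM copy of _pt_ops, and the same pattern covers kernel_virt_addr, va_pa_offset, the early page-table arrays and the load_pa/load_sz/xiprom variables used by setup_vm().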