author    | Michal Simek <michal.simek@xilinx.com> | 2020-11-26 14:32:25 +0100
committer | Michal Simek <michal.simek@xilinx.com> | 2020-11-26 16:39:35 +0100
commit    | 05cdf457477d6603b207d91873f0a3d4c7f8c1cd (patch)
tree      | 187900c636e5c8b9da2fe4344cbc8658ab92541a /arch/microblaze/mm
parent    | ed2124c0b9a8d2c09e3b5b9ca9827187c5fcbe71 (diff)
microblaze: Remove noMMU code
This configuration is obsolete and likely no one is really using it, so
remove it to simplify the code.
The CONFIG_MMU reference in hw_exception_handler.S is intentionally left
in place to keep its comments understandable.
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/43486cab370e0c0a79860120b71e0caac75a7e44.1606397528.git.michal.simek@xilinx.com
Diffstat (limited to 'arch/microblaze/mm')
-rw-r--r-- | arch/microblaze/mm/Makefile     |  2
-rw-r--r-- | arch/microblaze/mm/consistent.c | 29
-rw-r--r-- | arch/microblaze/mm/init.c       | 49
3 files changed, 1 insertion, 79 deletions
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index 1b16875cea70..cd8a844bf29e 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -5,5 +5,5 @@
 
 obj-y := consistent.o init.o
 
-obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
+obj-y += pgtable.o mmu_context.o fault.o
 obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index 81dffe43b18c..b7ad4a98636d 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -21,32 +21,3 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 
 	flush_dcache_range(paddr, paddr + size);
 }
-
-#ifndef CONFIG_MMU
-/*
- * Consistent memory allocators. Used for DMA devices that want to share
- * uncached memory with the processor core. My crufty no-MMU approach is
- * simple. In the HW platform we can optionally mirror the DDR up above the
- * processor cacheable region. So, memory accessed in this mirror region will
- * not be cached. It's alloced from the same pool as normal memory, but the
- * handle we return is shifted up into the uncached region. This will no doubt
- * cause big problems if memory allocated here is not also freed properly. -- JW
- *
- * I have to use dcache values because I can't relate on ram size:
- */
-#ifdef CONFIG_XILINX_UNCACHED_SHADOW
-#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
-#else
-#define UNCACHED_SHADOW_MASK 0
-#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
-
-void *arch_dma_set_uncached(void *ptr, size_t size)
-{
-	unsigned long addr = (unsigned long)ptr;
-
-	addr |= UNCACHED_SHADOW_MASK;
-	if (addr > cpuinfo.dcache_base && addr < cpuinfo.dcache_high)
-		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
-	return (void *)addr;
-}
-#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 45da639bd22c..7129a20881ea 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -29,11 +29,6 @@
 /* Use for MMU and noMMU because of PCI generic code */
 int mem_init_done;
 
-#ifndef CONFIG_MMU
-unsigned int __page_offset;
-EXPORT_SYMBOL(__page_offset);
-#endif /* CONFIG_MMU */
-
 char *klimit = _end;
 
 /*
@@ -82,13 +77,11 @@ static void highmem_setup(void)
 static void __init paging_init(void)
 {
 	unsigned long zones_size[MAX_NR_ZONES];
-#ifdef CONFIG_MMU
 	int idx;
 
 	/* Setup fixmaps */
 	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
 		clear_fixmap(idx);
-#endif
 
 	/* Clean every zones */
 	memset(zones_size, 0, sizeof(zones_size));
@@ -108,40 +101,6 @@ static void __init paging_init(void)
 
 void __init setup_memory(void)
 {
-#ifndef CONFIG_MMU
-	u32 kernel_align_start, kernel_align_size;
-	phys_addr_t start, end;
-	u64 i;
-
-	/* Find main memory where is the kernel */
-	for_each_mem_range(i, &start, &end) {
-		memory_start = start;
-		lowmem_size = end - start;
-		if ((memory_start <= (u32)_text) &&
-			((u32)_text <= (memory_start + lowmem_size - 1))) {
-			memory_size = lowmem_size;
-			PAGE_OFFSET = memory_start;
-			pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
-				__func__, (u32) memory_start,
-				(u32) memory_size);
-			break;
-		}
-	}
-
-	if (!memory_start || !memory_size) {
-		panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
-			__func__, (u32) memory_start, (u32) memory_size);
-	}
-
-	/* reservation of region where is the kernel */
-	kernel_align_start = PAGE_DOWN((u32)_text);
-	/* ALIGN can be remove because _end in vmlinux.lds.S is align */
-	kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
-	pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
-		__func__, kernel_align_start, kernel_align_start
-			+ kernel_align_size, kernel_align_size);
-	memblock_reserve(kernel_align_start, kernel_align_size);
-#endif
 	/*
 	 * Kernel:
 	 * start: base phys address of kernel - page align
@@ -181,12 +140,6 @@ void __init mem_init(void)
 	mem_init_done = 1;
 }
 
-#ifndef CONFIG_MMU
-int page_is_ram(unsigned long pfn)
-{
-	return __range_ok(pfn, 0);
-}
-#else
 int page_is_ram(unsigned long pfn)
 {
 	return pfn < max_low_pfn;
@@ -330,8 +283,6 @@ void __init *early_get_page(void)
 			NUMA_NO_NODE);
 }
 
-#endif /* CONFIG_MMU */
-
 void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 {
 	void *p;
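
For context on the Makefile hunk: in kbuild, objects listed in obj-y are always built into the kernel, while obj-$(CONFIG_FOO) only resolves to obj-y when CONFIG_FOO=y (and to the discarded obj- list when the option is unset). With noMMU support removed, MMU is the only remaining configuration, so the MMU helpers can be listed unconditionally. A minimal sketch of the resulting makefile logic, reconstructed from the hunk above with header comments omitted:

# arch/microblaze/mm/Makefile (sketch, post-change): pgtable.o,
# mmu_context.o and fault.o no longer depend on CONFIG_MMU and are
# built unconditionally; highmem.o still depends on CONFIG_HIGHMEM.
obj-y := consistent.o init.o

obj-y += pgtable.o mmu_context.o fault.o
obj-$(CONFIG_HIGHMEM) += highmem.o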