diff options
author | Peter Zijlstra <peterz@infradead.org> | 2020-01-31 13:45:36 +0100 |
---|---|---|
committer | Geert Uytterhoeven <geert@linux-m68k.org> | 2020-02-10 10:57:48 +0100 |
commit | ef22d8abd876e805b604e8f655127de2beee2869 (patch) | |
tree | d5a4ba746cf6d03a129d13183934f0a20dd1ee95 /arch/m68k/mm | |
parent | 5ad272abee9fe0a781d49b03f334c0a3b3e418c1 (diff) |
m68k: mm: Restructure Motorola MMU page-table layout
The Motorola 68xxx MMUs, 040 (and later) have a fixed 7,7,{5,6}
page-table setup, where the last depends on the page-size selected (8k
vs 4k resp.), and head.S selects 4K pages. For 030 (and earlier) we
explicitly program 7,7,6 and 4K pages in %tc.
However, the current code implements this in a mightily weird way. What it does
is group 16 of those (6 bit) pte tables into one 4k page to not waste
space. The downside is that this forces pmd_t to be a 16-tuple
pointing to consecutive pte tables.
This breaks the generic code which assumes READ_ONCE(*pmd) will be
word sized.
Therefore implement a straightforward 7,7,6 3-level page-table setup,
with the addition (for 020/030) of (partial) large-page support. For
now this increases the memory footprint for pte-tables 15 fold.
Tested with ARAnyM/68040 emulation.
Suggested-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Tested-by: Michael Schmitz <schmitzmic@gmail.com>
Tested-by: Greg Ungerer <gerg@linux-m68k.org>
Link: https://lore.kernel.org/r/20200131125403.711478295@infradead.org
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Diffstat (limited to 'arch/m68k/mm')
-rw-r--r-- | arch/m68k/mm/kmap.c | 36 | ||||
-rw-r--r-- | arch/m68k/mm/motorola.c | 28 |
2 files changed, 29 insertions, 35 deletions
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c index 120030ad8dc4..14d31d216cef 100644 --- a/arch/m68k/mm/kmap.c +++ b/arch/m68k/mm/kmap.c @@ -24,8 +24,6 @@ #undef DEBUG -#define PTRTREESIZE (256*1024) - /* * For 040/060 we can use the virtual memory area like other architectures, * but for 020/030 we want to use early termination page descriptors and we @@ -50,7 +48,7 @@ static inline void free_io_area(void *addr) #else -#define IO_SIZE (256*1024) +#define IO_SIZE PMD_SIZE static struct vm_struct *iolist; @@ -81,14 +79,13 @@ static void __free_io_area(void *addr, unsigned long size) #if CONFIG_PGTABLE_LEVELS == 3 if (CPU_IS_020_OR_030) { - int pmd_off = (virtaddr/PTRTREESIZE) & 15; - int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK; + int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK; if (pmd_type == _PAGE_PRESENT) { - pmd_dir->pmd[pmd_off] = 0; - virtaddr += PTRTREESIZE; - size -= PTRTREESIZE; - continue; + pmd_clear(pmd_dir); + virtaddr += PMD_SIZE; + size -= PMD_SIZE; + } else if (pmd_type == 0) continue; } @@ -249,7 +246,7 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla while ((long)size > 0) { #ifdef DEBUG - if (!(virtaddr & (PTRTREESIZE-1))) + if (!(virtaddr & (PMD_SIZE-1))) printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr); #endif pgd_dir = pgd_offset_k(virtaddr); @@ -263,10 +260,10 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla #if CONFIG_PGTABLE_LEVELS == 3 if (CPU_IS_020_OR_030) { - pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr; - physaddr += PTRTREESIZE; - virtaddr += PTRTREESIZE; - size -= PTRTREESIZE; + pmd_val(*pmd_dir) = physaddr; + physaddr += PMD_SIZE; + virtaddr += PMD_SIZE; + size -= PMD_SIZE; } else #endif { @@ -367,13 +364,12 @@ void kernel_set_cachemode(void *addr, unsigned long size, int cmode) #if CONFIG_PGTABLE_LEVELS == 3 if (CPU_IS_020_OR_030) { - int pmd_off = (virtaddr/PTRTREESIZE) & 15; + unsigned long pmd = pmd_val(*pmd_dir); - if 
((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) { - pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] & - _CACHEMASK040) | cmode; - virtaddr += PTRTREESIZE; - size -= PTRTREESIZE; + if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) { + *pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode); + virtaddr += PMD_SIZE; + size -= PMD_SIZE; continue; } } diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 2102f9397c94..c888ef46da3e 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -236,8 +236,6 @@ static pmd_t * __init kernel_ptr_table(void) static void __init map_node(int node) { -#define PTRTREESIZE (256*1024) -#define ROOTTREESIZE (32*1024*1024) unsigned long physaddr, virtaddr, size; pgd_t *pgd_dir; p4d_t *p4d_dir; @@ -255,21 +253,21 @@ static void __init map_node(int node) while (size > 0) { #ifdef DEBUG - if (!(virtaddr & (PTRTREESIZE-1))) + if (!(virtaddr & (PMD_SIZE-1))) printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK, virtaddr); #endif pgd_dir = pgd_offset_k(virtaddr); if (virtaddr && CPU_IS_020_OR_030) { - if (!(virtaddr & (ROOTTREESIZE-1)) && - size >= ROOTTREESIZE) { + if (!(virtaddr & (PGDIR_SIZE-1)) && + size >= PGDIR_SIZE) { #ifdef DEBUG printk ("[very early term]"); #endif pgd_val(*pgd_dir) = physaddr; - size -= ROOTTREESIZE; - virtaddr += ROOTTREESIZE; - physaddr += ROOTTREESIZE; + size -= PGDIR_SIZE; + virtaddr += PGDIR_SIZE; + physaddr += PGDIR_SIZE; continue; } } @@ -289,8 +287,8 @@ static void __init map_node(int node) #ifdef DEBUG printk ("[early term]"); #endif - pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr; - physaddr += PTRTREESIZE; + pmd_val(*pmd_dir) = physaddr; + physaddr += PMD_SIZE; } else { int i; #ifdef DEBUG @@ -298,15 +296,15 @@ static void __init map_node(int node) #endif zero_pgtable = kernel_ptr_table(); pte_dir = (pte_t *)zero_pgtable; - pmd_dir->pmd[0] = virt_to_phys(pte_dir) | - _PAGE_TABLE | _PAGE_ACCESSED; + pmd_set(pmd_dir, pte_dir); + pte_val(*pte_dir++) = 0; physaddr += PAGE_SIZE; - 
for (i = 1; i < 64; physaddr += PAGE_SIZE, i++) + for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++) pte_val(*pte_dir++) = physaddr; } - size -= PTRTREESIZE; - virtaddr += PTRTREESIZE; + size -= PMD_SIZE; + virtaddr += PMD_SIZE; } else { if (!pmd_present(*pmd_dir)) { #ifdef DEBUG |