author     Alexander Gordeev <agordeev@linux.ibm.com>    2022-12-16 19:49:23 +0100
committer  Heiko Carstens <hca@linux.ibm.com>            2023-01-13 14:15:06 +0100
commit     07493a9ca79f8a39cfddd0a20b4e6eded4de8f3d (patch)
tree       8581e34759944161bb91668e24ae678c9d7fa03c
parent     12cf6473d23885fe06aa7e7ca58e990fa4f0737c (diff)
s390/kasan: remove identity mapping support
The identity mapping is created in the decompressor, so there is no
need to duplicate that functionality in the kasan setup code. Thus,
remove it.

Also remove the 4KB-pages check for the first 1MB, since there is no
need to take special care of the lowcore pages anymore.
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
-rw-r--r--  arch/s390/mm/kasan_init.c | 27
1 file changed, 5 insertions, 22 deletions
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index 801d81c189a7..bdfbe0dcb7c6 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -75,7 +75,6 @@ static pte_t * __init kasan_early_pte_alloc(void)
 }
 
 enum populate_mode {
-        POPULATE_ONE2ONE,
         POPULATE_MAP,
         POPULATE_ZERO_SHADOW,
         POPULATE_SHALLOW
@@ -101,16 +100,12 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
         pmd_t pmd;
         pte_t pte;
 
-        if (!has_nx)
+        if (!has_nx) {
                 pgt_prot_zero = pgprot_clear_bit(pgt_prot_zero, _PAGE_NOEXEC);
-        if (!has_nx || mode == POPULATE_ONE2ONE) {
                 pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
                 sgt_prot = pgprot_clear_bit(sgt_prot, _SEGMENT_ENTRY_NOEXEC);
         }
 
-        /*
-         * The first 1MB of 1:1 mapping is mapped with 4KB pages
-         */
         while (address < end) {
                 pg_dir = pgd_offset_k(address);
                 if (pgd_none(*pg_dir)) {
@@ -167,15 +162,10 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
                                         pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
                                         address = (address + PMD_SIZE) & PMD_MASK;
                                         continue;
-                                } else if (has_edat && address) {
-                                        void *page;
-
-                                        if (mode == POPULATE_ONE2ONE) {
-                                                page = (void *)address;
-                                        } else {
-                                                page = kasan_early_alloc_segment();
-                                                memset(page, 0, _SEGMENT_SIZE);
-                                        }
+                                } else if (has_edat) {
+                                        void *page = kasan_early_alloc_segment();
+
+                                        memset(page, 0, _SEGMENT_SIZE);
                                         pmd = __pmd(__pa(page));
                                         pmd = set_pmd_bit(pmd, sgt_prot);
                                         set_pmd(pm_dir, pmd);
@@ -195,12 +185,6 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
                         void *page;
 
                         switch (mode) {
-                        case POPULATE_ONE2ONE:
-                                page = (void *)address;
-                                pte = __pte(__pa(page));
-                                pte = set_pte_bit(pte, pgt_prot);
-                                set_pte(pt_dir, pte);
-                                break;
                         case POPULATE_MAP:
                                 page = kasan_early_alloc_pages(0);
                                 memset(page, 0, PAGE_SIZE);
@@ -259,7 +243,6 @@ void __init kasan_early_init(void)
         * - ident_map_size represents online + standby and memory limits
         *   accounted.
         * Kasan maps "memsize" right away.
-        * [0, memsize]                 - as identity mapping
         * [__sha(0), __sha(memsize)]   - shadow memory for identity mapping
         * The rest [memsize, ident_map_size] if memsize < ident_map_size
         *    could be mapped/unmapped dynamically later during memory hotplug.
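
To make the effect of the removal easier to see, here is a small stand-alone
sketch, in plain user-space C, of the populate modes that remain after this
patch. The enum names mirror arch/s390/mm/kasan_init.c, but MOCK_PAGE_SIZE,
mock_zero_shadow and mock_populate_pte() are invented stand-ins for the
kernel's early allocator and page-table code, not kernel APIs.

/*
 * Minimal user-space sketch (not the kernel code): after this patch the
 * shadow range is only ever backed by freshly allocated, zeroed memory or
 * by the shared zero shadow - never by the address itself.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MOCK_PAGE_SIZE 4096UL

enum populate_mode {                    /* POPULATE_ONE2ONE is gone */
        POPULATE_MAP,                   /* back the shadow with fresh zeroed memory */
        POPULATE_ZERO_SHADOW,           /* back the shadow with the shared zero shadow */
        POPULATE_SHALLOW                /* upper page-table levels only */
};

static char mock_zero_shadow[MOCK_PAGE_SIZE];   /* stand-in for the shared zero shadow page */

/* Pick the backing for one shadow page; mirrors the pte-level switch. */
static void *mock_populate_pte(enum populate_mode mode)
{
        void *page;

        switch (mode) {
        case POPULATE_MAP:
                /* kasan_early_alloc_pages(0) + memset() in the kernel */
                page = malloc(MOCK_PAGE_SIZE);
                if (page)
                        memset(page, 0, MOCK_PAGE_SIZE);
                return page;
        case POPULATE_ZERO_SHADOW:
                return mock_zero_shadow;
        case POPULATE_SHALLOW:
        default:
                return NULL;            /* shallow population never reaches the pte level */
        }
}

int main(void)
{
        void *shadow = mock_populate_pte(POPULATE_MAP);

        printf("shadow page backed at %p\n", shadow);
        free(shadow);
        return 0;
}

The point of the patch is visible in the switch: there is no longer a case
that maps (void *)address itself, because the decompressor has already
created the identity mapping by the time kasan_early_init() runs.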