| author | Alexander Gordeev <agordeev@linux.ibm.com> | 2023-05-26 14:30:30 +0200 |
|---|---|---|
| committer | Alexander Gordeev <agordeev@linux.ibm.com> | 2023-06-20 19:52:13 +0200 |
| commit | 3e8261003bd28208986d3c42004510083c086e24 (patch) | |
| tree | c800f820adb082533ff7b3c92e0bfa394c1ab0fa /arch/s390/boot | |
| parent | 2ed8b509753a0454b52b2d72e982265472c8d861 (diff) | |
s390/kasan: avoid short by one page shadow memory
Kernel Address Sanitizer encodes every 8 bytes of memory with one byte of
shadow memory. That is, the start and end address of a memory range are
shifted right by 3 bits when the corresponding shadow memory is created
for that memory range.
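For reference, a minimal sketch of that translation in C is shown below; the helper name and the shadow_offset parameter are illustrative only, while the s390 boot code performs the same shift via its __sha() macro (visible in the diff further down):

/* Sketch of the KASAN address-to-shadow translation: one shadow byte
 * tracks 8 bytes of memory, hence the right shift by 3 bits.
 * The function name and shadow_offset parameter are illustrative.
 */
#define KASAN_SHADOW_SCALE_SHIFT	3

static inline unsigned long mem_to_shadow_sketch(unsigned long addr,
						  unsigned long shadow_offset)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + shadow_offset;
}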
The memory mapping routine used expects page-aligned addresses, while the
3-bit shift described above can turn the start and end boundaries of the
shadow memory range into non-page-aligned addresses when the size of the
original memory range is less than (PAGE_SIZE << 3). As a result, the
resulting shadow memory range could end up short by one page.
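As a worked illustration (hypothetical addresses, assuming PAGE_SIZE == 4096): a 4 KiB region needs only 512 bytes of shadow, so the shifted end address is not page-aligned, and a page-granular mapping that does not round it up stops one page short:

/* Hypothetical numbers, assuming PAGE_SIZE == 4096: a region smaller than
 * (PAGE_SIZE << 3) == 32 KiB yields a shadow range whose end address is
 * not page-aligned.
 */
static void short_shadow_example(void)
{
	unsigned long start = 0x10000000UL;	/* page-aligned region start */
	unsigned long end = start + 0x1000UL;	/* 4 KiB region, < 32 KiB */
	unsigned long sha_start = start >> 3;	/* 0x02000000, page-aligned */
	unsigned long sha_end = end >> 3;	/* 0x02000200, not page-aligned */

	/*
	 * The shadow span is only 0x200 bytes, but a mapping routine that
	 * expects page-aligned addresses still has to map the whole page at
	 * 0x02000000; otherwise the shadow for this region is a page short.
	 */
	(void)sha_start;
	(void)sha_end;
}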
Align the start and end addresses on a page boundary when mapping a
shadow memory range to avoid the described issue in the future.
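A minimal sketch of that alignment, reusing the hypothetical numbers above, is shown below; the actual patch does this with PAGE_ALIGN_DOWN()/PAGE_ALIGN() inside the new kasan_populate() helper in the diff that follows:

/* Minimal sketch, assuming PAGE_SIZE == 4096; the patch itself uses
 * PAGE_ALIGN_DOWN()/PAGE_ALIGN() inside the new kasan_populate() helper.
 */
static void aligned_shadow_example(void)
{
	unsigned long page_size = 4096UL;
	unsigned long sha_start = 0x10000000UL >> 3;	/* 0x02000000 */
	unsigned long sha_end = 0x10001000UL >> 3;	/* 0x02000200 */
	unsigned long map_start = sha_start & ~(page_size - 1);		/* round down: 0x02000000 */
	unsigned long map_end = (sha_end + page_size - 1) & ~(page_size - 1);	/* round up: 0x02001000 */

	/* map_start..map_end now covers the whole 0x200-byte shadow span. */
	(void)map_start;
	(void)map_end;
}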
Note that this does not fix a real problem, since no virtual regions
smaller than (PAGE_SIZE << 3) currently exist.
Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Diffstat (limited to 'arch/s390/boot')
-rw-r--r-- | arch/s390/boot/vmem.c | 15 |
1 file changed, 11 insertions, 4 deletions
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index acb1f8b53105..c67f59db7a51 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -45,6 +45,13 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
 
 static pte_t pte_z;
 
+static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
+{
+	start = PAGE_ALIGN_DOWN(__sha(start));
+	end = PAGE_ALIGN(__sha(end));
+	pgtable_populate(start, end, mode);
+}
+
 static void kasan_populate_shadow(void)
 {
 	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
@@ -95,17 +102,17 @@ static void kasan_populate_shadow(void)
 	 */
 	for_each_physmem_usable_range(i, &start, &end)
-		pgtable_populate(__sha(start), __sha(end), POPULATE_KASAN_MAP_SHADOW);
+		kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
 		untracked_end = VMALLOC_START;
 		/* shallowly populate kasan shadow for vmalloc and modules */
-		pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END), POPULATE_KASAN_SHALLOW);
+		kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
 	} else {
 		untracked_end = MODULES_VADDR;
 	}
 	/* populate kasan shadow for untracked memory */
-	pgtable_populate(__sha(ident_map_size), __sha(untracked_end), POPULATE_KASAN_ZERO_SHADOW);
-	pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE), POPULATE_KASAN_ZERO_SHADOW);
+	kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
+	kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
 }
 
 static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,