From ed0a6d1d973e9763989b44913ae1bd2a5d5d5777 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 10 Jun 2022 16:21:38 +0100
Subject: mm: kasan: Ensure the tags are visible before the tag in page->flags

__kasan_unpoison_pages() colours the memory with a random tag and stores
it in page->flags in order to re-create the tagged pointer via
page_to_virt() later. When the tag from the page->flags is read, ensure
that the in-memory tags are already visible by re-ordering the
page_kasan_tag_set() after kasan_unpoison(). The former already has
barriers in place through try_cmpxchg(). On the reader side, the order
is ensured by the address dependency between page->flags and the memory
access.

Signed-off-by: Catalin Marinas
Reviewed-by: Andrey Konovalov
Cc: Andrey Ryabinin
Cc: Vincenzo Frascino
Reviewed-by: Vincenzo Frascino
Link: https://lore.kernel.org/r/20220610152141.2148929-2-catalin.marinas@arm.com
Signed-off-by: Will Deacon
---
 mm/kasan/common.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index c40c0e7b3b5f..78be2beb7453 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -108,9 +108,10 @@ void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
 		return;
 
 	tag = kasan_random_tag();
+	kasan_unpoison(set_tag(page_address(page), tag),
+		       PAGE_SIZE << order, init);
 	for (i = 0; i < (1 << order); i++)
 		page_kasan_tag_set(page + i, tag);
-	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
 }
 
 void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
--
cgit v1.2.3-70-g09d2
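
A note on the reader-side ordering argument above: no barrier is needed on
that side because the tag loaded from page->flags is folded into the pointer
used for the subsequent access. The sketch below is illustrative only;
set_tag_sketch(), reader() and the 56-bit top-byte tag position are
assumptions made for the example, not the kernel's set_tag()/page_to_virt()
implementation.

#include <stdint.h>

#define TAG_SHIFT	56	/* assumed top-byte tag position (TBI-style) */

/* Hypothetical stand-in for set_tag(): put the tag in the pointer's top byte. */
static inline uint8_t *set_tag_sketch(void *addr, uint8_t tag)
{
	uintptr_t a = (uintptr_t)addr & ~((uintptr_t)0xff << TAG_SHIFT);

	return (uint8_t *)(a | ((uintptr_t)tag << TAG_SHIFT));
}

/* Reader side: the access address is computed from the flags value itself. */
static uint8_t reader(const uint64_t *flags, void *untagged)
{
	uint8_t tag = *flags >> TAG_SHIFT;		/* load of page->flags */
	uint8_t *p = set_tag_sketch(untagged, tag);	/* tag feeds the address */

	return *p;	/* ordered after the flags load by the address dependency */
}

Because the address of the final load depends on the value read from the
flags, the CPU cannot perform the tagged access before the flags load; the
writer only has to make sure the in-memory tags are visible before the new
tag appears in page->flags, which is what the reordering in this patch does.
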
From 70c248aca9e7efa85a6664d5ab56c17c326c958f Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 10 Jun 2022 16:21:39 +0100
Subject: mm: kasan: Skip unpoisoning of user pages

Commit c275c5c6d50a ("kasan: disable freed user page poisoning with HW
tags") added __GFP_SKIP_KASAN_POISON to GFP_HIGHUSER_MOVABLE. A similar
argument can be made about unpoisoning, so also add
__GFP_SKIP_KASAN_UNPOISON to user pages. To ensure the user page is
still accessible via page_address() without a kasan fault, reset the
page->flags tag.

With the above changes, there is no need for the arm64
tag_clear_highpage() to reset the page->flags tag.

Signed-off-by: Catalin Marinas
Cc: Andrey Ryabinin
Cc: Andrey Konovalov
Cc: Peter Collingbourne
Cc: Vincenzo Frascino
Reviewed-by: Vincenzo Frascino
Reviewed-by: Andrey Konovalov
Link: https://lore.kernel.org/r/20220610152141.2148929-3-catalin.marinas@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/mm/fault.c | 1 -
 include/linux/gfp.h   | 2 +-
 mm/page_alloc.c       | 7 +++++--
 3 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c5e11768e5c1..cdf3ffa0c223 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -927,6 +927,5 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 void tag_clear_highpage(struct page *page)
 {
 	mte_zero_clear_page_tags(page_address(page));
-	page_kasan_tag_reset(page);
 	set_bit(PG_mte_tagged, &page->flags);
 }
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 2d2ccae933c2..0ace7759acd2 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -348,7 +348,7 @@ struct vm_area_struct;
 #define GFP_DMA32	__GFP_DMA32
 #define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
 #define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE | \
-			 __GFP_SKIP_KASAN_POISON)
+			 __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON)
 #define GFP_TRANSHUGE_LIGHT	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
 			 __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
 #define GFP_TRANSHUGE	(GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e008a3df0485..f6ed240870bc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2397,6 +2397,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
 			!should_skip_init(gfp_flags);
 	bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
+	int i;
 
 	set_page_private(page, 0);
 	set_page_refcounted(page);
@@ -2422,8 +2423,6 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	 * should be initialized as well).
 	 */
 	if (init_tags) {
-		int i;
-
 		/* Initialize both memory and tags. */
 		for (i = 0; i != 1 << order; ++i)
 			tag_clear_highpage(page + i);
@@ -2438,6 +2437,10 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 		/* Note that memory is already initialized by KASAN. */
 		if (kasan_has_integrated_init())
 			init = false;
+	} else {
+		/* Ensure page_address() dereferencing does not fault. */
+		for (i = 0; i != 1 << order; ++i)
+			page_kasan_tag_reset(page + i);
 	}
 	/* If memory is still not initialized, do it now. */
 	if (init)
--
cgit v1.2.3-70-g09d2
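
Why resetting the page->flags tag keeps page_address() usable is worth
spelling out: the reset value is the match-all tag, and a pointer carrying
the match-all tag is never checked against the allocation tags in memory. A
minimal sketch of that rule follows, assuming the usual 0xff match-all value
rather than quoting the kernel's KASAN_TAG_KERNEL definition; the helper name
is made up for the example.

#include <stdbool.h>

#define MATCH_ALL_TAG	0xff	/* assumed match-all value (KASAN_TAG_KERNEL) */

/* Would a tag-checked access fault for this pointer tag / memory tag pair? */
static bool tag_check_faults(unsigned char ptr_tag, unsigned char mem_tag)
{
	if (ptr_tag == MATCH_ALL_TAG)	/* match-all pointers are never checked */
		return false;

	return ptr_tag != mem_tag;
}

With the flags tag reset, the pointer returned by page_address() carries the
match-all tag, so the kernel can touch the page no matter what values the
user's allocation tags end up holding.
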
From 6d05141a393071e104bf5be5ad4d0c79c6dff343 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 10 Jun 2022 16:21:40 +0100
Subject: mm: kasan: Skip page unpoisoning only if __GFP_SKIP_KASAN_UNPOISON

Currently post_alloc_hook() skips the kasan unpoisoning if the tags will
be zeroed (__GFP_ZEROTAGS) or __GFP_SKIP_KASAN_UNPOISON is passed. Since
__GFP_ZEROTAGS is now accompanied by __GFP_SKIP_KASAN_UNPOISON, remove
the extra check.

Signed-off-by: Catalin Marinas
Cc: Andrey Ryabinin
Cc: Andrey Konovalov
Cc: Peter Collingbourne
Cc: Vincenzo Frascino
Reviewed-by: Vincenzo Frascino
Reviewed-by: Andrey Konovalov
Link: https://lore.kernel.org/r/20220610152141.2148929-4-catalin.marinas@arm.com
Signed-off-by: Will Deacon
---
 mm/page_alloc.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f6ed240870bc..bf45a6aa407a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2361,7 +2361,7 @@ static inline bool check_new_pcp(struct page *page, unsigned int order)
 }
 #endif /* CONFIG_DEBUG_VM */
 
-static inline bool should_skip_kasan_unpoison(gfp_t flags, bool init_tags)
+static inline bool should_skip_kasan_unpoison(gfp_t flags)
 {
 	/* Don't skip if a software KASAN mode is enabled. */
 	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
@@ -2373,12 +2373,10 @@ static inline bool should_skip_kasan_unpoison(gfp_t flags, bool init_tags)
 		return true;
 
 	/*
-	 * With hardware tag-based KASAN enabled, skip if either:
-	 *
-	 * 1. Memory tags have already been cleared via tag_clear_highpage().
-	 * 2. Skipping has been requested via __GFP_SKIP_KASAN_UNPOISON.
+	 * With hardware tag-based KASAN enabled, skip if this has been
+	 * requested via __GFP_SKIP_KASAN_UNPOISON.
 	 */
-	return init_tags || (flags & __GFP_SKIP_KASAN_UNPOISON);
+	return flags & __GFP_SKIP_KASAN_UNPOISON;
 }
 
 static inline bool should_skip_init(gfp_t flags)
@@ -2430,7 +2428,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 		/* Note that memory is already initialized by the loop above. */
 		init = false;
 	}
-	if (!should_skip_kasan_unpoison(gfp_flags, init_tags)) {
+	if (!should_skip_kasan_unpoison(gfp_flags)) {
 		/* Unpoison shadow memory or set memory tags. */
 		kasan_unpoison_pages(page, order, init);
 
--
cgit v1.2.3-70-g09d2
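
The simplification above leans on the previous patch: a __GFP_ZEROTAGS user
allocation now always arrives with __GFP_SKIP_KASAN_UNPOISON set via
GFP_HIGHUSER_MOVABLE, so a separate init_tags check buys nothing. Here is a
rough sketch of that reasoning with made-up flag bits; the real definitions
live in include/linux/gfp.h and the software-KASAN and non-MTE special cases
are omitted.

#include <stdbool.h>

/* Illustrative flag bits only, not the kernel's gfp encoding. */
#define SKIP_KASAN_POISON	(1u << 0)
#define SKIP_KASAN_UNPOISON	(1u << 1)
#define ZEROTAGS		(1u << 2)
#define HIGHUSER_MOVABLE	(SKIP_KASAN_POISON | SKIP_KASAN_UNPOISON)

/* The helper only needs the gfp flags now; no init_tags argument. */
static bool skip_unpoison(unsigned int flags)
{
	return flags & SKIP_KASAN_UNPOISON;
}

/* A tagged user page allocation already implies skipping the unpoison. */
static bool example(void)
{
	return skip_unpoison(HIGHUSER_MOVABLE | ZEROTAGS);	/* true */
}
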
From 20794545c14692094a882d2221c251c4573e6adf Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 10 Jun 2022 16:21:41 +0100
Subject: arm64: kasan: Revert "arm64: mte: reset the page tag in page->flags"

This reverts commit e5b8d9218951e59df986f627ec93569a0d22149b.

Pages mapped in user-space with PROT_MTE have the allocation tags either
zeroed or copied/restored to some user values. In order for the kernel
to access such pages via page_address(), resetting the tag in
page->flags was necessary. This tag resetting was deferred to
set_pte_at() -> mte_sync_page_tags() but it can race with another CPU
reading the flags (via page_to_virt()):

	P0 (mte_sync_page_tags):	P1 (memcpy from virt_to_page):
					  Rflags!=0xff
	  Wflags=0xff
	  DMB (doesn't help)
	  Wtags=0
					  Rtags=0	// fault

Since now the post_alloc_hook() function resets the page->flags tag when
unpoisoning is skipped for user pages (including the __GFP_ZEROTAGS
case), revert the arm64 commit calling page_kasan_tag_reset().

Signed-off-by: Catalin Marinas
Cc: Will Deacon
Cc: Vincenzo Frascino
Cc: Andrey Konovalov
Cc: Peter Collingbourne
Reviewed-by: Vincenzo Frascino
Acked-by: Andrey Konovalov
Link: https://lore.kernel.org/r/20220610152141.2148929-5-catalin.marinas@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/hibernate.c | 5 -----
 arch/arm64/kernel/mte.c       | 9 ---------
 arch/arm64/mm/copypage.c      | 9 ---------
 arch/arm64/mm/mteswap.c       | 9 ---------
 4 files changed, 32 deletions(-)

diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 2e248342476e..af5df48ba915 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -300,11 +300,6 @@ static void swsusp_mte_restore_tags(void)
 		unsigned long pfn = xa_state.xa_index;
 		struct page *page = pfn_to_online_page(pfn);
 
-		/*
-		 * It is not required to invoke page_kasan_tag_reset(page)
-		 * at this point since the tags stored in page->flags are
-		 * already restored.
-		 */
 		mte_restore_page_tags(page_address(page), tags);
 
 		mte_free_tag_storage(tags);
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index f6b00743c399..b2b730233274 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -48,15 +48,6 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
 	if (!pte_is_tagged)
 		return;
 
-	page_kasan_tag_reset(page);
-	/*
-	 * We need smp_wmb() in between setting the flags and clearing the
-	 * tags because if another thread reads page->flags and builds a
-	 * tagged address out of it, there is an actual dependency to the
-	 * memory access, but on the current thread we do not guarantee that
-	 * the new page->flags are visible before the tags were updated.
-	 */
-	smp_wmb();
 	mte_clear_page_tags(page_address(page));
 }
 
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 0dea80bf6de4..24913271e898 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -23,15 +23,6 @@ void copy_highpage(struct page *to, struct page *from)
 
 	if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
 		set_bit(PG_mte_tagged, &to->flags);
-		page_kasan_tag_reset(to);
-		/*
-		 * We need smp_wmb() in between setting the flags and clearing the
-		 * tags because if another thread reads page->flags and builds a
-		 * tagged address out of it, there is an actual dependency to the
-		 * memory access, but on the current thread we do not guarantee that
-		 * the new page->flags are visible before the tags were updated.
-		 */
-		smp_wmb();
 		mte_copy_page_tags(kto, kfrom);
 	}
 }
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
index a9e50e930484..4334dec93bd4 100644
--- a/arch/arm64/mm/mteswap.c
+++ b/arch/arm64/mm/mteswap.c
@@ -53,15 +53,6 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
 	if (!tags)
 		return false;
 
-	page_kasan_tag_reset(page);
-	/*
-	 * We need smp_wmb() in between setting the flags and clearing the
-	 * tags because if another thread reads page->flags and builds a
-	 * tagged address out of it, there is an actual dependency to the
-	 * memory access, but on the current thread we do not guarantee that
-	 * the new page->flags are visible before the tags were updated.
-	 */
-	smp_wmb();
 	mte_restore_page_tags(page_address(page), tags);
 
 	return true;
--
cgit v1.2.3-70-g09d2
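
As a closing illustration of why the reverted smp_wmb() could not close the
race in the diagram above: the barrier orders the writer's own stores, but a
reader that sampled the stale page->flags before the update still builds a
tagged pointer from the old value and then observes the freshly cleared tags.
Doing the reset in post_alloc_hook(), while the page is still private to the
allocating CPU, removes the window. The sketch below uses hypothetical types
and helpers (struct page_sketch, clear_tags_sketch()), not kernel code.

#define TAG_RESET	(0xffUL << 56)	/* assumed position of the page->flags tag */

struct page_sketch {
	unsigned long flags;	/* carries the KASAN tag */
	unsigned char tags[64];	/* stand-in for the MTE allocation tags */
};

/* Stand-in for mte_clear_page_tags(): zero the allocation tags. */
static void clear_tags_sketch(struct page_sketch *p)
{
	for (unsigned int i = 0; i < sizeof(p->tags); i++)
		p->tags[i] = 0;
}

/*
 * Removed scheme: the flags tag is reset at set_pte_at() time, when another
 * CPU may already have read the old page->flags. The smp_wmb() that sat
 * between the two stores only ordered this writer; it could not help a
 * reader that had sampled the stale flags and then reads the cleared tags.
 */
static void deferred_reset(struct page_sketch *p)
{
	p->flags |= TAG_RESET;	/* Wflags=0xff */
	/* smp_wmb() was here */
	clear_tags_sketch(p);	/* Wtags=0: a concurrent reader can fault */
}

/*
 * Scheme after this series: post_alloc_hook() resets the flags tag before the
 * page is handed out, so no reader can pair cleared tags with a stale tag in
 * page->flags.
 */
static void reset_at_alloc(struct page_sketch *p)
{
	p->flags |= TAG_RESET;	/* page_kasan_tag_reset() equivalent */
	clear_tags_sketch(p);	/* done before the page is published */
}
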