Diffstat (limited to 'arch/arm64/lib')
-rw-r--r--   arch/arm64/lib/clear_page.S    |  10
-rw-r--r--   arch/arm64/lib/kasan_sw_tags.S |   4
-rw-r--r--   arch/arm64/lib/mte.S           |   8
-rw-r--r--   arch/arm64/lib/xor-neon.c      | 147
4 files changed, 164 insertions, 5 deletions
diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S
index b84b179edba3..1fd5d790ab80 100644
--- a/arch/arm64/lib/clear_page.S
+++ b/arch/arm64/lib/clear_page.S
@@ -16,6 +16,7 @@
  */
 SYM_FUNC_START_PI(clear_page)
 	mrs	x1, dczid_el0
+	tbnz	x1, #4, 2f	/* Branch if DC ZVA is prohibited */
 	and	w1, w1, #0xf
 	mov	x2, #4
 	lsl	x1, x2, x1
@@ -25,5 +26,14 @@ SYM_FUNC_START_PI(clear_page)
 	tst	x0, #(PAGE_SIZE - 1)
 	b.ne	1b
 	ret
+
+2:	stnp	xzr, xzr, [x0]
+	stnp	xzr, xzr, [x0, #16]
+	stnp	xzr, xzr, [x0, #32]
+	stnp	xzr, xzr, [x0, #48]
+	add	x0, x0, #64
+	tst	x0, #(PAGE_SIZE - 1)
+	b.ne	2b
+	ret
 SYM_FUNC_END_PI(clear_page)
 EXPORT_SYMBOL(clear_page)
diff --git a/arch/arm64/lib/kasan_sw_tags.S b/arch/arm64/lib/kasan_sw_tags.S
index 5b04464c045e..20784ce75def 100644
--- a/arch/arm64/lib/kasan_sw_tags.S
+++ b/arch/arm64/lib/kasan_sw_tags.S
@@ -38,9 +38,7 @@
  * incremented by 256 prior to return).
  */
 SYM_CODE_START(__hwasan_tag_mismatch)
-#ifdef BTI_C
-	BTI_C
-#endif
+	bti	c
 	add	x29, sp, #232
 	stp	x2, x3, [sp, #8 * 2]
 	stp	x4, x5, [sp, #8 * 4]
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index e83643b3995f..f531dcb95174 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -43,17 +43,23 @@ SYM_FUNC_END(mte_clear_page_tags)
  *	x0 - address to the beginning of the page
  */
 SYM_FUNC_START(mte_zero_clear_page_tags)
+	and	x0, x0, #(1 << MTE_TAG_SHIFT) - 1	// clear the tag
 	mrs	x1, dczid_el0
+	tbnz	x1, #4, 2f	// Branch if DC GZVA is prohibited
 	and	w1, w1, #0xf
 	mov	x2, #4
 	lsl	x1, x2, x1
-	and	x0, x0, #(1 << MTE_TAG_SHIFT) - 1	// clear the tag
 1:	dc	gzva, x0
 	add	x0, x0, x1
 	tst	x0, #(PAGE_SIZE - 1)
 	b.ne	1b
 	ret
+
+2:	stz2g	x0, [x0], #(MTE_GRANULE_SIZE * 2)
+	tst	x0, #(PAGE_SIZE - 1)
+	b.ne	2b
+	ret
 SYM_FUNC_END(mte_zero_clear_page_tags)
 
 /*
diff --git a/arch/arm64/lib/xor-neon.c b/arch/arm64/lib/xor-neon.c
index 11bf4f8aca68..d189cf4e70ea 100644
--- a/arch/arm64/lib/xor-neon.c
+++ b/arch/arm64/lib/xor-neon.c
@@ -167,7 +167,7 @@ void xor_arm64_neon_5(unsigned long bytes, unsigned long *p1,
 	} while (--lines > 0);
 }
 
-struct xor_block_template const xor_block_inner_neon = {
+struct xor_block_template xor_block_inner_neon __ro_after_init = {
 	.name		= "__inner_neon__",
 	.do_2		= xor_arm64_neon_2,
 	.do_3		= xor_arm64_neon_3,
@@ -176,6 +176,151 @@ struct xor_block_template const xor_block_inner_neon = {
 };
 EXPORT_SYMBOL(xor_block_inner_neon);
 
+static inline uint64x2_t eor3(uint64x2_t p, uint64x2_t q, uint64x2_t r)
+{
+	uint64x2_t res;
+
+	asm(ARM64_ASM_PREAMBLE ".arch_extension sha3\n"
+	    "eor3 %0.16b, %1.16b, %2.16b, %3.16b"
+	    : "=w"(res) : "w"(p), "w"(q), "w"(r));
+	return res;
+}
+
+static void xor_arm64_eor3_3(unsigned long bytes, unsigned long *p1,
+			     unsigned long *p2, unsigned long *p3)
+{
+	uint64_t *dp1 = (uint64_t *)p1;
+	uint64_t *dp2 = (uint64_t *)p2;
+	uint64_t *dp3 = (uint64_t *)p3;
+
+	register uint64x2_t v0, v1, v2, v3;
+	long lines = bytes / (sizeof(uint64x2_t) * 4);
+
+	do {
+		/* p1 ^= p2 ^ p3 */
+		v0 = eor3(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0),
+			  vld1q_u64(dp3 + 0));
+		v1 = eor3(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2),
+			  vld1q_u64(dp3 + 2));
+		v2 = eor3(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4),
+			  vld1q_u64(dp3 + 4));
+		v3 = eor3(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6),
+			  vld1q_u64(dp3 + 6));
+
+		/* store */
+		vst1q_u64(dp1 + 0, v0);
+		vst1q_u64(dp1 + 2, v1);
+		vst1q_u64(dp1 + 4, v2);
+		vst1q_u64(dp1 + 6, v3);
+
+		dp1 += 8;
+		dp2 += 8;
+		dp3 += 8;
+	} while (--lines > 0);
+}
+
+static void xor_arm64_eor3_4(unsigned long bytes, unsigned long *p1,
+			     unsigned long *p2, unsigned long *p3,
+			     unsigned long *p4)
+{
+	uint64_t *dp1 = (uint64_t *)p1;
+	uint64_t *dp2 = (uint64_t *)p2;
+	uint64_t *dp3 = (uint64_t *)p3;
+	uint64_t *dp4 = (uint64_t *)p4;
+
+	register uint64x2_t v0, v1, v2, v3;
+	long lines = bytes / (sizeof(uint64x2_t) * 4);
+
+	do {
+		/* p1 ^= p2 ^ p3 */
+		v0 = eor3(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0),
+			  vld1q_u64(dp3 + 0));
+		v1 = eor3(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2),
+			  vld1q_u64(dp3 + 2));
+		v2 = eor3(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4),
+			  vld1q_u64(dp3 + 4));
+		v3 = eor3(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6),
+			  vld1q_u64(dp3 + 6));
+
+		/* p1 ^= p4 */
+		v0 = veorq_u64(v0, vld1q_u64(dp4 + 0));
+		v1 = veorq_u64(v1, vld1q_u64(dp4 + 2));
+		v2 = veorq_u64(v2, vld1q_u64(dp4 + 4));
+		v3 = veorq_u64(v3, vld1q_u64(dp4 + 6));
+
+		/* store */
+		vst1q_u64(dp1 + 0, v0);
+		vst1q_u64(dp1 + 2, v1);
+		vst1q_u64(dp1 + 4, v2);
+		vst1q_u64(dp1 + 6, v3);
+
+		dp1 += 8;
+		dp2 += 8;
+		dp3 += 8;
+		dp4 += 8;
+	} while (--lines > 0);
+}
+
+static void xor_arm64_eor3_5(unsigned long bytes, unsigned long *p1,
+			     unsigned long *p2, unsigned long *p3,
+			     unsigned long *p4, unsigned long *p5)
+{
+	uint64_t *dp1 = (uint64_t *)p1;
+	uint64_t *dp2 = (uint64_t *)p2;
+	uint64_t *dp3 = (uint64_t *)p3;
+	uint64_t *dp4 = (uint64_t *)p4;
+	uint64_t *dp5 = (uint64_t *)p5;
+
+	register uint64x2_t v0, v1, v2, v3;
+	long lines = bytes / (sizeof(uint64x2_t) * 4);
+
+	do {
+		/* p1 ^= p2 ^ p3 */
+		v0 = eor3(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0),
+			  vld1q_u64(dp3 + 0));
+		v1 = eor3(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2),
+			  vld1q_u64(dp3 + 2));
+		v2 = eor3(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4),
+			  vld1q_u64(dp3 + 4));
+		v3 = eor3(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6),
+			  vld1q_u64(dp3 + 6));
+
+		/* p1 ^= p4 ^ p5 */
+		v0 = eor3(v0, vld1q_u64(dp4 + 0), vld1q_u64(dp5 + 0));
+		v1 = eor3(v1, vld1q_u64(dp4 + 2), vld1q_u64(dp5 + 2));
+		v2 = eor3(v2, vld1q_u64(dp4 + 4), vld1q_u64(dp5 + 4));
+		v3 = eor3(v3, vld1q_u64(dp4 + 6), vld1q_u64(dp5 + 6));
+
+		/* store */
+		vst1q_u64(dp1 + 0, v0);
+		vst1q_u64(dp1 + 2, v1);
+		vst1q_u64(dp1 + 4, v2);
+		vst1q_u64(dp1 + 6, v3);
+
+		dp1 += 8;
+		dp2 += 8;
+		dp3 += 8;
+		dp4 += 8;
+		dp5 += 8;
+	} while (--lines > 0);
+}
+
+static int __init xor_neon_init(void)
+{
+	if (IS_ENABLED(CONFIG_AS_HAS_SHA3) && cpu_have_named_feature(SHA3)) {
+		xor_block_inner_neon.do_3 = xor_arm64_eor3_3;
+		xor_block_inner_neon.do_4 = xor_arm64_eor3_4;
+		xor_block_inner_neon.do_5 = xor_arm64_eor3_5;
+	}
+	return 0;
+}
+module_init(xor_neon_init);
+
+static void __exit xor_neon_exit(void)
+{
+}
+module_exit(xor_neon_exit);
+
 MODULE_AUTHOR("Jackie Liu <liuyun01@kylinos.cn>");
 MODULE_DESCRIPTION("ARMv8 XOR Extensions");
 MODULE_LICENSE("GPL");
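
For readers of the clear_page.S and mte.S hunks above: both routines read DCZID_EL0, and the new tbnz tests bit 4 (DCZID_EL0.DZP), skipping to an explicit-store fallback when DC ZVA / DC GZVA is prohibited; otherwise the low four bits give the zeroing block size. Below is a minimal C sketch of that decode, assuming a hypothetical read_dczid() helper for the register read (the helper is not part of the patch):

#include <stdbool.h>
#include <stdint.h>

#define DCZID_DZP_BIT	(1UL << 4)	/* DC ZVA/GZVA prohibited when set */
#define DCZID_BS_MASK	0xfUL		/* BS: log2 of block size in words */

/* Hypothetical helper returning the raw DCZID_EL0 value. */
extern uint64_t read_dczid(void);

/* True when the DC ZVA family of instructions may be used. */
static bool dc_zva_allowed(void)
{
	return !(read_dczid() & DCZID_DZP_BIT);
}

/*
 * Bytes zeroed per DC ZVA: BS is log2 of the block size in 4-byte words,
 * so bytes = (1 << BS) * 4 = 4 << BS, matching the
 * "mov x2, #4; lsl x1, x2, x1" sequence in the assembly above.
 */
static uint64_t dc_zva_block_bytes(void)
{
	return 4UL << (read_dczid() & DCZID_BS_MASK);
}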
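
The xor-neon.c hunk adds EOR3-based 3-, 4-, and 5-way XOR routines and, at module init, points the xor_block_inner_neon template's do_3/do_4/do_5 hooks at them when both the assembler (CONFIG_AS_HAS_SHA3) and the CPU (SHA3 feature) support EOR3. The per-iteration work is a 64-byte-wide "p1 ^= p2 ^ p3"; the sketch below shows the same operation in plain scalar C for illustration only (it is not the NEON/EOR3 path and makes the same assumption as the kernel routines that bytes is a positive multiple of 64):

#include <stddef.h>
#include <stdint.h>

/*
 * Scalar equivalent of xor_arm64_eor3_3(): XOR three buffers into the
 * first, 64 bytes (eight 64-bit words) per iteration.  The NEON version
 * does the same work with four 128-bit EOR3 instructions per iteration.
 */
static void xor_3_scalar(unsigned long bytes, uint64_t *p1,
			 const uint64_t *p2, const uint64_t *p3)
{
	long lines = bytes / (sizeof(uint64_t) * 8);

	do {
		for (size_t i = 0; i < 8; i++)
			p1[i] ^= p2[i] ^ p3[i];	/* p1 ^= p2 ^ p3 */
		p1 += 8;
		p2 += 8;
		p3 += 8;
	} while (--lines > 0);
}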