| author | Christophe Leroy <christophe.leroy@csgroup.eu> | 2022-01-21 16:30:27 +0000 |
|---|---|---|
| committer | Michael Ellerman <mpe@ellerman.id.au> | 2022-02-12 22:47:43 +1100 |
| commit | fd1feade75fb1a9275c39d76c5ccdbbbe6b37aa3 (patch) | |
| tree | 6f34bdf2144d5a3ea59ee140bac7ac29fb3adde3 /arch/powerpc/kernel/vdso/cacheflush.S | |
| parent | d88378d8d2c776154c6b606f2a423a81d7795f6f (diff) | |
powerpc/vdso: Merge vdso64 and vdso32 into a single directory
Merge vdso64 into vdso32 and rename the resulting directory to vdso.
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/4dbe05cc130f6a0858d09ac72e436c373cb08b70.1642782130.git.christophe.leroy@csgroup.eu
Diffstat (limited to 'arch/powerpc/kernel/vdso/cacheflush.S')
-rw-r--r-- | arch/powerpc/kernel/vdso/cacheflush.S | 98 |
1 file changed, 98 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/vdso/cacheflush.S b/arch/powerpc/kernel/vdso/cacheflush.S
new file mode 100644
index 000000000000..d4e43ab2d5df
--- /dev/null
+++ b/arch/powerpc/kernel/vdso/cacheflush.S
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * vDSO provided cache flush routines
+ *
+ * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org),
+ *                    IBM Corp.
+ */
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+#include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
+#include <asm/asm-offsets.h>
+#include <asm/cache.h>
+
+	.text
+
+/*
+ * Default "generic" version of __kernel_sync_dicache.
+ *
+ * void __kernel_sync_dicache(unsigned long start, unsigned long end)
+ *
+ * Flushes the data cache & invalidate the instruction cache for the
+ * provided range [start, end[
+ */
+V_FUNCTION_BEGIN(__kernel_sync_dicache)
+  .cfi_startproc
+BEGIN_FTR_SECTION
+	b	3f
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
+#ifdef CONFIG_PPC64
+	mflr	r12
+  .cfi_register lr,r12
+	get_datapage	r10
+	mtlr	r12
+  .cfi_restore lr
+#endif
+
+#ifdef CONFIG_PPC64
+	lwz	r7,CFG_DCACHE_BLOCKSZ(r10)
+	addi	r5,r7,-1
+#else
+	li	r5, L1_CACHE_BYTES - 1
+#endif
+	andc	r6,r3,r5		/* round low to line bdy */
+	subf	r8,r6,r4		/* compute length */
+	add	r8,r8,r5		/* ensure we get enough */
+#ifdef CONFIG_PPC64
+	lwz	r9,CFG_DCACHE_LOGBLOCKSZ(r10)
+	PPC_SRL.	r8,r8,r9	/* compute line count */
+#else
+	srwi.	r8, r8, L1_CACHE_SHIFT
+	mr	r7, r6
+#endif
+	crclr	cr0*4+so
+	beqlr				/* nothing to do? */
+	mtctr	r8
+1:	dcbst	0,r6
+#ifdef CONFIG_PPC64
+	add	r6,r6,r7
+#else
+	addi	r6, r6, L1_CACHE_BYTES
+#endif
+	bdnz	1b
+	sync
+
+/* Now invalidate the instruction cache */
+
+#ifdef CONFIG_PPC64
+	lwz	r7,CFG_ICACHE_BLOCKSZ(r10)
+	addi	r5,r7,-1
+	andc	r6,r3,r5		/* round low to line bdy */
+	subf	r8,r6,r4		/* compute length */
+	add	r8,r8,r5
+	lwz	r9,CFG_ICACHE_LOGBLOCKSZ(r10)
+	PPC_SRL.	r8,r8,r9	/* compute line count */
+	crclr	cr0*4+so
+	beqlr				/* nothing to do? */
+#endif
+	mtctr	r8
+#ifdef CONFIG_PPC64
+2:	icbi	0,r6
+	add	r6,r6,r7
+#else
+2:	icbi	0, r7
+	addi	r7, r7, L1_CACHE_BYTES
+#endif
+	bdnz	2b
+	isync
+	li	r3,0
+	blr
+3:
+	crclr	cr0*4+so
+	sync
+	isync
+	li	r3,0
+	blr
+  .cfi_endproc
+V_FUNCTION_END(__kernel_sync_dicache)
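The dcbst and icbi loops share the same index arithmetic: round start down to a cache-line boundary, pad the length, and shift right by log2 of the line size to get a line count. On 64-bit the line sizes are read from the vDSO datapage (CFG_DCACHE_BLOCKSZ / CFG_DCACHE_LOGBLOCKSZ), since they vary by CPU; 32-bit uses the compile-time L1_CACHE_BYTES. On CPUs with a coherent icache the loops are skipped entirely (the branch to 3f) in favour of a bare sync/isync. Below is a minimal C sketch of that arithmetic, not part of the commit; `cache_lines` is an illustrative name:

```c
#include <stdio.h>

/* Round [start, end) to cache-line granularity and count the lines to
 * touch -- the same job as the andc/subf/add/srwi sequence above.
 * line_size must be a power of two (e.g. 128 on most ppc64 CPUs).
 */
static unsigned long cache_lines(unsigned long start, unsigned long end,
				 unsigned long line_size)
{
	unsigned long first = start & ~(line_size - 1);    /* andc: round down */
	unsigned long len = end - first + (line_size - 1); /* subf + add      */
	return len >> __builtin_ctzl(line_size);           /* srwi / PPC_SRL  */
}

int main(void)
{
	/* [0x1005, 0x1085) with 128-byte lines touches two lines. */
	printf("%lu\n", cache_lines(0x1005, 0x1085, 128));
	return 0;
}
```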
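Userspace does not reach __kernel_sync_dicache through a header; it resolves the symbol from the vDSO image mapped at AT_SYSINFO_EHDR. A hedged sketch follows, assuming the parse_vdso.c helpers from the kernel's vDSO selftests (tools/testing/selftests/vDSO/) are linked in; `sync_dicache_fn` and `buf` are illustrative names:

```c
#include <stdint.h>
#include <sys/auxv.h>

/* Declarations matching parse_vdso.c from the kernel's vDSO selftests;
 * compile and link that file alongside this one.
 */
extern void vdso_init_from_sysinfo_ehdr(uintptr_t base);
extern void *vdso_sym(const char *version, const char *name);

typedef void (*sync_dicache_fn)(unsigned long start, unsigned long end);

int main(void)
{
	unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
	if (!sysinfo_ehdr)
		return 1;	/* no vDSO mapped */
	vdso_init_from_sysinfo_ehdr(sysinfo_ehdr);

	/* powerpc vDSO symbols carry the LINUX_2.6.15 version. */
	sync_dicache_fn sync_dicache =
		(sync_dicache_fn)vdso_sym("LINUX_2.6.15", "__kernel_sync_dicache");
	if (!sync_dicache)
		return 1;

	static unsigned char buf[256];	/* stand-in for freshly written code */
	sync_dicache((unsigned long)buf,
		     (unsigned long)buf + sizeof(buf));	/* end is exclusive */
	return 0;
}
```

This is the usual pattern for JITs on powerpc: after writing instructions, flush the range through the vDSO entry point rather than issuing raw dcbst/icbi sequences, so the kernel-selected line sizes are always used.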