From 8a8f7866663588b162031a5348c24e42161461cd Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 18 Aug 2022 19:31:25 +0200 Subject: powerpc/vdso: Don't map VDSO at a fixed address on PPC32 PPC64 removed default mapping address from VDSO in commit 30d0b3682887 ("powerpc: Move 64bit VDSO to improve context switch performance"). Do like PPC64 and let get_unmapped_area() place the VDSO mapping at the address it wants, don't force a default address. This allows randomisation of VDSO address. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/cba76f5a5b01fcc49415e632d92c11c1c5998cab.1660843877.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/vdso.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h index 8542e9bbeead..7650b6ce14c8 100644 --- a/arch/powerpc/include/asm/vdso.h +++ b/arch/powerpc/include/asm/vdso.h @@ -2,9 +2,6 @@ #ifndef _ASM_POWERPC_VDSO_H #define _ASM_POWERPC_VDSO_H -/* Default map addresses for 32bit vDSO */ -#define VDSO32_MBASE 0x100000 - #define VDSO_VERSION_STRING LINUX_2.6.15 #ifndef __ASSEMBLY__ -- cgit v1.2.3-70-g09d2 From eb316ae798b36b280ef9e6a79d3aa34d146aa0e4 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 18 Aug 2022 15:06:59 +1000 Subject: powerpc: Move patch sites out of asm-prototypes.h The definitions for the patch sites etc. don't belong in asm-prototypes.h, they are not EXPORT'ed asm symbols. Move them into sections.h which is traditionally used for asm symbols. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220818050659.187181-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/asm-prototypes.h | 13 ------------- arch/powerpc/include/asm/sections.h | 13 +++++++++++++ arch/powerpc/kernel/security.c | 1 + 3 files changed, 14 insertions(+), 13 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 81631e64dbeb..e1b3e90eec94 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -55,19 +55,6 @@ struct kvm_vcpu; void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); -/* Patch sites */ -extern s32 patch__call_flush_branch_caches1; -extern s32 patch__call_flush_branch_caches2; -extern s32 patch__call_flush_branch_caches3; -extern s32 patch__flush_count_cache_return; -extern s32 patch__flush_link_stack_return; -extern s32 patch__call_kvm_flush_link_stack; -extern s32 patch__call_kvm_flush_link_stack_p9; -extern s32 patch__memset_nocache, patch__memcpy_nocache; - -extern long flush_branch_caches; -extern long kvm_flush_link_stack; - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv); void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv); diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 8be2c491c733..183e6b8af392 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -14,6 +14,19 @@ typedef struct func_desc func_desc_t; extern char __head_end[]; +/* Patch sites */ +extern s32 patch__call_flush_branch_caches1; +extern s32 patch__call_flush_branch_caches2; +extern s32 patch__call_flush_branch_caches3; +extern s32 patch__flush_count_cache_return; +extern s32 patch__flush_link_stack_return; +extern 
s32 patch__call_kvm_flush_link_stack; +extern s32 patch__call_kvm_flush_link_stack_p9; +extern s32 patch__memset_nocache, patch__memcpy_nocache; + +extern long flush_branch_caches; +extern long kvm_flush_link_stack; + #ifdef __powerpc64__ extern char __start_interrupts[]; diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index d96fd14bd7c9..b562a1d2c750 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include -- cgit v1.2.3-70-g09d2 From e38cd72c17fa7d7710088365251feb6c52b501c8 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 19 Aug 2022 16:23:43 +0200 Subject: powerpc: Remove stale declarations in mmu_decl.h rtas_size and rtas_data are not used anymore since at least commit 7c8c6b9776fb ("powerpc: Merge lmb.c and make MM initialization use it.") Remove them. Since commit 4b74a35fc7e9 ("powerpc/32s: Make Hash var static") the forward declaration of struct hash_pte is unneeded. Remove it. __initial_memory_limit_addr was removed by commit e63075a3c937 ("memblock: Introduce default allocation limit and use it to replace explicit ones") Remove the declaration. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/a821e8397dd56b8177ecc04966d3b3a7c4bda6d4.1660919016.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/setup.h | 1 - arch/powerpc/mm/mmu_decl.h | 4 ---- 2 files changed, 5 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index d8c28902cf59..dd461b2c825c 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -7,7 +7,6 @@ #ifndef __ASSEMBLY__ extern void ppc_printk_progress(char *s, unsigned short hex); -extern unsigned int rtas_data; extern unsigned long long memory_limit; extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 229c72e49198..8f5afad1b6af 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -92,15 +92,11 @@ extern void mapin_ram(void); extern void setbat(int index, unsigned long virt, phys_addr_t phys, unsigned int size, pgprot_t prot); -extern unsigned int rtas_data, rtas_size; - -struct hash_pte; extern u8 early_hash[]; #endif /* CONFIG_PPC32 */ extern unsigned long __max_low_memory; -extern phys_addr_t __initial_memory_limit_addr; extern phys_addr_t total_memory; extern phys_addr_t total_lowmem; extern phys_addr_t memstart_addr; -- cgit v1.2.3-70-g09d2 From 395cac7752b905318ae454a8b859d4c190485510 Mon Sep 17 00:00:00 2001 From: Russell Currey Date: Wed, 17 Aug 2022 15:06:39 +1000 Subject: powerpc/mm: Support execute-only memory on the Radix MMU Add support for execute-only memory (XOM) for the Radix MMU by using an execute-only mapping, as opposed to the RX mapping used by powerpc's other MMUs. The Hash MMU already supports XOM through the execute-only pkey, which is a separate mechanism shared with x86. A PROT_EXEC-only mapping will map to RX, and then the pkey will be applied on top of it. mmap() and mprotect() consumers in userspace should observe the same behaviour on Hash and Radix despite the differences in implementation. Replacing the vma_is_accessible() check in access_error() with a read check should be functionally equivalent for non-Radix MMUs, since it follows write and execute checks. 
For Radix, the change enables detecting faults on execute-only mappings where vma_is_accessible() would return true. Signed-off-by: Russell Currey Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220817050640.406017-1-ruscur@russell.cc --- arch/powerpc/include/asm/book3s/64/pgtable.h | 2 ++ arch/powerpc/mm/book3s64/pgtable.c | 11 +++++++++-- arch/powerpc/mm/fault.c | 6 +++++- 3 files changed, 16 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 392ff48f77df..486902aff040 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -151,6 +151,8 @@ #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC) #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_READ) #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC) +/* Radix only, Hash uses PAGE_READONLY_X + execute-only pkey instead */ +#define PAGE_EXECONLY __pgprot(_PAGE_BASE | _PAGE_EXEC) /* Permission masks used for kernel mappings */ #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 7b9966402b25..f6151a589298 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -553,8 +553,15 @@ EXPORT_SYMBOL_GPL(memremap_compat_align); pgprot_t vm_get_page_prot(unsigned long vm_flags) { - unsigned long prot = pgprot_val(protection_map[vm_flags & - (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]); + unsigned long prot; + + /* Radix supports execute-only, but protection_map maps X -> RX */ + if (radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)) { + prot = pgprot_val(PAGE_EXECONLY); + } else { + prot = pgprot_val(protection_map[vm_flags & + (VM_ACCESS_FLAGS | VM_SHARED)]); + } if (vm_flags & VM_SAO) prot |= _PAGE_SAO; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 014005428687..1566804e4b3d 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -270,7 +270,11 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma return false; } - if (unlikely(!vma_is_accessible(vma))) + /* + * Check for a read fault. This could be caused by a read on an + * inaccessible page (i.e. PROT_NONE), or a Radix MMU execute-only page. + */ + if (unlikely(!(vma->vm_flags & VM_READ))) return true; /* * We should ideally do the vma pkey access check here. But in the -- cgit v1.2.3-70-g09d2 From a8933c8d55c37f4d5eb617b4bdb72b8888b88681 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 2 Sep 2022 18:53:13 +1000 Subject: powerpc/pseries: Add wait interval counter definitions to struct lppaca The hypervisor exposes accumulated partition scheduling interval times in the VPA (lppaca). These can be used to implement a simple stolen time in the guest without complex and costly dtl scanning. 
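For orientation, a minimal sketch (helper name illustrative, not part of this patch) of how a guest derives steal time from these accumulators; it mirrors the pseries_paravirt_steal_clock() implementation added by the next patch in this series:

/* Sketch only: sum the big-endian wait counters the hypervisor accumulates */
static u64 vpa_steal_time_tb(int cpu)
{
	struct lppaca *lp = &lppaca_of(cpu);

	return be64_to_cpu(READ_ONCE(lp->enqueue_dispatch_tb)) +
	       be64_to_cpu(READ_ONCE(lp->ready_enqueue_tb));
}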
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Reviewed-by: Fabiano Rosas Link: https://lore.kernel.org/r/20220902085316.2071519-2-npiggin@gmail.com --- arch/powerpc/include/asm/lppaca.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index c390ec377bae..34d44cb17c87 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -104,14 +104,18 @@ struct lppaca { volatile __be32 dispersion_count; /* dispatch changed physical cpu */ volatile __be64 cmo_faults; /* CMO page fault count */ volatile __be64 cmo_fault_time; /* CMO page fault time */ - u8 reserved10[104]; + u8 reserved10[64]; /* [S]PURR expropriated/donated */ + volatile __be64 enqueue_dispatch_tb; /* Total TB enqueue->dispatch */ + volatile __be64 ready_enqueue_tb; /* Total TB ready->enqueue */ + volatile __be64 wait_ready_tb; /* Total TB wait->ready */ + u8 reserved11[16]; /* cacheline 4-5 */ __be32 page_ins; /* CMO Hint - # page ins by OS */ - u8 reserved11[148]; + u8 reserved12[148]; volatile __be64 dtl_idx; /* Dispatch Trace Log head index */ - u8 reserved12[96]; + u8 reserved13[96]; } ____cacheline_aligned; #define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr) -- cgit v1.2.3-70-g09d2 From 0e8a63132800dd8ae5fcb19113f79bea43ea18d9 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 2 Sep 2022 18:53:14 +1000 Subject: powerpc/pseries: Implement CONFIG_PARAVIRT_TIME_ACCOUNTING CONFIG_VIRT_CPU_ACCOUNTING_GEN under pseries does not provide stolen time accounting unless CONFIG_PARAVIRT_TIME_ACCOUNTING is enabled. Implement this using the VPA accumulated wait counters. Note this will not work on current KVM hosts because KVM does not implement the VPA dispatch counters (yet). It could be implemented with the dispatch trace log as it is for VIRT_CPU_ACCOUNTING_NATIVE, but that is not necessary for the more limited accounting provided by PARAVIRT_TIME_ACCOUNTING, and it is more expensive, complex, and has downsides like potential log wrap. From Shrikanth: [...] it was tested on Power10 [PowerVM] Shared LPAR. system has two LPAR. we will call first one LPAR1 and second one as LPAR2. Test was carried out in SMT=1. Similar observation was seen in SMT=8 as well. LPAR config header from each LPAR is below. LPAR1 is twice as big as LPAR2. Since Both are sharing the same underlying hardware, work stealing will happen when both the LPAR's are contending for the same resource. LPAR1: type=Shared mode=Uncapped smt=Off lcpu=40 cpus=40 ent=20.00 LPAR2: type=Shared mode=Uncapped smt=Off lcpu=20 cpus=40 ent=10.00 mpstat was used to check for the utilization. stress-ng has been used as the workload. Few cases are tested. when the both LPAR are idle there is no steal time. when LPAR1 starts running at 100% which consumes all of the physical resource, steal time starts to get accounted. With LPAR1 running at 100% and LPAR2 starts running, steal time starts increasing. This is as expected. When the LPAR2 Load is increased further, steal time increases further. 
Case 1: 0% LPAR1; 0% LPAR2
%usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle
0.00 0.00 0.05 0.00 0.00 0.00 0.00 0.00 0.00 99.95

Case 2: 100% LPAR1; 0% LPAR2
%usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle
97.68 0.00 0.00 0.00 0.00 0.00 2.32 0.00 0.00 0.00

Case 3: 100% LPAR1; 50% LPAR2
%usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle
86.34 0.00 0.10 0.00 0.00 0.03 13.54 0.00 0.00 0.00

Case 4: 100% LPAR1; 100% LPAR2
%usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle
78.54 0.00 0.07 0.00 0.00 0.02 21.36 0.00 0.00 0.00

Case 5: 50% LPAR1; 100% LPAR2
%usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle
49.37 0.00 0.00 0.00 0.00 0.00 1.17 0.00 0.00 49.47

Patch is accounting for the steal time and basic tests are holding good. Signed-off-by: Nicholas Piggin Tested-by: Shrikanth Hegde [mpe: Add SPDX tag to new paravirt_api_clock.h] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220902085316.2071519-3-npiggin@gmail.com --- Documentation/admin-guide/kernel-parameters.txt | 6 +++--- arch/powerpc/include/asm/paravirt.h | 12 ++++++++++++ arch/powerpc/include/asm/paravirt_api_clock.h | 2 ++ arch/powerpc/platforms/pseries/Kconfig | 8 ++++++++ arch/powerpc/platforms/pseries/lpar.c | 11 +++++++++++ arch/powerpc/platforms/pseries/setup.c | 19 +++++++++++++++++++ 6 files changed, 55 insertions(+), 3 deletions(-) create mode 100644 arch/powerpc/include/asm/paravirt_api_clock.h (limited to 'arch/powerpc/include') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index d7f30902fda0..d62fdd0ac497 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3741,9 +3741,9 @@ [X86,PV_OPS] Disable paravirtualized VMware scheduler clock and use the default one. - no-steal-acc [X86,PV_OPS,ARM64] Disable paravirtualized steal time - accounting. steal time is computed, but won't - influence scheduler behaviour + no-steal-acc [X86,PV_OPS,ARM64,PPC/PSERIES] Disable paravirtualized + steal time accounting. steal time is computed, but + won't influence scheduler behaviour nolapic [X86-32,APIC] Do not enable or use the local APIC. 
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h index eb7df559ae74..f5ba1a3c41f8 100644 --- a/arch/powerpc/include/asm/paravirt.h +++ b/arch/powerpc/include/asm/paravirt.h @@ -21,6 +21,18 @@ static inline bool is_shared_processor(void) return static_branch_unlikely(&shared_processor); } +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING +extern struct static_key paravirt_steal_enabled; +extern struct static_key paravirt_steal_rq_enabled; + +u64 pseries_paravirt_steal_clock(int cpu); + +static inline u64 paravirt_steal_clock(int cpu) +{ + return pseries_paravirt_steal_clock(cpu); +} +#endif + /* If bit 0 is set, the cpu has been ceded, conferred, or preempted */ static inline u32 yield_count_of(int cpu) { diff --git a/arch/powerpc/include/asm/paravirt_api_clock.h b/arch/powerpc/include/asm/paravirt_api_clock.h new file mode 100644 index 000000000000..d25ca7ac57c7 --- /dev/null +++ b/arch/powerpc/include/asm/paravirt_api_clock.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index fb6499977f99..a3b4d99567cb 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -23,13 +23,21 @@ config PPC_PSERIES select SWIOTLB default y +config PARAVIRT + bool + config PARAVIRT_SPINLOCKS bool +config PARAVIRT_TIME_ACCOUNTING + select PARAVIRT + bool + config PPC_SPLPAR bool "Support for shared-processor logical partitions" depends on PPC_PSERIES select PARAVIRT_SPINLOCKS if PPC_QUEUED_SPINLOCKS + select PARAVIRT_TIME_ACCOUNTING if VIRT_CPU_ACCOUNTING_GEN default y help Enabling this option will make the kernel run more efficiently diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index e6c117fb6491..97ef6499e501 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -660,6 +660,17 @@ static int __init vcpudispatch_stats_procfs_init(void) } machine_device_initcall(pseries, vcpudispatch_stats_procfs_init); + +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING +u64 pseries_paravirt_steal_clock(int cpu) +{ + struct lppaca *lppaca = &lppaca_of(cpu); + + return be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) + + be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb)); +} +#endif + #endif /* CONFIG_PPC_SPLPAR */ void vpa_init(int cpu) diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 489f4c4df468..5e44c65a032c 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -80,6 +80,20 @@ DEFINE_STATIC_KEY_FALSE(shared_processor); EXPORT_SYMBOL(shared_processor); +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING +struct static_key paravirt_steal_enabled; +struct static_key paravirt_steal_rq_enabled; + +static bool steal_acc = true; +static int __init parse_no_stealacc(char *arg) +{ + steal_acc = false; + return 0; +} + +early_param("no-steal-acc", parse_no_stealacc); +#endif + int CMO_PrPSP = -1; int CMO_SecPSP = -1; unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K); @@ -834,6 +848,11 @@ static void __init pSeries_setup_arch(void) if (lppaca_shared_proc(get_lppaca())) { static_branch_enable(&shared_processor); pv_spinlocks_init(); +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING + static_key_slow_inc(¶virt_steal_enabled); + if (steal_acc) + static_key_slow_inc(¶virt_steal_rq_enabled); +#endif } ppc_md.power_save = pseries_lpar_idle; -- cgit v1.2.3-70-g09d2 From 
6ba5aa541aaa079c0ca888f7fe564b2035d5ca0a Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 2 Sep 2022 18:53:16 +1000 Subject: powerpc/pseries: Move dtl scanning and steal time accounting to pseries platform dtl is the PAPR Dispatch Trace Log, which is entirely a pseries feature. The pseries platform already has a file dealing with the dtl, so move scanning for stolen time accounting there from kernel/time.c. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220902085316.2071519-5-npiggin@gmail.com --- arch/powerpc/include/asm/cputime.h | 2 +- arch/powerpc/include/asm/dtl.h | 8 ---- arch/powerpc/include/asm/time.h | 5 +- arch/powerpc/kernel/time.c | 92 ++---------------------------------- arch/powerpc/platforms/pseries/dtl.c | 81 +++++++++++++++++++++++++++++++ 5 files changed, 90 insertions(+), 98 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 6d2b27997492..431ae2343022 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -95,7 +95,7 @@ static notrace inline void account_stolen_time(void) struct lppaca *lp = local_paca->lppaca_ptr; if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx))) - accumulate_stolen_time(); + pseries_accumulate_stolen_time(); } #endif } diff --git a/arch/powerpc/include/asm/dtl.h b/arch/powerpc/include/asm/dtl.h index 1625888f27ef..4bcb9f9ac764 100644 --- a/arch/powerpc/include/asm/dtl.h +++ b/arch/powerpc/include/asm/dtl.h @@ -37,14 +37,6 @@ struct dtl_entry { extern struct kmem_cache *dtl_cache; extern rwlock_t dtl_access_lock; -/* - * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls - * reading from the dispatch trace log. If other code wants to consume - * DTL entries, it can set this pointer to a function that will get - * called once for each DTL entry that gets processed. - */ -extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index); - extern void register_dtl_buffer(int cpu); extern void alloc_dtl_buffers(unsigned long *time_limit); extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity); diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index 1e5643a9b1f2..9f50766c4623 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -116,8 +116,9 @@ unsigned long long tb_to_ns(unsigned long long tb_ticks); void timer_broadcast_interrupt(void); -/* SPLPAR */ -void accumulate_stolen_time(void); +/* SPLPAR and VIRT_CPU_ACCOUNTING_NATIVE */ +void pseries_accumulate_stolen_time(void); +u64 pseries_calculate_stolen_time(u64 stop_tb); #endif /* __KERNEL__ */ #endif /* __POWERPC_TIME_H */ diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 587adcc12860..ae3e33b4ef95 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -178,92 +178,6 @@ static inline unsigned long read_spurr(unsigned long tb) return tb; } -#ifdef CONFIG_PPC_SPLPAR - -#include <asm/dtl.h> - -void (*dtl_consumer)(struct dtl_entry *, u64); - -/* - * Scan the dispatch trace log and count up the stolen time. - * Should be called with interrupts disabled. 
- */ -static u64 scan_dispatch_log(u64 stop_tb) -{ - u64 i = local_paca->dtl_ridx; - struct dtl_entry *dtl = local_paca->dtl_curr; - struct dtl_entry *dtl_end = local_paca->dispatch_log_end; - struct lppaca *vpa = local_paca->lppaca_ptr; - u64 tb_delta; - u64 stolen = 0; - u64 dtb; - - if (!dtl) - return 0; - - if (i == be64_to_cpu(vpa->dtl_idx)) - return 0; - while (i < be64_to_cpu(vpa->dtl_idx)) { - dtb = be64_to_cpu(dtl->timebase); - tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) + - be32_to_cpu(dtl->ready_to_enqueue_time); - barrier(); - if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) { - /* buffer has overflowed */ - i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG; - dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); - continue; - } - if (dtb > stop_tb) - break; - if (dtl_consumer) - dtl_consumer(dtl, i); - stolen += tb_delta; - ++i; - ++dtl; - if (dtl == dtl_end) - dtl = local_paca->dispatch_log; - } - local_paca->dtl_ridx = i; - local_paca->dtl_curr = dtl; - return stolen; -} - -/* - * Accumulate stolen time by scanning the dispatch trace log. - * Called on entry from user mode. - */ -void notrace accumulate_stolen_time(void) -{ - u64 sst, ust; - struct cpu_accounting_data *acct = &local_paca->accounting; - - sst = scan_dispatch_log(acct->starttime_user); - ust = scan_dispatch_log(acct->starttime); - acct->stime -= sst; - acct->utime -= ust; - acct->steal_time += ust + sst; -} - -static inline u64 calculate_stolen_time(u64 stop_tb) -{ - if (!firmware_has_feature(FW_FEATURE_SPLPAR)) - return 0; - - if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) - return scan_dispatch_log(stop_tb); - - return 0; -} - -#else /* CONFIG_PPC_SPLPAR */ -static inline u64 calculate_stolen_time(u64 stop_tb) -{ - return 0; -} - -#endif /* CONFIG_PPC_SPLPAR */ - /* * Account time for a transition between system, hard irq * or soft irq state. @@ -322,7 +236,11 @@ static unsigned long vtime_delta(struct cpu_accounting_data *acct, *stime_scaled = vtime_delta_scaled(acct, now, stime); - *steal_time = calculate_stolen_time(now); + if (IS_ENABLED(CONFIG_PPC_SPLPAR) && + firmware_has_feature(FW_FEATURE_SPLPAR)) + *steal_time = pseries_calculate_stolen_time(now); + else + *steal_time = 0; return stime; } diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index 352af5b14a0f..1b1977bc78e7 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -37,6 +37,15 @@ static u8 dtl_event_mask = DTL_LOG_ALL; static int dtl_buf_entries = N_DISPATCH_LOG; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + +/* + * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls + * reading from the dispatch trace log. If other code wants to consume + * DTL entries, it can set this pointer to a function that will get + * called once for each DTL entry that gets processed. + */ +static void (*dtl_consumer)(struct dtl_entry *entry, u64 index); + struct dtl_ring { u64 write_index; struct dtl_entry *write_ptr; @@ -48,6 +57,78 @@ static DEFINE_PER_CPU(struct dtl_ring, dtl_rings); static atomic_t dtl_count; +/* + * Scan the dispatch trace log and count up the stolen time. + * Should be called with interrupts disabled. 
+ */ +static notrace u64 scan_dispatch_log(u64 stop_tb) +{ + u64 i = local_paca->dtl_ridx; + struct dtl_entry *dtl = local_paca->dtl_curr; + struct dtl_entry *dtl_end = local_paca->dispatch_log_end; + struct lppaca *vpa = local_paca->lppaca_ptr; + u64 tb_delta; + u64 stolen = 0; + u64 dtb; + + if (!dtl) + return 0; + + if (i == be64_to_cpu(vpa->dtl_idx)) + return 0; + while (i < be64_to_cpu(vpa->dtl_idx)) { + dtb = be64_to_cpu(dtl->timebase); + tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) + + be32_to_cpu(dtl->ready_to_enqueue_time); + barrier(); + if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) { + /* buffer has overflowed */ + i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG; + dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); + continue; + } + if (dtb > stop_tb) + break; + if (dtl_consumer) + dtl_consumer(dtl, i); + stolen += tb_delta; + ++i; + ++dtl; + if (dtl == dtl_end) + dtl = local_paca->dispatch_log; + } + local_paca->dtl_ridx = i; + local_paca->dtl_curr = dtl; + return stolen; +} + +/* + * Accumulate stolen time by scanning the dispatch trace log. + * Called on entry from user mode. + */ +void notrace pseries_accumulate_stolen_time(void) +{ + u64 sst, ust; + struct cpu_accounting_data *acct = &local_paca->accounting; + + sst = scan_dispatch_log(acct->starttime_user); + ust = scan_dispatch_log(acct->starttime); + acct->stime -= sst; + acct->utime -= ust; + acct->steal_time += ust + sst; +} + +u64 pseries_calculate_stolen_time(u64 stop_tb) +{ + if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + return 0; + + if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) + return scan_dispatch_log(stop_tb); + + return 0; +} + /* * The cpu accounting code controls the DTL ring buffer, and we get * given entries as they are processed. 
-- cgit v1.2.3-70-g09d2 From 78c73c80fd860d5b3471d8eaa2778a105a56f6ab Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 7 Sep 2022 08:12:54 +0200 Subject: powerpc/math-emu: Inhibit W=1 warnings When building with W=1 you get: arch/powerpc/math-emu/fre.c:6:5: error: no previous prototype for 'fre' [-Werror=missing-prototypes] arch/powerpc/math-emu/fsqrt.c:11:1: error: no previous prototype for 'fsqrt' [-Werror=missing-prototypes] arch/powerpc/math-emu/fsqrts.c:12:1: error: no previous prototype for 'fsqrts' [-Werror=missing-prototypes] arch/powerpc/math-emu/frsqrtes.c:6:5: error: no previous prototype for 'frsqrtes' [-Werror=missing-prototypes] arch/powerpc/math-emu/mtfsf.c:10:1: error: no previous prototype for 'mtfsf' [-Werror=missing-prototypes] arch/powerpc/math-emu/mtfsfi.c:10:1: error: no previous prototype for 'mtfsfi' [-Werror=missing-prototypes] arch/powerpc/math-emu/fabs.c:7:1: error: no previous prototype for 'fabs' [-Werror=missing-prototypes] arch/powerpc/math-emu/fadd.c:11:1: error: no previous prototype for 'fadd' [-Werror=missing-prototypes] arch/powerpc/math-emu/fadds.c:12:1: error: no previous prototype for 'fadds' [-Werror=missing-prototypes] arch/powerpc/math-emu/fcmpo.c:11:1: error: no previous prototype for 'fcmpo' [-Werror=missing-prototypes] arch/powerpc/math-emu/fcmpu.c:11:1: error: no previous prototype for 'fcmpu' [-Werror=missing-prototypes] arch/powerpc/math-emu/fcmpu.c:14:19: error: variable 'B_c' set but not used [-Werror=unused-but-set-variable] arch/powerpc/math-emu/fcmpu.c:13:19: error: variable 'A_c' set but not used [-Werror=unused-but-set-variable] arch/powerpc/math-emu/fctiw.c:11:1: error: no previous prototype for 'fctiw' [-Werror=missing-prototypes] arch/powerpc/math-emu/fctiwz.c:11:1: error: no previous prototype for 'fctiwz' [-Werror=missing-prototypes] arch/powerpc/math-emu/fdiv.c:11:1: error: no previous prototype for 'fdiv' [-Werror=missing-prototypes] arch/powerpc/math-emu/fdivs.c:12:1: error: no previous prototype for 'fdivs' [-Werror=missing-prototypes] arch/powerpc/math-emu/fmadd.c:11:1: error: no previous prototype for 'fmadd' [-Werror=missing-prototypes] arch/powerpc/math-emu/fmadds.c:12:1: error: no previous prototype for 'fmadds' [-Werror=missing-prototypes] arch/powerpc/math-emu/fmsub.c:11:1: error: no previous prototype for 'fmsub' [-Werror=missing-prototypes] arch/powerpc/math-emu/fmsubs.c:12:1: error: no previous prototype for 'fmsubs' [-Werror=missing-prototypes] arch/powerpc/math-emu/fmul.c:11:1: error: no previous prototype for 'fmul' [-Werror=missing-prototypes] arch/powerpc/math-emu/fmuls.c:12:1: error: no previous prototype for 'fmuls' [-Werror=missing-prototypes] arch/powerpc/math-emu/fnabs.c:7:1: error: no previous prototype for 'fnabs' [-Werror=missing-prototypes] arch/powerpc/math-emu/fneg.c:7:1: error: no previous prototype for 'fneg' [-Werror=missing-prototypes] arch/powerpc/math-emu/fnmadd.c:11:1: error: no previous prototype for 'fnmadd' [-Werror=missing-prototypes] arch/powerpc/math-emu/fnmadds.c:12:1: error: no previous prototype for 'fnmadds' [-Werror=missing-prototypes] arch/powerpc/math-emu/fnmsub.c:11:1: error: no previous prototype for 'fnmsub' [-Werror=missing-prototypes] arch/powerpc/math-emu/fnmsubs.c:12:1: error: no previous prototype for 'fnmsubs' [-Werror=missing-prototypes] arch/powerpc/math-emu/fres.c:7:1: error: no previous prototype for 'fres' [-Werror=missing-prototypes] arch/powerpc/math-emu/frsp.c:12:1: error: no previous prototype for 'frsp' [-Werror=missing-prototypes] 
arch/powerpc/math-emu/fsel.c:11:1: error: no previous prototype for 'fsel' [-Werror=missing-prototypes] arch/powerpc/math-emu/lfs.c:12:1: error: no previous prototype for 'lfs' [-Werror=missing-prototypes] arch/powerpc/math-emu/frsqrte.c:7:1: error: no previous prototype for 'frsqrte' [-Werror=missing-prototypes] arch/powerpc/math-emu/fsub.c:11:1: error: no previous prototype for 'fsub' [-Werror=missing-prototypes] arch/powerpc/math-emu/fsubs.c:12:1: error: no previous prototype for 'fsubs' [-Werror=missing-prototypes] arch/powerpc/math-emu/mcrfs.c:10:1: error: no previous prototype for 'mcrfs' [-Werror=missing-prototypes] arch/powerpc/math-emu/mffs.c:10:1: error: no previous prototype for 'mffs' [-Werror=missing-prototypes] arch/powerpc/math-emu/mtfsb0.c:10:1: error: no previous prototype for 'mtfsb0' [-Werror=missing-prototypes] arch/powerpc/math-emu/mtfsb1.c:10:1: error: no previous prototype for 'mtfsb1' [-Werror=missing-prototypes] arch/powerpc/math-emu/stfiwx.c:7:1: error: no previous prototype for 'stfiwx' [-Werror=missing-prototypes] arch/powerpc/math-emu/stfs.c:12:1: error: no previous prototype for 'stfs' [-Werror=missing-prototypes] arch/powerpc/math-emu/fmr.c:7:1: error: no previous prototype for 'fmr' [-Werror=missing-prototypes] arch/powerpc/math-emu/lfd.c:10:1: error: no previous prototype for 'lfd' [-Werror=missing-prototypes] arch/powerpc/math-emu/stfd.c:7:1: error: no previous prototype for 'stfd' [-Werror=missing-prototypes] arch/powerpc/math-emu/math_efp.c:177:5: error: no previous prototype for 'do_spe_mathemu' [-Werror=missing-prototypes] arch/powerpc/math-emu/math_efp.c:726:5: error: no previous prototype for 'speround_handler' [-Werror=missing-prototypes] arch/powerpc/math-emu/math_efp.c:893:12: error: no previous prototype for 'spe_mathemu_init' [-Werror=missing-prototypes] Fix the warnings in math_efp.c by adding prototypes of do_spe_mathemu() and speround_handler() to asm/processor.h and declaring spe_mathemu_init() static. The other warnings are benign and not worth the churn of fixing them, especially the 'unused-but-set-variable' ones, as fixing them would touch the core part of 'math-emu'. So silence them by adding -Wno-missing-prototypes -Wno-unused-but-set-variable. 
But then you get: arch/powerpc/math-emu/fre.c:6:5: error: no previous declaration for 'fre' [-Werror=missing-declarations] arch/powerpc/math-emu/fsqrt.c:11:1: error: no previous declaration for 'fsqrt' [-Werror=missing-declarations] arch/powerpc/math-emu/fsqrts.c:12:1: error: no previous declaration for 'fsqrts' [-Werror=missing-declarations] arch/powerpc/math-emu/frsqrtes.c:6:5: error: no previous declaration for 'frsqrtes' [-Werror=missing-declarations] arch/powerpc/math-emu/mtfsf.c:10:1: error: no previous declaration for 'mtfsf' [-Werror=missing-declarations] arch/powerpc/math-emu/mtfsfi.c:10:1: error: no previous declaration for 'mtfsfi' [-Werror=missing-declarations] arch/powerpc/math-emu/fabs.c:7:1: error: no previous declaration for 'fabs' [-Werror=missing-declarations] arch/powerpc/math-emu/fadd.c:11:1: error: no previous declaration for 'fadd' [-Werror=missing-declarations] arch/powerpc/math-emu/fadds.c:12:1: error: no previous declaration for 'fadds' [-Werror=missing-declarations] arch/powerpc/math-emu/fcmpo.c:11:1: error: no previous declaration for 'fcmpo' [-Werror=missing-declarations] arch/powerpc/math-emu/fcmpu.c:11:1: error: no previous declaration for 'fcmpu' [-Werror=missing-declarations] arch/powerpc/math-emu/fctiw.c:11:1: error: no previous declaration for 'fctiw' [-Werror=missing-declarations] arch/powerpc/math-emu/fctiwz.c:11:1: error: no previous declaration for 'fctiwz' [-Werror=missing-declarations] arch/powerpc/math-emu/fdiv.c:11:1: error: no previous declaration for 'fdiv' [-Werror=missing-declarations] arch/powerpc/math-emu/fdivs.c:12:1: error: no previous declaration for 'fdivs' [-Werror=missing-declarations] arch/powerpc/math-emu/fmadd.c:11:1: error: no previous declaration for 'fmadd' [-Werror=missing-declarations] arch/powerpc/math-emu/fmadds.c:12:1: error: no previous declaration for 'fmadds' [-Werror=missing-declarations] arch/powerpc/math-emu/fmsub.c:11:1: error: no previous declaration for 'fmsub' [-Werror=missing-declarations] arch/powerpc/math-emu/fmsubs.c:12:1: error: no previous declaration for 'fmsubs' [-Werror=missing-declarations] arch/powerpc/math-emu/fmul.c:11:1: error: no previous declaration for 'fmul' [-Werror=missing-declarations] arch/powerpc/math-emu/fmuls.c:12:1: error: no previous declaration for 'fmuls' [-Werror=missing-declarations] arch/powerpc/math-emu/fnabs.c:7:1: error: no previous declaration for 'fnabs' [-Werror=missing-declarations] arch/powerpc/math-emu/fneg.c:7:1: error: no previous declaration for 'fneg' [-Werror=missing-declarations] arch/powerpc/math-emu/fnmadd.c:11:1: error: no previous declaration for 'fnmadd' [-Werror=missing-declarations] arch/powerpc/math-emu/fnmadds.c:12:1: error: no previous declaration for 'fnmadds' [-Werror=missing-declarations] arch/powerpc/math-emu/fnmsub.c:11:1: error: no previous declaration for 'fnmsub' [-Werror=missing-declarations] arch/powerpc/math-emu/fnmsubs.c:12:1: error: no previous declaration for 'fnmsubs' [-Werror=missing-declarations] arch/powerpc/math-emu/fres.c:7:1: error: no previous declaration for 'fres' [-Werror=missing-declarations] arch/powerpc/math-emu/frsp.c:12:1: error: no previous declaration for 'frsp' [-Werror=missing-declarations] arch/powerpc/math-emu/fsel.c:11:1: error: no previous declaration for 'fsel' [-Werror=missing-declarations] arch/powerpc/math-emu/lfs.c:12:1: error: no previous declaration for 'lfs' [-Werror=missing-declarations] arch/powerpc/math-emu/frsqrte.c:7:1: error: no previous declaration for 'frsqrte' [-Werror=missing-declarations] 
arch/powerpc/math-emu/fsub.c:11:1: error: no previous declaration for 'fsub' [-Werror=missing-declarations] arch/powerpc/math-emu/fsubs.c:12:1: error: no previous declaration for 'fsubs' [-Werror=missing-declarations] arch/powerpc/math-emu/mcrfs.c:10:1: error: no previous declaration for 'mcrfs' [-Werror=missing-declarations] arch/powerpc/math-emu/mffs.c:10:1: error: no previous declaration for 'mffs' [-Werror=missing-declarations] arch/powerpc/math-emu/mtfsb0.c:10:1: error: no previous declaration for 'mtfsb0' [-Werror=missing-declarations] arch/powerpc/math-emu/mtfsb1.c:10:1: error: no previous declaration for 'mtfsb1' [-Werror=missing-declarations] arch/powerpc/math-emu/stfiwx.c:7:1: error: no previous declaration for 'stfiwx' [-Werror=missing-declarations] arch/powerpc/math-emu/stfs.c:12:1: error: no previous declaration for 'stfs' [-Werror=missing-declarations] arch/powerpc/math-emu/fmr.c:7:1: error: no previous declaration for 'fmr' [-Werror=missing-declarations] arch/powerpc/math-emu/lfd.c:10:1: error: no previous declaration for 'lfd' [-Werror=missing-declarations] arch/powerpc/math-emu/stfd.c:7:1: error: no previous declaration for 'stfd' [-Werror=missing-declarations] So also add -Wno-missing-declarations. Reported-by: kernel test robot Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/688084b40b5ac88f2905cb207d5dad947d8d34dc.1662531153.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/processor.h | 2 ++ arch/powerpc/kernel/traps.c | 2 -- arch/powerpc/math-emu/Makefile | 7 +++++++ arch/powerpc/math-emu/math_efp.c | 2 +- 4 files changed, 10 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index fdfaae194ddd..97a77b37daa3 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -426,6 +426,8 @@ extern int fix_alignment(struct pt_regs *); #endif int do_mathemu(struct pt_regs *regs); +int do_spe_mathemu(struct pt_regs *regs); +int speround_handler(struct pt_regs *regs); /* VMX copying */ int enter_vmx_usercopy(void); diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index dadfcef5d6db..dcf4046f8565 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -2103,7 +2103,6 @@ DEFINE_INTERRUPT_HANDLER(CacheLockingException) #ifdef CONFIG_SPE DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException) { - extern int do_spe_mathemu(struct pt_regs *regs); unsigned long spefscr; int fpexc_mode; int code = FPE_FLTUNK; @@ -2153,7 +2152,6 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException) DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException) { - extern int speround_handler(struct pt_regs *regs); int err; interrupt_cond_local_irq_enable(regs); diff --git a/arch/powerpc/math-emu/Makefile b/arch/powerpc/math-emu/Makefile index 26fef2e5672e..603e59c3db10 100644 --- a/arch/powerpc/math-emu/Makefile +++ b/arch/powerpc/math-emu/Makefile @@ -16,3 +16,10 @@ obj-$(CONFIG_SPE) += math_efp.o CFLAGS_fabs.o = -fno-builtin-fabs CFLAGS_math.o = -fno-builtin-fabs + +ccflags-remove-y = -Wmissing-prototypes -Wmissing-declarations -Wunused-but-set-variable + +ifdef KBUILD_EXTRA_WARN +CFLAGS_math.o += -Wmissing-prototypes -Wmissing-declarations -Wunused-but-set-variable +CFLAGS_math_efp.o += -Wmissing-prototypes -Wmissing-declarations -Wunused-but-set-variable +endif diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c index 
f01e3475f689..34f62aafe706 100644 --- a/arch/powerpc/math-emu/math_efp.c +++ b/arch/powerpc/math-emu/math_efp.c @@ -890,7 +890,7 @@ int speround_handler(struct pt_regs *regs) return 0; } -int __init spe_mathemu_init(void) +static int __init spe_mathemu_init(void) { u32 pvr, maj, min; -- cgit v1.2.3-70-g09d2 From b11931e9adc1a439eab75c97ca4b9f15754cdcb1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 1 Sep 2022 21:03:34 +1000 Subject: powerpc/64s: add pte_needs_flush and huge_pmd_needs_flush Allow PTE changes to avoid flushing the TLB when access permissions are being relaxed, the dirty bit is being set, and the accessed bit is being changed. Relaxing access permissions and setting dirty and accessed bits do not require a flush because the MMU will re-load the PTE and notice the updates (it may also cause a spurious fault). Clearing the accessed bit does not require a flush because of the imprecise PTE accessed bit accounting that is already performed, as documented in ptep_clear_flush_young(). This reduces TLB flushing for some mprotect(2) calls. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Reviewed-by: Christophe Leroy Link: https://lore.kernel.org/r/20220901110334.1618913-1-npiggin@gmail.com --- arch/powerpc/include/asm/book3s/64/pgtable.h | 3 ++ arch/powerpc/include/asm/book3s/64/tlbflush.h | 56 +++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 486902aff040..b7fd9b69e828 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -413,6 +413,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, * event of it not getting flushed for a long time the delay * shouldn't really matter because there's no real memory * pressure for swapout to react to. ] + * + * Note: this optimisation also exists in pte_needs_flush() and + * huge_pmd_needs_flush(). */ #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH #define ptep_clear_flush_young ptep_test_and_clear_young diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h index 206f920fe5b9..67655cd60545 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h @@ -163,6 +163,62 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, */ } +static inline bool __pte_flags_need_flush(unsigned long oldval, + unsigned long newval) +{ + unsigned long delta = oldval ^ newval; + + /* + * The return value of this function doesn't matter for hash, + * ptep_modify_prot_start() does a pte_update() which does or schedules + * any necessary hash table update and flush. + */ + if (!radix_enabled()) + return true; + + /* + * We do not expect kernel mappings or non-PTEs or not-present PTEs. + */ + VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED); + VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED); + VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE)); + VM_WARN_ON_ONCE(!(newval & _PAGE_PTE)); + VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT)); + VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT)); + + /* + * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED. + * + * In theory, some changed software bits could be tolerated, in + * practice those should rarely if ever matter. 
+ */ + + if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED)) + return true; + + /* + * If any of the above was present in old but cleared in new, flush. + * With the exception of _PAGE_ACCESSED, don't worry about flushing + * if that was cleared (see the comment in ptep_clear_flush_young()). + */ + if ((delta & ~_PAGE_ACCESSED) & oldval) + return true; + + return false; +} + +static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte) +{ + return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte)); +} +#define pte_needs_flush pte_needs_flush + +static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd) +{ + return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd)); +} +#define huge_pmd_needs_flush huge_pmd_needs_flush + extern bool tlbie_capable; extern bool tlbie_enabled; -- cgit v1.2.3-70-g09d2 From f88aabad33ea22be2ce1c60d8901942e4e2a9edb Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Wed, 7 Sep 2022 17:01:11 -0500 Subject: Revert "powerpc/rtas: Implement reentrant rtas call" At the time this was submitted by Leonardo, I confirmed -- or thought I had confirmed -- with PowerVM partition firmware development that the following RTAS functions: - ibm,get-xive - ibm,int-off - ibm,int-on - ibm,set-xive were safe to call on multiple CPUs simultaneously, not only with respect to themselves as indicated by PAPR, but with arbitrary other RTAS calls: https://lore.kernel.org/linuxppc-dev/875zcy2v8o.fsf@linux.ibm.com/ Recent discussion with firmware development makes it clear that this is not true, and that the code in commit b664db8e3f97 ("powerpc/rtas: Implement reentrant rtas call") is unsafe, likely explaining several strange bugs we've seen in internal testing involving DLPAR and LPM. These scenarios use ibm,configure-connector, whose internal state can be corrupted by the concurrent use of the "reentrant" functions, leading to symptoms like endless busy statuses from RTAS. 
Fixes: b664db8e3f97 ("powerpc/rtas: Implement reentrant rtas call") Cc: stable@vger.kernel.org # v5.8+ Signed-off-by: Nathan Lynch Reviewed-by: Laurent Dufour Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220907220111.223267-1-nathanl@linux.ibm.com --- arch/powerpc/include/asm/paca.h | 1 - arch/powerpc/include/asm/rtas.h | 1 - arch/powerpc/kernel/paca.c | 32 ---------------------- arch/powerpc/kernel/rtas.c | 54 ------------------------------------- arch/powerpc/sysdev/xics/ics-rtas.c | 22 +++++++-------- 5 files changed, 11 insertions(+), 99 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 4d7aaab82702..3537b0500f4d 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -263,7 +263,6 @@ struct paca_struct { u64 l1d_flush_size; #endif #ifdef CONFIG_PPC_PSERIES - struct rtas_args *rtas_args_reentrant; u8 *mce_data_buf; /* buffer to hold per cpu rtas errlog */ #endif /* CONFIG_PPC_PSERIES */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 00531af17ce0..56319aea646e 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -240,7 +240,6 @@ extern struct rtas_t rtas; extern int rtas_token(const char *service); extern int rtas_service_present(const char *service); extern int rtas_call(int token, int, int, int *, ...); -int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...); void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...); extern void __noreturn rtas_restart(char *cmd); diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index ba593fd60124..dfd097b79160 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -16,7 +16,6 @@ #include #include #include -#include #include "setup.h" @@ -170,30 +169,6 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit) } #endif /* CONFIG_PPC_64S_HASH_MMU */ -#ifdef CONFIG_PPC_PSERIES -/** - * new_rtas_args() - Allocates rtas args - * @cpu: CPU number - * @limit: Memory limit for this allocation - * - * Allocates a struct rtas_args and return it's pointer, - * if not in Hypervisor mode - * - * Return: Pointer to allocated rtas_args - * NULL if CPU in Hypervisor Mode - */ -static struct rtas_args * __init new_rtas_args(int cpu, unsigned long limit) -{ - limit = min_t(unsigned long, limit, RTAS_INSTANTIATE_MAX); - - if (early_cpu_has_feature(CPU_FTR_HVMODE)) - return NULL; - - return alloc_paca_data(sizeof(struct rtas_args), L1_CACHE_BYTES, - limit, cpu); -} -#endif /* CONFIG_PPC_PSERIES */ - /* The Paca is an array with one entry per processor. Each contains an * lppaca, which contains the information shared between the * hypervisor and Linux. 
@@ -232,10 +207,6 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) /* For now -- if we have threads this will be adjusted later */ new_paca->tcd_ptr = &new_paca->tcd; #endif - -#ifdef CONFIG_PPC_PSERIES - new_paca->rtas_args_reentrant = NULL; -#endif } /* Put the paca pointer into r13 and SPRG_PACA */ @@ -307,9 +278,6 @@ void __init allocate_paca(int cpu) #endif #ifdef CONFIG_PPC_64S_HASH_MMU paca->slb_shadow_ptr = new_slb_shadow(cpu, limit); -#endif -#ifdef CONFIG_PPC_PSERIES - paca->rtas_args_reentrant = new_rtas_args(cpu, limit); #endif paca_struct_size += sizeof(struct paca_struct); } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 693133972294..0b8a858aa847 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -43,7 +43,6 @@ #include #include #include -#include /* This is here deliberately so it's only used in this file */ void enter_rtas(unsigned long); @@ -932,59 +931,6 @@ void rtas_activate_firmware(void) pr_err("ibm,activate-firmware failed (%i)\n", fwrc); } -#ifdef CONFIG_PPC_PSERIES -/** - * rtas_call_reentrant() - Used for reentrant rtas calls - * @token: Token for desired reentrant RTAS call - * @nargs: Number of Input Parameters - * @nret: Number of Output Parameters - * @outputs: Array of outputs - * @...: Inputs for desired RTAS call - * - * According to LoPAR documentation, only "ibm,int-on", "ibm,int-off", - * "ibm,get-xive" and "ibm,set-xive" are currently reentrant. - * Reentrant calls need their own rtas_args buffer, so not using rtas.args, but - * PACA one instead. - * - * Return: -1 on error, - * First output value of RTAS call if (nret > 0), - * 0 otherwise, - */ -int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...) -{ - va_list list; - struct rtas_args *args; - unsigned long flags; - int i, ret = 0; - - if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE) - return -1; - - local_irq_save(flags); - preempt_disable(); - - /* We use the per-cpu (PACA) rtas args buffer */ - args = local_paca->rtas_args_reentrant; - - va_start(list, outputs); - va_rtas_call_unlocked(args, token, nargs, nret, list); - va_end(list); - - if (nret > 1 && outputs) - for (i = 0; i < nret - 1; ++i) - outputs[i] = be32_to_cpu(args->rets[i + 1]); - - if (nret > 0) - ret = be32_to_cpu(args->rets[0]); - - local_irq_restore(flags); - preempt_enable(); - - return ret; -} - -#endif /* CONFIG_PPC_PSERIES */ - /** * get_pseries_errorlog() - Find a specific pseries error log in an RTAS * extended event log. 
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c index 9e7007f9aca5..f8320f8e5bc7 100644 --- a/arch/powerpc/sysdev/xics/ics-rtas.c +++ b/arch/powerpc/sysdev/xics/ics-rtas.c @@ -36,8 +36,8 @@ static void ics_rtas_unmask_irq(struct irq_data *d) server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0); - call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq, - server, DEFAULT_PRIORITY); + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, + DEFAULT_PRIORITY); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive irq %u server %x returned %d\n", @@ -46,7 +46,7 @@ static void ics_rtas_unmask_irq(struct irq_data *d) } /* Now unmask the interrupt (often a no-op) */ - call_status = rtas_call_reentrant(ibm_int_on, 1, 1, NULL, hw_irq); + call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", __func__, hw_irq, call_status); @@ -68,7 +68,7 @@ static void ics_rtas_mask_real_irq(unsigned int hw_irq) if (hw_irq == XICS_IPI) return; - call_status = rtas_call_reentrant(ibm_int_off, 1, 1, NULL, hw_irq); + call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", __func__, hw_irq, call_status); @@ -76,8 +76,8 @@ static void ics_rtas_mask_real_irq(unsigned int hw_irq) } /* Have to set XIVE to 0xff to be able to remove a slot */ - call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq, - xics_default_server, 0xff); + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, + xics_default_server, 0xff); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", __func__, hw_irq, call_status); @@ -108,7 +108,7 @@ static int ics_rtas_set_affinity(struct irq_data *d, if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) return -1; - status = rtas_call_reentrant(ibm_get_xive, 1, 3, xics_status, hw_irq); + status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq); if (status) { printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", @@ -126,8 +126,8 @@ static int ics_rtas_set_affinity(struct irq_data *d, pr_debug("%s: irq %d [hw 0x%x] server: 0x%x\n", __func__, d->irq, hw_irq, irq_server); - status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, - hw_irq, irq_server, xics_status[1]); + status = rtas_call(ibm_set_xive, 3, 1, NULL, + hw_irq, irq_server, xics_status[1]); if (status) { printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", @@ -158,7 +158,7 @@ static int ics_rtas_check(struct ics *ics, unsigned int hw_irq) return -EINVAL; /* Check if RTAS knows about this interrupt */ - rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, hw_irq); + rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq); if (rc) return -ENXIO; @@ -174,7 +174,7 @@ static long ics_rtas_get_server(struct ics *ics, unsigned long vec) { int rc, status[2]; - rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, vec); + rc = rtas_call(ibm_get_xive, 1, 3, status, vec); if (rc) return -1; return status[0]; -- cgit v1.2.3-70-g09d2 From b5a472ad81ba23327ef11ca5b4ba9fd8ed38e45e Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:24 +0800 Subject: powerpc: remove unused udbg_init_debug_beat() declaration udbg_init_debug_beat() has been removed since commit bf4981a00636 ("powerpc: Remove the celleb support"), so remove it. 
Signed-off-by: Gaosheng Cui Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-5-cuigaosheng1@huawei.com --- arch/powerpc/include/asm/udbg.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index b4aa0d88ce2c..524c2085070f 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h @@ -42,7 +42,6 @@ extern void __init udbg_init_maple_realmode(void); extern void __init udbg_init_pas_realmode(void); extern void __init udbg_init_rtas_panel(void); extern void __init udbg_init_rtas_console(void); -extern void __init udbg_init_debug_beat(void); extern void __init udbg_init_btext(void); extern void __init udbg_init_44x_as1(void); extern void __init udbg_init_40x_realmode(void); -- cgit v1.2.3-70-g09d2 From d24b8f01fe7b848f88ff0a3204a674a092f365d0 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:25 +0800 Subject: powerpc/mm: remove orphan declarations from mmu_context.h Remove the following orphan declarations from mmu_context.h: 1. switch_cop() and drop_cop() have been removed since commit 6ff4d3e96652 ("powerpc: Remove old unused icswx based coprocessor support"). 2. mm_iommu_cleanup() has been removed since commit 4b6fad7097f8 ("powerpc/mm/iommu, vfio/spapr: Put pages on VFIO container shutdown"). So remove the declarations for them from header file. Signed-off-by: Gaosheng Cui Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-6-cuigaosheng1@huawei.com --- arch/powerpc/include/asm/mmu_context.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 3f25bd3e14eb..c1ea270bb848 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -31,7 +31,6 @@ extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua, extern long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem); extern void mm_iommu_init(struct mm_struct *mm); -extern void mm_iommu_cleanup(struct mm_struct *mm); extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, unsigned long ua, unsigned long size); extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm, @@ -117,7 +116,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea) } #endif -extern void switch_cop(struct mm_struct *next); extern int use_cop(unsigned long acop, struct mm_struct *mm); extern void drop_cop(unsigned long acop, struct mm_struct *mm); -- cgit v1.2.3-70-g09d2 From 77d30535816e90ff6a4466210c403a6b8b42a0a5 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:26 +0800 Subject: powerpc/powernv: remove orphan declarations from opal.h Remove the following orphan declarations from opal.h: 1. opal_notifier_register() 2. opal_notifier_unregister() 3. opal_notifier_update_evt() 4. opal_notifier_enable() 5. opal_notifier_disable() They have been removed since commit 81f2f7ce4c5b ("opal: Remove events notifier"), so remove them. 
Signed-off-by: Gaosheng Cui Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-7-cuigaosheng1@huawei.com --- arch/powerpc/include/asm/opal.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index bfd3142cd0ba..726125a534de 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -324,16 +324,10 @@ extern int opal_flush_console(uint32_t vtermno); extern void hvc_opal_init_early(void); -extern int opal_notifier_register(struct notifier_block *nb); -extern int opal_notifier_unregister(struct notifier_block *nb); - extern int opal_message_notifier_register(enum opal_msg_type msg_type, struct notifier_block *nb); extern int opal_message_notifier_unregister(enum opal_msg_type msg_type, struct notifier_block *nb); -extern void opal_notifier_enable(void); -extern void opal_notifier_disable(void); -extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val); extern int opal_async_get_token_interruptible(void); extern int opal_async_release_token(int token); -- cgit v1.2.3-70-g09d2 From 3abed8acfe95046b27117f48d52dccd2ea82a322 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:27 +0800 Subject: powerpc/sysdev: remove unused xics_ipi_dispatch() declaration xics_ipi_dispatch() has been removed since commit 23d72bfd8f9f ("powerpc: Consolidate ipi message mux and demux"), so remove it. Signed-off-by: Gaosheng Cui Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-8-cuigaosheng1@huawei.com --- arch/powerpc/include/asm/xics.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h index e2e704eca5f6..89090485bec1 100644 --- a/arch/powerpc/include/asm/xics.h +++ b/arch/powerpc/include/asm/xics.h @@ -159,7 +159,6 @@ extern void xics_setup_cpu(void); extern void xics_update_irq_servers(void); extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join); extern void xics_mask_unknown_vec(unsigned int vec); -extern irqreturn_t xics_ipi_dispatch(int cpu); extern void xics_smp_probe(void); extern void xics_register_ics(struct ics *ics); extern void xics_teardown_cpu(void); -- cgit v1.2.3-70-g09d2 From b47f0024f990803f36e6a06f82e9d0dbe8424c26 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:28 +0800 Subject: powerpc/ps3: remove orphan declarations from ps3av.h Remove the following orphan declarations from ps3av.h: 1. ps3av_dev_open() 2. ps3av_dev_close() They have been removed since commit 13a5e30cf740 ("[POWERPC] PS3: Rework AV settings driver"), so remove them. 
Signed-off-by: Gaosheng Cui Acked-by: Geoff Levand Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-9-cuigaosheng1@huawei.com --- arch/powerpc/include/asm/ps3av.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h index 82db78fc169d..c8b0f2ffcd35 100644 --- a/arch/powerpc/include/asm/ps3av.h +++ b/arch/powerpc/include/asm/ps3av.h @@ -726,6 +726,4 @@ extern int ps3av_video_mode2res(u32, u32 *, u32 *); extern int ps3av_video_mute(int); extern int ps3av_audio_mute(int); extern int ps3av_audio_mute_analog(int); -extern int ps3av_dev_open(void); -extern int ps3av_dev_close(void); #endif /* _ASM_POWERPC_PS3AV_H_ */ -- cgit v1.2.3-70-g09d2 From 3d7a198cfdb47405cfb4a3ea523876569fe341e6 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:29 +0800 Subject: KVM: PPC: remove orphan declarations from kvm_ppc.h Remove the following orphan declarations from kvm_ppc.h: 1. kvmppc_mmu_priv_switch() has been removed since commit dd9ebf1f9435 ("KVM: PPC: e500: Add shadow PID support"). 2. kvmppc_core_destroy_mmu() has been removed since commit ecc0981ff07c ("KVM: ppc: cosmetic changes to mmu hook names"). 3. kvmppc_prepare_vrma() has been removed since commit aa04b4cc5be6 ("KVM: PPC: Allocate RMAs (Real Mode Areas) at boot for use by guests"). So remove the declarations for them from header file. Signed-off-by: Gaosheng Cui Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-10-cuigaosheng1@huawei.com --- arch/powerpc/include/asm/kvm_ppc.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 9f625af3b65b..bfacf12784dd 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -104,7 +104,6 @@ extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, unsigned int gtlb_idx); -extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); @@ -153,7 +152,6 @@ extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu); extern int kvmppc_booke_init(void); extern void kvmppc_booke_exit(void); -extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu); extern void kvmppc_map_magic(struct kvm_vcpu *vcpu); @@ -162,8 +160,6 @@ extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info); extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order); extern void kvmppc_free_hpt(struct kvm_hpt_info *info); extern void kvmppc_rmap_reset(struct kvm *kvm); -extern long kvmppc_prepare_vrma(struct kvm *kvm, - struct kvm_userspace_memory_region *mem); extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, unsigned long porder); extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu); -- cgit v1.2.3-70-g09d2 From 0c32903197ce9f7119aee75a6bcaa4b49e0cd21a Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Sat, 17 Sep 2022 16:36:47 +1000 Subject: powerpc/64: Remove unused prom_init_toc symbols Commit 24d33ac5b8ff ("powerpc/64s: Make prom_init require RELOCATABLE") made prom_init depend on CONFIG_RELOCATABLE. 
But it missed cleaning up a case in the linker script for RELOCATABLE=n, and associated symbols. Remove them now. Fixes: 24d33ac5b8ff ("powerpc/64s: Make prom_init require RELOCATABLE") Reported-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220920131157.1032707-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/sections.h | 3 --- arch/powerpc/kernel/prom_init_check.sh | 3 +-- arch/powerpc/kernel/vmlinux.lds.S | 5 ----- 3 files changed, 1 insertion(+), 10 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 183e6b8af392..babda2677b30 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -32,9 +32,6 @@ extern long kvm_flush_link_stack; extern char __start_interrupts[]; extern char __end_interrupts[]; -extern char __prom_init_toc_start[]; -extern char __prom_init_toc_end[]; - #ifdef CONFIG_PPC_POWERNV extern char start_real_trampolines[]; extern char end_real_trampolines[]; diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index dfa5f729f774..311890d71c4c 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -26,8 +26,7 @@ _end enter_prom $MEM_FUNCS reloc_offset __secondary_hold __secondary_hold_acknowledge __secondary_hold_spinloop __start logo_linux_clut224 btext_prepare_BAT reloc_got2 kernstart_addr memstart_addr linux_banner _stext -__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC. -relocate" +btext_setup_display TOC. relocate" NM="$1" OBJ="$2" diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index fe22d940412f..0f2a10b029e8 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -337,11 +337,6 @@ SECTIONS .got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) { *(.got) -#ifndef CONFIG_RELOCATABLE - __prom_init_toc_start = .; - arch/powerpc/kernel/prom_init.o*(.toc) - __prom_init_toc_end = .; -#endif *(.toc) } #endif -- cgit v1.2.3-70-g09d2 From b150a4d12b919baf956b807aa305cf78df03d0fe Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 16 Sep 2022 14:41:24 +1000 Subject: powerpc/vmlinux.lds: Add an explicit symbol for the SRWX boundary Currently __init_begin is used as the boundary for strict RWX between executable/read-only text and data, and non-executable (after boot) code and data. But that's a little subtle, so add an explicit symbol to document that the SRWX boundary lies there, and add a comment making it clear that __init_begin must also begin there. 
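As a sketch of how such a boundary symbol is consumed on the C side (the helper below is illustrative and hypothetical; _stext is the usual start-of-text symbol, __srwx_boundary is the symbol added by this patch):

    /* Illustrative: true if addr lies in the region that stays RX after boot. */
    static inline bool addr_in_srwx_text(unsigned long addr)
    {
        return addr >= (unsigned long)_stext &&
               addr <  (unsigned long)__srwx_boundary;
    }

The two call sites changed below, mmu_mapin_ram() and next_boundary(), do essentially this comparison against the offset/physical form of the same symbol.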
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220916131422.318752-2-mpe@ellerman.id.au --- arch/powerpc/include/asm/sections.h | 1 + arch/powerpc/kernel/vmlinux.lds.S | 9 +++++++-- arch/powerpc/mm/book3s32/mmu.c | 2 +- arch/powerpc/mm/book3s64/radix_pgtable.c | 4 ++-- 4 files changed, 11 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index babda2677b30..9c00c9c0ca8f 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -13,6 +13,7 @@ typedef struct func_desc func_desc_t; #include extern char __head_end[]; +extern char __srwx_boundary[]; /* Patch sites */ extern s32 patch__call_flush_branch_caches1; diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index dacf8b4302d9..29d891329856 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -214,11 +214,16 @@ SECTIONS } #endif + /* + * Various code relies on __init_begin being at the strict RWX boundary. + */ + . = ALIGN(STRICT_ALIGN_SIZE); + __srwx_boundary = .; + __init_begin = .; + /* * Init sections discarded at runtime */ - . = ALIGN(STRICT_ALIGN_SIZE); - __init_begin = .; .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; INIT_TEXT diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index a96b73006dfb..250d174459d4 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -158,7 +158,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { unsigned long done; - unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; + unsigned long border = (unsigned long)__srwx_boundary - PAGE_OFFSET; unsigned long size; size = roundup_pow_of_two((unsigned long)_einittext - PAGE_OFFSET); diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 698274109c91..9f880f91f584 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -259,8 +259,8 @@ print_mapping(unsigned long start, unsigned long end, unsigned long size, bool e static unsigned long next_boundary(unsigned long addr, unsigned long end) { #ifdef CONFIG_STRICT_KERNEL_RWX - if (addr < __pa_symbol(__init_begin)) - return __pa_symbol(__init_begin); + if (addr < __pa_symbol(__srwx_boundary)) + return __pa_symbol(__srwx_boundary); #endif return end; } -- cgit v1.2.3-70-g09d2 From 51da853e3708852f47cd95e6f5e1821c3d54c3ef Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Sat, 3 Sep 2022 22:36:39 +1000 Subject: powerpc/mm/64s: Drop pgd_huge() On powerpc there are two ways for huge pages to be represented in the top level page table, aka PGD (Page Global Directory). If the address space mapped by an individual PGD entry does not correspond to a given huge page size, then the PGD entry points to a non-standard page table, known as a "hugepd" (Huge Page Directory). The hugepd contains some number of huge page PTEs sufficient to map the address space with the given huge page size. On the other hand, if the address space mapped by an individual PGD entry does correspond exactly to a given huge page size, that PGD entry is used to directly encode the huge page PTE in place. In this case the pgd_huge() wrapper indicates to generic code that the PGD entry is actually a huge page PTE. 
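In code terms, the distinction looks roughly like this (the leaf test mirrors the pgd_huge() bodies removed below; the hugepd side is only sketched):

    /* Direct case: the PGD entry itself carries _PAGE_PTE, i.e. it is a
     * huge-page PTE encoded in place; this is what pgd_huge() tested. */
    static inline bool pgd_entry_is_huge_leaf(pgd_t pgd)
    {
        return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
    }

    /* hugepd case (not shown): the entry instead points at a hugepd, a
     * short non-standard table of huge-page PTEs, detected via is_hugepd(). */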
This commit deals with the pgd_huge() case only, it does nothing with respect to the hugepd case. Over time the size of the virtual address space supported on powerpc has increased several times, which means the location at which huge pages can sit in the tree has also changed. There have also been new huge page sizes added, with the introduction of the Radix MMU. On Power9 and later with the Radix MMU, the largest huge page size in any implementation is 1GB. Since the introduction of Radix, 1GB entries have been supported at the PUD level, with both 4K and 64K base page size. Radix has never had a supported huge page size at the PGD level. On Power8 or earlier, which uses the Hash MMU, or Power9 or later with the Hash MMU enabled, the largest huge page size is 16GB. Using the Hash MMU and a base page size of 4K, 16GB has never been a supported huge page size at the PGD level, due to the geometry being incompatible. The two supported huge page sizes (16M & 16GB) both use the hugepd format. Using the Hash MMU and a base page size of 64K, 16GB pages were supported in the past at the PGD level. However in commit ba95b5d03596 ("powerpc/mm/book3s/64: Rework page table geometry for lower memory usage") the page table layout was reworked to shrink the size of the PGD. As a result the 16GB page size now fits at the PUD level when using 64K base page size. Therefore there are no longer any supported configurations where pgd_huge() can be true, so drop the definitions for pgd_huge(), and fallback to the generic definition which is always false. Fixes: ba95b5d03596 ("powerpc/mm/book3s/64: Rework page table geometry for lower memory usage") Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220903123640.719846-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/book3s/64/pgtable-4k.h | 10 ---------- arch/powerpc/include/asm/book3s/64/pgtable-64k.h | 9 --------- 2 files changed, 19 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h index 4e697bc2f4cd..48f21820afe2 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h @@ -26,16 +26,6 @@ static inline int pud_huge(pud_t pud) return 0; } -static inline int pgd_huge(pgd_t pgd) -{ - /* - * leaf pte for huge page - */ - if (radix_enabled()) - return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE)); - return 0; -} -#define pgd_huge pgd_huge /* * With radix , we have hugepage ptes in the pud and pmd entries. We don't * need to setup hugepage directory for them. Our pte and page directory format diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h index 34d1018896b3..2fce3498b000 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h @@ -30,15 +30,6 @@ static inline int pud_huge(pud_t pud) return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); } -static inline int pgd_huge(pgd_t pgd) -{ - /* - * leaf pte for huge page - */ - return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE)); -} -#define pgd_huge pgd_huge - /* * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't * need to setup hugepage directory for them. 
Our pte and page directory format -- cgit v1.2.3-70-g09d2 From 79c5640ab4460a03535ce0f120193174e7701b65 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Sat, 3 Sep 2022 22:36:40 +1000 Subject: powerpc/mm/64s: Drop p4d_leaf() Because 64-bit Book3S uses pgtable-nop4d.h, the P4D is folded into the PGD. So P4D entries are actually PGD entries, or vice versa. The other way to think of it is that the P4D is a single entry page table below the PGD. Zero bits of the address are needed to index into the P4D, therefore a P4D entry maps the same size address space as a PGD entry. As explained in the previous commit, there are no huge page sizes supported directly at the PGD level on 64-bit Book3S, so there are also no huge page sizes supported at the P4D level. Therefore p4d_is_leaf() can never be true, so drop the definition and fallback to the default implementation that always returns false. Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220903123640.719846-2-mpe@ellerman.id.au --- arch/powerpc/include/asm/book3s/64/pgtable.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index b7fd9b69e828..28fd016cf7ff 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1462,12 +1462,5 @@ static inline bool pud_is_leaf(pud_t pud) return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); } -#define p4d_is_leaf p4d_is_leaf -#define p4d_leaf p4d_is_leaf -static inline bool p4d_is_leaf(p4d_t p4d) -{ - return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PTE)); -} - #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */ -- cgit v1.2.3-70-g09d2 From a26494cf4aeb8e9888428a43f55cc486f06f1334 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 7 Sep 2022 11:34:44 +0200 Subject: powerpc/nohash: Remove pgd_huge() stub linux/hugetlb.h has a fallback pgd_huge() macro for when pgd_huge is not defined. Remove the powerpc redundant definitions. 
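The fallback relied on is the usual #ifndef pattern; its shape (per the commit message, paraphrased rather than quoted):

    /* Generic side, in linux/hugetlb.h: only used when the architecture
     * did not provide its own pgd_huge definition. */
    #ifndef pgd_huge
    #define pgd_huge(x)	0
    #endif

With the powerpc stubs gone, every configuration that previously returned 0 now simply falls through to this.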
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/ae6aa7fce84f7abcbf67f534271a4a6dd7949b0d.1662543243.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/pgtable.h | 6 ------ arch/powerpc/include/asm/page.h | 1 - 2 files changed, 7 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index b499da6c1a99..08429c612cdf 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -277,12 +277,6 @@ static inline int pud_huge(pud_t pud) return 0; } -static inline int pgd_huge(pgd_t pgd) -{ - return 0; -} -#define pgd_huge pgd_huge - #define is_hugepd(hpd) (hugepd_ok(hpd)) #endif diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index e5f75c70eda8..c67eb9531a3f 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -311,7 +311,6 @@ static inline bool pfn_valid(unsigned long pfn) #ifndef CONFIG_HUGETLB_PAGE #define is_hugepd(pdep) (0) -#define pgd_huge(pgd) (0) #endif /* CONFIG_HUGETLB_PAGE */ struct page; -- cgit v1.2.3-70-g09d2 From 691cdf016d3be6f66a3ea384809be229e0f9c590 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 7 Sep 2022 11:34:45 +0200 Subject: powerpc: Rely on generic definition of hugepd_t and is_hugepd when unused CONFIG_ARCH_HAS_HUGEPD is used to tell core mm when huge page directories are used. When they are not used, no need to provide hugepd_t or is_hugepd(), just rely on the core mm fallback definition. For that, change core mm behaviour so that CONFIG_ARCH_HAS_HUGEPD is used instead of indirect is_hugepd macro existence. powerpc being the only user of huge page directories, there is no impact on other architectures. 
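In Kconfig terms, the gate moves from "does the arch define the is_hugepd macro" to "did the arch select the symbol"; an architecture opts in roughly like this (illustrative only, the config name is a placeholder and powerpc's actual select lives in its own Kconfig files):

    config MY_ARCH
        bool
        select ARCH_HAS_HUGEPD if HUGETLB_PAGE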
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/da81462d93069bb90fe5e762dd3283a644318937.1662543243.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/page.h | 5 ----- arch/powerpc/include/asm/pgtable-be-types.h | 2 ++ arch/powerpc/include/asm/pgtable-types.h | 2 ++ include/linux/hugetlb.h | 2 +- 4 files changed, 5 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index c67eb9531a3f..7f20636d13ed 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -308,11 +308,6 @@ static inline bool pfn_valid(unsigned long pfn) #include #endif - -#ifndef CONFIG_HUGETLB_PAGE -#define is_hugepd(pdep) (0) -#endif /* CONFIG_HUGETLB_PAGE */ - struct page; extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); extern void copy_user_page(void *to, void *from, unsigned long vaddr, diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h index b169bbf95fcb..82633200b500 100644 --- a/arch/powerpc/include/asm/pgtable-be-types.h +++ b/arch/powerpc/include/asm/pgtable-be-types.h @@ -101,6 +101,7 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new) return pmd_raw(old) == prev; } +#ifdef CONFIG_ARCH_HAS_HUGEPD typedef struct { __be64 pdbe; } hugepd_t; #define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) }) @@ -108,5 +109,6 @@ static inline unsigned long hpd_val(hugepd_t x) { return be64_to_cpu(x.pdbe); } +#endif #endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */ diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h index efed0db7b1db..082c85cc09b1 100644 --- a/arch/powerpc/include/asm/pgtable-types.h +++ b/arch/powerpc/include/asm/pgtable-types.h @@ -83,11 +83,13 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new) } #endif +#ifdef CONFIG_ARCH_HAS_HUGEPD typedef struct { unsigned long pd; } hugepd_t; #define __hugepd(x) ((hugepd_t) { (x) }) static inline unsigned long hpd_val(hugepd_t x) { return x.pd; } +#endif #endif /* _ASM_POWERPC_PGTABLE_TYPES_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 3ec981a0d8b3..1ec1535be04f 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -17,7 +17,7 @@ struct ctl_table; struct user_struct; struct mmu_gather; -#ifndef is_hugepd +#ifndef CONFIG_ARCH_HAS_HUGEPD typedef struct { unsigned long pd; } hugepd_t; #define is_hugepd(hugepd) (0) #define __hugepd(x) ((hugepd_t) { (x) }) -- cgit v1.2.3-70-g09d2 From 73ea68ad0d2f655815b6f1fbe1c5521d72f01b64 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 5 Sep 2022 11:38:25 +0200 Subject: powerpc/book3s: Inline first level of update_mmu_cache() update_mmu_cache() voids when hash page tables are not used. On PPC32 that means when MMU_FTR_HPTE_TABLE is not defined. On PPC64 that means when RADIX is enabled. Rename core part of update_mmu_cache() as __update_mmu_cache() and include the initial verification in an inlined caller. 
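This is the common "cheap check inline, slow path out of line" split; a generic sketch of why it pays off (all names hypothetical):

    struct ctx;
    void __do_update(struct ctx *c);    /* out-of-line core, one .c file */

    /* Header: the check is inlined at every call site, so when the
     * predicate is constant for the build (or feature-patched), the
     * call to the out-of-line core disappears entirely. */
    static inline void do_update(struct ctx *c)
    {
        if (!feature_enabled())
            return;
        __do_update(c);
    }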
Signed-off-by: Christophe Leroy Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/bea5ad0de7f83eff256116816d46c84fa0a444de.1662370698.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/book3s/pgtable.h | 15 ++++++++++----- arch/powerpc/mm/book3s32/mmu.c | 4 +--- arch/powerpc/mm/book3s64/hash_utils.c | 5 +---- 3 files changed, 12 insertions(+), 12 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h index e8269434ecbe..d18b748ea3ae 100644 --- a/arch/powerpc/include/asm/book3s/pgtable.h +++ b/arch/powerpc/include/asm/book3s/pgtable.h @@ -25,7 +25,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot); #define __HAVE_PHYS_MEM_ACCESS_PROT -#if defined(CONFIG_PPC32) || defined(CONFIG_PPC_64S_HASH_MMU) +void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); + /* * This gets called at the end of handling a page fault, when * the kernel has put a new PTE into the page table for the process. @@ -35,10 +36,14 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, * corresponding HPTE into the hash table ahead of time, instead of * waiting for the inevitable extra hash-table miss exception. */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); -#else -static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {} -#endif +static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +{ + if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE)) + return; + if (radix_enabled()) + return; + __update_mmu_cache(vma, address, ptep); +} #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index e5ee05c234c7..850783cfa9c7 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -314,11 +314,9 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea) * * This must always be called with the pte lock held. */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, +void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { - if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) - return; /* * We don't need to worry about _PAGE_PRESENT here because we are * called with either mm->page_table_lock held or ptl lock held diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 363a9447d63d..ced1107b1677 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1781,7 +1781,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, * * This must always be called with the pte lock held. 
*/ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, +void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { /* @@ -1791,9 +1791,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, unsigned long trap; bool is_exec; - if (radix_enabled()) - return; - /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ if (!pte_young(*ptep) || address >= TASK_SIZE) return; -- cgit v1.2.3-70-g09d2 From b997b2f57cae396448bb62c428efa4b112dd90ed Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 7 Sep 2022 12:05:01 +0200 Subject: powerpc/mm: Reduce redundancy in pgtable.h PAGE_KERNEL_TEXT, PAGE_KERNEL_EXEC and PAGE_AGP are the same for all powerpcs. Remove duplicated definitions. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/92254499430d13d99e4a4d7e9ad8e8284cb35380.1662544974.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/book3s/32/pgtable.h | 19 ------------------- arch/powerpc/include/asm/book3s/64/pgtable.h | 19 ------------------- arch/powerpc/include/asm/nohash/pgtable.h | 19 ------------------- arch/powerpc/include/asm/pgtable.h | 19 +++++++++++++++++++ 4 files changed, 19 insertions(+), 57 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 40041ac713d9..8da6d4544822 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -118,25 +118,6 @@ static inline bool pte_user(pte_t pte) #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) -/* - * Protection used for kernel text. We want the debuggers to be able to - * set breakpoints anywhere, so don't write protect the kernel text - * on platforms where such control is possible. - */ -#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ - defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) -#define PAGE_KERNEL_TEXT PAGE_KERNEL_X -#else -#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX -#endif - -/* Make modules code happy. We don't set RO yet */ -#define PAGE_KERNEL_EXEC PAGE_KERNEL_X - -/* Advertise special mapping type for AGP */ -#define PAGE_AGP (PAGE_KERNEL_NC) -#define HAVE_PAGE_AGP - #define PTE_INDEX_SIZE PTE_SHIFT #define PMD_INDEX_SIZE 0 #define PUD_INDEX_SIZE 0 diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 28fd016cf7ff..3007a251a186 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -164,22 +164,6 @@ #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) -/* - * Protection used for kernel text. We want the debuggers to be able to - * set breakpoints anywhere, so don't write protect the kernel text - * on platforms where such control is possible. - */ -#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \ - defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) -#define PAGE_KERNEL_TEXT PAGE_KERNEL_X -#else -#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX -#endif - -/* Make modules code happy. 
We don't set RO yet */ -#define PAGE_KERNEL_EXEC PAGE_KERNEL_X -#define PAGE_AGP (PAGE_KERNEL_NC) - #ifndef __ASSEMBLY__ /* * page table defines @@ -335,9 +319,6 @@ extern unsigned long pci_io_base; #define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE) #define FIXADDR_SIZE SZ_32M -/* Advertise special mapping type for AGP */ -#define HAVE_PAGE_AGP - #ifndef __ASSEMBLY__ /* diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 08429c612cdf..18b29cfee0d6 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -17,25 +17,6 @@ #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) -/* - * Protection used for kernel text. We want the debuggers to be able to - * set breakpoints anywhere, so don't write protect the kernel text - * on platforms where such control is possible. - */ -#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ - defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) -#define PAGE_KERNEL_TEXT PAGE_KERNEL_X -#else -#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX -#endif - -/* Make modules code happy. We don't set RO yet */ -#define PAGE_KERNEL_EXEC PAGE_KERNEL_X - -/* Advertise special mapping type for AGP */ -#define PAGE_AGP (PAGE_KERNEL_NC) -#define HAVE_PAGE_AGP - #ifndef __ASSEMBLY__ /* Generic accessors to PTE bits */ diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 33f4bf8d22b0..283f40d05a4d 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -20,6 +20,25 @@ struct mm_struct; #include #endif /* !CONFIG_PPC_BOOK3S */ +/* + * Protection used for kernel text. We want the debuggers to be able to + * set breakpoints anywhere, so don't write protect the kernel text + * on platforms where such control is possible. + */ +#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \ + defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) +#define PAGE_KERNEL_TEXT PAGE_KERNEL_X +#else +#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX +#endif + +/* Make modules code happy. We don't set RO yet */ +#define PAGE_KERNEL_EXEC PAGE_KERNEL_X + +/* Advertise special mapping type for AGP */ +#define PAGE_AGP (PAGE_KERNEL_NC) +#define HAVE_PAGE_AGP + #ifndef __ASSEMBLY__ #ifndef MAX_PTRS_PER_PGD -- cgit v1.2.3-70-g09d2 From 6cc07821adce44e864c3752a3842936a6a7f6aef Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 7 Sep 2022 12:05:21 +0200 Subject: powerpc/mm: Make PAGE_KERNEL_xxx macros grep-friendly Avoid multi-lines to help getting a complete view when using grep. They still remain under the 100 chars limit. 
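The benefit is purely ergonomic: a one-line definition shows up whole in a search, e.g. (illustrative command):

    $ git grep -n 'define PAGE_KERNEL_NCG' arch/powerpc/include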
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/3bc3f5a51949ee7f52dba36677db23d4337c7995.1662544980.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/book3s/32/pgtable.h | 3 +-- arch/powerpc/include/asm/book3s/64/pgtable.h | 9 +++------ arch/powerpc/include/asm/nohash/pgtable.h | 3 +-- 3 files changed, 5 insertions(+), 10 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 8da6d4544822..75823f39e042 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -112,8 +112,7 @@ static inline bool pte_user(pte_t pte) /* Permission masks used for kernel mappings */ #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) #define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE) -#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ - _PAGE_NO_CACHE | _PAGE_GUARDED) +#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED) #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 3007a251a186..b5f8264cc980 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -117,8 +117,7 @@ #define _PAGE_KERNEL_RW (_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY) #define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_READ) #define _PAGE_KERNEL_ROX (_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC) -#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | \ - _PAGE_RW | _PAGE_EXEC) +#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) /* * _PAGE_CHG_MASK masks of bits that are to be preserved across * pgprot changes @@ -156,10 +155,8 @@ /* Permission masks used for kernel mappings */ #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) -#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ - _PAGE_TOLERANT) -#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ - _PAGE_NON_IDEMPOTENT) +#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_TOLERANT) +#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NON_IDEMPOTENT) #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 18b29cfee0d6..4fd73c7412d0 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -11,8 +11,7 @@ /* Permission masks used for kernel mappings */ #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) #define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE) -#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ - _PAGE_NO_CACHE | _PAGE_GUARDED) +#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED) #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) -- cgit 
v1.2.3-70-g09d2 From 76b719881a26fec3b77652134f19cf1dfcc96318 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:29 +0200 Subject: powerpc/cputable: Move __cpu_setup() prototypes out of cputable.h Move all prototypes out of cputable.h For that rename cpu_setup_power.h to cpu_setup.h and move all prototypes in it. Signed-off-by: Christophe Leroy [mpe: Standardise cpu_spec *spec formatting] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/f45118489ee450db654db8bbcdfd8f5907337c22.1663606875.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/cpu_setup.h | 49 ++++++++++++++++++++++++++++++ arch/powerpc/include/asm/cpu_setup_power.h | 12 -------- arch/powerpc/kernel/cpu_setup_power.c | 2 +- arch/powerpc/kernel/cputable.c | 38 +---------------------- 4 files changed, 51 insertions(+), 50 deletions(-) create mode 100644 arch/powerpc/include/asm/cpu_setup.h delete mode 100644 arch/powerpc/include/asm/cpu_setup_power.h (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/cpu_setup.h b/arch/powerpc/include/asm/cpu_setup.h new file mode 100644 index 000000000000..30e2fe389502 --- /dev/null +++ b/arch/powerpc/include/asm/cpu_setup.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2020 IBM Corporation + */ + +#ifndef _ASM_POWERPC_CPU_SETUP_H +#define _ASM_POWERPC_CPU_SETUP_H +void __setup_cpu_power7(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_power8(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_power9(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_power10(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power7(void); +void __restore_cpu_power8(void); +void __restore_cpu_power9(void); +void __restore_cpu_power10(void); + +void __setup_cpu_e500v1(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_e500v2(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_e500mc(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_440ep(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_440epx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_440gx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_440grx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_440spe(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_440x5(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_460ex(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_460gt(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_603(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_604(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_750(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_750cx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_750fx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_7400(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_7410(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_745x(unsigned long offset, struct cpu_spec *spec); + +void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_pa6t(void); +void 
__restore_cpu_ppc970(void); + +void __setup_cpu_e5500(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_e6500(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_e5500(void); +void __restore_cpu_e6500(void); +#endif /* _ASM_POWERPC_CPU_SETUP_H */ diff --git a/arch/powerpc/include/asm/cpu_setup_power.h b/arch/powerpc/include/asm/cpu_setup_power.h deleted file mode 100644 index 24be9131f803..000000000000 --- a/arch/powerpc/include/asm/cpu_setup_power.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2020 IBM Corporation - */ -void __setup_cpu_power7(unsigned long offset, struct cpu_spec *spec); -void __restore_cpu_power7(void); -void __setup_cpu_power8(unsigned long offset, struct cpu_spec *spec); -void __restore_cpu_power8(void); -void __setup_cpu_power9(unsigned long offset, struct cpu_spec *spec); -void __restore_cpu_power9(void); -void __setup_cpu_power10(unsigned long offset, struct cpu_spec *spec); -void __restore_cpu_power10(void); diff --git a/arch/powerpc/kernel/cpu_setup_power.c b/arch/powerpc/kernel/cpu_setup_power.c index 3dc61e203f37..097c033668f0 100644 --- a/arch/powerpc/kernel/cpu_setup_power.c +++ b/arch/powerpc/kernel/cpu_setup_power.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include /* Disable CPU_FTR_HVMODE and return false if MSR:HV is not set */ static bool init_hvmode_206(struct cpu_spec *t) diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 5ace97cccad8..9229e0930332 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -18,6 +18,7 @@ #include #include #include +#include static struct cpu_spec the_cpu_spec __read_mostly; @@ -34,43 +35,6 @@ const char *powerpc_base_platform; * part of the cputable though. 
That has to be fixed for both ppc32 * and ppc64 */ -#ifdef CONFIG_PPC32 -extern void __setup_cpu_e500v1(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_e500v2(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_e500mc(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec); -extern void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec); -extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_750cx(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_750fx(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec); -#endif /* CONFIG_PPC32 */ -#ifdef CONFIG_PPC64 -#include -extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec); -extern void __restore_cpu_pa6t(void); -extern void __restore_cpu_ppc970(void); -#endif /* CONFIG_PPC64 */ -#if defined(CONFIG_E500) -extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_e6500(unsigned long offset, struct cpu_spec* spec); -extern void __restore_cpu_e5500(void); -extern void __restore_cpu_e6500(void); -#endif /* CONFIG_E500 */ /* This table only contains "desktop" CPUs, it need to be filled with embedded * ones as well... -- cgit v1.2.3-70-g09d2 From dfc3095cec27f402c183da920f4733785e4c873d Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:31 +0200 Subject: powerpc: Remove CONFIG_FSL_BOOKE PPC_85xx is PPC32 only. PPC_85xx always selects E500 and is the only PPC32 that selects E500. FSL_BOOKE is selected when E500 and PPC32 are selected. So FSL_BOOKE is redundant with PPC_85xx. Remove FSL_BOOKE. And rename four files accordingly. cpu_setup_fsl_booke.S is not renamed because it is linked to PPC_FSL_BOOK3E and not to FSL_BOOKE as suggested by its name. 
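The redundancy argument, in Kconfig terms (an illustrative reconstruction of the old symbol, not quoted from the tree):

    config FSL_BOOKE
        bool
        depends on E500 && PPC32
        default y

Since PPC_85xx is the only PPC32 platform that selects E500, FSL_BOOKE=y and PPC_85xx=y describe exactly the same configurations, so the extra symbol carries no information.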
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/08e3e15594e66d63b9e89c5b4f9c35153913c28f.1663606875.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 28 +- arch/powerpc/Makefile | 2 +- arch/powerpc/include/asm/kexec.h | 2 +- arch/powerpc/include/asm/nohash/32/pgtable.h | 6 +- arch/powerpc/include/asm/nohash/32/pte-85xx.h | 74 ++ arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h | 74 -- arch/powerpc/include/asm/nohash/tlbflush.h | 2 +- arch/powerpc/kernel/85xx_entry_mapping.S | 230 ++++ arch/powerpc/kernel/Makefile | 6 +- arch/powerpc/kernel/fsl_booke_entry_mapping.S | 230 ---- arch/powerpc/kernel/head_85xx.S | 1227 ++++++++++++++++++++ arch/powerpc/kernel/head_fsl_booke.S | 1227 -------------------- arch/powerpc/kernel/kgdb.c | 12 +- arch/powerpc/kernel/swsusp_85xx.S | 202 ++++ arch/powerpc/kernel/swsusp_booke.S | 202 ---- arch/powerpc/kernel/traps.c | 4 +- arch/powerpc/kexec/core_32.c | 2 +- arch/powerpc/kexec/relocate_32.S | 4 +- arch/powerpc/kvm/booke_interrupts.S | 4 +- arch/powerpc/mm/init_32.c | 4 +- arch/powerpc/mm/mmu_decl.h | 4 +- arch/powerpc/mm/nohash/fsl_book3e.c | 2 +- arch/powerpc/mm/nohash/tlb.c | 2 +- arch/powerpc/mm/nohash/tlb_low.S | 2 +- arch/powerpc/platforms/Kconfig.cputype | 11 +- 25 files changed, 1779 insertions(+), 1784 deletions(-) create mode 100644 arch/powerpc/include/asm/nohash/32/pte-85xx.h delete mode 100644 arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h create mode 100644 arch/powerpc/kernel/85xx_entry_mapping.S delete mode 100644 arch/powerpc/kernel/fsl_booke_entry_mapping.S create mode 100644 arch/powerpc/kernel/head_85xx.S delete mode 100644 arch/powerpc/kernel/head_fsl_booke.S create mode 100644 arch/powerpc/kernel/swsusp_85xx.S delete mode 100644 arch/powerpc/kernel/swsusp_booke.S (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 220045692e48..dafb14f44672 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -135,7 +135,7 @@ config PPC select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 select ARCH_HAS_SET_MEMORY select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S || PPC_8xx || 40x) && !HIBERNATION - select ARCH_HAS_STRICT_KERNEL_RWX if FSL_BOOKE && !HIBERNATION && !RANDOMIZE_BASE + select ARCH_HAS_STRICT_KERNEL_RWX if PPC_85xx && !HIBERNATION && !RANDOMIZE_BASE select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE @@ -548,7 +548,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE config KEXEC bool "kexec system call" - depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E + depends on (PPC_BOOK3S || PPC_85xx || (44x && !SMP)) || PPC_BOOK3E select KEXEC_CORE help kexec is a system call that implements the ability to shutdown your @@ -583,7 +583,7 @@ config ARCH_HAS_KEXEC_PURGATORY config RELOCATABLE bool "Build a relocatable kernel" - depends on PPC64 || (FLATMEM && (44x || FSL_BOOKE)) + depends on PPC64 || (FLATMEM && (44x || PPC_85xx)) select NONSTATIC_KERNEL help This builds a kernel image that is capable of running at the @@ -606,7 +606,7 @@ config RELOCATABLE config RANDOMIZE_BASE bool "Randomize the address of the kernel image" - depends on (FSL_BOOKE && FLATMEM && PPC32) + depends on (PPC_85xx && FLATMEM && PPC32) depends on RELOCATABLE help Randomizes the virtual address at which the kernel image is @@ -625,8 +625,8 @@ config RELOCATABLE_TEST config CRASH_DUMP bool "Build a dump capture 
kernel" - depends on PPC64 || PPC_BOOK3S_32 || FSL_BOOKE || (44x && !SMP) - select RELOCATABLE if PPC64 || 44x || FSL_BOOKE + depends on PPC64 || PPC_BOOK3S_32 || PPC_85xx || (44x && !SMP) + select RELOCATABLE if PPC64 || 44x || PPC_85xx help Build a kernel suitable for use as a dump capture kernel. The same kernel binary can be used as production kernel and dump @@ -815,7 +815,7 @@ config DATA_SHIFT_BOOL depends on ADVANCED_OPTIONS depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX) || \ - FSL_BOOKE + PPC_85xx help This option allows you to set the kernel data alignment. When RAM is mapped by blocks, the alignment needs to fit the size and @@ -828,13 +828,13 @@ config DATA_SHIFT default 24 if STRICT_KERNEL_RWX && PPC64 range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32 range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx - range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && FSL_BOOKE + range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_85xx default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32 default 23 if STRICT_KERNEL_RWX && PPC_8xx default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx - default 24 if STRICT_KERNEL_RWX && FSL_BOOKE + default 24 if STRICT_KERNEL_RWX && PPC_85xx default PPC_PAGE_SHIFT help On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO. @@ -1150,7 +1150,7 @@ config LOWMEM_SIZE config LOWMEM_CAM_NUM_BOOL bool "Set number of CAMs to use to map low memory" - depends on ADVANCED_OPTIONS && FSL_BOOKE + depends on ADVANCED_OPTIONS && PPC_85xx help This option allows you to set the maximum number of CAM slots that will be used to map low memory. There are a limited number of slots @@ -1161,7 +1161,7 @@ config LOWMEM_CAM_NUM_BOOL Say N here unless you know what you are doing. config LOWMEM_CAM_NUM - depends on FSL_BOOKE + depends on PPC_85xx int "Number of CAMs to use to map low memory" if LOWMEM_CAM_NUM_BOOL default 3 if !STRICT_KERNEL_RWX default 9 if DATA_SHIFT >= 24 @@ -1170,7 +1170,7 @@ config LOWMEM_CAM_NUM config DYNAMIC_MEMSTART bool "Enable page aligned dynamic load address for kernel" - depends on ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || 44x) + depends on ADVANCED_OPTIONS && FLATMEM && (PPC_85xx || 44x) select NONSTATIC_KERNEL help This option enables the kernel to be loaded at any page aligned @@ -1219,7 +1219,7 @@ config KERNEL_START config PHYSICAL_START_BOOL bool "Set physical address where the kernel is loaded" - depends on ADVANCED_OPTIONS && FLATMEM && FSL_BOOKE + depends on ADVANCED_OPTIONS && FLATMEM && PPC_85xx help This gives the physical address where the kernel is loaded. @@ -1232,7 +1232,7 @@ config PHYSICAL_START config PHYSICAL_ALIGN hex - default "0x04000000" if FSL_BOOKE + default "0x04000000" if PPC_85xx help This value puts the alignment restrictions on physical address where kernel is loaded and run from. 
Kernel is compiled for an diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 02742facf895..f6d477c4aa64 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -231,7 +231,7 @@ head-$(CONFIG_PPC_BOOK3S_32) := arch/powerpc/kernel/head_book3s_32.o head-$(CONFIG_PPC_8xx) := arch/powerpc/kernel/head_8xx.o head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o -head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o +head-$(CONFIG_PPC_85xx) := arch/powerpc/kernel/head_85xx.o head-$(CONFIG_PPC64) += arch/powerpc/kernel/entry_64.o head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index f8d122d16af4..a1ddba01e7d1 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -3,7 +3,7 @@ #define _ASM_POWERPC_KEXEC_H #ifdef __KERNEL__ -#if defined(CONFIG_FSL_BOOKE) || defined(CONFIG_44x) +#if defined(CONFIG_PPC_85xx) || defined(CONFIG_44x) /* * On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 9091e4904a6b..197e7552d9f6 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -130,10 +130,10 @@ void unmap_kernel_page(unsigned long va); #include #elif defined(CONFIG_44x) #include -#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT) +#elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT) #include -#elif defined(CONFIG_FSL_BOOKE) -#include +#elif defined(CONFIG_PPC_85xx) +#include #elif defined(CONFIG_PPC_8xx) #include #endif diff --git a/arch/powerpc/include/asm/nohash/32/pte-85xx.h b/arch/powerpc/include/asm/nohash/32/pte-85xx.h new file mode 100644 index 000000000000..93fb8e11a3f1 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/pte-85xx.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_32_PTE_85xx_H +#define _ASM_POWERPC_NOHASH_32_PTE_85xx_H +#ifdef __KERNEL__ + +/* PTE bit definitions for Freescale BookE SW loaded TLB MMU based + * processors + * + MMU Assist Register 3: + + 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63 + RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR + + - PRESENT *must* be in the bottom three bits because swap cache + entries use the top 29 bits. 
+ +*/ + +/* Definitions for FSL Book-E Cores */ +#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */ +#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */ +#define _PAGE_RW 0x00004 /* S: Write permission (SW) */ +#define _PAGE_DIRTY 0x00008 /* S: Page dirty */ +#define _PAGE_EXEC 0x00010 /* H: SX permission */ +#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */ + +#define _PAGE_ENDIAN 0x00040 /* H: E bit */ +#define _PAGE_GUARDED 0x00080 /* H: G bit */ +#define _PAGE_COHERENT 0x00100 /* H: M bit */ +#define _PAGE_NO_CACHE 0x00200 /* H: I bit */ +#define _PAGE_WRITETHRU 0x00400 /* H: W bit */ +#define _PAGE_SPECIAL 0x00800 /* S: Special page */ + +#define _PAGE_KERNEL_RO 0 +#define _PAGE_KERNEL_ROX _PAGE_EXEC +#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW) +#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) + +/* No page size encoding in the linux PTE */ +#define _PAGE_PSIZE 0 + +#define _PMD_PRESENT 0 +#define _PMD_PRESENT_MASK (PAGE_MASK) +#define _PMD_BAD (~PAGE_MASK) +#define _PMD_USER 0 + +#define _PTE_NONE_MASK 0 + +#define PTE_WIMGE_SHIFT (6) + +/* + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non cacheable + * pages. We always set _PAGE_COHERENT when SMP is enabled or + * the processor might need it for DMA coherency. + */ +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) +#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC) +#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) +#else +#define _PAGE_BASE (_PAGE_BASE_NC) +#endif + +/* Permission masks used to generate the __P and __S table */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_85xx_H */ diff --git a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h deleted file mode 100644 index 0fc1bd42bb3e..000000000000 --- a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h +++ /dev/null @@ -1,74 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H -#define _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H -#ifdef __KERNEL__ - -/* PTE bit definitions for Freescale BookE SW loaded TLB MMU based - * processors - * - MMU Assist Register 3: - - 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63 - RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR - - - PRESENT *must* be in the bottom three bits because swap cache - entries use the top 29 bits. 
- -*/ - -/* Definitions for FSL Book-E Cores */ -#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */ -#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */ -#define _PAGE_RW 0x00004 /* S: Write permission (SW) */ -#define _PAGE_DIRTY 0x00008 /* S: Page dirty */ -#define _PAGE_EXEC 0x00010 /* H: SX permission */ -#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */ - -#define _PAGE_ENDIAN 0x00040 /* H: E bit */ -#define _PAGE_GUARDED 0x00080 /* H: G bit */ -#define _PAGE_COHERENT 0x00100 /* H: M bit */ -#define _PAGE_NO_CACHE 0x00200 /* H: I bit */ -#define _PAGE_WRITETHRU 0x00400 /* H: W bit */ -#define _PAGE_SPECIAL 0x00800 /* S: Special page */ - -#define _PAGE_KERNEL_RO 0 -#define _PAGE_KERNEL_ROX _PAGE_EXEC -#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW) -#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) - -/* No page size encoding in the linux PTE */ -#define _PAGE_PSIZE 0 - -#define _PMD_PRESENT 0 -#define _PMD_PRESENT_MASK (PAGE_MASK) -#define _PMD_BAD (~PAGE_MASK) -#define _PMD_USER 0 - -#define _PTE_NONE_MASK 0 - -#define PTE_WIMGE_SHIFT (6) - -/* - * We define 2 sets of base prot bits, one for basic pages (ie, - * cacheable kernel and user pages) and one for non cacheable - * pages. We always set _PAGE_COHERENT when SMP is enabled or - * the processor might need it for DMA coherency. - */ -#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) -#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC) -#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) -#else -#define _PAGE_BASE (_PAGE_BASE_NC) -#endif - -/* Permission masks used to generate the __P and __S table */ -#define PAGE_NONE __pgprot(_PAGE_BASE) -#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) -#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) -#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) -#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H */ diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h index 698935d4f72d..bdaf34ad41ea 100644 --- a/arch/powerpc/include/asm/nohash/tlbflush.h +++ b/arch/powerpc/include/asm/nohash/tlbflush.h @@ -18,7 +18,7 @@ /* * TLB flushing for software loaded TLB chips * - * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range & + * TODO: (CONFIG_PPC_85xx) determine if flush_tlb_range & * flush_tlb_kernel_range are best implemented as tlbia vs * specific tlbie's */ diff --git a/arch/powerpc/kernel/85xx_entry_mapping.S b/arch/powerpc/kernel/85xx_entry_mapping.S new file mode 100644 index 000000000000..dedc17fac8f8 --- /dev/null +++ b/arch/powerpc/kernel/85xx_entry_mapping.S @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* 1. Find the index of the entry we're executing in */ + bcl 20,31,$+4 /* Find our address */ +invstr: mflr r6 /* Make it accessible */ + mfmsr r7 + rlwinm r4,r7,27,31,31 /* extract MSR[IS] */ + mfspr r7, SPRN_PID0 + slwi r7,r7,16 + or r7,r7,r4 + mtspr SPRN_MAS6,r7 + tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */ + mfspr r7,SPRN_MAS1 + andis. 
r7,r7,MAS1_VALID@h
+ bne match_TLB
+
+ mfspr r7,SPRN_MMUCFG
+ rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */
+ cmpwi r7,3
+ bne match_TLB /* skip if NPIDS != 3 */
+
+ mfspr r7,SPRN_PID1
+ slwi r7,r7,16
+ or r7,r7,r4
+ mtspr SPRN_MAS6,r7
+ tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */
+ mfspr r7,SPRN_MAS1
+ andis. r7,r7,MAS1_VALID@h
+ bne match_TLB
+ mfspr r7, SPRN_PID2
+ slwi r7,r7,16
+ or r7,r7,r4
+ mtspr SPRN_MAS6,r7
+ tlbsx 0,r6 /* Fall through, we had to match */
+
+match_TLB:
+ mfspr r7,SPRN_MAS0
+ rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */
+
+ mfspr r7,SPRN_MAS1 /* Ensure IPROT set */
+ oris r7,r7,MAS1_IPROT@h
+ mtspr SPRN_MAS1,r7
+ tlbwe
+
+/* 2. Invalidate all entries except the entry we're executing in */
+ mfspr r9,SPRN_TLB1CFG
+ andi. r9,r9,0xfff
+ li r6,0 /* Set Entry counter to 0 */
+1: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
+ rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
+ mtspr SPRN_MAS0,r7
+ tlbre
+ mfspr r7,SPRN_MAS1
+ rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */
+ cmpw r3,r6
+ beq skpinv /* Don't update the current execution TLB */
+ mtspr SPRN_MAS1,r7
+ tlbwe
+ isync
+skpinv: addi r6,r6,1 /* Increment */
+ cmpw r6,r9 /* Are we done? */
+ bne 1b /* If not, repeat */
+
+ /* Invalidate TLB0 */
+ li r6,0x04
+ tlbivax 0,r6
+ TLBSYNC
+ /* Invalidate TLB1 */
+ li r6,0x0c
+ tlbivax 0,r6
+ TLBSYNC
+
+/* 3. Setup a temp mapping and jump to it */
+ andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */
+ addi r5, r5, 0x1
+ lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
+ rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
+ mtspr SPRN_MAS0,r7
+ tlbre
+
+ /* grab and fixup the RPN */
+ mfspr r6,SPRN_MAS1 /* extract MAS1[SIZE] */
+ rlwinm r6,r6,25,27,31
+ li r8,-1
+ addi r6,r6,10
+ slw r6,r8,r6 /* convert to mask */
+
+ bcl 20,31,$+4 /* Find our address */
+1: mflr r7
+
+ mfspr r8,SPRN_MAS3
+#ifdef CONFIG_PHYS_64BIT
+ mfspr r23,SPRN_MAS7
+#endif
+ and r8,r6,r8
+ subfic r9,r6,-4096
+ and r9,r9,r7
+
+ or r25,r8,r9
+ ori r8,r25,(MAS3_SX|MAS3_SW|MAS3_SR)
+
+ /* Just modify the entry ID and EPN for the temp mapping */
+ lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
+ rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
+ mtspr SPRN_MAS0,r7
+ xori r6,r4,1 /* Setup TMP mapping in the other Address space */
+ slwi r6,r6,12
+ oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h
+ ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_4K))@l
+ mtspr SPRN_MAS1,r6
+ mfspr r6,SPRN_MAS2
+ li r7,0 /* temp EPN = 0 */
+ rlwimi r7,r6,0,20,31
+ mtspr SPRN_MAS2,r7
+ mtspr SPRN_MAS3,r8
+ tlbwe
+
+ xori r6,r4,1
+ slwi r6,r6,5 /* setup new context with other address space */
+ bcl 20,31,$+4 /* Find our address */
+1: mflr r9
+ rlwimi r7,r9,0,20,31
+ addi r7,r7,(2f - 1b)
+ mtspr SPRN_SRR0,r7
+ mtspr SPRN_SRR1,r6
+ rfi
+2:
+/* 4. Clear out PIDs & Search info */
+ li r6,0
+ mtspr SPRN_MAS6,r6
+ mtspr SPRN_PID0,r6
+
+ mfspr r7,SPRN_MMUCFG
+ rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */
+ cmpwi r7,3
+ bne 2f /* skip if NPIDS != 3 */
+
+ mtspr SPRN_PID1,r6
+ mtspr SPRN_PID2,r6
+
+/* 5. Invalidate mapping we started in */
+2:
+ lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
+ rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
+ mtspr SPRN_MAS0,r7
+ tlbre
+ mfspr r6,SPRN_MAS1
+ rlwinm r6,r6,0,2,0 /* clear IPROT */
+ mtspr SPRN_MAS1,r6
+ tlbwe
+ /* Invalidate TLB1 */
+ li r9,0x0c
+ tlbivax 0,r9
+ TLBSYNC
+
+#if defined(ENTRY_MAPPING_BOOT_SETUP)
+
+/* 6.
Setup kernstart_virt_addr mapping in TLB1[0] */ + lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ + mtspr SPRN_MAS0,r6 + lis r6,(MAS1_VALID|MAS1_IPROT)@h + ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l + mtspr SPRN_MAS1,r6 + lis r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h + ori r6,r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l + and r6,r6,r20 + ori r6,r6,MAS2_M_IF_NEEDED@l + mtspr SPRN_MAS2,r6 + mtspr SPRN_MAS3,r8 + tlbwe + +/* 7. Jump to kernstart_virt_addr mapping */ + mr r6,r20 + +#elif defined(ENTRY_MAPPING_KEXEC_SETUP) +/* + * 6. Setup a 1:1 mapping in TLB1. Esel 0 is unsued, 1 or 2 contains the tmp + * mapping so we start at 3. We setup 8 mappings, each 256MiB in size. This + * will cover the first 2GiB of memory. + */ + + lis r10, (MAS1_VALID|MAS1_IPROT)@h + ori r10,r10, (MAS1_TSIZE(BOOK3E_PAGESZ_256M))@l + li r11, 0 + li r0, 8 + mtctr r0 + +next_tlb_setup: + addi r0, r11, 3 + rlwinm r0, r0, 16, 4, 15 // Compute esel + rlwinm r9, r11, 28, 0, 3 // Compute [ER]PN + oris r0, r0, (MAS0_TLBSEL(1))@h + mtspr SPRN_MAS0,r0 + mtspr SPRN_MAS1,r10 + mtspr SPRN_MAS2,r9 + ori r9, r9, (MAS3_SX|MAS3_SW|MAS3_SR) + mtspr SPRN_MAS3,r9 + tlbwe + addi r11, r11, 1 + bdnz+ next_tlb_setup + +/* 7. Jump to our 1:1 mapping */ + mr r6, r25 +#else + #error You need to specify the mapping or not use this at all. +#endif + + lis r7,MSR_KERNEL@h + ori r7,r7,MSR_KERNEL@l + bcl 20,31,$+4 /* Find our address */ +1: mflr r9 + rlwimi r6,r9,0,20,31 + addi r6,r6,(2f - 1b) + mtspr SPRN_SRR0,r6 + mtspr SPRN_SRR1,r7 + rfi /* start execution out of TLB1[0] entry */ + +/* 8. Clear out the temp mapping */ +2: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ + mtspr SPRN_MAS0,r7 + tlbre + mfspr r8,SPRN_MAS1 + rlwinm r8,r8,0,2,0 /* clear IPROT */ + mtspr SPRN_MAS1,r8 + tlbwe + /* Invalidate TLB1 */ + li r9,0x0c + tlbivax 0,r9 + TLBSYNC diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 06d2d1f78f71..4483cae7dc9f 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -106,8 +106,8 @@ endif obj-$(CONFIG_PPC_BOOK3S_32) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o obj-$(CONFIG_TAU) += tau_6xx.o obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o -ifdef CONFIG_FSL_BOOKE -obj-$(CONFIG_HIBERNATION) += swsusp_booke.o +ifdef CONFIG_PPC_85xx +obj-$(CONFIG_HIBERNATION) += swsusp_85xx.o else obj-$(CONFIG_HIBERNATION) += swsusp_$(BITS).o endif @@ -122,7 +122,7 @@ extra-$(CONFIG_PPC64) := head_64.o extra-$(CONFIG_PPC_BOOK3S_32) := head_book3s_32.o extra-$(CONFIG_40x) := head_40x.o extra-$(CONFIG_44x) := head_44x.o -extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o +extra-$(CONFIG_PPC_85xx) := head_85xx.o extra-$(CONFIG_PPC_8xx) := head_8xx.o extra-y += vmlinux.lds diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S deleted file mode 100644 index dedc17fac8f8..000000000000 --- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S +++ /dev/null @@ -1,230 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* 1. Find the index of the entry we're executing in */ - bcl 20,31,$+4 /* Find our address */ -invstr: mflr r6 /* Make it accessible */ - mfmsr r7 - rlwinm r4,r7,27,31,31 /* extract MSR[IS] */ - mfspr r7, SPRN_PID0 - slwi r7,r7,16 - or r7,r7,r4 - mtspr SPRN_MAS6,r7 - tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */ - mfspr r7,SPRN_MAS1 - andis. 
r7,r7,MAS1_VALID@h - bne match_TLB - - mfspr r7,SPRN_MMUCFG - rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */ - cmpwi r7,3 - bne match_TLB /* skip if NPIDS != 3 */ - - mfspr r7,SPRN_PID1 - slwi r7,r7,16 - or r7,r7,r4 - mtspr SPRN_MAS6,r7 - tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */ - mfspr r7,SPRN_MAS1 - andis. r7,r7,MAS1_VALID@h - bne match_TLB - mfspr r7, SPRN_PID2 - slwi r7,r7,16 - or r7,r7,r4 - mtspr SPRN_MAS6,r7 - tlbsx 0,r6 /* Fall through, we had to match */ - -match_TLB: - mfspr r7,SPRN_MAS0 - rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */ - - mfspr r7,SPRN_MAS1 /* Insure IPROT set */ - oris r7,r7,MAS1_IPROT@h - mtspr SPRN_MAS1,r7 - tlbwe - -/* 2. Invalidate all entries except the entry we're executing in */ - mfspr r9,SPRN_TLB1CFG - andi. r9,r9,0xfff - li r6,0 /* Set Entry counter to 0 */ -1: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ - rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */ - mtspr SPRN_MAS0,r7 - tlbre - mfspr r7,SPRN_MAS1 - rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */ - cmpw r3,r6 - beq skpinv /* Dont update the current execution TLB */ - mtspr SPRN_MAS1,r7 - tlbwe - isync -skpinv: addi r6,r6,1 /* Increment */ - cmpw r6,r9 /* Are we done? */ - bne 1b /* If not, repeat */ - - /* Invalidate TLB0 */ - li r6,0x04 - tlbivax 0,r6 - TLBSYNC - /* Invalidate TLB1 */ - li r6,0x0c - tlbivax 0,r6 - TLBSYNC - -/* 3. Setup a temp mapping and jump to it */ - andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */ - addi r5, r5, 0x1 - lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ - rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ - mtspr SPRN_MAS0,r7 - tlbre - - /* grab and fixup the RPN */ - mfspr r6,SPRN_MAS1 /* extract MAS1[SIZE] */ - rlwinm r6,r6,25,27,31 - li r8,-1 - addi r6,r6,10 - slw r6,r8,r6 /* convert to mask */ - - bcl 20,31,$+4 /* Find our address */ -1: mflr r7 - - mfspr r8,SPRN_MAS3 -#ifdef CONFIG_PHYS_64BIT - mfspr r23,SPRN_MAS7 -#endif - and r8,r6,r8 - subfic r9,r6,-4096 - and r9,r9,r7 - - or r25,r8,r9 - ori r8,r25,(MAS3_SX|MAS3_SW|MAS3_SR) - - /* Just modify the entry ID and EPN for the temp mapping */ - lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ - rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ - mtspr SPRN_MAS0,r7 - xori r6,r4,1 /* Setup TMP mapping in the other Address space */ - slwi r6,r6,12 - oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h - ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_4K))@l - mtspr SPRN_MAS1,r6 - mfspr r6,SPRN_MAS2 - li r7,0 /* temp EPN = 0 */ - rlwimi r7,r6,0,20,31 - mtspr SPRN_MAS2,r7 - mtspr SPRN_MAS3,r8 - tlbwe - - xori r6,r4,1 - slwi r6,r6,5 /* setup new context with other address space */ - bcl 20,31,$+4 /* Find our address */ -1: mflr r9 - rlwimi r7,r9,0,20,31 - addi r7,r7,(2f - 1b) - mtspr SPRN_SRR0,r7 - mtspr SPRN_SRR1,r6 - rfi -2: -/* 4. Clear out PIDs & Search info */ - li r6,0 - mtspr SPRN_MAS6,r6 - mtspr SPRN_PID0,r6 - - mfspr r7,SPRN_MMUCFG - rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */ - cmpwi r7,3 - bne 2f /* skip if NPIDS != 3 */ - - mtspr SPRN_PID1,r6 - mtspr SPRN_PID2,r6 - -/* 5. Invalidate mapping we started in */ -2: - lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ - rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ - mtspr SPRN_MAS0,r7 - tlbre - mfspr r6,SPRN_MAS1 - rlwinm r6,r6,0,2,0 /* clear IPROT */ - mtspr SPRN_MAS1,r6 - tlbwe - /* Invalidate TLB1 */ - li r9,0x0c - tlbivax 0,r9 - TLBSYNC - -#if defined(ENTRY_MAPPING_BOOT_SETUP) - -/* 6. 
Setup kernstart_virt_addr mapping in TLB1[0] */ - lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ - mtspr SPRN_MAS0,r6 - lis r6,(MAS1_VALID|MAS1_IPROT)@h - ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l - mtspr SPRN_MAS1,r6 - lis r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h - ori r6,r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l - and r6,r6,r20 - ori r6,r6,MAS2_M_IF_NEEDED@l - mtspr SPRN_MAS2,r6 - mtspr SPRN_MAS3,r8 - tlbwe - -/* 7. Jump to kernstart_virt_addr mapping */ - mr r6,r20 - -#elif defined(ENTRY_MAPPING_KEXEC_SETUP) -/* - * 6. Setup a 1:1 mapping in TLB1. Esel 0 is unsued, 1 or 2 contains the tmp - * mapping so we start at 3. We setup 8 mappings, each 256MiB in size. This - * will cover the first 2GiB of memory. - */ - - lis r10, (MAS1_VALID|MAS1_IPROT)@h - ori r10,r10, (MAS1_TSIZE(BOOK3E_PAGESZ_256M))@l - li r11, 0 - li r0, 8 - mtctr r0 - -next_tlb_setup: - addi r0, r11, 3 - rlwinm r0, r0, 16, 4, 15 // Compute esel - rlwinm r9, r11, 28, 0, 3 // Compute [ER]PN - oris r0, r0, (MAS0_TLBSEL(1))@h - mtspr SPRN_MAS0,r0 - mtspr SPRN_MAS1,r10 - mtspr SPRN_MAS2,r9 - ori r9, r9, (MAS3_SX|MAS3_SW|MAS3_SR) - mtspr SPRN_MAS3,r9 - tlbwe - addi r11, r11, 1 - bdnz+ next_tlb_setup - -/* 7. Jump to our 1:1 mapping */ - mr r6, r25 -#else - #error You need to specify the mapping or not use this at all. -#endif - - lis r7,MSR_KERNEL@h - ori r7,r7,MSR_KERNEL@l - bcl 20,31,$+4 /* Find our address */ -1: mflr r9 - rlwimi r6,r9,0,20,31 - addi r6,r6,(2f - 1b) - mtspr SPRN_SRR0,r6 - mtspr SPRN_SRR1,r7 - rfi /* start execution out of TLB1[0] entry */ - -/* 8. Clear out the temp mapping */ -2: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ - rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ - mtspr SPRN_MAS0,r7 - tlbre - mfspr r8,SPRN_MAS1 - rlwinm r8,r8,0,2,0 /* clear IPROT */ - mtspr SPRN_MAS1,r8 - tlbwe - /* Invalidate TLB1 */ - li r9,0x0c - tlbivax 0,r9 - TLBSYNC diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S new file mode 100644 index 000000000000..48b168b5dc57 --- /dev/null +++ b/arch/powerpc/kernel/head_85xx.S @@ -0,0 +1,1227 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Kernel execution entry point code. + * + * Copyright (c) 1995-1996 Gary Thomas + * Initial PowerPC version. + * Copyright (c) 1996 Cort Dougan + * Rewritten for PReP + * Copyright (c) 1996 Paul Mackerras + * Low-level exception handers, MMU support, and rewrite. + * Copyright (c) 1997 Dan Malek + * PowerPC 8xx modifications. + * Copyright (c) 1998-1999 TiVo, Inc. + * PowerPC 403GCX modifications. + * Copyright (c) 1999 Grant Erickson + * PowerPC 403GCX/405GP modifications. + * Copyright 2000 MontaVista Software Inc. + * PPC405 modifications + * PowerPC 403GCX/405GP modifications. + * Author: MontaVista Software, Inc. + * frank_rowand@mvista.com or source@mvista.com + * debbie_chu@mvista.com + * Copyright 2002-2004 MontaVista Software, Inc. + * PowerPC 44x support, Matt Porter + * Copyright 2004 Freescale Semiconductor, Inc + * PowerPC e500 modifications, Kumar Gala + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "head_booke.h" + +/* As with the other PowerPC ports, it is expected that when code + * execution begins here, the following registers contain valid, yet + * optional, information: + * + * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) 
+ * r4 - Starting address of the init RAM disk + * r5 - Ending address of the init RAM disk + * r6 - Start of kernel command line string (e.g. "mem=128") + * r7 - End of kernel command line string + * + */ + __HEAD +_GLOBAL(_stext); +_GLOBAL(_start); + /* + * Reserve a word at a fixed location to store the address + * of abatron_pteptrs + */ + nop + + /* Translate device tree address to physical, save in r30/r31 */ + bl get_phys_addr + mr r30,r3 + mr r31,r4 + + li r25,0 /* phys kernel start (low) */ + li r24,0 /* CPU number */ + li r23,0 /* phys kernel start (high) */ + +#ifdef CONFIG_RELOCATABLE + LOAD_REG_ADDR_PIC(r3, _stext) /* Get our current runtime base */ + + /* Translate _stext address to physical, save in r23/r25 */ + bl get_phys_addr + mr r23,r3 + mr r25,r4 + + bcl 20,31,$+4 +0: mflr r8 + addis r3,r8,(is_second_reloc - 0b)@ha + lwz r19,(is_second_reloc - 0b)@l(r3) + + /* Check if this is the second relocation. */ + cmpwi r19,1 + bne 1f + + /* + * For the second relocation, we already get the real memstart_addr + * from device tree. So we will map PAGE_OFFSET to memstart_addr, + * then the virtual address of start kernel should be: + * PAGE_OFFSET + (kernstart_addr - memstart_addr) + * Since the offset between kernstart_addr and memstart_addr should + * never be beyond 1G, so we can just use the lower 32bit of them + * for the calculation. + */ + lis r3,PAGE_OFFSET@h + + addis r4,r8,(kernstart_addr - 0b)@ha + addi r4,r4,(kernstart_addr - 0b)@l + lwz r5,4(r4) + + addis r6,r8,(memstart_addr - 0b)@ha + addi r6,r6,(memstart_addr - 0b)@l + lwz r7,4(r6) + + subf r5,r7,r5 + add r3,r3,r5 + b 2f + +1: + /* + * We have the runtime (virtual) address of our base. + * We calculate our shift of offset from a 64M page. + * We could map the 64M page we belong to at PAGE_OFFSET and + * get going from there. + */ + lis r4,KERNELBASE@h + ori r4,r4,KERNELBASE@l + rlwinm r6,r25,0,0x3ffffff /* r6 = PHYS_START % 64M */ + rlwinm r5,r4,0,0x3ffffff /* r5 = KERNELBASE % 64M */ + subf r3,r5,r6 /* r3 = r6 - r5 */ + add r3,r4,r3 /* Required Virtual Address */ + +2: bl relocate + + /* + * For the second relocation, we already set the right tlb entries + * for the kernel space, so skip the code in 85xx_entry_mapping.S + */ + cmpwi r19,1 + beq set_ivor +#endif + +/* We try to not make any assumptions about how the boot loader + * setup or used the TLBs. We invalidate all mappings from the + * boot loader and load a single entry in TLB1[0] to map the + * first 64M of kernel memory. Any boot info passed from the + * bootloader needs to live in this first 64M. + * + * Requirement on bootloader: + * - The page we're executing in needs to reside in TLB1 and + * have IPROT=1. If not an invalidate broadcast could + * evict the entry we're currently executing in. 
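
The IPROT bit is what makes the requirement above enforceable: a TLB1 entry whose MAS1[IPROT] is set survives tlbivax invalidate broadcasts, so the entry the CPU is executing from cannot be yanked out from under it. A rough C sketch of the implied check (the MAS1 bit values are taken to match the @h constants used throughout this code; treat them as assumptions):

	/* Sketch: is a TLB1 entry safe to keep running from? (illustrative) */
	#include <stdint.h>

	#define ILLUS_MAS1_VALID	0x80000000u
	#define ILLUS_MAS1_IPROT	0x40000000u

	static int illus_entry_survives_broadcast(uint32_t mas1)
	{
		/* Only valid, invalidation-protected entries are safe. */
		return (mas1 & (ILLUS_MAS1_VALID | ILLUS_MAS1_IPROT)) ==
		       (ILLUS_MAS1_VALID | ILLUS_MAS1_IPROT);
	}

This is why step 1 of 85xx_entry_mapping.S sets IPROT on the matched entry before any invalidation is issued.
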
+ * + * r3 = Index of TLB1 were executing in + * r4 = Current MSR[IS] + * r5 = Index of TLB1 temp mapping + * + * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0] + * if needed + */ + +_GLOBAL(__early_start) + LOAD_REG_ADDR_PIC(r20, kernstart_virt_addr) + lwz r20,0(r20) + +#define ENTRY_MAPPING_BOOT_SETUP +#include "85xx_entry_mapping.S" +#undef ENTRY_MAPPING_BOOT_SETUP + +set_ivor: + /* Establish the interrupt vector offsets */ + SET_IVOR(0, CriticalInput); + SET_IVOR(1, MachineCheck); + SET_IVOR(2, DataStorage); + SET_IVOR(3, InstructionStorage); + SET_IVOR(4, ExternalInput); + SET_IVOR(5, Alignment); + SET_IVOR(6, Program); + SET_IVOR(7, FloatingPointUnavailable); + SET_IVOR(8, SystemCall); + SET_IVOR(9, AuxillaryProcessorUnavailable); + SET_IVOR(10, Decrementer); + SET_IVOR(11, FixedIntervalTimer); + SET_IVOR(12, WatchdogTimer); + SET_IVOR(13, DataTLBError); + SET_IVOR(14, InstructionTLBError); + SET_IVOR(15, DebugCrit); + + /* Establish the interrupt vector base */ + lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ + mtspr SPRN_IVPR,r4 + + /* Setup the defaults for TLB entries */ + li r2,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l + mtspr SPRN_MAS4, r2 + +#if !defined(CONFIG_BDI_SWITCH) + /* + * The Abatron BDI JTAG debugger does not tolerate others + * mucking with the debug registers. + */ + lis r2,DBCR0_IDM@h + mtspr SPRN_DBCR0,r2 + isync + /* clear any residual debug events */ + li r2,-1 + mtspr SPRN_DBSR,r2 +#endif + +#ifdef CONFIG_SMP + /* Check to see if we're the second processor, and jump + * to the secondary_start code if so + */ + LOAD_REG_ADDR_PIC(r24, boot_cpuid) + lwz r24, 0(r24) + cmpwi r24, -1 + mfspr r24,SPRN_PIR + bne __secondary_start +#endif + + /* + * This is where the main kernel code starts. + */ + + /* ptr to current */ + lis r2,init_task@h + ori r2,r2,init_task@l + + /* ptr to current thread */ + addi r4,r2,THREAD /* init task's THREAD */ + mtspr SPRN_SPRG_THREAD,r4 + + /* stack */ + lis r1,init_thread_union@h + ori r1,r1,init_thread_union@l + li r0,0 + stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) + +#ifdef CONFIG_SMP + stw r24, TASK_CPU(r2) +#endif + + bl early_init + +#ifdef CONFIG_KASAN + bl kasan_early_init +#endif +#ifdef CONFIG_RELOCATABLE + mr r3,r30 + mr r4,r31 +#ifdef CONFIG_PHYS_64BIT + mr r5,r23 + mr r6,r25 +#else + mr r5,r25 +#endif + bl relocate_init +#endif + +#ifdef CONFIG_DYNAMIC_MEMSTART + lis r3,kernstart_addr@ha + la r3,kernstart_addr@l(r3) +#ifdef CONFIG_PHYS_64BIT + stw r23,0(r3) + stw r25,4(r3) +#else + stw r25,0(r3) +#endif +#endif + +/* + * Decide what sort of machine this is and initialize the MMU. 
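
By the time machine_init is reached, the CONFIG_RELOCATABLE block above has already decided the kernel's virtual base. Its arithmetic reduces to the following (a C sketch mirroring the asm above; the deliberate 32-bit truncation matches the "lower 32bit" comment in the second-relocation path):

	/* Sketch of the two relocation cases handled above (illustrative). */
	#include <stdint.h>

	/* Second relocation: memstart_addr is already known from the device tree. */
	static uint32_t illus_second_reloc_virt(uint32_t page_offset,
						uint32_t kernstart_lo,
						uint32_t memstart_lo)
	{
		/* The offset is never beyond 1G, so the low 32 bits suffice. */
		return page_offset + (kernstart_lo - memstart_lo);
	}

	/* First relocation: place ourselves within a 64M page at KERNELBASE. */
	static uint32_t illus_first_reloc_virt(uint32_t kernelbase, uint32_t phys_start)
	{
		uint32_t mask = 0x3ffffff;	/* 64M - 1, as in the rlwinm masks */
		return kernelbase + ((phys_start & mask) - (kernelbase & mask));
	}
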
+ */ + mr r3,r30 + mr r4,r31 + bl machine_init + bl MMU_init + + /* Setup PTE pointers for the Abatron bdiGDB */ + lis r6, swapper_pg_dir@h + ori r6, r6, swapper_pg_dir@l + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l + lis r3, kernstart_virt_addr@ha + lwz r4, kernstart_virt_addr@l(r3) + stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ + stw r6, 0(r5) + + /* Let's move on */ + lis r4,start_kernel@h + ori r4,r4,start_kernel@l + lis r3,MSR_KERNEL@h + ori r3,r3,MSR_KERNEL@l + mtspr SPRN_SRR0,r4 + mtspr SPRN_SRR1,r3 + rfi /* change context and jump to start_kernel */ + +/* Macros to hide the PTE size differences + * + * FIND_PTE -- walks the page tables given EA & pgdir pointer + * r10 -- EA of fault + * r11 -- PGDIR pointer + * r12 -- free + * label 2: is the bailout case + * + * if we find the pte (fall through): + * r11 is low pte word + * r12 is pointer to the pte + * r10 is the pshift from the PGD, if we're a hugepage + */ +#ifdef CONFIG_PTE_64BIT +#ifdef CONFIG_HUGETLB_PAGE +#define FIND_PTE \ + rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ + lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ + rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \ + blt 1000f; /* Normal non-huge page */ \ + beq 2f; /* Bail if no table */ \ + oris r11, r11, PD_HUGE@h; /* Put back address bit */ \ + andi. r10, r11, HUGEPD_SHIFT_MASK@l; /* extract size field */ \ + xor r12, r10, r11; /* drop size bits from pointer */ \ + b 1001f; \ +1000: rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ + li r10, 0; /* clear r10 */ \ +1001: lwz r11, 4(r12); /* Get pte entry */ +#else +#define FIND_PTE \ + rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ + lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ + rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \ + beq 2f; /* Bail if no table */ \ + rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ + lwz r11, 4(r12); /* Get pte entry */ +#endif /* HUGEPAGE */ +#else /* !PTE_64BIT */ +#define FIND_PTE \ + rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \ + lwz r11, 0(r11); /* Get L1 entry */ \ + rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \ + beq 2f; /* Bail if no table */ \ + rlwimi r12, r10, 22, 20, 29; /* Compute PTE address */ \ + lwz r11, 0(r12); /* Get Linux PTE */ +#endif + +/* + * Interrupt vector entry code + * + * The Book E MMUs are always on so we don't need to handle + * interrupts in real mode as with previous PPC processors. In + * this case we handle interrupts in the kernel virtual address + * space. + * + * Interrupt vectors are dynamically placed relative to the + * interrupt prefix as determined by the address of interrupt_base. + * The interrupt vectors offsets are programmed using the labels + * for each interrupt vector entry. + * + * Interrupt vectors must be aligned on a 16 byte boundary. + * We align on a 32 byte cache line boundary for good measure. + */ + +interrupt_base: + /* Critical Input Interrupt */ + CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception) + + /* Machine Check Interrupt */ + MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception) + + /* Data Storage Interrupt */ + START_EXCEPTION(DataStorage) + NORMAL_EXCEPTION_PROLOG(0x300, DATA_STORAGE) + mfspr r5,SPRN_ESR /* Grab the ESR, save it */ + stw r5,_ESR(r11) + mfspr r4,SPRN_DEAR /* Grab the DEAR, save it */ + stw r4, _DEAR(r11) + andis. 
r10,r5,(ESR_ILK|ESR_DLK)@h + bne 1f + prepare_transfer_to_handler + bl do_page_fault + b interrupt_return +1: + prepare_transfer_to_handler + bl CacheLockingException + b interrupt_return + + /* Instruction Storage Interrupt */ + INSTRUCTION_STORAGE_EXCEPTION + + /* External Input Interrupt */ + EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ) + + /* Alignment Interrupt */ + ALIGNMENT_EXCEPTION + + /* Program Interrupt */ + PROGRAM_EXCEPTION + + /* Floating Point Unavailable Interrupt */ +#ifdef CONFIG_PPC_FPU + FP_UNAVAILABLE_EXCEPTION +#else + EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception) +#endif + + /* System Call Interrupt */ + START_EXCEPTION(SystemCall) + SYSCALL_ENTRY 0xc00 BOOKE_INTERRUPT_SYSCALL SPRN_SRR1 + + /* Auxiliary Processor Unavailable Interrupt */ + EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, unknown_exception) + + /* Decrementer Interrupt */ + DECREMENTER_EXCEPTION + + /* Fixed Internal Timer Interrupt */ + /* TODO: Add FIT support */ + EXCEPTION(0x3100, FIT, FixedIntervalTimer, unknown_exception) + + /* Watchdog Timer Interrupt */ +#ifdef CONFIG_BOOKE_WDT + CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException) +#else + CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception) +#endif + + /* Data TLB Error Interrupt */ + START_EXCEPTION(DataTLBError) + mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ + mfspr r10, SPRN_SPRG_THREAD + stw r11, THREAD_NORMSAVE(0)(r10) +#ifdef CONFIG_KVM_BOOKE_HV +BEGIN_FTR_SECTION + mfspr r11, SPRN_SRR1 +END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) +#endif + stw r12, THREAD_NORMSAVE(1)(r10) + stw r13, THREAD_NORMSAVE(2)(r10) + mfcr r13 + stw r13, THREAD_NORMSAVE(3)(r10) + DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1 +START_BTB_FLUSH_SECTION + mfspr r11, SPRN_SRR1 + andi. r10,r11,MSR_PR + beq 1f + BTB_FLUSH(r10) +1: +END_BTB_FLUSH_SECTION + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, PAGE_OFFSET@h + cmplw 5, r10, r11 + blt 5, 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + + mfspr r12,SPRN_MAS1 /* Set TID to 0 */ + rlwinm r12,r12,0,16,1 + mtspr SPRN_MAS1,r12 + + b 4f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRN_SPRG_THREAD + lwz r11,PGDIR(r11) + +#ifdef CONFIG_PPC_KUAP + mfspr r12, SPRN_MAS1 + rlwinm. r12,r12,0,0x3fff0000 + beq 2f /* KUAP fault */ +#endif + +4: + /* Mask of required permission bits. Note that while we + * do copy ESR:ST to _PAGE_RW position as trying to write + * to an RO page is pretty common, we don't do it with + * _PAGE_DIRTY. We could do it, but it's a fairly rare + * event so I'd rather take the overhead when it happens + * rather than adding an instruction here. We should measure + * whether the whole thing is worth it in the first place + * as we could avoid loading SPRN_ESR completely in the first + * place... + * + * TODO: Is it worth doing that mfspr & rlwimi in the first + * place or can we save a couple of instructions here ? + */ + mfspr r12,SPRN_ESR +#ifdef CONFIG_PTE_64BIT + li r13,_PAGE_PRESENT + oris r13,r13,_PAGE_ACCESSED@h +#else + li r13,_PAGE_PRESENT|_PAGE_ACCESSED +#endif + rlwimi r13,r12,11,29,29 + + FIND_PTE + andc. 
r13,r13,r11 /* Check permission */ + +#ifdef CONFIG_PTE_64BIT +#ifdef CONFIG_SMP + subf r13,r11,r12 /* create false data dep */ + lwzx r13,r11,r13 /* Get upper pte bits */ +#else + lwz r13,0(r12) /* Get upper pte bits */ +#endif +#endif + + bne 2f /* Bail if permission/valid mismatch */ + + /* Jump to common tlb load */ + b finish_tlb_load +2: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ + mfspr r10, SPRN_SPRG_THREAD + lwz r11, THREAD_NORMSAVE(3)(r10) + mtcr r11 + lwz r13, THREAD_NORMSAVE(2)(r10) + lwz r12, THREAD_NORMSAVE(1)(r10) + lwz r11, THREAD_NORMSAVE(0)(r10) + mfspr r10, SPRN_SPRG_RSCRATCH0 + b DataStorage + + /* Instruction TLB Error Interrupt */ + /* + * Nearly the same as above, except we get our + * information from different registers and bailout + * to a different point. + */ + START_EXCEPTION(InstructionTLBError) + mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ + mfspr r10, SPRN_SPRG_THREAD + stw r11, THREAD_NORMSAVE(0)(r10) +#ifdef CONFIG_KVM_BOOKE_HV +BEGIN_FTR_SECTION + mfspr r11, SPRN_SRR1 +END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) +#endif + stw r12, THREAD_NORMSAVE(1)(r10) + stw r13, THREAD_NORMSAVE(2)(r10) + mfcr r13 + stw r13, THREAD_NORMSAVE(3)(r10) + DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1 +START_BTB_FLUSH_SECTION + mfspr r11, SPRN_SRR1 + andi. r10,r11,MSR_PR + beq 1f + BTB_FLUSH(r10) +1: +END_BTB_FLUSH_SECTION + + mfspr r10, SPRN_SRR0 /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, PAGE_OFFSET@h + cmplw 5, r10, r11 + blt 5, 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + + mfspr r12,SPRN_MAS1 /* Set TID to 0 */ + rlwinm r12,r12,0,16,1 + mtspr SPRN_MAS1,r12 + + /* Make up the required permissions for kernel code */ +#ifdef CONFIG_PTE_64BIT + li r13,_PAGE_PRESENT | _PAGE_BAP_SX + oris r13,r13,_PAGE_ACCESSED@h +#else + li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC +#endif + b 4f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRN_SPRG_THREAD + lwz r11,PGDIR(r11) + +#ifdef CONFIG_PPC_KUAP + mfspr r12, SPRN_MAS1 + rlwinm. r12,r12,0,0x3fff0000 + beq 2f /* KUAP fault */ +#endif + + /* Make up the required permissions for user code */ +#ifdef CONFIG_PTE_64BIT + li r13,_PAGE_PRESENT | _PAGE_BAP_UX + oris r13,r13,_PAGE_ACCESSED@h +#else + li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC +#endif + +4: + FIND_PTE + andc. r13,r13,r11 /* Check permission */ + +#ifdef CONFIG_PTE_64BIT +#ifdef CONFIG_SMP + subf r13,r11,r12 /* create false data dep */ + lwzx r13,r11,r13 /* Get upper pte bits */ +#else + lwz r13,0(r12) /* Get upper pte bits */ +#endif +#endif + + bne 2f /* Bail if permission mismatch */ + + /* Jump to common TLB load point */ + b finish_tlb_load + +2: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. 
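
Both miss handlers above converge on the same permission idiom: r13 is loaded with the bits a legal access requires, and "andc. r13,r13,r11" clears every required bit the PTE actually grants, so a non-zero result (the "bne 2f" that follows) means a permission or valid bit is missing. In C terms, a sketch:

	/* Sketch of the andc.-based permission test in the TLB miss handlers. */
	#include <stdint.h>

	static int illus_tlb_perms_ok(uint32_t pte_low, uint32_t required)
	{
		/* andc. r13,r13,r11 == required & ~pte; zero means all granted. */
		return (required & ~pte_low) == 0;
	}

The handlers bail to the heavyweight fault path exactly when this would return 0.
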
+ */ + mfspr r10, SPRN_SPRG_THREAD + lwz r11, THREAD_NORMSAVE(3)(r10) + mtcr r11 + lwz r13, THREAD_NORMSAVE(2)(r10) + lwz r12, THREAD_NORMSAVE(1)(r10) + lwz r11, THREAD_NORMSAVE(0)(r10) + mfspr r10, SPRN_SPRG_RSCRATCH0 + b InstructionStorage + +/* Define SPE handlers for e500v2 */ +#ifdef CONFIG_SPE + /* SPE Unavailable */ + START_EXCEPTION(SPEUnavailable) + NORMAL_EXCEPTION_PROLOG(0x2010, SPE_UNAVAIL) + beq 1f + bl load_up_spe + b fast_exception_return +1: prepare_transfer_to_handler + bl KernelSPE + b interrupt_return +#elif defined(CONFIG_SPE_POSSIBLE) + EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, unknown_exception) +#endif /* CONFIG_SPE_POSSIBLE */ + + /* SPE Floating Point Data */ +#ifdef CONFIG_SPE + START_EXCEPTION(SPEFloatingPointData) + NORMAL_EXCEPTION_PROLOG(0x2030, SPE_FP_DATA) + prepare_transfer_to_handler + bl SPEFloatingPointException + REST_NVGPRS(r1) + b interrupt_return + + /* SPE Floating Point Round */ + START_EXCEPTION(SPEFloatingPointRound) + NORMAL_EXCEPTION_PROLOG(0x2050, SPE_FP_ROUND) + prepare_transfer_to_handler + bl SPEFloatingPointRoundException + REST_NVGPRS(r1) + b interrupt_return +#elif defined(CONFIG_SPE_POSSIBLE) + EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, unknown_exception) + EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, unknown_exception) +#endif /* CONFIG_SPE_POSSIBLE */ + + + /* Performance Monitor */ + EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \ + performance_monitor_exception) + + EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception) + + CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \ + CriticalDoorbell, unknown_exception) + + /* Debug Interrupt */ + DEBUG_DEBUG_EXCEPTION + DEBUG_CRIT_EXCEPTION + + GUEST_DOORBELL_EXCEPTION + + CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \ + unknown_exception) + + /* Hypercall */ + EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception) + + /* Embedded Hypervisor Privilege */ + EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception) + +interrupt_end: + +/* + * Local functions + */ + +/* + * Both the instruction and data TLB miss get to this + * point to load the TLB. + * r10 - tsize encoding (if HUGETLB_PAGE) or available to use + * r11 - TLB (info from Linux PTE) + * r12 - available to use + * r13 - upper bits of PTE (if PTE_64BIT) or available to use + * CR5 - results of addr >= PAGE_OFFSET + * MAS0, MAS1 - loaded with proper value when we get here + * MAS2, MAS3 - will need additional info from Linux PTE + * Upon exit, we reload everything and RFI. + */ +finish_tlb_load: +#ifdef CONFIG_HUGETLB_PAGE + cmpwi 6, r10, 0 /* check for huge page */ + beq 6, finish_tlb_load_cont /* !huge */ + + /* Alas, we need more scratch registers for hugepages */ + mfspr r12, SPRN_SPRG_THREAD + stw r14, THREAD_NORMSAVE(4)(r12) + stw r15, THREAD_NORMSAVE(5)(r12) + stw r16, THREAD_NORMSAVE(6)(r12) + stw r17, THREAD_NORMSAVE(7)(r12) + + /* Get the next_tlbcam_idx percpu var */ +#ifdef CONFIG_SMP + lwz r15, TASK_CPU-THREAD(r12) + lis r14, __per_cpu_offset@h + ori r14, r14, __per_cpu_offset@l + rlwinm r15, r15, 2, 0, 29 + lwzx r16, r14, r15 +#else + li r16, 0 +#endif + lis r17, next_tlbcam_idx@h + ori r17, r17, next_tlbcam_idx@l + add r17, r17, r16 /* r17 = *next_tlbcam_idx */ + lwz r15, 0(r17) /* r15 = next_tlbcam_idx */ + + lis r14, MAS0_TLBSEL(1)@h /* select TLB1 (TLBCAM) */ + rlwimi r14, r15, 16, 4, 15 /* next_tlbcam_idx entry */ + mtspr SPRN_MAS0, r14 + + /* Extract TLB1CFG(NENTRY) */ + mfspr r16, SPRN_TLB1CFG + andi. 
r16, r16, 0xfff + + /* Update next_tlbcam_idx, wrapping when necessary */ + addi r15, r15, 1 + cmpw r15, r16 + blt 100f + lis r14, tlbcam_index@h + ori r14, r14, tlbcam_index@l + lwz r15, 0(r14) +100: stw r15, 0(r17) + + /* + * Calc MAS1_TSIZE from r10 (which has pshift encoded) + * tlb_enc = (pshift - 10). + */ + subi r15, r10, 10 + mfspr r16, SPRN_MAS1 + rlwimi r16, r15, 7, 20, 24 + mtspr SPRN_MAS1, r16 + + /* copy the pshift for use later */ + mr r14, r10 + + /* fall through */ + +#endif /* CONFIG_HUGETLB_PAGE */ + + /* + * We set execute, because we don't have the granularity to + * properly set this at the page level (Linux problem). + * Many of these bits are software only. Bits we don't set + * here we (properly should) assume have the appropriate value. + */ +finish_tlb_load_cont: +#ifdef CONFIG_PTE_64BIT + rlwinm r12, r11, 32-2, 26, 31 /* Move in perm bits */ + andi. r10, r11, _PAGE_DIRTY + bne 1f + li r10, MAS3_SW | MAS3_UW + andc r12, r12, r10 +1: rlwimi r12, r13, 20, 0, 11 /* grab RPN[32:43] */ + rlwimi r12, r11, 20, 12, 19 /* grab RPN[44:51] */ +2: mtspr SPRN_MAS3, r12 +BEGIN_MMU_FTR_SECTION + srwi r10, r13, 12 /* grab RPN[12:31] */ + mtspr SPRN_MAS7, r10 +END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) +#else + li r10, (_PAGE_EXEC | _PAGE_PRESENT) + mr r13, r11 + rlwimi r10, r11, 31, 29, 29 /* extract _PAGE_DIRTY into SW */ + and r12, r11, r10 + andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */ + slwi r10, r12, 1 + or r10, r10, r12 + rlwinm r10, r10, 0, ~_PAGE_EXEC /* Clear SX on user pages */ + iseleq r12, r12, r10 + rlwimi r13, r12, 0, 20, 31 /* Get RPN from PTE, merge w/ perms */ + mtspr SPRN_MAS3, r13 +#endif + + mfspr r12, SPRN_MAS2 +#ifdef CONFIG_PTE_64BIT + rlwimi r12, r11, 32-19, 27, 31 /* extract WIMGE from pte */ +#else + rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ +#endif +#ifdef CONFIG_HUGETLB_PAGE + beq 6, 3f /* don't mask if page isn't huge */ + li r13, 1 + slw r13, r13, r14 + subi r13, r13, 1 + rlwinm r13, r13, 0, 0, 19 /* bottom bits used for WIMGE/etc */ + andc r12, r12, r13 /* mask off ea bits within the page */ +#endif +3: mtspr SPRN_MAS2, r12 + +tlb_write_entry: + tlbwe + + /* Done...restore registers and get out of here. */ + mfspr r10, SPRN_SPRG_THREAD +#ifdef CONFIG_HUGETLB_PAGE + beq 6, 8f /* skip restore for 4k page faults */ + lwz r14, THREAD_NORMSAVE(4)(r10) + lwz r15, THREAD_NORMSAVE(5)(r10) + lwz r16, THREAD_NORMSAVE(6)(r10) + lwz r17, THREAD_NORMSAVE(7)(r10) +#endif +8: lwz r11, THREAD_NORMSAVE(3)(r10) + mtcr r11 + lwz r13, THREAD_NORMSAVE(2)(r10) + lwz r12, THREAD_NORMSAVE(1)(r10) + lwz r11, THREAD_NORMSAVE(0)(r10) + mfspr r10, SPRN_SPRG_RSCRATCH0 + rfi /* Force context change */ + +#ifdef CONFIG_SPE +/* Note that the SPE support is closely modeled after the AltiVec + * support. Changes to one are likely to be applicable to the + * other! */ +_GLOBAL(load_up_spe) +/* + * Disable SPE for the task which had SPE previously, + * and save its SPE registers in its thread_struct. + * Enables SPE for use in the kernel on return. + * On SMP we know the SPE units are free, since we give it up every + * switch. 
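
Note the hugepage sizing above: the comment gives tlb_enc = pshift - 10, and the rlwimi places that value into the MAS1 TSIZE field. Unpacked into C (bit positions are derived from the rlwimi operands above, so treat them as an illustration rather than a spec):

	/* Sketch of the MAS1 TSIZE update in finish_tlb_load (illustrative). */
	#include <stdint.h>

	static uint32_t illus_mas1_set_tsize(uint32_t mas1, unsigned int pshift)
	{
		uint32_t tsize = pshift - 10;	/* subi r15, r10, 10 */
		uint32_t mask = 0x1fu << 7;	/* IBM bits 20-24 of MAS1 */

		/* rlwimi r16, r15, 7, 20, 24: rotate left 7, insert field. */
		return (mas1 & ~mask) | ((tsize << 7) & mask);
	}
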
-- Kumar + */ + mfmsr r5 + oris r5,r5,MSR_SPE@h + mtmsr r5 /* enable use of SPE now */ + isync + /* enable use of SPE after return */ + oris r9,r9,MSR_SPE@h + mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ + li r4,1 + li r10,THREAD_ACC + stw r4,THREAD_USED_SPE(r5) + evlddx evr4,r10,r5 + evmra evr4,evr4 + REST_32EVRS(0,r10,r5,THREAD_EVR0) + blr + +/* + * SPE unavailable trap from kernel - print a message, but let + * the task use SPE in the kernel until it returns to user mode. + */ +KernelSPE: + lwz r3,_MSR(r1) + oris r3,r3,MSR_SPE@h + stw r3,_MSR(r1) /* enable use of SPE after return */ +#ifdef CONFIG_PRINTK + lis r3,87f@h + ori r3,r3,87f@l + mr r4,r2 /* current */ + lwz r5,_NIP(r1) + bl _printk +#endif + b interrupt_return +#ifdef CONFIG_PRINTK +87: .string "SPE used in kernel (task=%p, pc=%x) \n" +#endif + .align 4,0 + +#endif /* CONFIG_SPE */ + +/* + * Translate the effec addr in r3 to phys addr. The phys addr will be put + * into r3(higher 32bit) and r4(lower 32bit) + */ +get_phys_addr: + mfmsr r8 + mfspr r9,SPRN_PID + rlwinm r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */ + rlwimi r9,r8,28,0x00000001 /* turn MSR[DS] into MAS6[SAS] */ + mtspr SPRN_MAS6,r9 + + tlbsx 0,r3 /* must succeed */ + + mfspr r8,SPRN_MAS1 + mfspr r12,SPRN_MAS3 + rlwinm r9,r8,25,0x1f /* r9 = log2(page size) */ + li r10,1024 + slw r10,r10,r9 /* r10 = page size */ + addi r10,r10,-1 + and r11,r3,r10 /* r11 = page offset */ + andc r4,r12,r10 /* r4 = page base */ + or r4,r4,r11 /* r4 = devtree phys addr */ +#ifdef CONFIG_PHYS_64BIT + mfspr r3,SPRN_MAS7 +#endif + blr + +/* + * Global functions + */ + +#ifdef CONFIG_E500 +#ifndef CONFIG_PPC_E500MC +/* Adjust or setup IVORs for e500v1/v2 */ +_GLOBAL(__setup_e500_ivors) + li r3,DebugCrit@l + mtspr SPRN_IVOR15,r3 + li r3,SPEUnavailable@l + mtspr SPRN_IVOR32,r3 + li r3,SPEFloatingPointData@l + mtspr SPRN_IVOR33,r3 + li r3,SPEFloatingPointRound@l + mtspr SPRN_IVOR34,r3 + li r3,PerformanceMonitor@l + mtspr SPRN_IVOR35,r3 + sync + blr +#else +/* Adjust or setup IVORs for e500mc */ +_GLOBAL(__setup_e500mc_ivors) + li r3,DebugDebug@l + mtspr SPRN_IVOR15,r3 + li r3,PerformanceMonitor@l + mtspr SPRN_IVOR35,r3 + li r3,Doorbell@l + mtspr SPRN_IVOR36,r3 + li r3,CriticalDoorbell@l + mtspr SPRN_IVOR37,r3 + sync + blr + +/* setup ehv ivors for */ +_GLOBAL(__setup_ehv_ivors) + li r3,GuestDoorbell@l + mtspr SPRN_IVOR38,r3 + li r3,CriticalGuestDoorbell@l + mtspr SPRN_IVOR39,r3 + li r3,Hypercall@l + mtspr SPRN_IVOR40,r3 + li r3,Ehvpriv@l + mtspr SPRN_IVOR41,r3 + sync + blr +#endif /* CONFIG_PPC_E500MC */ +#endif /* CONFIG_E500 */ + +#ifdef CONFIG_SPE +/* + * extern void __giveup_spe(struct task_struct *prev) + * + */ +_GLOBAL(__giveup_spe) + addi r3,r3,THREAD /* want THREAD of task */ + lwz r5,PT_REGS(r3) + cmpi 0,r5,0 + SAVE_32EVRS(0, r4, r3, THREAD_EVR0) + evxor evr6, evr6, evr6 /* clear out evr6 */ + evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ + li r4,THREAD_ACC + evstddx evr6, r4, r3 /* save off accumulator */ + beq 1f + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + lis r3,MSR_SPE@h + andc r4,r4,r3 /* disable SPE for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: + blr +#endif /* CONFIG_SPE */ + +/* + * extern void abort(void) + * + * At present, this routine just applies a system reset. 
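
get_phys_addr above is the helper the whole entry path leans on: tlbsx finds the matching TLB entry, the page size is recovered from MAS1, and the physical address is the page base from MAS3 merged with the in-page offset. A C sketch of the 32-bit case (the field extraction simply mirrors the rlwinm/slw sequence above; CONFIG_PHYS_64BIT additionally returns MAS7 as the high word):

	/* Sketch of get_phys_addr's address math, 32-bit phys (illustrative). */
	#include <stdint.h>

	static uint32_t illus_ea_to_phys(uint32_t ea, uint32_t mas1, uint32_t mas3)
	{
		/* rlwinm r9,r8,25,0x1f: rotate left 25 == right 7, keep 5 bits. */
		uint32_t sz_field = (mas1 >> 7) & 0x1f;
		uint32_t size = 1024u << sz_field;	/* li r10,1024 ; slw */
		uint32_t mask = size - 1;

		return (mas3 & ~mask) | (ea & mask);	/* page base | offset */
	}
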
+ */ +_GLOBAL(abort) + li r13,0 + mtspr SPRN_DBCR0,r13 /* disable all debug events */ + isync + mfmsr r13 + ori r13,r13,MSR_DE@l /* Enable Debug Events */ + mtmsr r13 + isync + mfspr r13,SPRN_DBCR0 + lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h + mtspr SPRN_DBCR0,r13 + isync + +#ifdef CONFIG_SMP +/* When we get here, r24 needs to hold the CPU # */ + .globl __secondary_start +__secondary_start: + LOAD_REG_ADDR_PIC(r3, tlbcam_index) + lwz r3,0(r3) + mtctr r3 + li r26,0 /* r26 safe? */ + + bl switch_to_as1 + mr r27,r3 /* tlb entry */ + /* Load each CAM entry */ +1: mr r3,r26 + bl loadcam_entry + addi r26,r26,1 + bdnz 1b + mr r3,r27 /* tlb entry */ + LOAD_REG_ADDR_PIC(r4, memstart_addr) + lwz r4,0(r4) + mr r5,r25 /* phys kernel start */ + rlwinm r5,r5,0,~0x3ffffff /* aligned 64M */ + subf r4,r5,r4 /* memstart_addr - phys kernel start */ + lis r7,KERNELBASE@h + ori r7,r7,KERNELBASE@l + cmpw r20,r7 /* if kernstart_virt_addr != KERNELBASE, randomized */ + beq 2f + li r4,0 +2: li r5,0 /* no device tree */ + li r6,0 /* not boot cpu */ + bl restore_to_as0 + + + lis r3,__secondary_hold_acknowledge@h + ori r3,r3,__secondary_hold_acknowledge@l + stw r24,0(r3) + + li r3,0 + mr r4,r24 /* Why? */ + bl call_setup_cpu + + /* get current's stack and current */ + lis r2,secondary_current@ha + lwz r2,secondary_current@l(r2) + lwz r1,TASK_STACK(r2) + + /* stack */ + addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD + li r0,0 + stw r0,0(r1) + + /* ptr to current thread */ + addi r4,r2,THREAD /* address of our thread_struct */ + mtspr SPRN_SPRG_THREAD,r4 + + /* Setup the defaults for TLB entries */ + li r4,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l + mtspr SPRN_MAS4,r4 + + /* Jump to start_secondary */ + lis r4,MSR_KERNEL@h + ori r4,r4,MSR_KERNEL@l + lis r3,start_secondary@h + ori r3,r3,start_secondary@l + mtspr SPRN_SRR0,r3 + mtspr SPRN_SRR1,r4 + sync + rfi + sync + + .globl __secondary_hold_acknowledge +__secondary_hold_acknowledge: + .long -1 +#endif + +/* + * Create a 64M tlb by address and entry + * r3 - entry + * r4 - virtual address + * r5/r6 - physical address + */ +_GLOBAL(create_kaslr_tlb_entry) + lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */ + mtspr SPRN_MAS0,r7 /* Write MAS0 */ + + lis r3,(MAS1_VALID|MAS1_IPROT)@h + ori r3,r3,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l + mtspr SPRN_MAS1,r3 /* Write MAS1 */ + + lis r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h + ori r3,r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l + and r3,r3,r4 + ori r3,r3,MAS2_M_IF_NEEDED@l + mtspr SPRN_MAS2,r3 /* Write MAS2(EPN) */ + +#ifdef CONFIG_PHYS_64BIT + ori r8,r6,(MAS3_SW|MAS3_SR|MAS3_SX) + mtspr SPRN_MAS3,r8 /* Write MAS3(RPN) */ + mtspr SPRN_MAS7,r5 +#else + ori r8,r5,(MAS3_SW|MAS3_SR|MAS3_SX) + mtspr SPRN_MAS3,r8 /* Write MAS3(RPN) */ +#endif + + tlbwe /* Write TLB */ + isync + sync + blr + +/* + * Return to the start of the relocated kernel and run again + * r3 - virtual address of fdt + * r4 - entry of the kernel + */ +_GLOBAL(reloc_kernel_entry) + mfmsr r7 + rlwinm r7, r7, 0, ~(MSR_IS | MSR_DS) + + mtspr SPRN_SRR0,r4 + mtspr SPRN_SRR1,r7 + rfi + +/* + * Create a tlb entry with the same effective and physical address as + * the tlb entry used by the current running code. But set the TS to 1. + * Then switch to the address space 1. It will return with the r3 set to + * the ESEL of the new created tlb. + */ +_GLOBAL(switch_to_as1) + mflr r5 + + /* Find a entry not used */ + mfspr r3,SPRN_TLB1CFG + andi. 
r3,r3,0xfff + mfspr r4,SPRN_PID + rlwinm r4,r4,16,0x3fff0000 /* turn PID into MAS6[SPID] */ + mtspr SPRN_MAS6,r4 +1: lis r4,0x1000 /* Set MAS0(TLBSEL) = 1 */ + addi r3,r3,-1 + rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ + mtspr SPRN_MAS0,r4 + tlbre + mfspr r4,SPRN_MAS1 + andis. r4,r4,MAS1_VALID@h + bne 1b + + /* Get the tlb entry used by the current running code */ + bcl 20,31,$+4 +0: mflr r4 + tlbsx 0,r4 + + mfspr r4,SPRN_MAS1 + ori r4,r4,MAS1_TS /* Set the TS = 1 */ + mtspr SPRN_MAS1,r4 + + mfspr r4,SPRN_MAS0 + rlwinm r4,r4,0,~MAS0_ESEL_MASK + rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ + mtspr SPRN_MAS0,r4 + tlbwe + isync + sync + + mfmsr r4 + ori r4,r4,MSR_IS | MSR_DS + mtspr SPRN_SRR0,r5 + mtspr SPRN_SRR1,r4 + sync + rfi + +/* + * Restore to the address space 0 and also invalidate the tlb entry created + * by switch_to_as1. + * r3 - the tlb entry which should be invalidated + * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0) + * r5 - device tree virtual address. If r4 is 0, r5 is ignored. + * r6 - boot cpu +*/ +_GLOBAL(restore_to_as0) + mflr r0 + + bcl 20,31,$+4 +0: mflr r9 + addi r9,r9,1f - 0b + + /* + * We may map the PAGE_OFFSET in AS0 to a different physical address, + * so we need calculate the right jump and device tree address based + * on the offset passed by r4. + */ + add r9,r9,r4 + add r5,r5,r4 + add r0,r0,r4 + +2: mfmsr r7 + li r8,(MSR_IS | MSR_DS) + andc r7,r7,r8 + + mtspr SPRN_SRR0,r9 + mtspr SPRN_SRR1,r7 + sync + rfi + + /* Invalidate the temporary tlb entry for AS1 */ +1: lis r9,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r9,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ + mtspr SPRN_MAS0,r9 + tlbre + mfspr r9,SPRN_MAS1 + rlwinm r9,r9,0,2,31 /* Clear MAS1 Valid and IPPROT */ + mtspr SPRN_MAS1,r9 + tlbwe + isync + + cmpwi r4,0 + cmpwi cr1,r6,0 + cror eq,4*cr1+eq,eq + bne 3f /* offset != 0 && is_boot_cpu */ + mtlr r0 + blr + + /* + * The PAGE_OFFSET will map to a different physical address, + * jump to _start to do another relocation again. + */ +3: mr r3,r5 + bl _start diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S deleted file mode 100644 index f0db4f52bc00..000000000000 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ /dev/null @@ -1,1227 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Kernel execution entry point code. - * - * Copyright (c) 1995-1996 Gary Thomas - * Initial PowerPC version. - * Copyright (c) 1996 Cort Dougan - * Rewritten for PReP - * Copyright (c) 1996 Paul Mackerras - * Low-level exception handers, MMU support, and rewrite. - * Copyright (c) 1997 Dan Malek - * PowerPC 8xx modifications. - * Copyright (c) 1998-1999 TiVo, Inc. - * PowerPC 403GCX modifications. - * Copyright (c) 1999 Grant Erickson - * PowerPC 403GCX/405GP modifications. - * Copyright 2000 MontaVista Software Inc. - * PPC405 modifications - * PowerPC 403GCX/405GP modifications. - * Author: MontaVista Software, Inc. - * frank_rowand@mvista.com or source@mvista.com - * debbie_chu@mvista.com - * Copyright 2002-2004 MontaVista Software, Inc. 
- * PowerPC 44x support, Matt Porter - * Copyright 2004 Freescale Semiconductor, Inc - * PowerPC e500 modifications, Kumar Gala - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "head_booke.h" - -/* As with the other PowerPC ports, it is expected that when code - * execution begins here, the following registers contain valid, yet - * optional, information: - * - * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) - * r4 - Starting address of the init RAM disk - * r5 - Ending address of the init RAM disk - * r6 - Start of kernel command line string (e.g. "mem=128") - * r7 - End of kernel command line string - * - */ - __HEAD -_GLOBAL(_stext); -_GLOBAL(_start); - /* - * Reserve a word at a fixed location to store the address - * of abatron_pteptrs - */ - nop - - /* Translate device tree address to physical, save in r30/r31 */ - bl get_phys_addr - mr r30,r3 - mr r31,r4 - - li r25,0 /* phys kernel start (low) */ - li r24,0 /* CPU number */ - li r23,0 /* phys kernel start (high) */ - -#ifdef CONFIG_RELOCATABLE - LOAD_REG_ADDR_PIC(r3, _stext) /* Get our current runtime base */ - - /* Translate _stext address to physical, save in r23/r25 */ - bl get_phys_addr - mr r23,r3 - mr r25,r4 - - bcl 20,31,$+4 -0: mflr r8 - addis r3,r8,(is_second_reloc - 0b)@ha - lwz r19,(is_second_reloc - 0b)@l(r3) - - /* Check if this is the second relocation. */ - cmpwi r19,1 - bne 1f - - /* - * For the second relocation, we already get the real memstart_addr - * from device tree. So we will map PAGE_OFFSET to memstart_addr, - * then the virtual address of start kernel should be: - * PAGE_OFFSET + (kernstart_addr - memstart_addr) - * Since the offset between kernstart_addr and memstart_addr should - * never be beyond 1G, so we can just use the lower 32bit of them - * for the calculation. - */ - lis r3,PAGE_OFFSET@h - - addis r4,r8,(kernstart_addr - 0b)@ha - addi r4,r4,(kernstart_addr - 0b)@l - lwz r5,4(r4) - - addis r6,r8,(memstart_addr - 0b)@ha - addi r6,r6,(memstart_addr - 0b)@l - lwz r7,4(r6) - - subf r5,r7,r5 - add r3,r3,r5 - b 2f - -1: - /* - * We have the runtime (virtual) address of our base. - * We calculate our shift of offset from a 64M page. - * We could map the 64M page we belong to at PAGE_OFFSET and - * get going from there. - */ - lis r4,KERNELBASE@h - ori r4,r4,KERNELBASE@l - rlwinm r6,r25,0,0x3ffffff /* r6 = PHYS_START % 64M */ - rlwinm r5,r4,0,0x3ffffff /* r5 = KERNELBASE % 64M */ - subf r3,r5,r6 /* r3 = r6 - r5 */ - add r3,r4,r3 /* Required Virtual Address */ - -2: bl relocate - - /* - * For the second relocation, we already set the right tlb entries - * for the kernel space, so skip the code in fsl_booke_entry_mapping.S - */ - cmpwi r19,1 - beq set_ivor -#endif - -/* We try to not make any assumptions about how the boot loader - * setup or used the TLBs. We invalidate all mappings from the - * boot loader and load a single entry in TLB1[0] to map the - * first 64M of kernel memory. Any boot info passed from the - * bootloader needs to live in this first 64M. - * - * Requirement on bootloader: - * - The page we're executing in needs to reside in TLB1 and - * have IPROT=1. If not an invalidate broadcast could - * evict the entry we're currently executing in. 
- * - * r3 = Index of TLB1 were executing in - * r4 = Current MSR[IS] - * r5 = Index of TLB1 temp mapping - * - * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0] - * if needed - */ - -_GLOBAL(__early_start) - LOAD_REG_ADDR_PIC(r20, kernstart_virt_addr) - lwz r20,0(r20) - -#define ENTRY_MAPPING_BOOT_SETUP -#include "fsl_booke_entry_mapping.S" -#undef ENTRY_MAPPING_BOOT_SETUP - -set_ivor: - /* Establish the interrupt vector offsets */ - SET_IVOR(0, CriticalInput); - SET_IVOR(1, MachineCheck); - SET_IVOR(2, DataStorage); - SET_IVOR(3, InstructionStorage); - SET_IVOR(4, ExternalInput); - SET_IVOR(5, Alignment); - SET_IVOR(6, Program); - SET_IVOR(7, FloatingPointUnavailable); - SET_IVOR(8, SystemCall); - SET_IVOR(9, AuxillaryProcessorUnavailable); - SET_IVOR(10, Decrementer); - SET_IVOR(11, FixedIntervalTimer); - SET_IVOR(12, WatchdogTimer); - SET_IVOR(13, DataTLBError); - SET_IVOR(14, InstructionTLBError); - SET_IVOR(15, DebugCrit); - - /* Establish the interrupt vector base */ - lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ - mtspr SPRN_IVPR,r4 - - /* Setup the defaults for TLB entries */ - li r2,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l - mtspr SPRN_MAS4, r2 - -#if !defined(CONFIG_BDI_SWITCH) - /* - * The Abatron BDI JTAG debugger does not tolerate others - * mucking with the debug registers. - */ - lis r2,DBCR0_IDM@h - mtspr SPRN_DBCR0,r2 - isync - /* clear any residual debug events */ - li r2,-1 - mtspr SPRN_DBSR,r2 -#endif - -#ifdef CONFIG_SMP - /* Check to see if we're the second processor, and jump - * to the secondary_start code if so - */ - LOAD_REG_ADDR_PIC(r24, boot_cpuid) - lwz r24, 0(r24) - cmpwi r24, -1 - mfspr r24,SPRN_PIR - bne __secondary_start -#endif - - /* - * This is where the main kernel code starts. - */ - - /* ptr to current */ - lis r2,init_task@h - ori r2,r2,init_task@l - - /* ptr to current thread */ - addi r4,r2,THREAD /* init task's THREAD */ - mtspr SPRN_SPRG_THREAD,r4 - - /* stack */ - lis r1,init_thread_union@h - ori r1,r1,init_thread_union@l - li r0,0 - stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) - -#ifdef CONFIG_SMP - stw r24, TASK_CPU(r2) -#endif - - bl early_init - -#ifdef CONFIG_KASAN - bl kasan_early_init -#endif -#ifdef CONFIG_RELOCATABLE - mr r3,r30 - mr r4,r31 -#ifdef CONFIG_PHYS_64BIT - mr r5,r23 - mr r6,r25 -#else - mr r5,r25 -#endif - bl relocate_init -#endif - -#ifdef CONFIG_DYNAMIC_MEMSTART - lis r3,kernstart_addr@ha - la r3,kernstart_addr@l(r3) -#ifdef CONFIG_PHYS_64BIT - stw r23,0(r3) - stw r25,4(r3) -#else - stw r25,0(r3) -#endif -#endif - -/* - * Decide what sort of machine this is and initialize the MMU. 
- */ - mr r3,r30 - mr r4,r31 - bl machine_init - bl MMU_init - - /* Setup PTE pointers for the Abatron bdiGDB */ - lis r6, swapper_pg_dir@h - ori r6, r6, swapper_pg_dir@l - lis r5, abatron_pteptrs@h - ori r5, r5, abatron_pteptrs@l - lis r3, kernstart_virt_addr@ha - lwz r4, kernstart_virt_addr@l(r3) - stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ - stw r6, 0(r5) - - /* Let's move on */ - lis r4,start_kernel@h - ori r4,r4,start_kernel@l - lis r3,MSR_KERNEL@h - ori r3,r3,MSR_KERNEL@l - mtspr SPRN_SRR0,r4 - mtspr SPRN_SRR1,r3 - rfi /* change context and jump to start_kernel */ - -/* Macros to hide the PTE size differences - * - * FIND_PTE -- walks the page tables given EA & pgdir pointer - * r10 -- EA of fault - * r11 -- PGDIR pointer - * r12 -- free - * label 2: is the bailout case - * - * if we find the pte (fall through): - * r11 is low pte word - * r12 is pointer to the pte - * r10 is the pshift from the PGD, if we're a hugepage - */ -#ifdef CONFIG_PTE_64BIT -#ifdef CONFIG_HUGETLB_PAGE -#define FIND_PTE \ - rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ - lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ - rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \ - blt 1000f; /* Normal non-huge page */ \ - beq 2f; /* Bail if no table */ \ - oris r11, r11, PD_HUGE@h; /* Put back address bit */ \ - andi. r10, r11, HUGEPD_SHIFT_MASK@l; /* extract size field */ \ - xor r12, r10, r11; /* drop size bits from pointer */ \ - b 1001f; \ -1000: rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ - li r10, 0; /* clear r10 */ \ -1001: lwz r11, 4(r12); /* Get pte entry */ -#else -#define FIND_PTE \ - rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ - lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ - rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \ - beq 2f; /* Bail if no table */ \ - rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ - lwz r11, 4(r12); /* Get pte entry */ -#endif /* HUGEPAGE */ -#else /* !PTE_64BIT */ -#define FIND_PTE \ - rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \ - lwz r11, 0(r11); /* Get L1 entry */ \ - rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \ - beq 2f; /* Bail if no table */ \ - rlwimi r12, r10, 22, 20, 29; /* Compute PTE address */ \ - lwz r11, 0(r12); /* Get Linux PTE */ -#endif - -/* - * Interrupt vector entry code - * - * The Book E MMUs are always on so we don't need to handle - * interrupts in real mode as with previous PPC processors. In - * this case we handle interrupts in the kernel virtual address - * space. - * - * Interrupt vectors are dynamically placed relative to the - * interrupt prefix as determined by the address of interrupt_base. - * The interrupt vectors offsets are programmed using the labels - * for each interrupt vector entry. - * - * Interrupt vectors must be aligned on a 16 byte boundary. - * We align on a 32 byte cache line boundary for good measure. - */ - -interrupt_base: - /* Critical Input Interrupt */ - CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception) - - /* Machine Check Interrupt */ - MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception) - - /* Data Storage Interrupt */ - START_EXCEPTION(DataStorage) - NORMAL_EXCEPTION_PROLOG(0x300, DATA_STORAGE) - mfspr r5,SPRN_ESR /* Grab the ESR, save it */ - stw r5,_ESR(r11) - mfspr r4,SPRN_DEAR /* Grab the DEAR, save it */ - stw r4, _DEAR(r11) - andis. 
r10,r5,(ESR_ILK|ESR_DLK)@h - bne 1f - prepare_transfer_to_handler - bl do_page_fault - b interrupt_return -1: - prepare_transfer_to_handler - bl CacheLockingException - b interrupt_return - - /* Instruction Storage Interrupt */ - INSTRUCTION_STORAGE_EXCEPTION - - /* External Input Interrupt */ - EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ) - - /* Alignment Interrupt */ - ALIGNMENT_EXCEPTION - - /* Program Interrupt */ - PROGRAM_EXCEPTION - - /* Floating Point Unavailable Interrupt */ -#ifdef CONFIG_PPC_FPU - FP_UNAVAILABLE_EXCEPTION -#else - EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception) -#endif - - /* System Call Interrupt */ - START_EXCEPTION(SystemCall) - SYSCALL_ENTRY 0xc00 BOOKE_INTERRUPT_SYSCALL SPRN_SRR1 - - /* Auxiliary Processor Unavailable Interrupt */ - EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, unknown_exception) - - /* Decrementer Interrupt */ - DECREMENTER_EXCEPTION - - /* Fixed Internal Timer Interrupt */ - /* TODO: Add FIT support */ - EXCEPTION(0x3100, FIT, FixedIntervalTimer, unknown_exception) - - /* Watchdog Timer Interrupt */ -#ifdef CONFIG_BOOKE_WDT - CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException) -#else - CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception) -#endif - - /* Data TLB Error Interrupt */ - START_EXCEPTION(DataTLBError) - mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ - mfspr r10, SPRN_SPRG_THREAD - stw r11, THREAD_NORMSAVE(0)(r10) -#ifdef CONFIG_KVM_BOOKE_HV -BEGIN_FTR_SECTION - mfspr r11, SPRN_SRR1 -END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) -#endif - stw r12, THREAD_NORMSAVE(1)(r10) - stw r13, THREAD_NORMSAVE(2)(r10) - mfcr r13 - stw r13, THREAD_NORMSAVE(3)(r10) - DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1 -START_BTB_FLUSH_SECTION - mfspr r11, SPRN_SRR1 - andi. r10,r11,MSR_PR - beq 1f - BTB_FLUSH(r10) -1: -END_BTB_FLUSH_SECTION - mfspr r10, SPRN_DEAR /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, PAGE_OFFSET@h - cmplw 5, r10, r11 - blt 5, 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - - mfspr r12,SPRN_MAS1 /* Set TID to 0 */ - rlwinm r12,r12,0,16,1 - mtspr SPRN_MAS1,r12 - - b 4f - - /* Get the PGD for the current thread */ -3: - mfspr r11,SPRN_SPRG_THREAD - lwz r11,PGDIR(r11) - -#ifdef CONFIG_PPC_KUAP - mfspr r12, SPRN_MAS1 - rlwinm. r12,r12,0,0x3fff0000 - beq 2f /* KUAP fault */ -#endif - -4: - /* Mask of required permission bits. Note that while we - * do copy ESR:ST to _PAGE_RW position as trying to write - * to an RO page is pretty common, we don't do it with - * _PAGE_DIRTY. We could do it, but it's a fairly rare - * event so I'd rather take the overhead when it happens - * rather than adding an instruction here. We should measure - * whether the whole thing is worth it in the first place - * as we could avoid loading SPRN_ESR completely in the first - * place... - * - * TODO: Is it worth doing that mfspr & rlwimi in the first - * place or can we save a couple of instructions here ? - */ - mfspr r12,SPRN_ESR -#ifdef CONFIG_PTE_64BIT - li r13,_PAGE_PRESENT - oris r13,r13,_PAGE_ACCESSED@h -#else - li r13,_PAGE_PRESENT|_PAGE_ACCESSED -#endif - rlwimi r13,r12,11,29,29 - - FIND_PTE - andc. 
r13,r13,r11 /* Check permission */ - -#ifdef CONFIG_PTE_64BIT -#ifdef CONFIG_SMP - subf r13,r11,r12 /* create false data dep */ - lwzx r13,r11,r13 /* Get upper pte bits */ -#else - lwz r13,0(r12) /* Get upper pte bits */ -#endif -#endif - - bne 2f /* Bail if permission/valid mismatch */ - - /* Jump to common tlb load */ - b finish_tlb_load -2: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ - mfspr r10, SPRN_SPRG_THREAD - lwz r11, THREAD_NORMSAVE(3)(r10) - mtcr r11 - lwz r13, THREAD_NORMSAVE(2)(r10) - lwz r12, THREAD_NORMSAVE(1)(r10) - lwz r11, THREAD_NORMSAVE(0)(r10) - mfspr r10, SPRN_SPRG_RSCRATCH0 - b DataStorage - - /* Instruction TLB Error Interrupt */ - /* - * Nearly the same as above, except we get our - * information from different registers and bailout - * to a different point. - */ - START_EXCEPTION(InstructionTLBError) - mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ - mfspr r10, SPRN_SPRG_THREAD - stw r11, THREAD_NORMSAVE(0)(r10) -#ifdef CONFIG_KVM_BOOKE_HV -BEGIN_FTR_SECTION - mfspr r11, SPRN_SRR1 -END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) -#endif - stw r12, THREAD_NORMSAVE(1)(r10) - stw r13, THREAD_NORMSAVE(2)(r10) - mfcr r13 - stw r13, THREAD_NORMSAVE(3)(r10) - DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1 -START_BTB_FLUSH_SECTION - mfspr r11, SPRN_SRR1 - andi. r10,r11,MSR_PR - beq 1f - BTB_FLUSH(r10) -1: -END_BTB_FLUSH_SECTION - - mfspr r10, SPRN_SRR0 /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, PAGE_OFFSET@h - cmplw 5, r10, r11 - blt 5, 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - - mfspr r12,SPRN_MAS1 /* Set TID to 0 */ - rlwinm r12,r12,0,16,1 - mtspr SPRN_MAS1,r12 - - /* Make up the required permissions for kernel code */ -#ifdef CONFIG_PTE_64BIT - li r13,_PAGE_PRESENT | _PAGE_BAP_SX - oris r13,r13,_PAGE_ACCESSED@h -#else - li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC -#endif - b 4f - - /* Get the PGD for the current thread */ -3: - mfspr r11,SPRN_SPRG_THREAD - lwz r11,PGDIR(r11) - -#ifdef CONFIG_PPC_KUAP - mfspr r12, SPRN_MAS1 - rlwinm. r12,r12,0,0x3fff0000 - beq 2f /* KUAP fault */ -#endif - - /* Make up the required permissions for user code */ -#ifdef CONFIG_PTE_64BIT - li r13,_PAGE_PRESENT | _PAGE_BAP_UX - oris r13,r13,_PAGE_ACCESSED@h -#else - li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC -#endif - -4: - FIND_PTE - andc. r13,r13,r11 /* Check permission */ - -#ifdef CONFIG_PTE_64BIT -#ifdef CONFIG_SMP - subf r13,r11,r12 /* create false data dep */ - lwzx r13,r11,r13 /* Get upper pte bits */ -#else - lwz r13,0(r12) /* Get upper pte bits */ -#endif -#endif - - bne 2f /* Bail if permission mismatch */ - - /* Jump to common TLB load point */ - b finish_tlb_load - -2: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. 
- */ - mfspr r10, SPRN_SPRG_THREAD - lwz r11, THREAD_NORMSAVE(3)(r10) - mtcr r11 - lwz r13, THREAD_NORMSAVE(2)(r10) - lwz r12, THREAD_NORMSAVE(1)(r10) - lwz r11, THREAD_NORMSAVE(0)(r10) - mfspr r10, SPRN_SPRG_RSCRATCH0 - b InstructionStorage - -/* Define SPE handlers for e500v2 */ -#ifdef CONFIG_SPE - /* SPE Unavailable */ - START_EXCEPTION(SPEUnavailable) - NORMAL_EXCEPTION_PROLOG(0x2010, SPE_UNAVAIL) - beq 1f - bl load_up_spe - b fast_exception_return -1: prepare_transfer_to_handler - bl KernelSPE - b interrupt_return -#elif defined(CONFIG_SPE_POSSIBLE) - EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, unknown_exception) -#endif /* CONFIG_SPE_POSSIBLE */ - - /* SPE Floating Point Data */ -#ifdef CONFIG_SPE - START_EXCEPTION(SPEFloatingPointData) - NORMAL_EXCEPTION_PROLOG(0x2030, SPE_FP_DATA) - prepare_transfer_to_handler - bl SPEFloatingPointException - REST_NVGPRS(r1) - b interrupt_return - - /* SPE Floating Point Round */ - START_EXCEPTION(SPEFloatingPointRound) - NORMAL_EXCEPTION_PROLOG(0x2050, SPE_FP_ROUND) - prepare_transfer_to_handler - bl SPEFloatingPointRoundException - REST_NVGPRS(r1) - b interrupt_return -#elif defined(CONFIG_SPE_POSSIBLE) - EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, unknown_exception) - EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, unknown_exception) -#endif /* CONFIG_SPE_POSSIBLE */ - - - /* Performance Monitor */ - EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \ - performance_monitor_exception) - - EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception) - - CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \ - CriticalDoorbell, unknown_exception) - - /* Debug Interrupt */ - DEBUG_DEBUG_EXCEPTION - DEBUG_CRIT_EXCEPTION - - GUEST_DOORBELL_EXCEPTION - - CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \ - unknown_exception) - - /* Hypercall */ - EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception) - - /* Embedded Hypervisor Privilege */ - EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception) - -interrupt_end: - -/* - * Local functions - */ - -/* - * Both the instruction and data TLB miss get to this - * point to load the TLB. - * r10 - tsize encoding (if HUGETLB_PAGE) or available to use - * r11 - TLB (info from Linux PTE) - * r12 - available to use - * r13 - upper bits of PTE (if PTE_64BIT) or available to use - * CR5 - results of addr >= PAGE_OFFSET - * MAS0, MAS1 - loaded with proper value when we get here - * MAS2, MAS3 - will need additional info from Linux PTE - * Upon exit, we reload everything and RFI. - */ -finish_tlb_load: -#ifdef CONFIG_HUGETLB_PAGE - cmpwi 6, r10, 0 /* check for huge page */ - beq 6, finish_tlb_load_cont /* !huge */ - - /* Alas, we need more scratch registers for hugepages */ - mfspr r12, SPRN_SPRG_THREAD - stw r14, THREAD_NORMSAVE(4)(r12) - stw r15, THREAD_NORMSAVE(5)(r12) - stw r16, THREAD_NORMSAVE(6)(r12) - stw r17, THREAD_NORMSAVE(7)(r12) - - /* Get the next_tlbcam_idx percpu var */ -#ifdef CONFIG_SMP - lwz r15, TASK_CPU-THREAD(r12) - lis r14, __per_cpu_offset@h - ori r14, r14, __per_cpu_offset@l - rlwinm r15, r15, 2, 0, 29 - lwzx r16, r14, r15 -#else - li r16, 0 -#endif - lis r17, next_tlbcam_idx@h - ori r17, r17, next_tlbcam_idx@l - add r17, r17, r16 /* r17 = *next_tlbcam_idx */ - lwz r15, 0(r17) /* r15 = next_tlbcam_idx */ - - lis r14, MAS0_TLBSEL(1)@h /* select TLB1 (TLBCAM) */ - rlwimi r14, r15, 16, 4, 15 /* next_tlbcam_idx entry */ - mtspr SPRN_MAS0, r14 - - /* Extract TLB1CFG(NENTRY) */ - mfspr r16, SPRN_TLB1CFG - andi. 
r16, r16, 0xfff - - /* Update next_tlbcam_idx, wrapping when necessary */ - addi r15, r15, 1 - cmpw r15, r16 - blt 100f - lis r14, tlbcam_index@h - ori r14, r14, tlbcam_index@l - lwz r15, 0(r14) -100: stw r15, 0(r17) - - /* - * Calc MAS1_TSIZE from r10 (which has pshift encoded) - * tlb_enc = (pshift - 10). - */ - subi r15, r10, 10 - mfspr r16, SPRN_MAS1 - rlwimi r16, r15, 7, 20, 24 - mtspr SPRN_MAS1, r16 - - /* copy the pshift for use later */ - mr r14, r10 - - /* fall through */ - -#endif /* CONFIG_HUGETLB_PAGE */ - - /* - * We set execute, because we don't have the granularity to - * properly set this at the page level (Linux problem). - * Many of these bits are software only. Bits we don't set - * here we (properly should) assume have the appropriate value. - */ -finish_tlb_load_cont: -#ifdef CONFIG_PTE_64BIT - rlwinm r12, r11, 32-2, 26, 31 /* Move in perm bits */ - andi. r10, r11, _PAGE_DIRTY - bne 1f - li r10, MAS3_SW | MAS3_UW - andc r12, r12, r10 -1: rlwimi r12, r13, 20, 0, 11 /* grab RPN[32:43] */ - rlwimi r12, r11, 20, 12, 19 /* grab RPN[44:51] */ -2: mtspr SPRN_MAS3, r12 -BEGIN_MMU_FTR_SECTION - srwi r10, r13, 12 /* grab RPN[12:31] */ - mtspr SPRN_MAS7, r10 -END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) -#else - li r10, (_PAGE_EXEC | _PAGE_PRESENT) - mr r13, r11 - rlwimi r10, r11, 31, 29, 29 /* extract _PAGE_DIRTY into SW */ - and r12, r11, r10 - andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */ - slwi r10, r12, 1 - or r10, r10, r12 - rlwinm r10, r10, 0, ~_PAGE_EXEC /* Clear SX on user pages */ - iseleq r12, r12, r10 - rlwimi r13, r12, 0, 20, 31 /* Get RPN from PTE, merge w/ perms */ - mtspr SPRN_MAS3, r13 -#endif - - mfspr r12, SPRN_MAS2 -#ifdef CONFIG_PTE_64BIT - rlwimi r12, r11, 32-19, 27, 31 /* extract WIMGE from pte */ -#else - rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ -#endif -#ifdef CONFIG_HUGETLB_PAGE - beq 6, 3f /* don't mask if page isn't huge */ - li r13, 1 - slw r13, r13, r14 - subi r13, r13, 1 - rlwinm r13, r13, 0, 0, 19 /* bottom bits used for WIMGE/etc */ - andc r12, r12, r13 /* mask off ea bits within the page */ -#endif -3: mtspr SPRN_MAS2, r12 - -tlb_write_entry: - tlbwe - - /* Done...restore registers and get out of here. */ - mfspr r10, SPRN_SPRG_THREAD -#ifdef CONFIG_HUGETLB_PAGE - beq 6, 8f /* skip restore for 4k page faults */ - lwz r14, THREAD_NORMSAVE(4)(r10) - lwz r15, THREAD_NORMSAVE(5)(r10) - lwz r16, THREAD_NORMSAVE(6)(r10) - lwz r17, THREAD_NORMSAVE(7)(r10) -#endif -8: lwz r11, THREAD_NORMSAVE(3)(r10) - mtcr r11 - lwz r13, THREAD_NORMSAVE(2)(r10) - lwz r12, THREAD_NORMSAVE(1)(r10) - lwz r11, THREAD_NORMSAVE(0)(r10) - mfspr r10, SPRN_SPRG_RSCRATCH0 - rfi /* Force context change */ - -#ifdef CONFIG_SPE -/* Note that the SPE support is closely modeled after the AltiVec - * support. Changes to one are likely to be applicable to the - * other! */ -_GLOBAL(load_up_spe) -/* - * Disable SPE for the task which had SPE previously, - * and save its SPE registers in its thread_struct. - * Enables SPE for use in the kernel on return. - * On SMP we know the SPE units are free, since we give it up every - * switch. 
-- Kumar - */ - mfmsr r5 - oris r5,r5,MSR_SPE@h - mtmsr r5 /* enable use of SPE now */ - isync - /* enable use of SPE after return */ - oris r9,r9,MSR_SPE@h - mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ - li r4,1 - li r10,THREAD_ACC - stw r4,THREAD_USED_SPE(r5) - evlddx evr4,r10,r5 - evmra evr4,evr4 - REST_32EVRS(0,r10,r5,THREAD_EVR0) - blr - -/* - * SPE unavailable trap from kernel - print a message, but let - * the task use SPE in the kernel until it returns to user mode. - */ -KernelSPE: - lwz r3,_MSR(r1) - oris r3,r3,MSR_SPE@h - stw r3,_MSR(r1) /* enable use of SPE after return */ -#ifdef CONFIG_PRINTK - lis r3,87f@h - ori r3,r3,87f@l - mr r4,r2 /* current */ - lwz r5,_NIP(r1) - bl _printk -#endif - b interrupt_return -#ifdef CONFIG_PRINTK -87: .string "SPE used in kernel (task=%p, pc=%x) \n" -#endif - .align 4,0 - -#endif /* CONFIG_SPE */ - -/* - * Translate the effec addr in r3 to phys addr. The phys addr will be put - * into r3(higher 32bit) and r4(lower 32bit) - */ -get_phys_addr: - mfmsr r8 - mfspr r9,SPRN_PID - rlwinm r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */ - rlwimi r9,r8,28,0x00000001 /* turn MSR[DS] into MAS6[SAS] */ - mtspr SPRN_MAS6,r9 - - tlbsx 0,r3 /* must succeed */ - - mfspr r8,SPRN_MAS1 - mfspr r12,SPRN_MAS3 - rlwinm r9,r8,25,0x1f /* r9 = log2(page size) */ - li r10,1024 - slw r10,r10,r9 /* r10 = page size */ - addi r10,r10,-1 - and r11,r3,r10 /* r11 = page offset */ - andc r4,r12,r10 /* r4 = page base */ - or r4,r4,r11 /* r4 = devtree phys addr */ -#ifdef CONFIG_PHYS_64BIT - mfspr r3,SPRN_MAS7 -#endif - blr - -/* - * Global functions - */ - -#ifdef CONFIG_E500 -#ifndef CONFIG_PPC_E500MC -/* Adjust or setup IVORs for e500v1/v2 */ -_GLOBAL(__setup_e500_ivors) - li r3,DebugCrit@l - mtspr SPRN_IVOR15,r3 - li r3,SPEUnavailable@l - mtspr SPRN_IVOR32,r3 - li r3,SPEFloatingPointData@l - mtspr SPRN_IVOR33,r3 - li r3,SPEFloatingPointRound@l - mtspr SPRN_IVOR34,r3 - li r3,PerformanceMonitor@l - mtspr SPRN_IVOR35,r3 - sync - blr -#else -/* Adjust or setup IVORs for e500mc */ -_GLOBAL(__setup_e500mc_ivors) - li r3,DebugDebug@l - mtspr SPRN_IVOR15,r3 - li r3,PerformanceMonitor@l - mtspr SPRN_IVOR35,r3 - li r3,Doorbell@l - mtspr SPRN_IVOR36,r3 - li r3,CriticalDoorbell@l - mtspr SPRN_IVOR37,r3 - sync - blr - -/* setup ehv ivors for */ -_GLOBAL(__setup_ehv_ivors) - li r3,GuestDoorbell@l - mtspr SPRN_IVOR38,r3 - li r3,CriticalGuestDoorbell@l - mtspr SPRN_IVOR39,r3 - li r3,Hypercall@l - mtspr SPRN_IVOR40,r3 - li r3,Ehvpriv@l - mtspr SPRN_IVOR41,r3 - sync - blr -#endif /* CONFIG_PPC_E500MC */ -#endif /* CONFIG_E500 */ - -#ifdef CONFIG_SPE -/* - * extern void __giveup_spe(struct task_struct *prev) - * - */ -_GLOBAL(__giveup_spe) - addi r3,r3,THREAD /* want THREAD of task */ - lwz r5,PT_REGS(r3) - cmpi 0,r5,0 - SAVE_32EVRS(0, r4, r3, THREAD_EVR0) - evxor evr6, evr6, evr6 /* clear out evr6 */ - evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ - li r4,THREAD_ACC - evstddx evr6, r4, r3 /* save off accumulator */ - beq 1f - lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) - lis r3,MSR_SPE@h - andc r4,r4,r3 /* disable SPE for previous task */ - stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) -1: - blr -#endif /* CONFIG_SPE */ - -/* - * extern void abort(void) - * - * At present, this routine just applies a system reset. 
- */ -_GLOBAL(abort) - li r13,0 - mtspr SPRN_DBCR0,r13 /* disable all debug events */ - isync - mfmsr r13 - ori r13,r13,MSR_DE@l /* Enable Debug Events */ - mtmsr r13 - isync - mfspr r13,SPRN_DBCR0 - lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h - mtspr SPRN_DBCR0,r13 - isync - -#ifdef CONFIG_SMP -/* When we get here, r24 needs to hold the CPU # */ - .globl __secondary_start -__secondary_start: - LOAD_REG_ADDR_PIC(r3, tlbcam_index) - lwz r3,0(r3) - mtctr r3 - li r26,0 /* r26 safe? */ - - bl switch_to_as1 - mr r27,r3 /* tlb entry */ - /* Load each CAM entry */ -1: mr r3,r26 - bl loadcam_entry - addi r26,r26,1 - bdnz 1b - mr r3,r27 /* tlb entry */ - LOAD_REG_ADDR_PIC(r4, memstart_addr) - lwz r4,0(r4) - mr r5,r25 /* phys kernel start */ - rlwinm r5,r5,0,~0x3ffffff /* aligned 64M */ - subf r4,r5,r4 /* memstart_addr - phys kernel start */ - lis r7,KERNELBASE@h - ori r7,r7,KERNELBASE@l - cmpw r20,r7 /* if kernstart_virt_addr != KERNELBASE, randomized */ - beq 2f - li r4,0 -2: li r5,0 /* no device tree */ - li r6,0 /* not boot cpu */ - bl restore_to_as0 - - - lis r3,__secondary_hold_acknowledge@h - ori r3,r3,__secondary_hold_acknowledge@l - stw r24,0(r3) - - li r3,0 - mr r4,r24 /* Why? */ - bl call_setup_cpu - - /* get current's stack and current */ - lis r2,secondary_current@ha - lwz r2,secondary_current@l(r2) - lwz r1,TASK_STACK(r2) - - /* stack */ - addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD - li r0,0 - stw r0,0(r1) - - /* ptr to current thread */ - addi r4,r2,THREAD /* address of our thread_struct */ - mtspr SPRN_SPRG_THREAD,r4 - - /* Setup the defaults for TLB entries */ - li r4,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l - mtspr SPRN_MAS4,r4 - - /* Jump to start_secondary */ - lis r4,MSR_KERNEL@h - ori r4,r4,MSR_KERNEL@l - lis r3,start_secondary@h - ori r3,r3,start_secondary@l - mtspr SPRN_SRR0,r3 - mtspr SPRN_SRR1,r4 - sync - rfi - sync - - .globl __secondary_hold_acknowledge -__secondary_hold_acknowledge: - .long -1 -#endif - -/* - * Create a 64M tlb by address and entry - * r3 - entry - * r4 - virtual address - * r5/r6 - physical address - */ -_GLOBAL(create_kaslr_tlb_entry) - lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ - rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */ - mtspr SPRN_MAS0,r7 /* Write MAS0 */ - - lis r3,(MAS1_VALID|MAS1_IPROT)@h - ori r3,r3,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l - mtspr SPRN_MAS1,r3 /* Write MAS1 */ - - lis r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h - ori r3,r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l - and r3,r3,r4 - ori r3,r3,MAS2_M_IF_NEEDED@l - mtspr SPRN_MAS2,r3 /* Write MAS2(EPN) */ - -#ifdef CONFIG_PHYS_64BIT - ori r8,r6,(MAS3_SW|MAS3_SR|MAS3_SX) - mtspr SPRN_MAS3,r8 /* Write MAS3(RPN) */ - mtspr SPRN_MAS7,r5 -#else - ori r8,r5,(MAS3_SW|MAS3_SR|MAS3_SX) - mtspr SPRN_MAS3,r8 /* Write MAS3(RPN) */ -#endif - - tlbwe /* Write TLB */ - isync - sync - blr - -/* - * Return to the start of the relocated kernel and run again - * r3 - virtual address of fdt - * r4 - entry of the kernel - */ -_GLOBAL(reloc_kernel_entry) - mfmsr r7 - rlwinm r7, r7, 0, ~(MSR_IS | MSR_DS) - - mtspr SPRN_SRR0,r4 - mtspr SPRN_SRR1,r7 - rfi - -/* - * Create a tlb entry with the same effective and physical address as - * the tlb entry used by the current running code. But set the TS to 1. - * Then switch to the address space 1. It will return with the r3 set to - * the ESEL of the new created tlb. - */ -_GLOBAL(switch_to_as1) - mflr r5 - - /* Find a entry not used */ - mfspr r3,SPRN_TLB1CFG - andi. 
r3,r3,0xfff - mfspr r4,SPRN_PID - rlwinm r4,r4,16,0x3fff0000 /* turn PID into MAS6[SPID] */ - mtspr SPRN_MAS6,r4 -1: lis r4,0x1000 /* Set MAS0(TLBSEL) = 1 */ - addi r3,r3,-1 - rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ - mtspr SPRN_MAS0,r4 - tlbre - mfspr r4,SPRN_MAS1 - andis. r4,r4,MAS1_VALID@h - bne 1b - - /* Get the tlb entry used by the current running code */ - bcl 20,31,$+4 -0: mflr r4 - tlbsx 0,r4 - - mfspr r4,SPRN_MAS1 - ori r4,r4,MAS1_TS /* Set the TS = 1 */ - mtspr SPRN_MAS1,r4 - - mfspr r4,SPRN_MAS0 - rlwinm r4,r4,0,~MAS0_ESEL_MASK - rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ - mtspr SPRN_MAS0,r4 - tlbwe - isync - sync - - mfmsr r4 - ori r4,r4,MSR_IS | MSR_DS - mtspr SPRN_SRR0,r5 - mtspr SPRN_SRR1,r4 - sync - rfi - -/* - * Restore to the address space 0 and also invalidate the tlb entry created - * by switch_to_as1. - * r3 - the tlb entry which should be invalidated - * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0) - * r5 - device tree virtual address. If r4 is 0, r5 is ignored. - * r6 - boot cpu -*/ -_GLOBAL(restore_to_as0) - mflr r0 - - bcl 20,31,$+4 -0: mflr r9 - addi r9,r9,1f - 0b - - /* - * We may map the PAGE_OFFSET in AS0 to a different physical address, - * so we need calculate the right jump and device tree address based - * on the offset passed by r4. - */ - add r9,r9,r4 - add r5,r5,r4 - add r0,r0,r4 - -2: mfmsr r7 - li r8,(MSR_IS | MSR_DS) - andc r7,r7,r8 - - mtspr SPRN_SRR0,r9 - mtspr SPRN_SRR1,r7 - sync - rfi - - /* Invalidate the temporary tlb entry for AS1 */ -1: lis r9,0x1000 /* Set MAS0(TLBSEL) = 1 */ - rlwimi r9,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ - mtspr SPRN_MAS0,r9 - tlbre - mfspr r9,SPRN_MAS1 - rlwinm r9,r9,0,2,31 /* Clear MAS1 Valid and IPPROT */ - mtspr SPRN_MAS1,r9 - tlbwe - isync - - cmpwi r4,0 - cmpwi cr1,r6,0 - cror eq,4*cr1+eq,eq - bne 3f /* offset != 0 && is_boot_cpu */ - mtlr r0 - blr - - /* - * The PAGE_OFFSET will map to a different physical address, - * jump to _start to do another relocation again. - */ -3: mr r3,r5 - bl _start diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index a20deebf233f..1a1e9995dae3 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -47,7 +47,7 @@ static struct hard_trap_info { 0x0c00, 0x14 /* SIGCHLD */ }, /* system call */ #ifdef CONFIG_BOOKE_OR_40x { 0x2002, 0x05 /* SIGTRAP */ }, /* debug */ -#if defined(CONFIG_FSL_BOOKE) +#if defined(CONFIG_PPC_85xx) { 0x2010, 0x08 /* SIGFPE */ }, /* spe unavailable */ { 0x2020, 0x08 /* SIGFPE */ }, /* spe unavailable */ { 0x2030, 0x08 /* SIGFPE */ }, /* spe fp data */ @@ -57,7 +57,7 @@ static struct hard_trap_info { 0x2900, 0x08 /* SIGFPE */ }, /* apu unavailable */ { 0x3100, 0x0e /* SIGALRM */ }, /* fixed interval timer */ { 0x3200, 0x02 /* SIGINT */ }, /* watchdog */ -#else /* ! CONFIG_FSL_BOOKE */ +#else /* ! 
CONFIG_PPC_85xx */
	{ 0x1000, 0x0e /* SIGALRM */ },		/* prog interval timer */
	{ 0x1010, 0x0e /* SIGALRM */ },		/* fixed interval timer */
	{ 0x1020, 0x02 /* SIGINT */ },		/* watchdog */
@@ -208,7 +208,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 	for (reg = 14; reg < 32; reg++)
 		PACK64(ptr, regs->gpr[reg]);
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_85xx
 #ifdef CONFIG_SPE
 	for (reg = 0; reg < 32; reg++)
 		PACK64(ptr, p->thread.evr[reg]);
@@ -234,7 +234,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 #define GDB_SIZEOF_REG sizeof(unsigned long)
 #define GDB_SIZEOF_REG_U32 sizeof(u32)
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_85xx
 #define GDB_SIZEOF_FLOAT_REG sizeof(unsigned long)
 #else
 #define GDB_SIZEOF_FLOAT_REG sizeof(u64)
@@ -329,7 +329,7 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
 	if (regno >= 32 && regno < 64) {
 		/* FP registers 32 -> 63 */
-#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
+#if defined(CONFIG_PPC_85xx) && defined(CONFIG_SPE)
 		if (current)
 			memcpy(mem, &current->thread.evr[regno-32],
					dbg_reg_def[regno].size);
@@ -355,7 +355,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
 	if (regno >= 32 && regno < 64) {
 		/* FP registers 32 -> 63 */
-#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
+#if defined(CONFIG_PPC_85xx) && defined(CONFIG_SPE)
 		memcpy(&current->thread.evr[regno-32], mem,
				dbg_reg_def[regno].size);
 #else
diff --git a/arch/powerpc/kernel/swsusp_85xx.S b/arch/powerpc/kernel/swsusp_85xx.S
new file mode 100644
index 000000000000..88cfdbd530f1
--- /dev/null
+++ b/arch/powerpc/kernel/swsusp_85xx.S
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Based on swsusp_32.S, modified for FSL BookE by
+ * Anton Vorontsov
+ * Copyright (c) 2009-2010 MontaVista Software, LLC.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * Structure for storing CPU registers on the save area.
+ */
+#define SL_SP		0
+#define SL_PC		4
+#define SL_MSR		8
+#define SL_TCR		0xc
+#define SL_SPRG0	0x10
+#define SL_SPRG1	0x14
+#define SL_SPRG2	0x18
+#define SL_SPRG3	0x1c
+#define SL_SPRG4	0x20
+#define SL_SPRG5	0x24
+#define SL_SPRG6	0x28
+#define SL_SPRG7	0x2c
+#define SL_TBU		0x30
+#define SL_TBL		0x34
+#define SL_R2		0x38
+#define SL_CR		0x3c
+#define SL_LR		0x40
+#define SL_R12		0x44	/* r12 to r31 */
+#define SL_SIZE		(SL_R12 + 80)
+
+	.section .data
+	.align	5
+
+_GLOBAL(swsusp_save_area)
+	.space	SL_SIZE
+
+
+	.section .text
+	.align	5
+
+_GLOBAL(swsusp_arch_suspend)
+	lis	r11,swsusp_save_area@h
+	ori	r11,r11,swsusp_save_area@l
+
+	mflr	r0
+	stw	r0,SL_LR(r11)
+	mfcr	r0
+	stw	r0,SL_CR(r11)
+	stw	r1,SL_SP(r11)
+	stw	r2,SL_R2(r11)
+	stmw	r12,SL_R12(r11)
+
+	/* Save MSR & TCR */
+	mfmsr	r4
+	stw	r4,SL_MSR(r11)
+	mfspr	r4,SPRN_TCR
+	stw	r4,SL_TCR(r11)
+
+	/* Get a stable timebase and save it */
+1:	mfspr	r4,SPRN_TBRU
+	stw	r4,SL_TBU(r11)
+	mfspr	r5,SPRN_TBRL
+	stw	r5,SL_TBL(r11)
+	mfspr	r3,SPRN_TBRU
+	cmpw	r3,r4
+	bne	1b
+
+	/* Save SPRGs */
+	mfspr	r4,SPRN_SPRG0
+	stw	r4,SL_SPRG0(r11)
+	mfspr	r4,SPRN_SPRG1
+	stw	r4,SL_SPRG1(r11)
+	mfspr	r4,SPRN_SPRG2
+	stw	r4,SL_SPRG2(r11)
+	mfspr	r4,SPRN_SPRG3
+	stw	r4,SL_SPRG3(r11)
+	mfspr	r4,SPRN_SPRG4
+	stw	r4,SL_SPRG4(r11)
+	mfspr	r4,SPRN_SPRG5
+	stw	r4,SL_SPRG5(r11)
+	mfspr	r4,SPRN_SPRG6
+	stw	r4,SL_SPRG6(r11)
+	mfspr	r4,SPRN_SPRG7
+	stw	r4,SL_SPRG7(r11)
+
+	/* Call the low level suspend stuff (we should probably have made
+	 * a stackframe...
+ */ + bl swsusp_save + + /* Restore LR from the save area */ + lis r11,swsusp_save_area@h + ori r11,r11,swsusp_save_area@l + lwz r0,SL_LR(r11) + mtlr r0 + + blr + +_GLOBAL(swsusp_arch_resume) + sync + + /* Load ptr the list of pages to copy in r3 */ + lis r11,(restore_pblist)@h + ori r11,r11,restore_pblist@l + lwz r3,0(r11) + + /* Copy the pages. This is a very basic implementation, to + * be replaced by something more cache efficient */ +1: + li r0,256 + mtctr r0 + lwz r5,pbe_address(r3) /* source */ + lwz r6,pbe_orig_address(r3) /* destination */ +2: + lwz r8,0(r5) + lwz r9,4(r5) + lwz r10,8(r5) + lwz r11,12(r5) + addi r5,r5,16 + stw r8,0(r6) + stw r9,4(r6) + stw r10,8(r6) + stw r11,12(r6) + addi r6,r6,16 + bdnz 2b + lwz r3,pbe_next(r3) + cmpwi 0,r3,0 + bne 1b + + bl flush_dcache_L1 + bl flush_instruction_cache + + lis r11,swsusp_save_area@h + ori r11,r11,swsusp_save_area@l + + /* + * Mappings from virtual addresses to physical addresses may be + * different than they were prior to restoring hibernation state. + * Invalidate the TLB so that the boot CPU is using the new + * mappings. + */ + bl _tlbil_all + + lwz r4,SL_SPRG0(r11) + mtspr SPRN_SPRG0,r4 + lwz r4,SL_SPRG1(r11) + mtspr SPRN_SPRG1,r4 + lwz r4,SL_SPRG2(r11) + mtspr SPRN_SPRG2,r4 + lwz r4,SL_SPRG3(r11) + mtspr SPRN_SPRG3,r4 + lwz r4,SL_SPRG4(r11) + mtspr SPRN_SPRG4,r4 + lwz r4,SL_SPRG5(r11) + mtspr SPRN_SPRG5,r4 + lwz r4,SL_SPRG6(r11) + mtspr SPRN_SPRG6,r4 + lwz r4,SL_SPRG7(r11) + mtspr SPRN_SPRG7,r4 + + /* restore the MSR */ + lwz r3,SL_MSR(r11) + mtmsr r3 + + /* Restore TB */ + li r3,0 + mtspr SPRN_TBWL,r3 + lwz r3,SL_TBU(r11) + lwz r4,SL_TBL(r11) + mtspr SPRN_TBWU,r3 + mtspr SPRN_TBWL,r4 + + /* Restore TCR and clear any pending bits in TSR. */ + lwz r4,SL_TCR(r11) + mtspr SPRN_TCR,r4 + lis r4, (TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS)@h + mtspr SPRN_TSR,r4 + + /* Kick decrementer */ + li r0,1 + mtdec r0 + + /* Restore the callee-saved registers and return */ + lwz r0,SL_CR(r11) + mtcr r0 + lwz r2,SL_R2(r11) + lmw r12,SL_R12(r11) + lwz r1,SL_SP(r11) + lwz r0,SL_LR(r11) + mtlr r0 + + li r3,0 + blr diff --git a/arch/powerpc/kernel/swsusp_booke.S b/arch/powerpc/kernel/swsusp_booke.S deleted file mode 100644 index 88cfdbd530f1..000000000000 --- a/arch/powerpc/kernel/swsusp_booke.S +++ /dev/null @@ -1,202 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Based on swsusp_32.S, modified for FSL BookE by - * Anton Vorontsov - * Copyright (c) 2009-2010 MontaVista Software, LLC. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Structure for storing CPU registers on the save area. 
- */ -#define SL_SP 0 -#define SL_PC 4 -#define SL_MSR 8 -#define SL_TCR 0xc -#define SL_SPRG0 0x10 -#define SL_SPRG1 0x14 -#define SL_SPRG2 0x18 -#define SL_SPRG3 0x1c -#define SL_SPRG4 0x20 -#define SL_SPRG5 0x24 -#define SL_SPRG6 0x28 -#define SL_SPRG7 0x2c -#define SL_TBU 0x30 -#define SL_TBL 0x34 -#define SL_R2 0x38 -#define SL_CR 0x3c -#define SL_LR 0x40 -#define SL_R12 0x44 /* r12 to r31 */ -#define SL_SIZE (SL_R12 + 80) - - .section .data - .align 5 - -_GLOBAL(swsusp_save_area) - .space SL_SIZE - - - .section .text - .align 5 - -_GLOBAL(swsusp_arch_suspend) - lis r11,swsusp_save_area@h - ori r11,r11,swsusp_save_area@l - - mflr r0 - stw r0,SL_LR(r11) - mfcr r0 - stw r0,SL_CR(r11) - stw r1,SL_SP(r11) - stw r2,SL_R2(r11) - stmw r12,SL_R12(r11) - - /* Save MSR & TCR */ - mfmsr r4 - stw r4,SL_MSR(r11) - mfspr r4,SPRN_TCR - stw r4,SL_TCR(r11) - - /* Get a stable timebase and save it */ -1: mfspr r4,SPRN_TBRU - stw r4,SL_TBU(r11) - mfspr r5,SPRN_TBRL - stw r5,SL_TBL(r11) - mfspr r3,SPRN_TBRU - cmpw r3,r4 - bne 1b - - /* Save SPRGs */ - mfspr r4,SPRN_SPRG0 - stw r4,SL_SPRG0(r11) - mfspr r4,SPRN_SPRG1 - stw r4,SL_SPRG1(r11) - mfspr r4,SPRN_SPRG2 - stw r4,SL_SPRG2(r11) - mfspr r4,SPRN_SPRG3 - stw r4,SL_SPRG3(r11) - mfspr r4,SPRN_SPRG4 - stw r4,SL_SPRG4(r11) - mfspr r4,SPRN_SPRG5 - stw r4,SL_SPRG5(r11) - mfspr r4,SPRN_SPRG6 - stw r4,SL_SPRG6(r11) - mfspr r4,SPRN_SPRG7 - stw r4,SL_SPRG7(r11) - - /* Call the low level suspend stuff (we should probably have made - * a stackframe... - */ - bl swsusp_save - - /* Restore LR from the save area */ - lis r11,swsusp_save_area@h - ori r11,r11,swsusp_save_area@l - lwz r0,SL_LR(r11) - mtlr r0 - - blr - -_GLOBAL(swsusp_arch_resume) - sync - - /* Load ptr the list of pages to copy in r3 */ - lis r11,(restore_pblist)@h - ori r11,r11,restore_pblist@l - lwz r3,0(r11) - - /* Copy the pages. This is a very basic implementation, to - * be replaced by something more cache efficient */ -1: - li r0,256 - mtctr r0 - lwz r5,pbe_address(r3) /* source */ - lwz r6,pbe_orig_address(r3) /* destination */ -2: - lwz r8,0(r5) - lwz r9,4(r5) - lwz r10,8(r5) - lwz r11,12(r5) - addi r5,r5,16 - stw r8,0(r6) - stw r9,4(r6) - stw r10,8(r6) - stw r11,12(r6) - addi r6,r6,16 - bdnz 2b - lwz r3,pbe_next(r3) - cmpwi 0,r3,0 - bne 1b - - bl flush_dcache_L1 - bl flush_instruction_cache - - lis r11,swsusp_save_area@h - ori r11,r11,swsusp_save_area@l - - /* - * Mappings from virtual addresses to physical addresses may be - * different than they were prior to restoring hibernation state. - * Invalidate the TLB so that the boot CPU is using the new - * mappings. - */ - bl _tlbil_all - - lwz r4,SL_SPRG0(r11) - mtspr SPRN_SPRG0,r4 - lwz r4,SL_SPRG1(r11) - mtspr SPRN_SPRG1,r4 - lwz r4,SL_SPRG2(r11) - mtspr SPRN_SPRG2,r4 - lwz r4,SL_SPRG3(r11) - mtspr SPRN_SPRG3,r4 - lwz r4,SL_SPRG4(r11) - mtspr SPRN_SPRG4,r4 - lwz r4,SL_SPRG5(r11) - mtspr SPRN_SPRG5,r4 - lwz r4,SL_SPRG6(r11) - mtspr SPRN_SPRG6,r4 - lwz r4,SL_SPRG7(r11) - mtspr SPRN_SPRG7,r4 - - /* restore the MSR */ - lwz r3,SL_MSR(r11) - mtmsr r3 - - /* Restore TB */ - li r3,0 - mtspr SPRN_TBWL,r3 - lwz r3,SL_TBU(r11) - lwz r4,SL_TBL(r11) - mtspr SPRN_TBWU,r3 - mtspr SPRN_TBWL,r4 - - /* Restore TCR and clear any pending bits in TSR. 
*/ - lwz r4,SL_TCR(r11) - mtspr SPRN_TCR,r4 - lis r4, (TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS)@h - mtspr SPRN_TSR,r4 - - /* Kick decrementer */ - li r0,1 - mtdec r0 - - /* Restore the callee-saved registers and return */ - lwz r0,SL_CR(r11) - mtcr r0 - lwz r2,SL_R2(r11) - lmw r12,SL_R12(r11) - lwz r1,SL_SP(r11) - lwz r0,SL_LR(r11) - mtlr r0 - - li r3,0 - blr diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index dcf4046f8565..f181c434289e 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -2085,7 +2085,7 @@ DEFINE_INTERRUPT_HANDLER(altivec_assist_exception) } #endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_85xx DEFINE_INTERRUPT_HANDLER(CacheLockingException) { unsigned long error_code = regs->dsisr; @@ -2098,7 +2098,7 @@ DEFINE_INTERRUPT_HANDLER(CacheLockingException) _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); return; } -#endif /* CONFIG_FSL_BOOKE */ +#endif /* CONFIG_PPC_85xx */ #ifdef CONFIG_SPE DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException) diff --git a/arch/powerpc/kexec/core_32.c b/arch/powerpc/kexec/core_32.c index b50aed48d09d..c95f96850c9e 100644 --- a/arch/powerpc/kexec/core_32.c +++ b/arch/powerpc/kexec/core_32.c @@ -55,7 +55,7 @@ void default_machine_kexec(struct kimage *image) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); - if (!IS_ENABLED(CONFIG_FSL_BOOKE) && !IS_ENABLED(CONFIG_44x)) + if (!IS_ENABLED(CONFIG_PPC_85xx) && !IS_ENABLED(CONFIG_44x)) relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start); /* now call it */ diff --git a/arch/powerpc/kexec/relocate_32.S b/arch/powerpc/kexec/relocate_32.S index cf6e52bdf8d8..d9f0dd9b34ff 100644 --- a/arch/powerpc/kexec/relocate_32.S +++ b/arch/powerpc/kexec/relocate_32.S @@ -25,14 +25,14 @@ relocate_new_kernel: /* r4 = reboot_code_buffer */ /* r5 = start_address */ -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_85xx mr r29, r3 mr r30, r4 mr r31, r5 #define ENTRY_MAPPING_KEXEC_SETUP -#include +#include #undef ENTRY_MAPPING_KEXEC_SETUP mr r3, r29 diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 6fa82efe833b..205545d820a1 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S @@ -223,7 +223,7 @@ _GLOBAL(kvmppc_resume_host) lwz r3, VCPU_HOST_PID(r4) mtspr SPRN_PID, r3 -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_85xx /* we cheat and know that Linux doesn't use PID1 which is always 0 */ lis r3, 0 mtspr SPRN_PID1, r3 @@ -406,7 +406,7 @@ lightweight_exit: lwz r3, VCPU_SHADOW_PID(r4) mtspr SPRN_PID, r3 -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_85xx lwz r3, VCPU_SHADOW_PID1(r4) mtspr SPRN_PID1, r3 #endif diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 3142d7617412..d4cc3749e621 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -85,12 +85,12 @@ void __init MMU_init(void) total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr; lowmem_end_addr = memstart_addr + total_lowmem; -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_85xx /* Freescale Book-E parts expect lowmem to be mapped by fixed TLB * entries, so we need to adjust lowmem to match the amount we can map * in the fixed entries */ adjust_total_lowmem(); -#endif /* CONFIG_FSL_BOOKE */ +#endif /* CONFIG_PPC_85xx */ if (total_lowmem > __max_low_memory) { total_lowmem = __max_low_memory; diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 0e3528aec49e..88805757d0c9 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ 
b/arch/powerpc/mm/mmu_decl.h
@@ -146,9 +146,9 @@ struct tlbcam {
 extern struct tlbcam TLBCAM[NUM_TLBCAMS];
 #endif
-#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_8xx)
 /* 6xx have BATS */
-/* FSL_BOOKE have TLBCAM */
+/* PPC_85xx have TLBCAM */
 /* 8xx have LTLB */
 phys_addr_t v_block_mapped(unsigned long va);
 unsigned long p_block_mapped(phys_addr_t pa);
diff --git a/arch/powerpc/mm/nohash/fsl_book3e.c b/arch/powerpc/mm/nohash/fsl_book3e.c
index c1ad173de318..40a4e69ae1a9 100644
--- a/arch/powerpc/mm/nohash/fsl_book3e.c
+++ b/arch/powerpc/mm/nohash/fsl_book3e.c
@@ -59,7 +59,7 @@ static struct {
 	phys_addr_t phys;
 } tlbcam_addrs[NUM_TLBCAMS];
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_85xx
 /*
  * Return PA for this VA if it is mapped by a CAM, or 0
  */
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index 5e7ccb48b79c..f21896ebdc5a 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -130,7 +130,7 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 		.enc	= BOOK3E_PAGESZ_1GB,
 	},
 };
-#endif /* CONFIG_FSL_BOOKE */
+#endif /* CONFIG_PPC_85xx */
 static inline int mmu_get_tsize(int psize)
 {
diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S
index d62b613a0d5d..d378031246ab 100644
--- a/arch/powerpc/mm/nohash/tlb_low.S
+++ b/arch/powerpc/mm/nohash/tlb_low.S
@@ -221,7 +221,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_476_DD2)
 	blr
 #endif /* CONFIG_PPC_47x */
-#elif defined(CONFIG_FSL_BOOKE)
+#elif defined(CONFIG_PPC_85xx)
 /*
  * FSL BookE implementations.
  *
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 19fd95a06352..11780074eb23 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -324,11 +324,6 @@ config BOOKE_OR_40x
 	depends on BOOKE || 40x
 	default y
-config FSL_BOOKE
-	bool
-	depends on E500 && PPC32
-	default y
-
 # this is for common code between PPC32 & PPC64 FSL BOOKE
 config PPC_FSL_BOOK3E
 	bool
@@ -337,7 +332,7 @@ config PPC_FSL_BOOK3E
 	select PPC_SMP_MUXED_IPI
 	select PPC_DOORBELL
 	select PPC_KUEP
-	default y if FSL_BOOKE
+	default y if PPC_85xx
 config PTE_64BIT
 	bool
@@ -485,7 +480,7 @@ config PPC_MMU_NOHASH
 config PPC_BOOK3E_MMU
 	def_bool y
-	depends on FSL_BOOKE || PPC_BOOK3E
+	depends on PPC_85xx || PPC_BOOK3E
 config PPC_HAVE_PMU_SUPPORT
 	bool
@@ -508,7 +503,7 @@ config FORCE_SMP
 	select SMP
 config SMP
-	depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x
+	depends on PPC_BOOK3S || PPC_BOOK3E || PPC_85xx || PPC_47x
 	select GENERIC_IRQ_MIGRATION
 	bool "Symmetric multi-processing support" if !FORCE_SMP
 	help
-- cgit v1.2.3-70-g09d2

From e0d68273d7069537701bb91c51d90d1e12aacc33 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Mon, 19 Sep 2022 19:01:33 +0200
Subject: powerpc: Remove CONFIG_PPC_BOOK3E

CONFIG_PPC_BOOK3E is redundant with CONFIG_PPC_BOOK3E_64. The latter is
more explicit about the fact that it's a 64-bit target.

Remove CONFIG_PPC_BOOK3E.
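To illustrate why the substitution is mechanical, consider a minimal
sketch (hypothetical code, not taken from this series; the helper name
is invented). Because the alias was literally "def_bool y" under
PPC_BOOK3E_64, a guard on either symbol compiles the same code:

	#include <linux/kconfig.h>

	/* Invented helper for illustration only: before this cleanup
	 * it would have tested CONFIG_PPC_BOOK3E, with identical
	 * effect, since the alias was always set on Book3E-64 builds.
	 */
	static inline bool is_book3e_build(void)
	{
		return IS_ENABLED(CONFIG_PPC_BOOK3E_64);
	}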
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/5d0891490813c19cdcfc04678f512ea68cba3e64.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 2 +- arch/powerpc/include/asm/cputable.h | 4 ++-- arch/powerpc/include/asm/interrupt.h | 2 +- arch/powerpc/include/asm/nohash/pgalloc.h | 2 +- arch/powerpc/include/asm/paca.h | 8 +++---- arch/powerpc/include/asm/ppc_asm.h | 4 ++-- arch/powerpc/kernel/asm-offsets.c | 6 ++--- arch/powerpc/kernel/entry_64.S | 6 ++--- arch/powerpc/kernel/head_64.S | 40 +++++++++++++++---------------- arch/powerpc/kernel/misc_64.S | 6 ++--- arch/powerpc/kernel/paca.c | 6 ++--- arch/powerpc/kernel/setup.h | 2 +- arch/powerpc/kernel/setup_64.c | 8 +++---- arch/powerpc/kernel/vmlinux.lds.S | 2 +- arch/powerpc/kexec/core_64.c | 2 +- arch/powerpc/mm/mmu_decl.h | 6 ++--- arch/powerpc/mm/nohash/tlb_low.S | 2 +- arch/powerpc/platforms/85xx/Kconfig | 2 +- arch/powerpc/platforms/Kconfig.cputype | 10 +++----- arch/powerpc/xmon/xmon.c | 16 ++++++------- 20 files changed, 66 insertions(+), 70 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index dafb14f44672..9d721cac20b8 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -548,7 +548,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE config KEXEC bool "kexec system call" - depends on (PPC_BOOK3S || PPC_85xx || (44x && !SMP)) || PPC_BOOK3E + depends on (PPC_BOOK3S || PPC_85xx || (44x && !SMP)) || PPC_BOOK3E_64 select KEXEC_CORE help kexec is a system call that implements the ability to shutdown your diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index ae8c3e13cfce..27875f0b7bc7 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -463,7 +463,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTRS_COMPATIBLE (CPU_FTR_PPCAS_ARCH_V2) #ifdef CONFIG_PPC64 -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 #define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500) #else #ifdef CONFIG_CPU_LITTLE_ENDIAN @@ -521,7 +521,7 @@ enum { #endif /* __powerpc64__ */ #ifdef CONFIG_PPC64 -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 #define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500) #else diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index 8069dbc4b8d1..84a1cdc3204c 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -281,7 +281,7 @@ static inline bool nmi_disables_ftrace(struct pt_regs *regs) if (TRAP(regs) == INTERRUPT_PERFMON) return false; } - if (IS_ENABLED(CONFIG_PPC_BOOK3E)) { + if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) { if (TRAP(regs) == INTERRUPT_PERFMON) return false; } diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h index 29c43665a753..4b62376318e1 100644 --- a/arch/powerpc/include/asm/nohash/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/pgalloc.h @@ -15,7 +15,7 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb, { } -#endif /* !CONFIG_PPC_BOOK3E */ +#endif /* !CONFIG_PPC_BOOK3E_64 */ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 3537b0500f4d..09f1790d0ae1 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -18,7 +18,7 @@ #include #include #include -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 #include #else #include @@ 
-127,7 +127,7 @@ struct paca_struct { #endif #endif /* CONFIG_PPC_BOOK3S_64 */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 u64 exgen[8] __aligned(0x40); /* Keep pgd in the same cacheline as the start of extlb */ pgd_t *pgd __aligned(0x40); /* Current PGD */ @@ -151,7 +151,7 @@ struct paca_struct { void *dbg_kstack; struct tlb_core_data tcd; -#endif /* CONFIG_PPC_BOOK3E */ +#endif /* CONFIG_PPC_BOOK3E_64 */ #ifdef CONFIG_PPC_64S_HASH_MMU unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE]; @@ -168,7 +168,7 @@ struct paca_struct { #ifdef CONFIG_PPC64 u64 exit_save_r1; /* Syscall/interrupt R1 save */ #endif -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 u16 trap_save; /* Used when bad stack is encountered */ #endif #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 83c02f5a7f2a..55149a0384db 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -709,7 +709,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) * kernel is built for. */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 #define FIXUP_ENDIAN #else /* @@ -749,7 +749,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) .long 0x2402004c; /* hrfid */ \ 191: -#endif /* !CONFIG_PPC_BOOK3E */ +#endif /* !CONFIG_PPC_BOOK3E_64 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 8c10f536e478..10ce03052a19 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -197,7 +197,7 @@ int main(void) OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened); OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled); -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 OFFSET(PACAPGD, paca_struct, pgd); OFFSET(PACA_KERNELPGD, paca_struct, kernel_pgd); OFFSET(PACA_EXGEN, paca_struct, exgen); @@ -213,7 +213,7 @@ int main(void) OFFSET(TCD_ESEL_NEXT, tlb_core_data, esel_next); OFFSET(TCD_ESEL_MAX, tlb_core_data, esel_max); OFFSET(TCD_ESEL_FIRST, tlb_core_data, esel_first); -#endif /* CONFIG_PPC_BOOK3E */ +#endif /* CONFIG_PPC_BOOK3E_64 */ #ifdef CONFIG_PPC_BOOK3S_64 OFFSET(PACA_EXGEN, paca_struct, exgen); @@ -248,7 +248,7 @@ int main(void) #ifdef CONFIG_PPC64 OFFSET(PACA_EXIT_SAVE_R1, paca_struct, exit_save_r1); #endif -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save); #endif OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso); diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 01ace4c56104..3e2e37e6ecab 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -292,16 +292,16 @@ _GLOBAL(enter_prom) /* Prepare a 32-bit mode big endian MSR */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 rlwinm r11,r11,0,1,31 mtsrr1 r11 rfi -#else /* CONFIG_PPC_BOOK3E */ +#else /* CONFIG_PPC_BOOK3E_64 */ LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_LE) andc r11,r11,r12 mtsrr1 r11 RFI_TO_KERNEL -#endif /* CONFIG_PPC_BOOK3E */ +#endif /* CONFIG_PPC_BOOK3E_64 */ 1: /* Return from OF */ FIXUP_ENDIAN diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index cf2c08902c05..da544ea0ce01 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -143,7 +143,7 @@ DEFINE_FIXED_SYMBOL(__run_at_load, first_256B) .globl __secondary_hold __secondary_hold: FIXUP_ENDIAN -#ifndef CONFIG_PPC_BOOK3E +#ifndef CONFIG_PPC_BOOK3E_64 mfmsr r24 ori r24,r24,MSR_RI mtmsrd 
r24 /* RI on */ @@ -160,7 +160,7 @@ __secondary_hold: sync li r26,0 -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 tovirt(r26,r26) #endif /* All secondary cpus wait here until told to start. */ @@ -169,7 +169,7 @@ __secondary_hold: beq 100b #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE) -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 tovirt(r12,r12) #endif mtctr r12 @@ -178,7 +178,7 @@ __secondary_hold: * it may be the case that other platforms have r4 right to * begin with, this gives us some safety in case it is not */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 mr r4,r25 #else li r4,0 @@ -214,7 +214,7 @@ USE_TEXT_SECTION() #include "interrupt_64.S" -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* * The booting_thread_hwid holds the thread id we want to boot in cpu * hotplug case. It is set by cpu hotplug code, and is invalid by default. @@ -322,7 +322,7 @@ _GLOBAL(fsl_secondary_thread_init) bl book3e_secondary_thread_init b generic_secondary_common_init -#endif /* CONFIG_PPC_BOOK3E */ +#endif /* CONFIG_PPC_BOOK3E_64 */ /* * On pSeries and most other platforms, secondary processors spin @@ -345,7 +345,7 @@ _GLOBAL(generic_secondary_smp_init) bl relative_toc tovirt(r2,r2) -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* Book3E initialization */ mr r3,r24 mr r4,r25 @@ -417,7 +417,7 @@ generic_secondary_common_init: b kexec_wait /* next kernel might do better */ 2: SET_PACA(r13) -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */ mtspr SPRN_SPRG_TLB_EXFRAME,r12 #endif @@ -519,7 +519,7 @@ __start_initialization_multiplatform: mr r29,r9 #endif -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 bl start_initialization_book3e b __after_prom_start #else @@ -540,7 +540,7 @@ __start_initialization_multiplatform: /* Switch off MMU if not already off */ bl __mmu_off b __after_prom_start -#endif /* CONFIG_PPC_BOOK3E */ +#endif /* CONFIG_PPC_BOOK3E_64 */ __REF __boot_from_prom: @@ -587,11 +587,11 @@ __after_prom_start: /* process relocations for the final address of the kernel */ lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ sldi r25,r25,32 -#if defined(CONFIG_PPC_BOOK3E) +#if defined(CONFIG_PPC_BOOK3E_64) tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */ #endif lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26) -#if defined(CONFIG_PPC_BOOK3E) +#if defined(CONFIG_PPC_BOOK3E_64) tophys(r26,r26) #endif cmplwi cr0,r7,1 /* flagged to stay where we are ? */ @@ -599,7 +599,7 @@ __after_prom_start: add r25,r25,r26 1: mr r3,r25 bl relocate -#if defined(CONFIG_PPC_BOOK3E) +#if defined(CONFIG_PPC_BOOK3E_64) /* IVPR needs to be set after relocation. */ bl init_core_book3e #endif @@ -613,11 +613,11 @@ __after_prom_start: * Note: This process overwrites the OF exception vectors. */ li r3,0 /* target addr */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 tovirt(r3,r3) /* on booke, we already run at PAGE_OFFSET */ #endif mr. 
r4,r26 /* In some cases the loader may */ -#if defined(CONFIG_PPC_BOOK3E) +#if defined(CONFIG_PPC_BOOK3E_64) tovirt(r4,r4) #endif beq 9f /* have already put us at zero */ @@ -630,14 +630,14 @@ __after_prom_start: * variable __run_at_load, if it is set the kernel is treated as relocatable * kernel, otherwise it will be moved to PHYSICAL_START */ -#if defined(CONFIG_PPC_BOOK3E) +#if defined(CONFIG_PPC_BOOK3E_64) tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */ #endif lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26) cmplwi cr0,r7,1 bne 3f -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 LOAD_REG_ADDR(r5, __end_interrupts) LOAD_REG_ADDR(r11, _stext) sub r5,r5,r11 @@ -871,10 +871,10 @@ _GLOBAL(start_secondary_resume) */ enable_64b_mode: mfmsr r11 /* grab the current MSR */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ mtmsr r11 -#else /* CONFIG_PPC_BOOK3E */ +#else /* CONFIG_PPC_BOOK3E_64 */ LOAD_REG_IMMEDIATE(r12, MSR_64BIT) or r11,r11,r12 mtmsrd r11 @@ -940,7 +940,7 @@ start_here_multiplatform: std r29,8(r11); #endif -#ifndef CONFIG_PPC_BOOK3E +#ifndef CONFIG_PPC_BOOK3E_64 mfmsr r6 ori r6,r6,MSR_RI mtmsrd r6 /* RI on */ diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index fd6d8d3a548e..36184cada00b 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -286,7 +286,7 @@ kexec_flag: #ifdef CONFIG_KEXEC_CORE -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* * BOOK3E has no real MMU mode, so we have to setup the initial TLB * for a core to identity map v:0 to p:0. This current implementation @@ -354,7 +354,7 @@ _GLOBAL(kexec_smp_wait) * don't overwrite r3 here, it is live for kexec_wait above. */ real_mode: /* assume normal blr return */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* Create an identity mapping. 
*/ b kexec_create_tlb #else @@ -413,7 +413,7 @@ _GLOBAL(kexec_sequence) lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ /* disable interrupts, we are overwriting kernel data next */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 wrteei 0 #else mfmsr r3 diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index dfd097b79160..be8db402e963 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -186,7 +186,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) #ifdef CONFIG_PPC_PSERIES new_paca->lppaca_ptr = NULL; #endif -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 new_paca->kernel_pgd = swapper_pg_dir; #endif new_paca->lock_token = 0x8000; @@ -203,7 +203,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) new_paca->slb_shadow_ptr = NULL; #endif -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* For now -- if we have threads this will be adjusted later */ new_paca->tcd_ptr = &new_paca->tcd; #endif @@ -215,7 +215,7 @@ void setup_paca(struct paca_struct *new_paca) /* Setup r13 */ local_paca = new_paca; -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* On Book3E, initialize the TLB miss exception frames */ mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); #else diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h index 93f22da12abe..7912bb50a7cb 100644 --- a/arch/powerpc/kernel/setup.h +++ b/arch/powerpc/kernel/setup.h @@ -23,7 +23,7 @@ void check_smt_enabled(void); static inline void check_smt_enabled(void) { } #endif -#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP) +#if defined(CONFIG_PPC_BOOK3E_64) && defined(CONFIG_SMP) void setup_tlb_core_data(void); #else static inline void setup_tlb_core_data(void) { } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 2b2d0b0fbb30..6434a3f6acb5 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -86,7 +86,7 @@ struct ppc64_caches ppc64_caches = { }; EXPORT_SYMBOL_GPL(ppc64_caches); -#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP) +#if defined(CONFIG_PPC_BOOK3E_64) && defined(CONFIG_SMP) void __init setup_tlb_core_data(void) { int cpu; @@ -673,7 +673,7 @@ void __init initialize_cache_info(void) */ __init u64 ppc64_bolted_size(void) { -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* Freescale BookE bolts the entire linear mapping */ /* XXX: BookE ppc64_rma_limit setup seems to disagree? */ if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) @@ -723,7 +723,7 @@ void __init irqstack_early_init(void) } } -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 void __init exc_lvl_early_init(void) { unsigned int i; @@ -825,7 +825,7 @@ void __init setup_per_cpu_areas(void) /* * BookE and BookS radix are historical values and should be revisited. 
*/ - if (IS_ENABLED(CONFIG_PPC_BOOK3E)) { + if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) { atom_size = SZ_1M; } else if (radix_enabled()) { atom_size = PAGE_SIZE; diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index e68eb9381066..b60d81acccfc 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -71,7 +71,7 @@ SECTIONS .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { #ifdef CONFIG_PPC64 KEEP(*(.head.text.first_256B)); -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 #else KEEP(*(.head.text.real_vectors)); *(.head.text.real_trampolines); diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c index c2bea9db1c1e..a79e28c91e2b 100644 --- a/arch/powerpc/kexec/core_64.c +++ b/arch/powerpc/kexec/core_64.c @@ -360,7 +360,7 @@ void default_machine_kexec(struct kimage *image) * the RMA. On BookE there is no real MMU off mode, so we have to * keep it enabled as well (but then we have bolted TLB entries). */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 copy_with_mmu_off = false; #else copy_with_mmu_off = radix_enabled() || diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 88805757d0c9..341c2e0c71d2 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -38,7 +38,7 @@ static inline void _tlbil_pid(unsigned int pid) #else /* CONFIG_40x || CONFIG_PPC_8xx */ extern void _tlbil_all(void); extern void _tlbil_pid(unsigned int pid); -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 extern void _tlbil_pid_noind(unsigned int pid); #else #define _tlbil_pid_noind(pid) _tlbil_pid(pid) @@ -55,7 +55,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid, asm volatile ("tlbie %0; sync" : : "r" (address) : "memory"); trace_tlbie(0, 0, address, pid, 0, 0, 0); } -#elif defined(CONFIG_PPC_BOOK3E) +#elif defined(CONFIG_PPC_BOOK3E_64) extern void _tlbil_va(unsigned long address, unsigned int pid, unsigned int tsize, unsigned int ind); #else @@ -67,7 +67,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid, } #endif /* CONFIG_PPC_8xx */ -#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x) +#if defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_47x) extern void _tlbivax_bcast(unsigned long address, unsigned int pid, unsigned int tsize, unsigned int ind); #else diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S index d378031246ab..6914bc8e4ead 100644 --- a/arch/powerpc/mm/nohash/tlb_low.S +++ b/arch/powerpc/mm/nohash/tlb_low.S @@ -294,7 +294,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX) isync 1: wrtee r10 blr -#elif defined(CONFIG_PPC_BOOK3E) +#elif defined(CONFIG_PPC_BOOK3E_64) /* * New Book3E (>= 2.06) implementation * diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index be16eba0f704..069628670a0c 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 menuconfig FSL_SOC_BOOKE bool "Freescale Book-E Machine Type" - depends on PPC_85xx || PPC_BOOK3E + depends on PPC_85xx || PPC_BOOK3E_64 select FSL_SOC select PPC_UDBG_16550 select MPIC diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 11780074eb23..d63b6386a974 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -257,10 +257,6 @@ config PPC_BOOK3S def_bool y depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 -config PPC_BOOK3E - 
def_bool y
-	depends on PPC_BOOK3E_64
-
 config E500
 	select FSL_EMB_PERFMON
 	select PPC_FSL_BOOK3E
@@ -316,7 +312,7 @@ config 4xx
 config BOOKE
 	bool
-	depends on E500 || 44x || PPC_BOOK3E
+	depends on E500 || 44x || PPC_BOOK3E_64
 	default y
 config BOOKE_OR_40x
@@ -480,7 +476,7 @@ config PPC_MMU_NOHASH
 config PPC_BOOK3E_MMU
 	def_bool y
-	depends on PPC_85xx || PPC_BOOK3E
+	depends on PPC_85xx || PPC_BOOK3E_64
 config PPC_HAVE_PMU_SUPPORT
 	bool
@@ -503,7 +499,7 @@ config FORCE_SMP
 	select SMP
 config SMP
-	depends on PPC_BOOK3S || PPC_BOOK3E || PPC_85xx || PPC_47x
+	depends on PPC_BOOK3S || PPC_BOOK3E_64 || PPC_85xx || PPC_47x
 	select GENERIC_IRQ_MIGRATION
 	bool "Symmetric multi-processing support" if !FORCE_SMP
 	help
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 26ef3388c24c..e6d678d27b0f 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -195,7 +195,7 @@ static int do_spu_cmd(void);
 #ifdef CONFIG_44x
 static void dump_tlb_44x(void);
 #endif
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
 static void dump_tlb_book3e(void);
 #endif
@@ -288,11 +288,11 @@ Commands:\n\
   t	print backtrace\n\
   x	exit monitor and recover\n\
   X	exit monitor and don't recover\n"
-#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_BOOK3E)
+#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_BOOK3E_64)
 "  u	dump segment table or SLB\n"
 #elif defined(CONFIG_PPC_BOOK3S_32)
 "  u	dump segment registers\n"
-#elif defined(CONFIG_44x) || defined(CONFIG_PPC_BOOK3E)
+#elif defined(CONFIG_44x) || defined(CONFIG_PPC_BOOK3E_64)
 "  u	dump TLB\n"
 #endif
 "  U	show uptime information\n"
@@ -1166,7 +1166,7 @@ cmds(struct pt_regs *excp)
 		case 'u':
 			dump_tlb_44x();
 			break;
-#elif defined(CONFIG_PPC_BOOK3E)
+#elif defined(CONFIG_PPC_BOOK3E_64)
 		case 'u':
 			dump_tlb_book3e();
 			break;
@@ -2686,7 +2686,7 @@ static void dump_one_paca(int cpu)
 	DUMP(p, rfi_flush_fallback_area, "%-*px");
 #endif
 	DUMP(p, dscr_default, "%#-*llx");
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
 	DUMP(p, pgd, "%-*px");
 	DUMP(p, kernel_pgd, "%-*px");
 	DUMP(p, tcd_ptr, "%-*px");
@@ -2701,7 +2701,7 @@ static void dump_one_paca(int cpu)
 	DUMP(p, canary, "%#-*lx");
 #endif
 	DUMP(p, saved_r1, "%#-*llx");
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
 	DUMP(p, trap_save, "%#-*x");
 #endif
 	DUMP(p, irq_soft_mask, "%#-*x");
@@ -3823,7 +3823,7 @@ static void dump_tlb_44x(void)
 }
 #endif /* CONFIG_44x */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
 static void dump_tlb_book3e(void)
 {
 	u32 mmucfg, pidmask, lpidmask;
@@ -3965,7 +3965,7 @@ static void dump_tlb_book3e(void)
 		}
 	}
 }
-#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_BOOK3E_64 */
 static void xmon_init(int enable)
 {
-- cgit v1.2.3-70-g09d2

From 688de017efaab8a7764ab2c05ce7128d0361023b Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Mon, 19 Sep 2022 19:01:35 +0200
Subject: powerpc: Change CONFIG_E500 to CONFIG_PPC_E500

It will be used outside arch/powerpc, so make it clear it's a powerpc
configuration item.

And we already have CONFIG_PPC_E500MC, so that will make it more
consistent.
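A minimal sketch of what the rename means at a use site (hypothetical
fragment, not from this series; the helper name is invented):

	#include <linux/kconfig.h>

	/* Invented helper for illustration only: callers outside
	 * arch/powerpc now see a clearly powerpc-prefixed option
	 * where they previously tested CONFIG_E500.
	 */
	static inline bool have_ppc_e500(void)
	{
		return IS_ENABLED(CONFIG_PPC_E500);
	}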
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/e63b22083c11c4300f4a82d3123a46e5fdd54fa6.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/Makefile | 2 +- arch/powerpc/include/asm/cputable.h | 4 ++-- arch/powerpc/include/asm/kgdb.h | 2 +- arch/powerpc/include/asm/mmu.h | 4 ++-- arch/powerpc/include/asm/reg_booke.h | 6 +++--- arch/powerpc/include/asm/synch.h | 2 +- arch/powerpc/include/asm/vdso/timebase.h | 2 +- arch/powerpc/kernel/Makefile | 2 +- arch/powerpc/kernel/cpu_setup_fsl_booke.S | 4 ++-- arch/powerpc/kernel/entry_32.S | 4 ++-- arch/powerpc/kernel/head_85xx.S | 4 ++-- arch/powerpc/kernel/head_booke.h | 2 +- arch/powerpc/kernel/setup_32.c | 2 +- arch/powerpc/kernel/traps.c | 2 +- arch/powerpc/kvm/Kconfig | 4 ++-- arch/powerpc/platforms/Kconfig.cputype | 26 +++++++++++++------------- arch/powerpc/sysdev/fsl_pci.c | 2 +- arch/powerpc/sysdev/fsl_rio.c | 2 +- 18 files changed, 38 insertions(+), 38 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index f6d477c4aa64..cb01832385d0 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -210,7 +210,7 @@ KBUILD_CFLAGS += $(call cc-option,-mno-string) cpu-as-$(CONFIG_40x) += -Wa,-m405 cpu-as-$(CONFIG_44x) += -Wa,-m440 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) -cpu-as-$(CONFIG_E500) += -Wa,-me500 +cpu-as-$(CONFIG_PPC_E500) += -Wa,-me500 # When using '-many -mpower4' gas will first try and find a matching power4 # mnemonic and failing that it will allow any valid mnemonic that GAS knows diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 27875f0b7bc7..757dbded11dc 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -510,7 +510,7 @@ enum { #elif defined(CONFIG_44x) CPU_FTRS_44X | CPU_FTRS_440x6 | #endif -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 CPU_FTRS_E500 | CPU_FTRS_E500_2 | #endif #ifdef CONFIG_PPC_E500MC @@ -584,7 +584,7 @@ enum { #elif defined(CONFIG_44x) CPU_FTRS_44X & CPU_FTRS_440x6 & #endif -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 CPU_FTRS_E500 & CPU_FTRS_E500_2 & #endif #ifdef CONFIG_PPC_E500MC diff --git a/arch/powerpc/include/asm/kgdb.h b/arch/powerpc/include/asm/kgdb.h index a9e098a3b881..715c18b75334 100644 --- a/arch/powerpc/include/asm/kgdb.h +++ b/arch/powerpc/include/asm/kgdb.h @@ -52,7 +52,7 @@ static inline void arch_kgdb_breakpoint(void) /* On non-E500 family PPC32 we determine the size by picking the last * register we need, but on E500 we skip sections so we list what we * need to store, and add it up. 
*/ -#ifndef CONFIG_E500 +#ifndef CONFIG_PPC_E500 #define MAXREG (PT_FPSCR+1) #else /* 32 GPRs (8 bytes), nip, msr, ccr, link, ctr, xer, acc (8 bytes), spefscr*/ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 860d0290ca4d..5b46da9ba7f6 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -162,7 +162,7 @@ enum { #elif defined(CONFIG_44x) MMU_FTR_TYPE_44x | #endif -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX | #endif #ifdef CONFIG_PPC_BOOK3S_32 @@ -211,7 +211,7 @@ enum { #elif defined(CONFIG_44x) #define MMU_FTRS_ALWAYS MMU_FTR_TYPE_44x #endif -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 #define MMU_FTRS_ALWAYS MMU_FTR_TYPE_FSL_E #endif diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 17b8dcd9a40d..af56980b6cdb 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -246,7 +246,7 @@ #define PPC47x_MCSR_FPR 0x00800000 /* FPR parity error */ #define PPC47x_MCSR_IPR 0x00400000 /* Imprecise Machine Check Exception */ -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 /* All e500 */ #define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */ #define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */ @@ -282,7 +282,7 @@ #endif /* Bit definitions for the HID1 */ -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 /* e500v1/v2 */ #define HID1_PLL_CFG_MASK 0xfc000000 /* PLL_CFG input pins */ #define HID1_RFXE 0x00020000 /* Read fault exception enable */ @@ -545,7 +545,7 @@ #define TCR_FIE 0x00800000 /* FIT Interrupt Enable */ #define TCR_ARE 0x00400000 /* Auto Reload Enable */ -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 #define TCR_GET_WP(tcr) ((((tcr) & 0xC0000000) >> 30) | \ (((tcr) & 0x1E0000) >> 15)) #else diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h index 7130176d8cb8..b0b4c64870d7 100644 --- a/arch/powerpc/include/asm/synch.h +++ b/arch/powerpc/include/asm/synch.h @@ -44,7 +44,7 @@ static inline void ppc_after_tlbiel_barrier(void) #if defined(__powerpc64__) # define LWSYNC lwsync -#elif defined(CONFIG_E500) +#elif defined(CONFIG_PPC_E500) # define LWSYNC \ START_LWSYNC_SECTION(96); \ sync; \ diff --git a/arch/powerpc/include/asm/vdso/timebase.h b/arch/powerpc/include/asm/vdso/timebase.h index 891c9d5eaabe..e9245f86a46c 100644 --- a/arch/powerpc/include/asm/vdso/timebase.h +++ b/arch/powerpc/include/asm/vdso/timebase.h @@ -12,7 +12,7 @@ * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit * version below in the else case of the ifdef. 
*/ -#if defined(__powerpc64__) && (defined(CONFIG_PPC_CELL) || defined(CONFIG_E500)) +#if defined(__powerpc64__) && (defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500)) #define mftb() ({unsigned long rval; \ asm volatile( \ "90: mfspr %0, %2;\n" \ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 4483cae7dc9f..33dafd12e81d 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -101,7 +101,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_FA_DUMP) += fadump.o obj-$(CONFIG_PRESERVE_FA_DUMP) += fadump.o ifdef CONFIG_PPC32 -obj-$(CONFIG_E500) += idle_e500.o +obj-$(CONFIG_PPC_E500) += idle_e500.o endif obj-$(CONFIG_PPC_BOOK3S_32) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o obj-$(CONFIG_TAU) += tau_6xx.o diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S index 4bf33f1b4193..058336079069 100644 --- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S +++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S @@ -108,7 +108,7 @@ _GLOBAL(__setup_cpu_e6500) #endif /* CONFIG_PPC_E500MC */ #ifdef CONFIG_PPC32 -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 #ifndef CONFIG_PPC_E500MC _GLOBAL(__setup_cpu_e500v1) _GLOBAL(__setup_cpu_e500v2) @@ -156,7 +156,7 @@ _GLOBAL(__setup_cpu_e5500) mtlr r5 blr #endif /* CONFIG_PPC_E500MC */ -#endif /* CONFIG_E500 */ +#endif /* CONFIG_PPC_E500 */ #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PPC_BOOK3E_64 diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 1d599df6f169..e6d5fe3a8585 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -49,7 +49,7 @@ */ .align 12 -#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500) +#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500) .globl prepare_transfer_to_handler prepare_transfer_to_handler: /* if from kernel, check interrupted DOZE/NAP mode */ @@ -71,7 +71,7 @@ prepare_transfer_to_handler: lwz r2, GPR2(r11) b fast_exception_return _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler) -#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */ +#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */ #if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32) .globl __kuep_lock diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S index 48b168b5dc57..52c0ab416326 100644 --- a/arch/powerpc/kernel/head_85xx.S +++ b/arch/powerpc/kernel/head_85xx.S @@ -912,7 +912,7 @@ get_phys_addr: * Global functions */ -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 #ifndef CONFIG_PPC_E500MC /* Adjust or setup IVORs for e500v1/v2 */ _GLOBAL(__setup_e500_ivors) @@ -955,7 +955,7 @@ _GLOBAL(__setup_ehv_ivors) sync blr #endif /* CONFIG_PPC_E500MC */ -#endif /* CONFIG_E500 */ +#endif /* CONFIG_PPC_E500 */ #ifdef CONFIG_SPE /* diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index bb6d5d0fc4ac..a2f82ced6e4a 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -103,7 +103,7 @@ END_BTB_FLUSH_SECTION .endm .macro prepare_transfer_to_handler -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 andi. 
r12,r9,MSR_PR bne 777f bl prepare_transfer_to_handler diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 813261789303..b761cc1a403c 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -207,7 +207,7 @@ void __init setup_power_save(void) ppc_md.power_save = ppc6xx_idle; #endif -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 if (cpu_has_feature(CPU_FTR_CAN_DOZE) || cpu_has_feature(CPU_FTR_CAN_NAP)) ppc_md.power_save = e500_idle; diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index f181c434289e..62ec50a7a8ef 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -600,7 +600,7 @@ static inline int check_io_access(struct pt_regs *regs) #define inst_length(reason) (((reason) & REASON_PREFIXED) ? 8 : 4) -#if defined(CONFIG_E500) +#if defined(CONFIG_PPC_E500) int machine_check_e500mc(struct pt_regs *regs) { unsigned long mcsr = mfspr(SPRN_MCSR); diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index dcb398d5e009..61cdd782d3c5 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -189,7 +189,7 @@ config KVM_EXIT_TIMING config KVM_E500V2 bool "KVM support for PowerPC E500v2 processors" - depends on E500 && !PPC_E500MC + depends on PPC_E500 && !PPC_E500MC select KVM select KVM_MMIO select MMU_NOTIFIER @@ -220,7 +220,7 @@ config KVM_E500MC config KVM_MPIC bool "KVM in-kernel MPIC emulation" - depends on KVM && E500 + depends on KVM && PPC_E500 select HAVE_KVM_IRQCHIP select HAVE_KVM_IRQFD select HAVE_KVM_IRQ_ROUTING diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index d63b6386a974..5b065186ace5 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -33,7 +33,7 @@ config PPC_BOOK3S_32 config PPC_85xx bool "Freescale 85xx" - select E500 + select PPC_E500 config PPC_8xx bool "Freescale 8xx" @@ -108,7 +108,7 @@ config PPC_BOOK3S_64 config PPC_BOOK3E_64 bool "Embedded processors" select PPC_FSL_BOOK3E - select E500 + select PPC_E500 select PPC_E500MC select PPC_FPU # Make it a choice ? select PPC_SMP_MUXED_IPI @@ -175,11 +175,11 @@ config POWER9_CPU config E5500_CPU bool "Freescale e5500" - depends on PPC64 && E500 + depends on PPC64 && PPC_E500 config E6500_CPU bool "Freescale e6500" - depends on PPC64 && E500 + depends on PPC64 && PPC_E500 config 405_CPU bool "40x family" @@ -257,7 +257,7 @@ config PPC_BOOK3S def_bool y depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 -config E500 +config PPC_E500 select FSL_EMB_PERFMON select PPC_FSL_BOOK3E bool @@ -266,7 +266,7 @@ config PPC_E500MC bool "e500mc Support" select PPC_FPU select COMMON_CLK - depends on E500 + depends on PPC_E500 help This must be enabled for running on e500mc (and derivatives such as e5500/e6500), and must be disabled for running on @@ -289,7 +289,7 @@ config PPC_FPU config FSL_EMB_PERFMON bool "Freescale Embedded Perfmon" - depends on E500 || PPC_83xx + depends on PPC_E500 || PPC_83xx help This is the Performance Monitor support found on the e500 core and some e300 cores (c3 and c4). 
Select this only if your @@ -302,7 +302,7 @@ config FSL_EMB_PERF_EVENT config FSL_EMB_PERF_EVENT_E500 bool - depends on FSL_EMB_PERF_EVENT && E500 + depends on FSL_EMB_PERF_EVENT && PPC_E500 default y config 4xx @@ -312,7 +312,7 @@ config 4xx config BOOKE bool - depends on E500 || 44x || PPC_BOOK3E_64 + depends on PPC_E500 || 44x || PPC_BOOK3E_64 default y config BOOKE_OR_40x @@ -332,12 +332,12 @@ config PPC_FSL_BOOK3E config PTE_64BIT bool - depends on 44x || E500 || PPC_86xx + depends on 44x || PPC_E500 || PPC_86xx default y if PHYS_64BIT config PHYS_64BIT - bool 'Large physical address support' if E500 || PPC_86xx - depends on (44x || E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx + bool 'Large physical address support' if PPC_E500 || PPC_86xx + depends on (44x || PPC_E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx select PHYS_ADDR_T_64BIT help This option enables kernel support for larger than 32-bit physical @@ -384,7 +384,7 @@ config VSX config SPE_POSSIBLE def_bool y - depends on E500 && !PPC_E500MC + depends on PPC_E500 && !PPC_E500MC config SPE bool "SPE Support" diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 1ef7400ef244..974d3db6faab 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -943,7 +943,7 @@ u64 fsl_pci_immrbar_base(struct pci_controller *hose) return 0; } -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 static int mcheck_handle_load(struct pt_regs *regs, u32 inst) { unsigned int rd, ra, rb, d; diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 4647c6074f3b..c8f044d62fe2 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -98,7 +98,7 @@ resource_size_t rio_law_start; struct fsl_rio_dbell *dbell; struct fsl_rio_pw *pw; -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 int fsl_rio_mcheck_exception(struct pt_regs *regs) { const struct exception_table_entry *entry; -- cgit v1.2.3-70-g09d2 From 3e7318584dfec11992f3ac45658c4bc1210b3778 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:38 +0200 Subject: powerpc: Remove CONFIG_PPC_FSL_BOOK3E CONFIG_PPC_FSL_BOOK3E is redundant with CONFIG_PPC_E500. Remove it. And rename five files accordingly. 
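The rewrite itself is mechanical. As a before/after sketch, take the setup_spectre_v2() declaration in asm/setup.h, one of the hunks further down in this patch:

/* before: guarded by the symbol being removed */
#ifdef CONFIG_PPC_FSL_BOOK3E
void __init setup_spectre_v2(void);
#else
static inline void setup_spectre_v2(void) {}
#endif

/* after: CONFIG_PPC_E500 covers the same platforms, because every
 * platform that selected PPC_FSL_BOOK3E also selects PPC_E500.
 */
#ifdef CONFIG_PPC_E500
void __init setup_spectre_v2(void);
#else
static inline void setup_spectre_v2(void) {}
#endif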
Signed-off-by: Christophe Leroy [mpe: Rename include guards to match new file names] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/795cb93b88c9a0279289712e674f39e3b108a1b4.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 2 +- arch/powerpc/include/asm/barrier.h | 2 +- arch/powerpc/include/asm/hugetlb.h | 4 +- arch/powerpc/include/asm/kvm_host.h | 2 +- arch/powerpc/include/asm/mmu.h | 2 +- arch/powerpc/include/asm/nohash/32/pgtable.h | 2 +- arch/powerpc/include/asm/nohash/64/pgtable.h | 2 +- arch/powerpc/include/asm/nohash/hugetlb-book3e.h | 45 --- arch/powerpc/include/asm/nohash/hugetlb-e500.h | 45 +++ arch/powerpc/include/asm/nohash/pgtable.h | 2 +- arch/powerpc/include/asm/nohash/pte-book3e.h | 129 -------- arch/powerpc/include/asm/nohash/pte-e500.h | 129 ++++++++ arch/powerpc/include/asm/page.h | 2 +- arch/powerpc/include/asm/ppc_asm.h | 6 +- arch/powerpc/include/asm/setup.h | 2 +- arch/powerpc/kernel/Makefile | 2 +- arch/powerpc/kernel/asm-offsets.c | 4 +- arch/powerpc/kernel/cpu_setup_e500.S | 333 ++++++++++++++++++++ arch/powerpc/kernel/cpu_setup_fsl_booke.S | 333 -------------------- arch/powerpc/kernel/head_booke.h | 2 +- arch/powerpc/kernel/interrupt_64.S | 2 +- arch/powerpc/kernel/security.c | 10 +- arch/powerpc/kernel/smp.c | 2 +- arch/powerpc/kernel/sysfs.c | 6 +- arch/powerpc/kernel/vmlinux.lds.S | 2 +- arch/powerpc/lib/feature-fixups.c | 4 +- arch/powerpc/mm/hugetlbpage.c | 2 +- arch/powerpc/mm/mem.c | 2 +- arch/powerpc/mm/mmu_decl.h | 4 +- arch/powerpc/mm/nohash/Makefile | 6 +- arch/powerpc/mm/nohash/book3e_hugetlbpage.c | 190 ------------ arch/powerpc/mm/nohash/e500.c | 375 +++++++++++++++++++++++ arch/powerpc/mm/nohash/e500_hugetlbpage.c | 190 ++++++++++++ arch/powerpc/mm/nohash/fsl_book3e.c | 375 ----------------------- arch/powerpc/mm/nohash/tlb.c | 16 +- arch/powerpc/mm/nohash/tlb_low.S | 2 +- arch/powerpc/platforms/Kconfig.cputype | 16 +- 37 files changed, 1123 insertions(+), 1131 deletions(-) delete mode 100644 arch/powerpc/include/asm/nohash/hugetlb-book3e.h create mode 100644 arch/powerpc/include/asm/nohash/hugetlb-e500.h delete mode 100644 arch/powerpc/include/asm/nohash/pte-book3e.h create mode 100644 arch/powerpc/include/asm/nohash/pte-e500.h create mode 100644 arch/powerpc/kernel/cpu_setup_e500.S delete mode 100644 arch/powerpc/kernel/cpu_setup_fsl_booke.S delete mode 100644 arch/powerpc/mm/nohash/book3e_hugetlbpage.c create mode 100644 arch/powerpc/mm/nohash/e500.c create mode 100644 arch/powerpc/mm/nohash/e500_hugetlbpage.c delete mode 100644 arch/powerpc/mm/nohash/fsl_book3e.c (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 9d721cac20b8..a7b58645cc3f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -290,7 +290,7 @@ config PPC_LONG_DOUBLE_128 config PPC_BARRIER_NOSPEC bool default y - depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E + depends on PPC_BOOK3S_64 || PPC_E500 config EARLY_PRINTK bool diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index ef2d8b15eaab..e80b2c0e9315 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -86,7 +86,7 @@ do { \ #ifdef CONFIG_PPC_BOOK3S_64 #define NOSPEC_BARRIER_SLOT nop -#elif defined(CONFIG_PPC_FSL_BOOK3E) +#elif defined(CONFIG_PPC_E500) #define NOSPEC_BARRIER_SLOT nop; nop #endif diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 32ce0fb7548f..ea71f7245a63 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ 
b/arch/powerpc/include/asm/hugetlb.h @@ -7,8 +7,8 @@ #ifdef CONFIG_PPC_BOOK3S_64 #include -#elif defined(CONFIG_PPC_FSL_BOOK3E) -#include +#elif defined(CONFIG_PPC_E500) +#include #elif defined(CONFIG_PPC_8xx) #include #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index c2b003550dc9..caea15dcb91d 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -443,7 +443,7 @@ struct kvmppc_passthru_irqmap { }; #endif -# ifdef CONFIG_PPC_FSL_BOOK3E +# ifdef CONFIG_PPC_E500 #define KVMPPC_BOOKE_IAC_NUM 2 #define KVMPPC_BOOKE_DAC_NUM 2 # else diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 5b46da9ba7f6..39057320e436 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -141,7 +141,7 @@ typedef pte_t *pgtable_t; -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 #include DECLARE_PER_CPU(int, next_tlbcam_idx); #endif diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 197e7552d9f6..0d40b33184eb 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -131,7 +131,7 @@ void unmap_kernel_page(unsigned long va); #elif defined(CONFIG_44x) #include #elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT) -#include +#include #elif defined(CONFIG_PPC_85xx) #include #elif defined(CONFIG_PPC_8xx) diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 599921cc257e..879e9a6e5a87 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -70,7 +70,7 @@ /* * Include the PTE bits definitions */ -#include +#include #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) diff --git a/arch/powerpc/include/asm/nohash/hugetlb-book3e.h b/arch/powerpc/include/asm/nohash/hugetlb-book3e.h deleted file mode 100644 index ecd8694cb229..000000000000 --- a/arch/powerpc/include/asm/nohash/hugetlb-book3e.h +++ /dev/null @@ -1,45 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H -#define _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H - -static inline pte_t *hugepd_page(hugepd_t hpd) -{ - if (WARN_ON(!hugepd_ok(hpd))) - return NULL; - - return (pte_t *)((hpd_val(hpd) & ~HUGEPD_SHIFT_MASK) | PD_HUGE); -} - -static inline unsigned int hugepd_shift(hugepd_t hpd) -{ - return hpd_val(hpd) & HUGEPD_SHIFT_MASK; -} - -static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, - unsigned int pdshift) -{ - /* - * On FSL BookE, we have multiple higher-level table entries that - * point to the same hugepte. Just use the first one since they're all - * identical. So for that case, idx=0. 
- */ - return hugepd_page(hpd); -} - -void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); - -static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift) -{ - /* We use the old format for PPC_FSL_BOOK3E */ - *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift); -} - -static inline int check_and_get_huge_psize(int shift) -{ - if (shift & 1) /* Not a power of 4 */ - return -EINVAL; - - return shift_to_mmu_psize(shift); -} - -#endif /* _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H */ diff --git a/arch/powerpc/include/asm/nohash/hugetlb-e500.h b/arch/powerpc/include/asm/nohash/hugetlb-e500.h new file mode 100644 index 000000000000..8f04ad20e040 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/hugetlb-e500.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_HUGETLB_E500_H +#define _ASM_POWERPC_NOHASH_HUGETLB_E500_H + +static inline pte_t *hugepd_page(hugepd_t hpd) +{ + if (WARN_ON(!hugepd_ok(hpd))) + return NULL; + + return (pte_t *)((hpd_val(hpd) & ~HUGEPD_SHIFT_MASK) | PD_HUGE); +} + +static inline unsigned int hugepd_shift(hugepd_t hpd) +{ + return hpd_val(hpd) & HUGEPD_SHIFT_MASK; +} + +static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, + unsigned int pdshift) +{ + /* + * On FSL BookE, we have multiple higher-level table entries that + * point to the same hugepte. Just use the first one since they're all + * identical. So for that case, idx=0. + */ + return hugepd_page(hpd); +} + +void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); + +static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift) +{ + /* We use the old format for PPC_E500 */ + *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift); +} + +static inline int check_and_get_huge_psize(int shift) +{ + if (shift & 1) /* Not a power of 4 */ + return -EINVAL; + + return shift_to_mmu_psize(shift); +} + +#endif /* _ASM_POWERPC_NOHASH_HUGETLB_E500_H */ diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 4fd73c7412d0..d9067dfc531c 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -266,7 +266,7 @@ static inline int pud_huge(pud_t pud) * We use it to ensure coherency between the i-cache and d-cache * for the page which has just been mapped in. */ -#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE) +#if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE) void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); #else static inline diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h deleted file mode 100644 index f798640422c2..000000000000 --- a/arch/powerpc/include/asm/nohash/pte-book3e.h +++ /dev/null @@ -1,129 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_NOHASH_PTE_BOOK3E_H -#define _ASM_POWERPC_NOHASH_PTE_BOOK3E_H -#ifdef __KERNEL__ - -/* PTE bit definitions for processors compliant to the Book3E - * architecture 2.06 or later. The position of the PTE bits - * matches the HW definition of the optional Embedded Page Table - * category. 
- */ - -/* Architected bits */ -#define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */ -#define _PAGE_SW1 0x000002 -#define _PAGE_BIT_SWAP_TYPE 2 -#define _PAGE_BAP_SR 0x000004 -#define _PAGE_BAP_UR 0x000008 -#define _PAGE_BAP_SW 0x000010 -#define _PAGE_BAP_UW 0x000020 -#define _PAGE_BAP_SX 0x000040 -#define _PAGE_BAP_UX 0x000080 -#define _PAGE_PSIZE_MSK 0x000f00 -#define _PAGE_PSIZE_4K 0x000200 -#define _PAGE_PSIZE_8K 0x000300 -#define _PAGE_PSIZE_16K 0x000400 -#define _PAGE_PSIZE_32K 0x000500 -#define _PAGE_PSIZE_64K 0x000600 -#define _PAGE_PSIZE_128K 0x000700 -#define _PAGE_PSIZE_256K 0x000800 -#define _PAGE_PSIZE_512K 0x000900 -#define _PAGE_PSIZE_1M 0x000a00 -#define _PAGE_PSIZE_2M 0x000b00 -#define _PAGE_PSIZE_4M 0x000c00 -#define _PAGE_PSIZE_8M 0x000d00 -#define _PAGE_PSIZE_16M 0x000e00 -#define _PAGE_PSIZE_32M 0x000f00 -#define _PAGE_DIRTY 0x001000 /* C: page changed */ -#define _PAGE_SW0 0x002000 -#define _PAGE_U3 0x004000 -#define _PAGE_U2 0x008000 -#define _PAGE_U1 0x010000 -#define _PAGE_U0 0x020000 -#define _PAGE_ACCESSED 0x040000 -#define _PAGE_ENDIAN 0x080000 -#define _PAGE_GUARDED 0x100000 -#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */ -#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */ -#define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */ - -/* "Higher level" linux bit combinations */ -#define _PAGE_EXEC (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */ -#define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */ -#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY) -#define _PAGE_KERNEL_RO (_PAGE_BAP_SR) -#define _PAGE_KERNEL_RWX (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX) -#define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX) -#define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */ -#define _PAGE_PRIVILEGED (_PAGE_BAP_SR) - -#define _PAGE_SPECIAL _PAGE_SW0 - -/* Base page size */ -#define _PAGE_PSIZE _PAGE_PSIZE_4K -#define PTE_RPN_SHIFT (24) - -#define PTE_WIMGE_SHIFT (19) -#define PTE_BAP_SHIFT (2) - -/* On 32-bit, we never clear the top part of the PTE */ -#ifdef CONFIG_PPC32 -#define _PTE_NONE_MASK 0xffffffff00000000ULL -#define _PMD_PRESENT 0 -#define _PMD_PRESENT_MASK (PAGE_MASK) -#define _PMD_BAD (~PAGE_MASK) -#define _PMD_USER 0 -#else -#define _PTE_NONE_MASK 0 -#endif - -/* - * We define 2 sets of base prot bits, one for basic pages (ie, - * cacheable kernel and user pages) and one for non cacheable - * pages. We always set _PAGE_COHERENT when SMP is enabled or - * the processor might need it for DMA coherency. 
- */ -#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) -#if defined(CONFIG_SMP) -#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) -#else -#define _PAGE_BASE (_PAGE_BASE_NC) -#endif - -/* Permission masks used to generate the __P and __S table */ -#define PAGE_NONE __pgprot(_PAGE_BASE) -#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) -#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX) -#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX) -#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX) - -#ifndef __ASSEMBLY__ -static inline pte_t pte_mkprivileged(pte_t pte) -{ - return __pte((pte_val(pte) & ~_PAGE_USER) | _PAGE_PRIVILEGED); -} - -#define pte_mkprivileged pte_mkprivileged - -static inline pte_t pte_mkuser(pte_t pte) -{ - return __pte((pte_val(pte) & ~_PAGE_PRIVILEGED) | _PAGE_USER); -} - -#define pte_mkuser pte_mkuser - -static inline pte_t pte_mkexec(pte_t pte) -{ - if (pte_val(pte) & _PAGE_BAP_UR) - return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX); - else - return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX); -} -#define pte_mkexec pte_mkexec - -#endif /* __ASSEMBLY__ */ - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */ diff --git a/arch/powerpc/include/asm/nohash/pte-e500.h b/arch/powerpc/include/asm/nohash/pte-e500.h new file mode 100644 index 000000000000..0934e8965e4e --- /dev/null +++ b/arch/powerpc/include/asm/nohash/pte-e500.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_PTE_E500_H +#define _ASM_POWERPC_NOHASH_PTE_E500_H +#ifdef __KERNEL__ + +/* PTE bit definitions for processors compliant to the Book3E + * architecture 2.06 or later. The position of the PTE bits + * matches the HW definition of the optional Embedded Page Table + * category. + */ + +/* Architected bits */ +#define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */ +#define _PAGE_SW1 0x000002 +#define _PAGE_BIT_SWAP_TYPE 2 +#define _PAGE_BAP_SR 0x000004 +#define _PAGE_BAP_UR 0x000008 +#define _PAGE_BAP_SW 0x000010 +#define _PAGE_BAP_UW 0x000020 +#define _PAGE_BAP_SX 0x000040 +#define _PAGE_BAP_UX 0x000080 +#define _PAGE_PSIZE_MSK 0x000f00 +#define _PAGE_PSIZE_4K 0x000200 +#define _PAGE_PSIZE_8K 0x000300 +#define _PAGE_PSIZE_16K 0x000400 +#define _PAGE_PSIZE_32K 0x000500 +#define _PAGE_PSIZE_64K 0x000600 +#define _PAGE_PSIZE_128K 0x000700 +#define _PAGE_PSIZE_256K 0x000800 +#define _PAGE_PSIZE_512K 0x000900 +#define _PAGE_PSIZE_1M 0x000a00 +#define _PAGE_PSIZE_2M 0x000b00 +#define _PAGE_PSIZE_4M 0x000c00 +#define _PAGE_PSIZE_8M 0x000d00 +#define _PAGE_PSIZE_16M 0x000e00 +#define _PAGE_PSIZE_32M 0x000f00 +#define _PAGE_DIRTY 0x001000 /* C: page changed */ +#define _PAGE_SW0 0x002000 +#define _PAGE_U3 0x004000 +#define _PAGE_U2 0x008000 +#define _PAGE_U1 0x010000 +#define _PAGE_U0 0x020000 +#define _PAGE_ACCESSED 0x040000 +#define _PAGE_ENDIAN 0x080000 +#define _PAGE_GUARDED 0x100000 +#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */ +#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */ +#define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */ + +/* "Higher level" linux bit combinations */ +#define _PAGE_EXEC (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. 
and was cache cleaned */ +#define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */ +#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY) +#define _PAGE_KERNEL_RO (_PAGE_BAP_SR) +#define _PAGE_KERNEL_RWX (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX) +#define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX) +#define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */ +#define _PAGE_PRIVILEGED (_PAGE_BAP_SR) + +#define _PAGE_SPECIAL _PAGE_SW0 + +/* Base page size */ +#define _PAGE_PSIZE _PAGE_PSIZE_4K +#define PTE_RPN_SHIFT (24) + +#define PTE_WIMGE_SHIFT (19) +#define PTE_BAP_SHIFT (2) + +/* On 32-bit, we never clear the top part of the PTE */ +#ifdef CONFIG_PPC32 +#define _PTE_NONE_MASK 0xffffffff00000000ULL +#define _PMD_PRESENT 0 +#define _PMD_PRESENT_MASK (PAGE_MASK) +#define _PMD_BAD (~PAGE_MASK) +#define _PMD_USER 0 +#else +#define _PTE_NONE_MASK 0 +#endif + +/* + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non cacheable + * pages. We always set _PAGE_COHERENT when SMP is enabled or + * the processor might need it for DMA coherency. + */ +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) +#if defined(CONFIG_SMP) +#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) +#else +#define _PAGE_BASE (_PAGE_BASE_NC) +#endif + +/* Permission masks used to generate the __P and __S table */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX) + +#ifndef __ASSEMBLY__ +static inline pte_t pte_mkprivileged(pte_t pte) +{ + return __pte((pte_val(pte) & ~_PAGE_USER) | _PAGE_PRIVILEGED); +} + +#define pte_mkprivileged pte_mkprivileged + +static inline pte_t pte_mkuser(pte_t pte) +{ + return __pte((pte_val(pte) & ~_PAGE_PRIVILEGED) | _PAGE_USER); +} + +#define pte_mkuser pte_mkuser + +static inline pte_t pte_mkexec(pte_t pte) +{ + if (pte_val(pte) & _PAGE_BAP_UR) + return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX); + else + return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX); +} +#define pte_mkexec pte_mkexec + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_NOHASH_PTE_E500_H */ diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 7f20636d13ed..edf1dd1b0ca9 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -31,7 +31,7 @@ extern unsigned int hpage_shift; #define HPAGE_SHIFT hpage_shift #elif defined(CONFIG_PPC_8xx) #define HPAGE_SHIFT 19 /* 512k pages */ -#elif defined(CONFIG_PPC_FSL_BOOK3E) +#elif defined(CONFIG_PPC_E500) #define HPAGE_SHIFT 22 /* 4M pages */ #endif #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 55149a0384db..7e4fe766e247 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -342,7 +342,7 @@ n: #endif /* various errata or part fixups */ -#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500) #define MFTB(dest) \ 90: mfspr dest, SPRN_TBRL; \ BEGIN_FTR_SECTION_NESTED(96); \ 
@@ -768,7 +768,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) stringify_in_c(.llong (_target);) \ stringify_in_c(.previous) -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 #define BTB_FLUSH(reg) \ lis reg,BUCSR_INIT@h; \ ori reg,reg,BUCSR_INIT@l; \ @@ -776,6 +776,6 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) isync; #else #define BTB_FLUSH(reg) -#endif /* CONFIG_PPC_FSL_BOOK3E */ +#endif /* CONFIG_PPC_E500 */ #endif /* _ASM_POWERPC_PPC_ASM_H */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index dd461b2c825c..85143849a586 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -69,7 +69,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end); static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { } #endif -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 void __init setup_spectre_v2(void); #else static inline void setup_spectre_v2(void) {} diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 33dafd12e81d..658c4dffaa56 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -114,7 +114,7 @@ endif obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o obj-$(CONFIG_MODULES) += module.o module_$(BITS).o obj-$(CONFIG_44x) += cpu_setup_44x.o -obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o +obj-$(CONFIG_PPC_E500) += cpu_setup_e500.o obj-$(CONFIG_PPC_DOORBELL) += dbell.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 10ce03052a19..4ce2a4aa3985 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -59,7 +59,7 @@ #endif #endif -#if defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_PPC_E500) #include "../mm/mmu_decl.h" #endif @@ -651,7 +651,7 @@ int main(void) DEFINE(PGD_T_LOG2, PGD_T_LOG2); DEFINE(PTE_T_LOG2, PTE_T_LOG2); #endif -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); OFFSET(TLBCAM_MAS0, tlbcam, MAS0); OFFSET(TLBCAM_MAS1, tlbcam, MAS1); diff --git a/arch/powerpc/kernel/cpu_setup_e500.S b/arch/powerpc/kernel/cpu_setup_e500.S new file mode 100644 index 000000000000..058336079069 --- /dev/null +++ b/arch/powerpc/kernel/cpu_setup_e500.S @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file contains low level CPU setup functions. + * Kumar Gala + * Copyright 2009 Freescale Semiconductor, Inc. + * + * Based on cpu_setup_6xx code by + * Benjamin Herrenschmidt + */ + +#include +#include +#include +#include +#include +#include +#include + +_GLOBAL(__e500_icache_setup) + mfspr r0, SPRN_L1CSR1 + andi. r3, r0, L1CSR1_ICE + bnelr /* Already enabled */ + oris r0, r0, L1CSR1_CPE@h + ori r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE) + mtspr SPRN_L1CSR1, r0 /* Enable I-Cache */ + isync + blr + +_GLOBAL(__e500_dcache_setup) + mfspr r0, SPRN_L1CSR0 + andi. r3, r0, L1CSR0_DCE + bnelr /* Already enabled */ + msync + isync + li r0, 0 + mtspr SPRN_L1CSR0, r0 /* Disable */ + msync + isync + li r0, (L1CSR0_DCFI | L1CSR0_CLFC) + mtspr SPRN_L1CSR0, r0 /* Invalidate */ + isync +1: mfspr r0, SPRN_L1CSR0 + andi. 
r3, r0, L1CSR0_CLFC + bne+ 1b /* Wait for lock bits reset */ + oris r0, r0, L1CSR0_CPE@h + ori r0, r0, L1CSR0_DCE + msync + isync + mtspr SPRN_L1CSR0, r0 /* Enable */ + isync + blr + +/* + * FIXME - we haven't yet done testing to determine a reasonable default + * value for PW20_WAIT_IDLE_BIT. + */ +#define PW20_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */ +_GLOBAL(setup_pw20_idle) + mfspr r3, SPRN_PWRMGTCR0 + + /* Set PW20_WAIT bit, enable pw20 state*/ + ori r3, r3, PWRMGTCR0_PW20_WAIT + li r11, PW20_WAIT_IDLE_BIT + + /* Set Automatic PW20 Core Idle Count */ + rlwimi r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT + + mtspr SPRN_PWRMGTCR0, r3 + + blr + +/* + * FIXME - we haven't yet done testing to determine a reasonable default + * value for AV_WAIT_IDLE_BIT. + */ +#define AV_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */ +_GLOBAL(setup_altivec_idle) + mfspr r3, SPRN_PWRMGTCR0 + + /* Enable Altivec Idle */ + oris r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h + li r11, AV_WAIT_IDLE_BIT + + /* Set Automatic AltiVec Idle Count */ + rlwimi r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT + + mtspr SPRN_PWRMGTCR0, r3 + + blr + +#ifdef CONFIG_PPC_E500MC +_GLOBAL(__setup_cpu_e6500) + mflr r6 +#ifdef CONFIG_PPC64 + bl setup_altivec_ivors + /* Touch IVOR42 only if the CPU supports E.HV category */ + mfspr r10,SPRN_MMUCFG + rlwinm. r10,r10,0,MMUCFG_LPIDSIZE + beq 1f + bl setup_lrat_ivor +1: +#endif + bl setup_pw20_idle + bl setup_altivec_idle + bl __setup_cpu_e5500 + mtlr r6 + blr +#endif /* CONFIG_PPC_E500MC */ + +#ifdef CONFIG_PPC32 +#ifdef CONFIG_PPC_E500 +#ifndef CONFIG_PPC_E500MC +_GLOBAL(__setup_cpu_e500v1) +_GLOBAL(__setup_cpu_e500v2) + mflr r4 + bl __e500_icache_setup + bl __e500_dcache_setup + bl __setup_e500_ivors +#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI) + /* Ensure that RFXE is set */ + mfspr r3,SPRN_HID1 + oris r3,r3,HID1_RFXE@h + mtspr SPRN_HID1,r3 +#endif + mtlr r4 + blr +#else /* CONFIG_PPC_E500MC */ +_GLOBAL(__setup_cpu_e500mc) +_GLOBAL(__setup_cpu_e5500) + mflr r5 + bl __e500_icache_setup + bl __e500_dcache_setup + bl __setup_e500mc_ivors + /* + * We only want to touch IVOR38-41 if we're running on hardware + * that supports category E.HV. The architectural way to determine + * this is MMUCFG[LPIDSIZE]. + */ + mfspr r3, SPRN_MMUCFG + rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE + beq 1f + bl __setup_ehv_ivors + b 2f +1: + lwz r3, CPU_SPEC_FEATURES(r4) + /* We need this check as cpu_setup is also called for + * the secondary cores. So, if we have already cleared + * the feature on the primary core, avoid doing it on the + * secondary core. + */ + andi. r6, r3, CPU_FTR_EMB_HV + beq 2f + rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV + stw r3, CPU_SPEC_FEATURES(r4) +2: + mtlr r5 + blr +#endif /* CONFIG_PPC_E500MC */ +#endif /* CONFIG_PPC_E500 */ +#endif /* CONFIG_PPC32 */ + +#ifdef CONFIG_PPC_BOOK3E_64 +_GLOBAL(__restore_cpu_e6500) + mflr r5 + bl setup_altivec_ivors + /* Touch IVOR42 only if the CPU supports E.HV category */ + mfspr r10,SPRN_MMUCFG + rlwinm. r10,r10,0,MMUCFG_LPIDSIZE + beq 1f + bl setup_lrat_ivor +1: + bl setup_pw20_idle + bl setup_altivec_idle + bl __restore_cpu_e5500 + mtlr r5 + blr + +_GLOBAL(__restore_cpu_e5500) + mflr r4 + bl __e500_icache_setup + bl __e500_dcache_setup + bl __setup_base_ivors + bl setup_perfmon_ivor + bl setup_doorbell_ivors + /* + * We only want to touch IVOR38-41 if we're running on hardware + * that supports category E.HV. The architectural way to determine + * this is MMUCFG[LPIDSIZE]. + */ + mfspr r10,SPRN_MMUCFG + rlwinm. 
r10,r10,0,MMUCFG_LPIDSIZE + beq 1f + bl setup_ehv_ivors +1: + mtlr r4 + blr + +_GLOBAL(__setup_cpu_e5500) + mflr r5 + bl __e500_icache_setup + bl __e500_dcache_setup + bl __setup_base_ivors + bl setup_perfmon_ivor + bl setup_doorbell_ivors + /* + * We only want to touch IVOR38-41 if we're running on hardware + * that supports category E.HV. The architectural way to determine + * this is MMUCFG[LPIDSIZE]. + */ + mfspr r10,SPRN_MMUCFG + rlwinm. r10,r10,0,MMUCFG_LPIDSIZE + beq 1f + bl setup_ehv_ivors + b 2f +1: + ld r10,CPU_SPEC_FEATURES(r4) + LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV) + andc r10,r10,r9 + std r10,CPU_SPEC_FEATURES(r4) +2: + mtlr r5 + blr +#endif + +/* flush L1 data cache, it can apply to e500v2, e500mc and e5500 */ +_GLOBAL(flush_dcache_L1) + mfmsr r10 + wrteei 0 + + mfspr r3,SPRN_L1CFG0 + rlwinm r5,r3,9,3 /* Extract cache block size */ + twlgti r5,1 /* Only 32 and 64 byte cache blocks + * are currently defined. + */ + li r4,32 + subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) - + * log2(number of ways) + */ + slw r5,r4,r5 /* r5 = cache block size */ + + rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */ + mulli r7,r7,13 /* An 8-way cache will require 13 + * loads per set. + */ + slw r7,r7,r6 + + /* save off HID0 and set DCFA */ + mfspr r8,SPRN_HID0 + ori r9,r8,HID0_DCFA@l + mtspr SPRN_HID0,r9 + isync + + LOAD_REG_IMMEDIATE(r6, KERNELBASE) + mr r4, r6 + mtctr r7 + +1: lwz r3,0(r4) /* Load... */ + add r4,r4,r5 + bdnz 1b + + msync + mr r4, r6 + mtctr r7 + +1: dcbf 0,r4 /* ...and flush. */ + add r4,r4,r5 + bdnz 1b + + /* restore HID0 */ + mtspr SPRN_HID0,r8 + isync + + wrtee r10 + + blr + +has_L2_cache: + /* skip L2 cache on P2040/P2040E as they have no L2 cache */ + mfspr r3, SPRN_SVR + /* shift right by 8 bits and clear E bit of SVR */ + rlwinm r4, r3, 24, ~0x800 + + lis r3, SVR_P2040@h + ori r3, r3, SVR_P2040@l + cmpw r4, r3 + beq 1f + + li r3, 1 + blr +1: + li r3, 0 + blr + +/* flush backside L2 cache */ +flush_backside_L2_cache: + mflr r10 + bl has_L2_cache + mtlr r10 + cmpwi r3, 0 + beq 2f + + /* Flush the L2 cache */ + mfspr r3, SPRN_L2CSR0 + ori r3, r3, L2CSR0_L2FL@l + msync + isync + mtspr SPRN_L2CSR0,r3 + isync + + /* check if it is complete */ +1: mfspr r3,SPRN_L2CSR0 + andi. r3, r3, L2CSR0_L2FL@l + bne 1b +2: + blr + +_GLOBAL(cpu_down_flush_e500v2) + mflr r0 + bl flush_dcache_L1 + mtlr r0 + blr + +_GLOBAL(cpu_down_flush_e500mc) +_GLOBAL(cpu_down_flush_e5500) + mflr r0 + bl flush_dcache_L1 + bl flush_backside_L2_cache + mtlr r0 + blr + +/* L1 Data Cache of e6500 contains no modified data, no flush is required */ +_GLOBAL(cpu_down_flush_e6500) + blr diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S deleted file mode 100644 index 058336079069..000000000000 --- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S +++ /dev/null @@ -1,333 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * This file contains low level CPU setup functions. - * Kumar Gala - * Copyright 2009 Freescale Semiconductor, Inc. - * - * Based on cpu_setup_6xx code by - * Benjamin Herrenschmidt - */ - -#include -#include -#include -#include -#include -#include -#include - -_GLOBAL(__e500_icache_setup) - mfspr r0, SPRN_L1CSR1 - andi. r3, r0, L1CSR1_ICE - bnelr /* Already enabled */ - oris r0, r0, L1CSR1_CPE@h - ori r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE) - mtspr SPRN_L1CSR1, r0 /* Enable I-Cache */ - isync - blr - -_GLOBAL(__e500_dcache_setup) - mfspr r0, SPRN_L1CSR0 - andi. 
r3, r0, L1CSR0_DCE - bnelr /* Already enabled */ - msync - isync - li r0, 0 - mtspr SPRN_L1CSR0, r0 /* Disable */ - msync - isync - li r0, (L1CSR0_DCFI | L1CSR0_CLFC) - mtspr SPRN_L1CSR0, r0 /* Invalidate */ - isync -1: mfspr r0, SPRN_L1CSR0 - andi. r3, r0, L1CSR0_CLFC - bne+ 1b /* Wait for lock bits reset */ - oris r0, r0, L1CSR0_CPE@h - ori r0, r0, L1CSR0_DCE - msync - isync - mtspr SPRN_L1CSR0, r0 /* Enable */ - isync - blr - -/* - * FIXME - we haven't yet done testing to determine a reasonable default - * value for PW20_WAIT_IDLE_BIT. - */ -#define PW20_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */ -_GLOBAL(setup_pw20_idle) - mfspr r3, SPRN_PWRMGTCR0 - - /* Set PW20_WAIT bit, enable pw20 state*/ - ori r3, r3, PWRMGTCR0_PW20_WAIT - li r11, PW20_WAIT_IDLE_BIT - - /* Set Automatic PW20 Core Idle Count */ - rlwimi r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT - - mtspr SPRN_PWRMGTCR0, r3 - - blr - -/* - * FIXME - we haven't yet done testing to determine a reasonable default - * value for AV_WAIT_IDLE_BIT. - */ -#define AV_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */ -_GLOBAL(setup_altivec_idle) - mfspr r3, SPRN_PWRMGTCR0 - - /* Enable Altivec Idle */ - oris r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h - li r11, AV_WAIT_IDLE_BIT - - /* Set Automatic AltiVec Idle Count */ - rlwimi r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT - - mtspr SPRN_PWRMGTCR0, r3 - - blr - -#ifdef CONFIG_PPC_E500MC -_GLOBAL(__setup_cpu_e6500) - mflr r6 -#ifdef CONFIG_PPC64 - bl setup_altivec_ivors - /* Touch IVOR42 only if the CPU supports E.HV category */ - mfspr r10,SPRN_MMUCFG - rlwinm. r10,r10,0,MMUCFG_LPIDSIZE - beq 1f - bl setup_lrat_ivor -1: -#endif - bl setup_pw20_idle - bl setup_altivec_idle - bl __setup_cpu_e5500 - mtlr r6 - blr -#endif /* CONFIG_PPC_E500MC */ - -#ifdef CONFIG_PPC32 -#ifdef CONFIG_PPC_E500 -#ifndef CONFIG_PPC_E500MC -_GLOBAL(__setup_cpu_e500v1) -_GLOBAL(__setup_cpu_e500v2) - mflr r4 - bl __e500_icache_setup - bl __e500_dcache_setup - bl __setup_e500_ivors -#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI) - /* Ensure that RFXE is set */ - mfspr r3,SPRN_HID1 - oris r3,r3,HID1_RFXE@h - mtspr SPRN_HID1,r3 -#endif - mtlr r4 - blr -#else /* CONFIG_PPC_E500MC */ -_GLOBAL(__setup_cpu_e500mc) -_GLOBAL(__setup_cpu_e5500) - mflr r5 - bl __e500_icache_setup - bl __e500_dcache_setup - bl __setup_e500mc_ivors - /* - * We only want to touch IVOR38-41 if we're running on hardware - * that supports category E.HV. The architectural way to determine - * this is MMUCFG[LPIDSIZE]. - */ - mfspr r3, SPRN_MMUCFG - rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE - beq 1f - bl __setup_ehv_ivors - b 2f -1: - lwz r3, CPU_SPEC_FEATURES(r4) - /* We need this check as cpu_setup is also called for - * the secondary cores. So, if we have already cleared - * the feature on the primary core, avoid doing it on the - * secondary core. - */ - andi. r6, r3, CPU_FTR_EMB_HV - beq 2f - rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV - stw r3, CPU_SPEC_FEATURES(r4) -2: - mtlr r5 - blr -#endif /* CONFIG_PPC_E500MC */ -#endif /* CONFIG_PPC_E500 */ -#endif /* CONFIG_PPC32 */ - -#ifdef CONFIG_PPC_BOOK3E_64 -_GLOBAL(__restore_cpu_e6500) - mflr r5 - bl setup_altivec_ivors - /* Touch IVOR42 only if the CPU supports E.HV category */ - mfspr r10,SPRN_MMUCFG - rlwinm. 
r10,r10,0,MMUCFG_LPIDSIZE - beq 1f - bl setup_lrat_ivor -1: - bl setup_pw20_idle - bl setup_altivec_idle - bl __restore_cpu_e5500 - mtlr r5 - blr - -_GLOBAL(__restore_cpu_e5500) - mflr r4 - bl __e500_icache_setup - bl __e500_dcache_setup - bl __setup_base_ivors - bl setup_perfmon_ivor - bl setup_doorbell_ivors - /* - * We only want to touch IVOR38-41 if we're running on hardware - * that supports category E.HV. The architectural way to determine - * this is MMUCFG[LPIDSIZE]. - */ - mfspr r10,SPRN_MMUCFG - rlwinm. r10,r10,0,MMUCFG_LPIDSIZE - beq 1f - bl setup_ehv_ivors -1: - mtlr r4 - blr - -_GLOBAL(__setup_cpu_e5500) - mflr r5 - bl __e500_icache_setup - bl __e500_dcache_setup - bl __setup_base_ivors - bl setup_perfmon_ivor - bl setup_doorbell_ivors - /* - * We only want to touch IVOR38-41 if we're running on hardware - * that supports category E.HV. The architectural way to determine - * this is MMUCFG[LPIDSIZE]. - */ - mfspr r10,SPRN_MMUCFG - rlwinm. r10,r10,0,MMUCFG_LPIDSIZE - beq 1f - bl setup_ehv_ivors - b 2f -1: - ld r10,CPU_SPEC_FEATURES(r4) - LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV) - andc r10,r10,r9 - std r10,CPU_SPEC_FEATURES(r4) -2: - mtlr r5 - blr -#endif - -/* flush L1 data cache, it can apply to e500v2, e500mc and e5500 */ -_GLOBAL(flush_dcache_L1) - mfmsr r10 - wrteei 0 - - mfspr r3,SPRN_L1CFG0 - rlwinm r5,r3,9,3 /* Extract cache block size */ - twlgti r5,1 /* Only 32 and 64 byte cache blocks - * are currently defined. - */ - li r4,32 - subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) - - * log2(number of ways) - */ - slw r5,r4,r5 /* r5 = cache block size */ - - rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */ - mulli r7,r7,13 /* An 8-way cache will require 13 - * loads per set. - */ - slw r7,r7,r6 - - /* save off HID0 and set DCFA */ - mfspr r8,SPRN_HID0 - ori r9,r8,HID0_DCFA@l - mtspr SPRN_HID0,r9 - isync - - LOAD_REG_IMMEDIATE(r6, KERNELBASE) - mr r4, r6 - mtctr r7 - -1: lwz r3,0(r4) /* Load... */ - add r4,r4,r5 - bdnz 1b - - msync - mr r4, r6 - mtctr r7 - -1: dcbf 0,r4 /* ...and flush. */ - add r4,r4,r5 - bdnz 1b - - /* restore HID0 */ - mtspr SPRN_HID0,r8 - isync - - wrtee r10 - - blr - -has_L2_cache: - /* skip L2 cache on P2040/P2040E as they have no L2 cache */ - mfspr r3, SPRN_SVR - /* shift right by 8 bits and clear E bit of SVR */ - rlwinm r4, r3, 24, ~0x800 - - lis r3, SVR_P2040@h - ori r3, r3, SVR_P2040@l - cmpw r4, r3 - beq 1f - - li r3, 1 - blr -1: - li r3, 0 - blr - -/* flush backside L2 cache */ -flush_backside_L2_cache: - mflr r10 - bl has_L2_cache - mtlr r10 - cmpwi r3, 0 - beq 2f - - /* Flush the L2 cache */ - mfspr r3, SPRN_L2CSR0 - ori r3, r3, L2CSR0_L2FL@l - msync - isync - mtspr SPRN_L2CSR0,r3 - isync - - /* check if it is complete */ -1: mfspr r3,SPRN_L2CSR0 - andi. 
r3, r3, L2CSR0_L2FL@l - bne 1b -2: - blr - -_GLOBAL(cpu_down_flush_e500v2) - mflr r0 - bl flush_dcache_L1 - mtlr r0 - blr - -_GLOBAL(cpu_down_flush_e500mc) -_GLOBAL(cpu_down_flush_e5500) - mflr r0 - bl flush_dcache_L1 - bl flush_backside_L2_cache - mtlr r0 - blr - -/* L1 Data Cache of e6500 contains no modified data, no flush is required */ -_GLOBAL(cpu_down_flush_e6500) - blr diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index a2f82ced6e4a..1047dc053b47 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -34,7 +34,7 @@ */ #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4)) -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 #define BOOKE_CLEAR_BTB(reg) \ START_BTB_FLUSH_SECTION \ BTB_FLUSH(reg) \ diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index 4b4ba3364665..a2d3abb48075 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -230,7 +230,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_common) std r0,GPR0(r1) std r10,GPR1(r1) std r2,GPR2(r1) -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 START_BTB_FLUSH_SECTION BTB_FLUSH(r10) END_BTB_FLUSH_SECTION diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index b562a1d2c750..206475e3e0b4 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -35,7 +35,7 @@ static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_N bool barrier_nospec_enabled; static bool no_nospec; static bool btb_flush_enabled; -#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) +#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64) static bool no_spectrev2; #endif @@ -122,7 +122,7 @@ static __init int security_feature_debugfs_init(void) device_initcall(security_feature_debugfs_init); #endif /* CONFIG_DEBUG_FS */ -#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) +#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64) static int __init handle_nospectre_v2(char *p) { no_spectrev2 = true; @@ -130,9 +130,9 @@ static int __init handle_nospectre_v2(char *p) return 0; } early_param("nospectre_v2", handle_nospectre_v2); -#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */ +#endif /* CONFIG_PPC_E500 || CONFIG_PPC_BOOK3S_64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 void __init setup_spectre_v2(void) { if (no_spectrev2 || cpu_mitigations_off()) @@ -140,7 +140,7 @@ void __init setup_spectre_v2(void) else btb_flush_enabled = true; } -#endif /* CONFIG_PPC_FSL_BOOK3E */ +#endif /* CONFIG_PPC_E500 */ #ifdef CONFIG_PPC_BOOK3S_64 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 169703fead57..11ded19186b9 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -708,7 +708,7 @@ static struct task_struct *current_set[NR_CPUS]; static void smp_store_cpu_info(int id) { per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 per_cpu(next_tlbcam_idx, id) = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; #endif diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 3a10cda9c05e..ef9a61718940 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -228,7 +228,7 @@ static void __init sysfs_create_dscr_default(void) } #endif /* CONFIG_PPC64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 
#define MAX_BIT 63 static u64 pw20_wt; @@ -907,7 +907,7 @@ static int register_cpu_online(unsigned int cpu) device_create_file(s, &dev_attr_tscr); #endif /* CONFIG_PPC64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) { device_create_file(s, &dev_attr_pw20_state); device_create_file(s, &dev_attr_pw20_wait_time); @@ -1003,7 +1003,7 @@ static int unregister_cpu_online(unsigned int cpu) device_remove_file(s, &dev_attr_tscr); #endif /* CONFIG_PPC64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) { device_remove_file(s, &dev_attr_pw20_state); device_remove_file(s, &dev_attr_pw20_wait_time); diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index b60d81acccfc..c025c83dfdc3 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -239,7 +239,7 @@ SECTIONS } #endif /* CONFIG_PPC_BARRIER_NOSPEC */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 . = ALIGN(8); __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) { __start__btb_flush_fixup = .; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 993d3f31832a..31f40f544de5 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -550,7 +550,7 @@ void do_barrier_nospec_fixups(bool enable) } #endif /* CONFIG_PPC_BARRIER_NOSPEC */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) { unsigned int instr[2], *dest; @@ -602,7 +602,7 @@ void __init do_btb_flush_fixups(void) for (; start < end; start += 2) patch_btb_flush_section(start); } -#endif /* CONFIG_PPC_FSL_BOOK3E */ +#endif /* CONFIG_PPC_E500 */ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) { diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index bc84a594ca62..8c3ea5300ac3 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -623,7 +623,7 @@ static int __init hugetlbpage_init(void) if (pdshift > shift) { if (!IS_ENABLED(CONFIG_PPC_8xx)) pgtable_cache_add(pdshift - shift); - } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || + } else if (IS_ENABLED(CONFIG_PPC_E500) || IS_ENABLED(CONFIG_PPC_8xx)) { pgtable_cache_add(PTE_T_ORDER); } diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 6ddbd6cb3a2a..84d171953ba4 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -308,7 +308,7 @@ void __init mem_init(void) } #endif /* CONFIG_HIGHMEM */ -#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP) +#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP) /* * If smp is enabled, next_tlbcam_idx is initialized in the cpu up * functions.... do it here for the non-smp case. 
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 341c2e0c71d2..bd9784f77f2e 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -111,7 +111,7 @@ void MMU_init_hw_patch(void); unsigned long mmu_mapin_ram(unsigned long base, unsigned long top); #endif -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init); #ifdef CONFIG_PPC32 @@ -157,7 +157,7 @@ static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; } static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; } #endif -#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_E500) void mmu_mark_initmem_nx(void); void mmu_mark_rodata_ro(void); #else diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile index b467a25ee155..f3894e79d5f7 100644 --- a/arch/powerpc/mm/nohash/Makefile +++ b/arch/powerpc/mm/nohash/Makefile @@ -7,13 +7,13 @@ obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o obj-$(CONFIG_40x) += 40x.o obj-$(CONFIG_44x) += 44x.o obj-$(CONFIG_PPC_8xx) += 8xx.o -obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_book3e.o +obj-$(CONFIG_PPC_E500) += e500.o obj-$(CONFIG_RANDOMIZE_BASE) += kaslr_booke.o ifdef CONFIG_HUGETLB_PAGE -obj-$(CONFIG_PPC_FSL_BOOK3E) += book3e_hugetlbpage.o +obj-$(CONFIG_PPC_E500) += e500_hugetlbpage.o endif # Disable kcov instrumentation on sensitive code # This is necessary for booting with kcov enabled on book3e machines KCOV_INSTRUMENT_tlb.o := n -KCOV_INSTRUMENT_fsl_book3e.o := n +KCOV_INSTRUMENT_e500.o := n diff --git a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c deleted file mode 100644 index c7d4b317a823..000000000000 --- a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c +++ /dev/null @@ -1,190 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PPC Huge TLB Page Support for Book3E MMU - * - * Copyright (C) 2009 David Gibson, IBM Corporation. - * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor - * - */ -#include -#include - -#include - -#ifdef CONFIG_PPC64 -#include - -static inline int tlb1_next(void) -{ - struct paca_struct *paca = get_paca(); - struct tlb_core_data *tcd; - int this, next; - - tcd = paca->tcd_ptr; - this = tcd->esel_next; - - next = this + 1; - if (next >= tcd->esel_max) - next = tcd->esel_first; - - tcd->esel_next = next; - return this; -} - -static inline void book3e_tlb_lock(void) -{ - struct paca_struct *paca = get_paca(); - unsigned long tmp; - int token = smp_processor_id() + 1; - - /* - * Besides being unnecessary in the absence of SMT, this - * check prevents trying to do lbarx/stbcx. on e5500 which - * doesn't implement either feature. - */ - if (!cpu_has_feature(CPU_FTR_SMT)) - return; - - asm volatile("1: lbarx %0, 0, %1;" - "cmpwi %0, 0;" - "bne 2f;" - "stbcx. 
%2, 0, %1;" - "bne 1b;" - "b 3f;" - "2: lbzx %0, 0, %1;" - "cmpwi %0, 0;" - "bne 2b;" - "b 1b;" - "3:" - : "=&r" (tmp) - : "r" (&paca->tcd_ptr->lock), "r" (token) - : "memory"); -} - -static inline void book3e_tlb_unlock(void) -{ - struct paca_struct *paca = get_paca(); - - if (!cpu_has_feature(CPU_FTR_SMT)) - return; - - isync(); - paca->tcd_ptr->lock = 0; -} -#else -static inline int tlb1_next(void) -{ - int index, ncams; - - ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; - - index = this_cpu_read(next_tlbcam_idx); - - /* Just round-robin the entries and wrap when we hit the end */ - if (unlikely(index == ncams - 1)) - __this_cpu_write(next_tlbcam_idx, tlbcam_index); - else - __this_cpu_inc(next_tlbcam_idx); - - return index; -} - -static inline void book3e_tlb_lock(void) -{ -} - -static inline void book3e_tlb_unlock(void) -{ -} -#endif - -static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid) -{ - int found = 0; - - mtspr(SPRN_MAS6, pid << 16); - asm volatile( - "tlbsx 0,%1\n" - "mfspr %0,0x271\n" - "srwi %0,%0,31\n" - : "=&r"(found) : "r"(ea)); - - return found; -} - -static void -book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte) -{ - unsigned long mas1, mas2; - u64 mas7_3; - unsigned long psize, tsize, shift; - unsigned long flags; - struct mm_struct *mm; - int index; - - if (unlikely(is_kernel_addr(ea))) - return; - - mm = vma->vm_mm; - - psize = vma_mmu_pagesize(vma); - shift = __ilog2(psize); - tsize = shift - 10; - /* - * We can't be interrupted while we're setting up the MAS - * registers or after we've confirmed that no tlb exists. - */ - local_irq_save(flags); - - book3e_tlb_lock(); - - if (unlikely(book3e_tlb_exists(ea, mm->context.id))) { - book3e_tlb_unlock(); - local_irq_restore(flags); - return; - } - - /* We have to use the CAM(TLB1) on FSL parts for hugepages */ - index = tlb1_next(); - mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1)); - - mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize); - mas2 = ea & ~((1UL << shift) - 1); - mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; - mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT; - mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK; - if (!pte_dirty(pte)) - mas7_3 &= ~(MAS3_SW|MAS3_UW); - - mtspr(SPRN_MAS1, mas1); - mtspr(SPRN_MAS2, mas2); - - if (mmu_has_feature(MMU_FTR_BIG_PHYS)) - mtspr(SPRN_MAS7, upper_32_bits(mas7_3)); - mtspr(SPRN_MAS3, lower_32_bits(mas7_3)); - - asm volatile ("tlbwe"); - - book3e_tlb_unlock(); - local_irq_restore(flags); -} - -/* - * This is called at the end of handling a user page fault, when the - * fault has been handled by updating a PTE in the linux page tables. - * - * This must always be called with the pte lock held. - */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) -{ - if (is_vm_hugetlb_page(vma)) - book3e_hugetlb_preload(vma, address, *ptep); -} - -void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) -{ - struct hstate *hstate = hstate_file(vma->vm_file); - unsigned long tsize = huge_page_shift(hstate) - 10; - - __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0); -} diff --git a/arch/powerpc/mm/nohash/e500.c b/arch/powerpc/mm/nohash/e500.c new file mode 100644 index 000000000000..40a4e69ae1a9 --- /dev/null +++ b/arch/powerpc/mm/nohash/e500.c @@ -0,0 +1,375 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Modifications by Kumar Gala (galak@kernel.crashing.org) to support + * E500 Book E processors. 
+ * + * Copyright 2004,2010 Freescale Semiconductor, Inc. + * + * This file contains the routines for initializing the MMU + * on the 4xx series of chips. + * -- paulus + * + * Derived from arch/ppc/mm/init.c: + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) + * and Cort Dougan (PReP) (cort@cs.nmt.edu) + * Copyright (C) 1996 Paul Mackerras + * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +unsigned int tlbcam_index; + +struct tlbcam TLBCAM[NUM_TLBCAMS]; + +static struct { + unsigned long start; + unsigned long limit; + phys_addr_t phys; +} tlbcam_addrs[NUM_TLBCAMS]; + +#ifdef CONFIG_PPC_85xx +/* + * Return PA for this VA if it is mapped by a CAM, or 0 + */ +phys_addr_t v_block_mapped(unsigned long va) +{ + int b; + for (b = 0; b < tlbcam_index; ++b) + if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit) + return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start); + return 0; +} + +/* + * Return VA for a given PA or 0 if not mapped + */ +unsigned long p_block_mapped(phys_addr_t pa) +{ + int b; + for (b = 0; b < tlbcam_index; ++b) + if (pa >= tlbcam_addrs[b].phys + && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start) + +tlbcam_addrs[b].phys) + return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys); + return 0; +} +#endif + +/* + * Set up a variable-size TLB entry (tlbcam). The parameters are not checked; + * in particular size must be a power of 4 between 4k and the max supported by + * an implementation; max may further be limited by what can be represented in + * an unsigned long (for example, 32-bit implementations cannot support a 4GB + * size). + */ +static void settlbcam(int index, unsigned long virt, phys_addr_t phys, + unsigned long size, unsigned long flags, unsigned int pid) +{ + unsigned int tsize; + + tsize = __ilog2(size) - 10; + +#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC) + if ((flags & _PAGE_NO_CACHE) == 0) + flags |= _PAGE_COHERENT; +#endif + + TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1); + TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid); + TLBCAM[index].MAS2 = virt & PAGE_MASK; + + TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0; + TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0; + TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0; + TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0; + TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0; + + TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR; + TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0; + if (mmu_has_feature(MMU_FTR_BIG_PHYS)) + TLBCAM[index].MAS7 = (u64)phys >> 32; + + /* Below is unlikely -- only for large user pages or similar */ + if (pte_user(__pte(flags))) { + TLBCAM[index].MAS3 |= MAS3_UR; + TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0; + TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0; + } else { + TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? 
MAS3_SX : 0; + } + + tlbcam_addrs[index].start = virt; + tlbcam_addrs[index].limit = virt + size - 1; + tlbcam_addrs[index].phys = phys; +} + +static unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, + phys_addr_t phys) +{ + unsigned int camsize = __ilog2(ram); + unsigned int align = __ffs(virt | phys); + unsigned long max_cam; + + if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { + /* Convert (4^max) kB to (2^max) bytes */ + max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10; + camsize &= ~1U; + align &= ~1U; + } else { + /* Convert (2^max) kB to (2^max) bytes */ + max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10; + } + + if (camsize > align) + camsize = align; + if (camsize > max_cam) + camsize = max_cam; + + return 1UL << camsize; +} + +static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt, + unsigned long ram, int max_cam_idx, + bool dryrun, bool init) +{ + int i; + unsigned long amount_mapped = 0; + unsigned long boundary; + + if (strict_kernel_rwx_enabled()) + boundary = (unsigned long)(_sinittext - _stext); + else + boundary = ram; + + /* Calculate CAM values */ + for (i = 0; boundary && i < max_cam_idx; i++) { + unsigned long cam_sz; + pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX; + + cam_sz = calc_cam_sz(boundary, virt, phys); + if (!dryrun) + settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0); + + boundary -= cam_sz; + amount_mapped += cam_sz; + virt += cam_sz; + phys += cam_sz; + } + for (ram -= amount_mapped; ram && i < max_cam_idx; i++) { + unsigned long cam_sz; + pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL; + + cam_sz = calc_cam_sz(ram, virt, phys); + if (!dryrun) + settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0); + + ram -= cam_sz; + amount_mapped += cam_sz; + virt += cam_sz; + phys += cam_sz; + } + + if (dryrun) + return amount_mapped; + + if (init) { + loadcam_multi(0, i, max_cam_idx); + tlbcam_index = i; + } else { + loadcam_multi(0, i, 0); + WARN_ON(i > tlbcam_index); + } + +#ifdef CONFIG_PPC64 + get_paca()->tcd.esel_next = i; + get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; + get_paca()->tcd.esel_first = i; +#endif + + return amount_mapped; +} + +unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init) +{ + unsigned long virt = PAGE_OFFSET; + phys_addr_t phys = memstart_addr; + + return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init); +} + +#ifdef CONFIG_PPC32 + +#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) +#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS" +#endif + +unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) +{ + return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1; +} + +void flush_instruction_cache(void) +{ + unsigned long tmp; + + tmp = mfspr(SPRN_L1CSR1); + tmp |= L1CSR1_ICFI | L1CSR1_ICLFR; + mtspr(SPRN_L1CSR1, tmp); + isync(); +} + +/* + * MMU_init_hw does the chip-specific initialization of the MMU hardware. 
+ */ +void __init MMU_init_hw(void) +{ + flush_instruction_cache(); +} + +static unsigned long __init tlbcam_sz(int idx) +{ + return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1; +} + +void __init adjust_total_lowmem(void) +{ + unsigned long ram; + int i; + + /* adjust lowmem size to __max_low_memory */ + ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem); + + i = switch_to_as1(); + __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true); + restore_to_as0(i, 0, NULL, 1); + + pr_info("Memory CAM mapping: "); + for (i = 0; i < tlbcam_index - 1; i++) + pr_cont("%lu/", tlbcam_sz(i) >> 20); + pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20, + (unsigned int)((total_lowmem - __max_low_memory) >> 20)); + + memblock_set_current_limit(memstart_addr + __max_low_memory); +} + +#ifdef CONFIG_STRICT_KERNEL_RWX +void mmu_mark_rodata_ro(void) +{ + unsigned long remapped; + + remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false); + + WARN_ON(__max_low_memory != remapped); +} +#endif + +void mmu_mark_initmem_nx(void) +{ + /* Everything is done in mmu_mark_rodata_ro() */ +} + +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + phys_addr_t limit = first_memblock_base + first_memblock_size; + + /* 64M mapped initially according to head_fsl_booke.S */ + memblock_set_current_limit(min_t(u64, limit, 0x04000000)); +} + +#ifdef CONFIG_RELOCATABLE +int __initdata is_second_reloc; +notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start) +{ + unsigned long base = kernstart_virt_addr; + phys_addr_t size; + + kernstart_addr = start; + if (is_second_reloc) { + virt_phys_offset = PAGE_OFFSET - memstart_addr; + kaslr_late_init(); + return; + } + + /* + * Relocatable kernel support based on processing of dynamic + * relocation entries. Before we get the real memstart_addr, + * We will compute the virt_phys_offset like this: + * virt_phys_offset = stext.run - kernstart_addr + * + * stext.run = (KERNELBASE & ~0x3ffffff) + + * (kernstart_addr & 0x3ffffff) + * When we relocate, we have : + * + * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff) + * + * hence: + * virt_phys_offset = (KERNELBASE & ~0x3ffffff) - + * (kernstart_addr & ~0x3ffffff) + * + */ + start &= ~0x3ffffff; + base &= ~0x3ffffff; + virt_phys_offset = base - start; + early_get_first_memblock_info(__va(dt_ptr), &size); + /* + * We now get the memstart_addr, then we should check if this + * address is the same as what the PAGE_OFFSET map to now. If + * not we have to change the map of PAGE_OFFSET to memstart_addr + * and do a second relocation. 
+ */ + if (start != memstart_addr) { + int n; + long offset = start - memstart_addr; + + is_second_reloc = 1; + n = switch_to_as1(); + /* map a 64M area for the second relocation */ + if (memstart_addr > start) + map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM, + false, true); + else + map_mem_in_cams_addr(start, PAGE_OFFSET + offset, + 0x4000000, CONFIG_LOWMEM_CAM_NUM, + false, true); + restore_to_as0(n, offset, __va(dt_ptr), 1); + /* We should never reach here */ + panic("Relocation error"); + } + + kaslr_early_init(__va(dt_ptr), size); +} +#endif +#endif diff --git a/arch/powerpc/mm/nohash/e500_hugetlbpage.c b/arch/powerpc/mm/nohash/e500_hugetlbpage.c new file mode 100644 index 000000000000..c7d4b317a823 --- /dev/null +++ b/arch/powerpc/mm/nohash/e500_hugetlbpage.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PPC Huge TLB Page Support for Book3E MMU + * + * Copyright (C) 2009 David Gibson, IBM Corporation. + * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor + * + */ +#include +#include + +#include + +#ifdef CONFIG_PPC64 +#include + +static inline int tlb1_next(void) +{ + struct paca_struct *paca = get_paca(); + struct tlb_core_data *tcd; + int this, next; + + tcd = paca->tcd_ptr; + this = tcd->esel_next; + + next = this + 1; + if (next >= tcd->esel_max) + next = tcd->esel_first; + + tcd->esel_next = next; + return this; +} + +static inline void book3e_tlb_lock(void) +{ + struct paca_struct *paca = get_paca(); + unsigned long tmp; + int token = smp_processor_id() + 1; + + /* + * Besides being unnecessary in the absence of SMT, this + * check prevents trying to do lbarx/stbcx. on e5500 which + * doesn't implement either feature. + */ + if (!cpu_has_feature(CPU_FTR_SMT)) + return; + + asm volatile("1: lbarx %0, 0, %1;" + "cmpwi %0, 0;" + "bne 2f;" + "stbcx. %2, 0, %1;" + "bne 1b;" + "b 3f;" + "2: lbzx %0, 0, %1;" + "cmpwi %0, 0;" + "bne 2b;" + "b 1b;" + "3:" + : "=&r" (tmp) + : "r" (&paca->tcd_ptr->lock), "r" (token) + : "memory"); +} + +static inline void book3e_tlb_unlock(void) +{ + struct paca_struct *paca = get_paca(); + + if (!cpu_has_feature(CPU_FTR_SMT)) + return; + + isync(); + paca->tcd_ptr->lock = 0; +} +#else +static inline int tlb1_next(void) +{ + int index, ncams; + + ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; + + index = this_cpu_read(next_tlbcam_idx); + + /* Just round-robin the entries and wrap when we hit the end */ + if (unlikely(index == ncams - 1)) + __this_cpu_write(next_tlbcam_idx, tlbcam_index); + else + __this_cpu_inc(next_tlbcam_idx); + + return index; +} + +static inline void book3e_tlb_lock(void) +{ +} + +static inline void book3e_tlb_unlock(void) +{ +} +#endif + +static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid) +{ + int found = 0; + + mtspr(SPRN_MAS6, pid << 16); + asm volatile( + "tlbsx 0,%1\n" + "mfspr %0,0x271\n" + "srwi %0,%0,31\n" + : "=&r"(found) : "r"(ea)); + + return found; +} + +static void +book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte) +{ + unsigned long mas1, mas2; + u64 mas7_3; + unsigned long psize, tsize, shift; + unsigned long flags; + struct mm_struct *mm; + int index; + + if (unlikely(is_kernel_addr(ea))) + return; + + mm = vma->vm_mm; + + psize = vma_mmu_pagesize(vma); + shift = __ilog2(psize); + tsize = shift - 10; + /* + * We can't be interrupted while we're setting up the MAS + * registers or after we've confirmed that no tlb exists. 
+ */ + local_irq_save(flags); + + book3e_tlb_lock(); + + if (unlikely(book3e_tlb_exists(ea, mm->context.id))) { + book3e_tlb_unlock(); + local_irq_restore(flags); + return; + } + + /* We have to use the CAM(TLB1) on FSL parts for hugepages */ + index = tlb1_next(); + mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1)); + + mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize); + mas2 = ea & ~((1UL << shift) - 1); + mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; + mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT; + mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK; + if (!pte_dirty(pte)) + mas7_3 &= ~(MAS3_SW|MAS3_UW); + + mtspr(SPRN_MAS1, mas1); + mtspr(SPRN_MAS2, mas2); + + if (mmu_has_feature(MMU_FTR_BIG_PHYS)) + mtspr(SPRN_MAS7, upper_32_bits(mas7_3)); + mtspr(SPRN_MAS3, lower_32_bits(mas7_3)); + + asm volatile ("tlbwe"); + + book3e_tlb_unlock(); + local_irq_restore(flags); +} + +/* + * This is called at the end of handling a user page fault, when the + * fault has been handled by updating a PTE in the linux page tables. + * + * This must always be called with the pte lock held. + */ +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +{ + if (is_vm_hugetlb_page(vma)) + book3e_hugetlb_preload(vma, address, *ptep); +} + +void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) +{ + struct hstate *hstate = hstate_file(vma->vm_file); + unsigned long tsize = huge_page_shift(hstate) - 10; + + __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0); +} diff --git a/arch/powerpc/mm/nohash/fsl_book3e.c b/arch/powerpc/mm/nohash/fsl_book3e.c deleted file mode 100644 index 40a4e69ae1a9..000000000000 --- a/arch/powerpc/mm/nohash/fsl_book3e.c +++ /dev/null @@ -1,375 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Modifications by Kumar Gala (galak@kernel.crashing.org) to support - * E500 Book E processors. - * - * Copyright 2004,2010 Freescale Semiconductor, Inc. - * - * This file contains the routines for initializing the MMU - * on the 4xx series of chips. 
- * -- paulus - * - * Derived from arch/ppc/mm/init.c: - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * - * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) - * and Cort Dougan (PReP) (cort@cs.nmt.edu) - * Copyright (C) 1996 Paul Mackerras - * - * Derived from "arch/i386/mm/init.c" - * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -unsigned int tlbcam_index; - -struct tlbcam TLBCAM[NUM_TLBCAMS]; - -static struct { - unsigned long start; - unsigned long limit; - phys_addr_t phys; -} tlbcam_addrs[NUM_TLBCAMS]; - -#ifdef CONFIG_PPC_85xx -/* - * Return PA for this VA if it is mapped by a CAM, or 0 - */ -phys_addr_t v_block_mapped(unsigned long va) -{ - int b; - for (b = 0; b < tlbcam_index; ++b) - if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit) - return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start); - return 0; -} - -/* - * Return VA for a given PA or 0 if not mapped - */ -unsigned long p_block_mapped(phys_addr_t pa) -{ - int b; - for (b = 0; b < tlbcam_index; ++b) - if (pa >= tlbcam_addrs[b].phys - && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start) - +tlbcam_addrs[b].phys) - return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys); - return 0; -} -#endif - -/* - * Set up a variable-size TLB entry (tlbcam). The parameters are not checked; - * in particular size must be a power of 4 between 4k and the max supported by - * an implementation; max may further be limited by what can be represented in - * an unsigned long (for example, 32-bit implementations cannot support a 4GB - * size). - */ -static void settlbcam(int index, unsigned long virt, phys_addr_t phys, - unsigned long size, unsigned long flags, unsigned int pid) -{ - unsigned int tsize; - - tsize = __ilog2(size) - 10; - -#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC) - if ((flags & _PAGE_NO_CACHE) == 0) - flags |= _PAGE_COHERENT; -#endif - - TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1); - TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid); - TLBCAM[index].MAS2 = virt & PAGE_MASK; - - TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0; - TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0; - TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0; - TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0; - TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0; - - TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR; - TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0; - if (mmu_has_feature(MMU_FTR_BIG_PHYS)) - TLBCAM[index].MAS7 = (u64)phys >> 32; - - /* Below is unlikely -- only for large user pages or similar */ - if (pte_user(__pte(flags))) { - TLBCAM[index].MAS3 |= MAS3_UR; - TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0; - TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0; - } else { - TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? 
MAS3_SX : 0; - } - - tlbcam_addrs[index].start = virt; - tlbcam_addrs[index].limit = virt + size - 1; - tlbcam_addrs[index].phys = phys; -} - -static unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, - phys_addr_t phys) -{ - unsigned int camsize = __ilog2(ram); - unsigned int align = __ffs(virt | phys); - unsigned long max_cam; - - if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { - /* Convert (4^max) kB to (2^max) bytes */ - max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10; - camsize &= ~1U; - align &= ~1U; - } else { - /* Convert (2^max) kB to (2^max) bytes */ - max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10; - } - - if (camsize > align) - camsize = align; - if (camsize > max_cam) - camsize = max_cam; - - return 1UL << camsize; -} - -static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt, - unsigned long ram, int max_cam_idx, - bool dryrun, bool init) -{ - int i; - unsigned long amount_mapped = 0; - unsigned long boundary; - - if (strict_kernel_rwx_enabled()) - boundary = (unsigned long)(_sinittext - _stext); - else - boundary = ram; - - /* Calculate CAM values */ - for (i = 0; boundary && i < max_cam_idx; i++) { - unsigned long cam_sz; - pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX; - - cam_sz = calc_cam_sz(boundary, virt, phys); - if (!dryrun) - settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0); - - boundary -= cam_sz; - amount_mapped += cam_sz; - virt += cam_sz; - phys += cam_sz; - } - for (ram -= amount_mapped; ram && i < max_cam_idx; i++) { - unsigned long cam_sz; - pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL; - - cam_sz = calc_cam_sz(ram, virt, phys); - if (!dryrun) - settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0); - - ram -= cam_sz; - amount_mapped += cam_sz; - virt += cam_sz; - phys += cam_sz; - } - - if (dryrun) - return amount_mapped; - - if (init) { - loadcam_multi(0, i, max_cam_idx); - tlbcam_index = i; - } else { - loadcam_multi(0, i, 0); - WARN_ON(i > tlbcam_index); - } - -#ifdef CONFIG_PPC64 - get_paca()->tcd.esel_next = i; - get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; - get_paca()->tcd.esel_first = i; -#endif - - return amount_mapped; -} - -unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init) -{ - unsigned long virt = PAGE_OFFSET; - phys_addr_t phys = memstart_addr; - - return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init); -} - -#ifdef CONFIG_PPC32 - -#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) -#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS" -#endif - -unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) -{ - return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1; -} - -void flush_instruction_cache(void) -{ - unsigned long tmp; - - tmp = mfspr(SPRN_L1CSR1); - tmp |= L1CSR1_ICFI | L1CSR1_ICLFR; - mtspr(SPRN_L1CSR1, tmp); - isync(); -} - -/* - * MMU_init_hw does the chip-specific initialization of the MMU hardware. 
- */ -void __init MMU_init_hw(void) -{ - flush_instruction_cache(); -} - -static unsigned long __init tlbcam_sz(int idx) -{ - return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1; -} - -void __init adjust_total_lowmem(void) -{ - unsigned long ram; - int i; - - /* adjust lowmem size to __max_low_memory */ - ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem); - - i = switch_to_as1(); - __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true); - restore_to_as0(i, 0, NULL, 1); - - pr_info("Memory CAM mapping: "); - for (i = 0; i < tlbcam_index - 1; i++) - pr_cont("%lu/", tlbcam_sz(i) >> 20); - pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20, - (unsigned int)((total_lowmem - __max_low_memory) >> 20)); - - memblock_set_current_limit(memstart_addr + __max_low_memory); -} - -#ifdef CONFIG_STRICT_KERNEL_RWX -void mmu_mark_rodata_ro(void) -{ - unsigned long remapped; - - remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false); - - WARN_ON(__max_low_memory != remapped); -} -#endif - -void mmu_mark_initmem_nx(void) -{ - /* Everything is done in mmu_mark_rodata_ro() */ -} - -void setup_initial_memory_limit(phys_addr_t first_memblock_base, - phys_addr_t first_memblock_size) -{ - phys_addr_t limit = first_memblock_base + first_memblock_size; - - /* 64M mapped initially according to head_fsl_booke.S */ - memblock_set_current_limit(min_t(u64, limit, 0x04000000)); -} - -#ifdef CONFIG_RELOCATABLE -int __initdata is_second_reloc; -notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start) -{ - unsigned long base = kernstart_virt_addr; - phys_addr_t size; - - kernstart_addr = start; - if (is_second_reloc) { - virt_phys_offset = PAGE_OFFSET - memstart_addr; - kaslr_late_init(); - return; - } - - /* - * Relocatable kernel support based on processing of dynamic - * relocation entries. Before we get the real memstart_addr, - * We will compute the virt_phys_offset like this: - * virt_phys_offset = stext.run - kernstart_addr - * - * stext.run = (KERNELBASE & ~0x3ffffff) + - * (kernstart_addr & 0x3ffffff) - * When we relocate, we have : - * - * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff) - * - * hence: - * virt_phys_offset = (KERNELBASE & ~0x3ffffff) - - * (kernstart_addr & ~0x3ffffff) - * - */ - start &= ~0x3ffffff; - base &= ~0x3ffffff; - virt_phys_offset = base - start; - early_get_first_memblock_info(__va(dt_ptr), &size); - /* - * We now get the memstart_addr, then we should check if this - * address is the same as what the PAGE_OFFSET map to now. If - * not we have to change the map of PAGE_OFFSET to memstart_addr - * and do a second relocation. - */ - if (start != memstart_addr) { - int n; - long offset = start - memstart_addr; - - is_second_reloc = 1; - n = switch_to_as1(); - /* map a 64M area for the second relocation */ - if (memstart_addr > start) - map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM, - false, true); - else - map_mem_in_cams_addr(start, PAGE_OFFSET + offset, - 0x4000000, CONFIG_LOWMEM_CAM_NUM, - false, true); - restore_to_as0(n, offset, __va(dt_ptr), 1); - /* We should never reach here */ - panic("Relocation error"); - } - - kaslr_early_init(__va(dt_ptr), size); -} -#endif -#endif diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index f21896ebdc5a..fcb1e5ae5c55 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -50,7 +50,7 @@ * indirect page table entries. 
*/ #if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx) -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { .shift = 12, @@ -166,7 +166,7 @@ int extlb_level_exc; #endif /* CONFIG_PPC64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */ DEFINE_PER_CPU(int, next_tlbcam_idx); EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx); @@ -441,7 +441,7 @@ static void __init setup_page_sizes(void) unsigned int eptcfg; int i, psize; -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 unsigned int mmucfg = mfspr(SPRN_MMUCFG); int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E); @@ -584,7 +584,7 @@ static void __init setup_mmu_htw(void) patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e); patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e); break; -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 case PPC_HTW_E6500: extlb_level_exc = EX_TLB_SIZE; patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); @@ -627,7 +627,7 @@ static void early_init_this_mmu(void) } mtspr(SPRN_MAS4, mas4); -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { unsigned int num_cams; bool map = true; @@ -680,7 +680,7 @@ static void __init early_init_mmu_global(void) /* Look for HW tablewalk support */ setup_mmu_htw(); -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { if (book3e_htw_mode == PPC_HTW_NONE) { extlb_level_exc = EX_TLB_SIZE; @@ -701,7 +701,7 @@ static void __init early_init_mmu_global(void) static void __init early_mmu_set_memory_limit(void) { -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { /* * Limit memory so we dont have linear faults. @@ -750,7 +750,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, * We crop it to the size of the first MEMBLOCK to * avoid going over total available memory just in case... */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { unsigned long linear_sz; unsigned int num_cams; diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S index 6914bc8e4ead..e1199608ff4d 100644 --- a/arch/powerpc/mm/nohash/tlb_low.S +++ b/arch/powerpc/mm/nohash/tlb_low.S @@ -364,7 +364,7 @@ _GLOBAL(_tlbivax_bcast) #error Unsupported processor type ! #endif -#if defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_PPC_E500) /* * extern void loadcam_entry(unsigned int index) * diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 5b065186ace5..32c60ad8f45d 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -107,7 +107,6 @@ config PPC_BOOK3S_64 config PPC_BOOK3E_64 bool "Embedded processors" - select PPC_FSL_BOOK3E select PPC_E500 select PPC_E500MC select PPC_FPU # Make it a choice ? 
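The hunk above and the one below are two halves of the same consolidation: PPC_BOOK3E_64 stops selecting the transitional PPC_FSL_BOOK3E symbol, and PPC_E500 absorbs its selects, leaving CONFIG_PPC_E500 as the single guard for Freescale Book3E code on 32-bit and 64-bit alike. A minimal sketch of the consumer-side idiom, with a hypothetical caller (nothing below is from the patch):

	#include <linux/kernel.h>
	#include <linux/errno.h>

	/* Hypothetical consumer: one IS_ENABLED() test on CONFIG_PPC_E500
	 * now covers everything CONFIG_PPC_FSL_BOOK3E used to guard. */
	static int fsl_feature_probe(void)
	{
		if (!IS_ENABLED(CONFIG_PPC_E500))	/* was CONFIG_PPC_FSL_BOOK3E */
			return -ENODEV;
		return 0;
	}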
@@ -259,8 +258,11 @@ config PPC_BOOK3S config PPC_E500 select FSL_EMB_PERFMON - select PPC_FSL_BOOK3E bool + select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 + select PPC_SMP_MUXED_IPI + select PPC_DOORBELL + select PPC_KUEP config PPC_E500MC bool "e500mc Support" @@ -320,16 +322,6 @@ config BOOKE_OR_40x depends on BOOKE || 40x default y -# this is for common code between PPC32 & PPC64 FSL BOOKE -config PPC_FSL_BOOK3E - bool - select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 - imply FSL_EMB_PERFMON - select PPC_SMP_MUXED_IPI - select PPC_DOORBELL - select PPC_KUEP - default y if PPC_85xx - config PTE_64BIT bool depends on 44x || PPC_E500 || PPC_86xx -- cgit v1.2.3-70-g09d2 From aa5f59df201dd350f7c291c845ac8b62c0d0edd5 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:39 +0200 Subject: powerpc: Remove CONFIG_PPC_BOOK3E_MMU CONFIG_PPC_BOOK3E_MMU is redundant with CONFIG_PPC_E500. Remove it. Also rename mmu-book3e.h to mmu-e500.h Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/c5549cd59a131204ff94ab909cad2e2dad4ddf2f.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/mmu-book3e.h | 324 --------------------------- arch/powerpc/include/asm/nohash/mmu-e500.h | 324 +++++++++++++++++++++++++++ arch/powerpc/include/asm/nohash/mmu.h | 4 +- arch/powerpc/kernel/cpu_setup_e500.S | 2 +- arch/powerpc/kernel/entry_32.S | 2 +- arch/powerpc/kernel/head_booke.h | 4 +- arch/powerpc/kernel/kvm.c | 8 +- arch/powerpc/kvm/e500.h | 2 +- arch/powerpc/mm/nohash/tlb.c | 4 +- arch/powerpc/mm/ptdump/Makefile | 2 +- arch/powerpc/platforms/Kconfig.cputype | 4 - 11 files changed, 338 insertions(+), 342 deletions(-) delete mode 100644 arch/powerpc/include/asm/nohash/mmu-book3e.h create mode 100644 arch/powerpc/include/asm/nohash/mmu-e500.h (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/nohash/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-book3e.h deleted file mode 100644 index e43a418d3ccd..000000000000 --- a/arch/powerpc/include/asm/nohash/mmu-book3e.h +++ /dev/null @@ -1,324 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_MMU_BOOK3E_H_ -#define _ASM_POWERPC_MMU_BOOK3E_H_ -/* - * Freescale Book-E/Book-3e (ISA 2.06+) MMU support - */ - -/* Book-3e defined page sizes */ -#define BOOK3E_PAGESZ_1K 0 -#define BOOK3E_PAGESZ_2K 1 -#define BOOK3E_PAGESZ_4K 2 -#define BOOK3E_PAGESZ_8K 3 -#define BOOK3E_PAGESZ_16K 4 -#define BOOK3E_PAGESZ_32K 5 -#define BOOK3E_PAGESZ_64K 6 -#define BOOK3E_PAGESZ_128K 7 -#define BOOK3E_PAGESZ_256K 8 -#define BOOK3E_PAGESZ_512K 9 -#define BOOK3E_PAGESZ_1M 10 -#define BOOK3E_PAGESZ_2M 11 -#define BOOK3E_PAGESZ_4M 12 -#define BOOK3E_PAGESZ_8M 13 -#define BOOK3E_PAGESZ_16M 14 -#define BOOK3E_PAGESZ_32M 15 -#define BOOK3E_PAGESZ_64M 16 -#define BOOK3E_PAGESZ_128M 17 -#define BOOK3E_PAGESZ_256M 18 -#define BOOK3E_PAGESZ_512M 19 -#define BOOK3E_PAGESZ_1GB 20 -#define BOOK3E_PAGESZ_2GB 21 -#define BOOK3E_PAGESZ_4GB 22 -#define BOOK3E_PAGESZ_8GB 23 -#define BOOK3E_PAGESZ_16GB 24 -#define BOOK3E_PAGESZ_32GB 25 -#define BOOK3E_PAGESZ_64GB 26 -#define BOOK3E_PAGESZ_128GB 27 -#define BOOK3E_PAGESZ_256GB 28 -#define BOOK3E_PAGESZ_512GB 29 -#define BOOK3E_PAGESZ_1TB 30 -#define BOOK3E_PAGESZ_2TB 31 - -/* MAS registers bit definitions */ - -#define MAS0_TLBSEL_MASK 0x30000000 -#define MAS0_TLBSEL_SHIFT 28 -#define MAS0_TLBSEL(x) (((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK) -#define MAS0_GET_TLBSEL(mas0) (((mas0) & MAS0_TLBSEL_MASK) >> \ - 
MAS0_TLBSEL_SHIFT) -#define MAS0_ESEL_MASK 0x0FFF0000 -#define MAS0_ESEL_SHIFT 16 -#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK) -#define MAS0_NV(x) ((x) & 0x00000FFF) -#define MAS0_HES 0x00004000 -#define MAS0_WQ_ALLWAYS 0x00000000 -#define MAS0_WQ_COND 0x00001000 -#define MAS0_WQ_CLR_RSRV 0x00002000 - -#define MAS1_VALID 0x80000000 -#define MAS1_IPROT 0x40000000 -#define MAS1_TID(x) (((x) << 16) & 0x3FFF0000) -#define MAS1_IND 0x00002000 -#define MAS1_TS 0x00001000 -#define MAS1_TSIZE_MASK 0x00000f80 -#define MAS1_TSIZE_SHIFT 7 -#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) -#define MAS1_GET_TSIZE(mas1) (((mas1) & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT) - -#define MAS2_EPN (~0xFFFUL) -#define MAS2_X0 0x00000040 -#define MAS2_X1 0x00000020 -#define MAS2_W 0x00000010 -#define MAS2_I 0x00000008 -#define MAS2_M 0x00000004 -#define MAS2_G 0x00000002 -#define MAS2_E 0x00000001 -#define MAS2_WIMGE_MASK 0x0000001f -#define MAS2_EPN_MASK(size) (~0 << (size + 10)) - -#define MAS3_RPN 0xFFFFF000 -#define MAS3_U0 0x00000200 -#define MAS3_U1 0x00000100 -#define MAS3_U2 0x00000080 -#define MAS3_U3 0x00000040 -#define MAS3_UX 0x00000020 -#define MAS3_SX 0x00000010 -#define MAS3_UW 0x00000008 -#define MAS3_SW 0x00000004 -#define MAS3_UR 0x00000002 -#define MAS3_SR 0x00000001 -#define MAS3_BAP_MASK 0x0000003f -#define MAS3_SPSIZE 0x0000003e -#define MAS3_SPSIZE_SHIFT 1 - -#define MAS4_TLBSEL_MASK MAS0_TLBSEL_MASK -#define MAS4_TLBSELD(x) MAS0_TLBSEL(x) -#define MAS4_INDD 0x00008000 /* Default IND */ -#define MAS4_TSIZED(x) MAS1_TSIZE(x) -#define MAS4_X0D 0x00000040 -#define MAS4_X1D 0x00000020 -#define MAS4_WD 0x00000010 -#define MAS4_ID 0x00000008 -#define MAS4_MD 0x00000004 -#define MAS4_GD 0x00000002 -#define MAS4_ED 0x00000001 -#define MAS4_WIMGED_MASK 0x0000001f /* Default WIMGE */ -#define MAS4_WIMGED_SHIFT 0 -#define MAS4_VLED MAS4_X1D /* Default VLE */ -#define MAS4_ACMD 0x000000c0 /* Default ACM */ -#define MAS4_ACMD_SHIFT 6 -#define MAS4_TSIZED_MASK 0x00000f80 /* Default TSIZE */ -#define MAS4_TSIZED_SHIFT 7 - -#define MAS5_SGS 0x80000000 - -#define MAS6_SPID0 0x3FFF0000 -#define MAS6_SPID1 0x00007FFE -#define MAS6_ISIZE(x) MAS1_TSIZE(x) -#define MAS6_SAS 0x00000001 -#define MAS6_SPID MAS6_SPID0 -#define MAS6_SIND 0x00000002 /* Indirect page */ -#define MAS6_SIND_SHIFT 1 -#define MAS6_SPID_MASK 0x3fff0000 -#define MAS6_SPID_SHIFT 16 -#define MAS6_ISIZE_MASK 0x00000f80 -#define MAS6_ISIZE_SHIFT 7 - -#define MAS7_RPN 0xFFFFFFFF - -#define MAS8_TGS 0x80000000 /* Guest space */ -#define MAS8_VF 0x40000000 /* Virtualization Fault */ -#define MAS8_TLPID 0x000000ff - -/* Bit definitions for MMUCFG */ -#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */ -#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */ -#define MMUCFG_MAVN_V2 0x00000001 /* v2.0 */ -#define MMUCFG_NTLBS 0x0000000c /* Number of TLBs */ -#define MMUCFG_PIDSIZE 0x000007c0 /* PID Reg Size */ -#define MMUCFG_TWC 0x00008000 /* TLB Write Conditional (v2.0) */ -#define MMUCFG_LRAT 0x00010000 /* LRAT Supported (v2.0) */ -#define MMUCFG_RASIZE 0x00fe0000 /* Real Addr Size */ -#define MMUCFG_LPIDSIZE 0x0f000000 /* LPID Reg Size */ - -/* Bit definitions for MMUCSR0 */ -#define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */ -#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */ -#define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */ -#define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */ -#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \ - MMUCSR0_TLB2FI | 
MMUCSR0_TLB3FI) -#define MMUCSR0_TLB0PS 0x00000780 /* TLB0 Page Size */ -#define MMUCSR0_TLB1PS 0x00007800 /* TLB1 Page Size */ -#define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */ -#define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */ - -/* MMUCFG bits */ -#define MMUCFG_MAVN_NASK 0x00000003 -#define MMUCFG_MAVN_V1_0 0x00000000 -#define MMUCFG_MAVN_V2_0 0x00000001 -#define MMUCFG_NTLB_MASK 0x0000000c -#define MMUCFG_NTLB_SHIFT 2 -#define MMUCFG_PIDSIZE_MASK 0x000007c0 -#define MMUCFG_PIDSIZE_SHIFT 6 -#define MMUCFG_TWC 0x00008000 -#define MMUCFG_LRAT 0x00010000 -#define MMUCFG_RASIZE_MASK 0x00fe0000 -#define MMUCFG_RASIZE_SHIFT 17 -#define MMUCFG_LPIDSIZE_MASK 0x0f000000 -#define MMUCFG_LPIDSIZE_SHIFT 24 - -/* TLBnCFG encoding */ -#define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */ -#define TLBnCFG_HES 0x00002000 /* HW select supported */ -#define TLBnCFG_IPROT 0x00008000 /* IPROT supported */ -#define TLBnCFG_GTWE 0x00010000 /* Guest can write */ -#define TLBnCFG_IND 0x00020000 /* IND entries supported */ -#define TLBnCFG_PT 0x00040000 /* Can load from page table */ -#define TLBnCFG_MINSIZE 0x00f00000 /* Minimum Page Size (v1.0) */ -#define TLBnCFG_MINSIZE_SHIFT 20 -#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */ -#define TLBnCFG_MAXSIZE_SHIFT 16 -#define TLBnCFG_ASSOC 0xff000000 /* Associativity */ -#define TLBnCFG_ASSOC_SHIFT 24 - -/* TLBnPS encoding */ -#define TLBnPS_4K 0x00000004 -#define TLBnPS_8K 0x00000008 -#define TLBnPS_16K 0x00000010 -#define TLBnPS_32K 0x00000020 -#define TLBnPS_64K 0x00000040 -#define TLBnPS_128K 0x00000080 -#define TLBnPS_256K 0x00000100 -#define TLBnPS_512K 0x00000200 -#define TLBnPS_1M 0x00000400 -#define TLBnPS_2M 0x00000800 -#define TLBnPS_4M 0x00001000 -#define TLBnPS_8M 0x00002000 -#define TLBnPS_16M 0x00004000 -#define TLBnPS_32M 0x00008000 -#define TLBnPS_64M 0x00010000 -#define TLBnPS_128M 0x00020000 -#define TLBnPS_256M 0x00040000 -#define TLBnPS_512M 0x00080000 -#define TLBnPS_1G 0x00100000 -#define TLBnPS_2G 0x00200000 -#define TLBnPS_4G 0x00400000 -#define TLBnPS_8G 0x00800000 -#define TLBnPS_16G 0x01000000 -#define TLBnPS_32G 0x02000000 -#define TLBnPS_64G 0x04000000 -#define TLBnPS_128G 0x08000000 -#define TLBnPS_256G 0x10000000 - -/* tlbilx action encoding */ -#define TLBILX_T_ALL 0 -#define TLBILX_T_TID 1 -#define TLBILX_T_FULLMATCH 3 -#define TLBILX_T_CLASS0 4 -#define TLBILX_T_CLASS1 5 -#define TLBILX_T_CLASS2 6 -#define TLBILX_T_CLASS3 7 - -/* - * The mapping only needs to be cache-coherent on SMP, except on - * Freescale e500mc derivatives where it's also needed for coherent DMA. 
- */ -#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC) -#define MAS2_M_IF_NEEDED MAS2_M -#else -#define MAS2_M_IF_NEEDED 0 -#endif - -#ifndef __ASSEMBLY__ -#include - -extern unsigned int tlbcam_index; - -typedef struct { - unsigned int id; - unsigned int active; - void __user *vdso; -} mm_context_t; - -/* Page size definitions, common between 32 and 64-bit - * - * shift : is the "PAGE_SHIFT" value for that page size - * penc : is the pte encoding mask - * - */ -struct mmu_psize_def -{ - unsigned int shift; /* number of bits */ - unsigned int enc; /* PTE encoding */ - unsigned int ind; /* Corresponding indirect page size shift */ - unsigned int flags; -#define MMU_PAGE_SIZE_DIRECT 0x1 /* Supported as a direct size */ -#define MMU_PAGE_SIZE_INDIRECT 0x2 /* Supported as an indirect size */ -}; -extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; - -static inline int shift_to_mmu_psize(unsigned int shift) -{ - int psize; - - for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) - if (mmu_psize_defs[psize].shift == shift) - return psize; - return -1; -} - -static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) -{ - if (mmu_psize_defs[mmu_psize].shift) - return mmu_psize_defs[mmu_psize].shift; - BUG(); -} - -/* The page sizes use the same names as 64-bit hash but are - * constants - */ -#if defined(CONFIG_PPC_4K_PAGES) -#define mmu_virtual_psize MMU_PAGE_4K -#else -#error Unsupported page size -#endif - -extern int mmu_linear_psize; -extern int mmu_vmemmap_psize; - -struct tlb_core_data { - /* - * Per-core spinlock for e6500 TLB handlers (no tlbsrx.) - * Must be the first struct element. - */ - u8 lock; - - /* For software way selection, as on Freescale TLB1 */ - u8 esel_next, esel_max, esel_first; -}; - -#ifdef CONFIG_PPC64 -extern unsigned long linear_map_top; -extern int book3e_htw_mode; - -#define PPC_HTW_NONE 0 -#define PPC_HTW_IBM 1 -#define PPC_HTW_E6500 2 - -/* - * 64-bit booke platforms don't load the tlb in the tlb miss handler code. - * HUGETLB_NEED_PRELOAD handles this - it causes huge_ptep_set_access_flags to - * return 1, indicating that the tlb requires preloading. 
- */ -#define HUGETLB_NEED_PRELOAD - -#define mmu_cleanup_all NULL - -#define MAX_PHYSMEM_BITS 44 - -#endif - -#endif /* !__ASSEMBLY__ */ - -#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */ diff --git a/arch/powerpc/include/asm/nohash/mmu-e500.h b/arch/powerpc/include/asm/nohash/mmu-e500.h new file mode 100644 index 000000000000..e43a418d3ccd --- /dev/null +++ b/arch/powerpc/include/asm/nohash/mmu-e500.h @@ -0,0 +1,324 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_MMU_BOOK3E_H_ +#define _ASM_POWERPC_MMU_BOOK3E_H_ +/* + * Freescale Book-E/Book-3e (ISA 2.06+) MMU support + */ + +/* Book-3e defined page sizes */ +#define BOOK3E_PAGESZ_1K 0 +#define BOOK3E_PAGESZ_2K 1 +#define BOOK3E_PAGESZ_4K 2 +#define BOOK3E_PAGESZ_8K 3 +#define BOOK3E_PAGESZ_16K 4 +#define BOOK3E_PAGESZ_32K 5 +#define BOOK3E_PAGESZ_64K 6 +#define BOOK3E_PAGESZ_128K 7 +#define BOOK3E_PAGESZ_256K 8 +#define BOOK3E_PAGESZ_512K 9 +#define BOOK3E_PAGESZ_1M 10 +#define BOOK3E_PAGESZ_2M 11 +#define BOOK3E_PAGESZ_4M 12 +#define BOOK3E_PAGESZ_8M 13 +#define BOOK3E_PAGESZ_16M 14 +#define BOOK3E_PAGESZ_32M 15 +#define BOOK3E_PAGESZ_64M 16 +#define BOOK3E_PAGESZ_128M 17 +#define BOOK3E_PAGESZ_256M 18 +#define BOOK3E_PAGESZ_512M 19 +#define BOOK3E_PAGESZ_1GB 20 +#define BOOK3E_PAGESZ_2GB 21 +#define BOOK3E_PAGESZ_4GB 22 +#define BOOK3E_PAGESZ_8GB 23 +#define BOOK3E_PAGESZ_16GB 24 +#define BOOK3E_PAGESZ_32GB 25 +#define BOOK3E_PAGESZ_64GB 26 +#define BOOK3E_PAGESZ_128GB 27 +#define BOOK3E_PAGESZ_256GB 28 +#define BOOK3E_PAGESZ_512GB 29 +#define BOOK3E_PAGESZ_1TB 30 +#define BOOK3E_PAGESZ_2TB 31 + +/* MAS registers bit definitions */ + +#define MAS0_TLBSEL_MASK 0x30000000 +#define MAS0_TLBSEL_SHIFT 28 +#define MAS0_TLBSEL(x) (((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK) +#define MAS0_GET_TLBSEL(mas0) (((mas0) & MAS0_TLBSEL_MASK) >> \ + MAS0_TLBSEL_SHIFT) +#define MAS0_ESEL_MASK 0x0FFF0000 +#define MAS0_ESEL_SHIFT 16 +#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK) +#define MAS0_NV(x) ((x) & 0x00000FFF) +#define MAS0_HES 0x00004000 +#define MAS0_WQ_ALLWAYS 0x00000000 +#define MAS0_WQ_COND 0x00001000 +#define MAS0_WQ_CLR_RSRV 0x00002000 + +#define MAS1_VALID 0x80000000 +#define MAS1_IPROT 0x40000000 +#define MAS1_TID(x) (((x) << 16) & 0x3FFF0000) +#define MAS1_IND 0x00002000 +#define MAS1_TS 0x00001000 +#define MAS1_TSIZE_MASK 0x00000f80 +#define MAS1_TSIZE_SHIFT 7 +#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) +#define MAS1_GET_TSIZE(mas1) (((mas1) & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT) + +#define MAS2_EPN (~0xFFFUL) +#define MAS2_X0 0x00000040 +#define MAS2_X1 0x00000020 +#define MAS2_W 0x00000010 +#define MAS2_I 0x00000008 +#define MAS2_M 0x00000004 +#define MAS2_G 0x00000002 +#define MAS2_E 0x00000001 +#define MAS2_WIMGE_MASK 0x0000001f +#define MAS2_EPN_MASK(size) (~0 << (size + 10)) + +#define MAS3_RPN 0xFFFFF000 +#define MAS3_U0 0x00000200 +#define MAS3_U1 0x00000100 +#define MAS3_U2 0x00000080 +#define MAS3_U3 0x00000040 +#define MAS3_UX 0x00000020 +#define MAS3_SX 0x00000010 +#define MAS3_UW 0x00000008 +#define MAS3_SW 0x00000004 +#define MAS3_UR 0x00000002 +#define MAS3_SR 0x00000001 +#define MAS3_BAP_MASK 0x0000003f +#define MAS3_SPSIZE 0x0000003e +#define MAS3_SPSIZE_SHIFT 1 + +#define MAS4_TLBSEL_MASK MAS0_TLBSEL_MASK +#define MAS4_TLBSELD(x) MAS0_TLBSEL(x) +#define MAS4_INDD 0x00008000 /* Default IND */ +#define MAS4_TSIZED(x) MAS1_TSIZE(x) +#define MAS4_X0D 0x00000040 +#define MAS4_X1D 0x00000020 +#define MAS4_WD 0x00000010 +#define MAS4_ID 0x00000008 +#define MAS4_MD 
0x00000004 +#define MAS4_GD 0x00000002 +#define MAS4_ED 0x00000001 +#define MAS4_WIMGED_MASK 0x0000001f /* Default WIMGE */ +#define MAS4_WIMGED_SHIFT 0 +#define MAS4_VLED MAS4_X1D /* Default VLE */ +#define MAS4_ACMD 0x000000c0 /* Default ACM */ +#define MAS4_ACMD_SHIFT 6 +#define MAS4_TSIZED_MASK 0x00000f80 /* Default TSIZE */ +#define MAS4_TSIZED_SHIFT 7 + +#define MAS5_SGS 0x80000000 + +#define MAS6_SPID0 0x3FFF0000 +#define MAS6_SPID1 0x00007FFE +#define MAS6_ISIZE(x) MAS1_TSIZE(x) +#define MAS6_SAS 0x00000001 +#define MAS6_SPID MAS6_SPID0 +#define MAS6_SIND 0x00000002 /* Indirect page */ +#define MAS6_SIND_SHIFT 1 +#define MAS6_SPID_MASK 0x3fff0000 +#define MAS6_SPID_SHIFT 16 +#define MAS6_ISIZE_MASK 0x00000f80 +#define MAS6_ISIZE_SHIFT 7 + +#define MAS7_RPN 0xFFFFFFFF + +#define MAS8_TGS 0x80000000 /* Guest space */ +#define MAS8_VF 0x40000000 /* Virtualization Fault */ +#define MAS8_TLPID 0x000000ff + +/* Bit definitions for MMUCFG */ +#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */ +#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */ +#define MMUCFG_MAVN_V2 0x00000001 /* v2.0 */ +#define MMUCFG_NTLBS 0x0000000c /* Number of TLBs */ +#define MMUCFG_PIDSIZE 0x000007c0 /* PID Reg Size */ +#define MMUCFG_TWC 0x00008000 /* TLB Write Conditional (v2.0) */ +#define MMUCFG_LRAT 0x00010000 /* LRAT Supported (v2.0) */ +#define MMUCFG_RASIZE 0x00fe0000 /* Real Addr Size */ +#define MMUCFG_LPIDSIZE 0x0f000000 /* LPID Reg Size */ + +/* Bit definitions for MMUCSR0 */ +#define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */ +#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */ +#define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */ +#define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */ +#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \ + MMUCSR0_TLB2FI | MMUCSR0_TLB3FI) +#define MMUCSR0_TLB0PS 0x00000780 /* TLB0 Page Size */ +#define MMUCSR0_TLB1PS 0x00007800 /* TLB1 Page Size */ +#define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */ +#define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */ + +/* MMUCFG bits */ +#define MMUCFG_MAVN_NASK 0x00000003 +#define MMUCFG_MAVN_V1_0 0x00000000 +#define MMUCFG_MAVN_V2_0 0x00000001 +#define MMUCFG_NTLB_MASK 0x0000000c +#define MMUCFG_NTLB_SHIFT 2 +#define MMUCFG_PIDSIZE_MASK 0x000007c0 +#define MMUCFG_PIDSIZE_SHIFT 6 +#define MMUCFG_TWC 0x00008000 +#define MMUCFG_LRAT 0x00010000 +#define MMUCFG_RASIZE_MASK 0x00fe0000 +#define MMUCFG_RASIZE_SHIFT 17 +#define MMUCFG_LPIDSIZE_MASK 0x0f000000 +#define MMUCFG_LPIDSIZE_SHIFT 24 + +/* TLBnCFG encoding */ +#define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */ +#define TLBnCFG_HES 0x00002000 /* HW select supported */ +#define TLBnCFG_IPROT 0x00008000 /* IPROT supported */ +#define TLBnCFG_GTWE 0x00010000 /* Guest can write */ +#define TLBnCFG_IND 0x00020000 /* IND entries supported */ +#define TLBnCFG_PT 0x00040000 /* Can load from page table */ +#define TLBnCFG_MINSIZE 0x00f00000 /* Minimum Page Size (v1.0) */ +#define TLBnCFG_MINSIZE_SHIFT 20 +#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */ +#define TLBnCFG_MAXSIZE_SHIFT 16 +#define TLBnCFG_ASSOC 0xff000000 /* Associativity */ +#define TLBnCFG_ASSOC_SHIFT 24 + +/* TLBnPS encoding */ +#define TLBnPS_4K 0x00000004 +#define TLBnPS_8K 0x00000008 +#define TLBnPS_16K 0x00000010 +#define TLBnPS_32K 0x00000020 +#define TLBnPS_64K 0x00000040 +#define TLBnPS_128K 0x00000080 +#define TLBnPS_256K 0x00000100 +#define TLBnPS_512K 0x00000200 +#define TLBnPS_1M 0x00000400 +#define TLBnPS_2M 0x00000800 
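A note on how these one-hot TLBnPS bits are consumed: calc_cam_sz() earlier derives the maximum page shift as __ilog2(mfspr(SPRN_TLB1PS)) + 10, because bit n in this register encodes a 2^(n+10)-byte page size — TLBnPS_4K is bit 2, i.e. 2^12 bytes. A standalone decoding sketch, with the SPR read replaced by a hypothetical mask value:

	#include <stdio.h>

	/* Decode a TLBnPS one-hot mask into the largest page shift it
	 * advertises: the log2 of the highest set bit, plus 10.  This
	 * mirrors "__ilog2(mfspr(SPRN_TLB1PS)) + 10" in calc_cam_sz(). */
	static unsigned int tlbnps_max_shift(unsigned long tlbnps)
	{
		unsigned int msb = 0;

		while (tlbnps >>= 1)
			msb++;
		return msb + 10;
	}

	int main(void)
	{
		/* Hypothetical TLB1PS advertising 4K..1G: bits 2..20 set. */
		printf("max page shift: %u\n", tlbnps_max_shift(0x001ffffcUL));
		return 0;	/* prints 30, i.e. 1 GB pages */
	}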
+#define TLBnPS_4M 0x00001000 +#define TLBnPS_8M 0x00002000 +#define TLBnPS_16M 0x00004000 +#define TLBnPS_32M 0x00008000 +#define TLBnPS_64M 0x00010000 +#define TLBnPS_128M 0x00020000 +#define TLBnPS_256M 0x00040000 +#define TLBnPS_512M 0x00080000 +#define TLBnPS_1G 0x00100000 +#define TLBnPS_2G 0x00200000 +#define TLBnPS_4G 0x00400000 +#define TLBnPS_8G 0x00800000 +#define TLBnPS_16G 0x01000000 +#define TLBnPS_32G 0x02000000 +#define TLBnPS_64G 0x04000000 +#define TLBnPS_128G 0x08000000 +#define TLBnPS_256G 0x10000000 + +/* tlbilx action encoding */ +#define TLBILX_T_ALL 0 +#define TLBILX_T_TID 1 +#define TLBILX_T_FULLMATCH 3 +#define TLBILX_T_CLASS0 4 +#define TLBILX_T_CLASS1 5 +#define TLBILX_T_CLASS2 6 +#define TLBILX_T_CLASS3 7 + +/* + * The mapping only needs to be cache-coherent on SMP, except on + * Freescale e500mc derivatives where it's also needed for coherent DMA. + */ +#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC) +#define MAS2_M_IF_NEEDED MAS2_M +#else +#define MAS2_M_IF_NEEDED 0 +#endif + +#ifndef __ASSEMBLY__ +#include + +extern unsigned int tlbcam_index; + +typedef struct { + unsigned int id; + unsigned int active; + void __user *vdso; +} mm_context_t; + +/* Page size definitions, common between 32 and 64-bit + * + * shift : is the "PAGE_SHIFT" value for that page size + * penc : is the pte encoding mask + * + */ +struct mmu_psize_def +{ + unsigned int shift; /* number of bits */ + unsigned int enc; /* PTE encoding */ + unsigned int ind; /* Corresponding indirect page size shift */ + unsigned int flags; +#define MMU_PAGE_SIZE_DIRECT 0x1 /* Supported as a direct size */ +#define MMU_PAGE_SIZE_INDIRECT 0x2 /* Supported as an indirect size */ +}; +extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; + +static inline int shift_to_mmu_psize(unsigned int shift) +{ + int psize; + + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) + if (mmu_psize_defs[psize].shift == shift) + return psize; + return -1; +} + +static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) +{ + if (mmu_psize_defs[mmu_psize].shift) + return mmu_psize_defs[mmu_psize].shift; + BUG(); +} + +/* The page sizes use the same names as 64-bit hash but are + * constants + */ +#if defined(CONFIG_PPC_4K_PAGES) +#define mmu_virtual_psize MMU_PAGE_4K +#else +#error Unsupported page size +#endif + +extern int mmu_linear_psize; +extern int mmu_vmemmap_psize; + +struct tlb_core_data { + /* + * Per-core spinlock for e6500 TLB handlers (no tlbsrx.) + * Must be the first struct element. + */ + u8 lock; + + /* For software way selection, as on Freescale TLB1 */ + u8 esel_next, esel_max, esel_first; +}; + +#ifdef CONFIG_PPC64 +extern unsigned long linear_map_top; +extern int book3e_htw_mode; + +#define PPC_HTW_NONE 0 +#define PPC_HTW_IBM 1 +#define PPC_HTW_E6500 2 + +/* + * 64-bit booke platforms don't load the tlb in the tlb miss handler code. + * HUGETLB_NEED_PRELOAD handles this - it causes huge_ptep_set_access_flags to + * return 1, indicating that the tlb requires preloading. 
+ */ +#define HUGETLB_NEED_PRELOAD + +#define mmu_cleanup_all NULL + +#define MAX_PHYSMEM_BITS 44 + +#endif + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */ diff --git a/arch/powerpc/include/asm/nohash/mmu.h b/arch/powerpc/include/asm/nohash/mmu.h index edc793e5f08f..e264be219fdb 100644 --- a/arch/powerpc/include/asm/nohash/mmu.h +++ b/arch/powerpc/include/asm/nohash/mmu.h @@ -8,9 +8,9 @@ #elif defined(CONFIG_44x) /* 44x-style software loaded TLB */ #include -#elif defined(CONFIG_PPC_BOOK3E_MMU) +#elif defined(CONFIG_PPC_E500) /* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */ -#include +#include #elif defined (CONFIG_PPC_8xx) /* Motorola/Freescale 8xx software loaded TLB */ #include diff --git a/arch/powerpc/kernel/cpu_setup_e500.S b/arch/powerpc/kernel/cpu_setup_e500.S index 058336079069..2ab25161b0ad 100644 --- a/arch/powerpc/kernel/cpu_setup_e500.S +++ b/arch/powerpc/kernel/cpu_setup_e500.S @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index e6d5fe3a8585..2b5b0677d36c 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -488,7 +488,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return) mtspr SPRN_##exc_lvl_srr0,r9; \ mtspr SPRN_##exc_lvl_srr1,r10; -#if defined(CONFIG_PPC_BOOK3E_MMU) +#if defined(CONFIG_PPC_E500) #ifdef CONFIG_PHYS_64BIT #define RESTORE_MAS7 \ lwz r11,MAS7(r1); \ diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 1047dc053b47..1cb9d0f7cbf2 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -242,7 +242,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) .macro SAVE_MMU_REGS -#ifdef CONFIG_PPC_BOOK3E_MMU +#ifdef CONFIG_PPC_E500 mfspr r0,SPRN_MAS0 stw r0,MAS0(r1) mfspr r0,SPRN_MAS1 @@ -257,7 +257,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) mfspr r0,SPRN_MAS7 stw r0,MAS7(r1) #endif /* CONFIG_PHYS_64BIT */ -#endif /* CONFIG_PPC_BOOK3E_MMU */ +#endif /* CONFIG_PPC_E500 */ #ifdef CONFIG_44x mfspr r0,SPRN_MMUCR stw r0,MMUCR(r1) diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 6568823cf306..5b3c093611ba 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -455,7 +455,7 @@ static void __init kvm_check_ins(u32 *inst, u32 features) kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt); break; -#ifdef CONFIG_PPC_BOOK3E_MMU +#ifdef CONFIG_PPC_E500 case KVM_INST_MFSPR(SPRN_MAS0): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt); @@ -484,7 +484,7 @@ static void __init kvm_check_ins(u32 *inst, u32 features) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt); break; -#endif /* CONFIG_PPC_BOOK3E_MMU */ +#endif /* CONFIG_PPC_E500 */ case KVM_INST_MFSPR(SPRN_SPRG4): #ifdef CONFIG_BOOKE @@ -557,7 +557,7 @@ static void __init kvm_check_ins(u32 *inst, u32 features) case KVM_INST_MTSPR(SPRN_DSISR): kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt); break; -#ifdef CONFIG_PPC_BOOK3E_MMU +#ifdef CONFIG_PPC_E500 case KVM_INST_MTSPR(SPRN_MAS0): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt); @@ -586,7 +586,7 @@ static void __init kvm_check_ins(u32 *inst, u32 features) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt); break; -#endif /* CONFIG_PPC_BOOK3E_MMU */ +#endif /* CONFIG_PPC_E500 */ case KVM_INST_MTSPR(SPRN_SPRG4): if 
(features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index c3ef751465fb..6d0d329cbb35 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -17,7 +17,7 @@ #define KVM_E500_H #include -#include <asm/nohash/mmu-book3e.h> +#include <asm/nohash/mmu-e500.h> #include #include diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index fcb1e5ae5c55..fac59fbd475a 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -49,7 +49,7 @@ * other sizes not listed here. The .ind field is only used on MMUs that have * indirect page table entries. */ -#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx) +#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_8xx) #ifdef CONFIG_PPC_E500 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { @@ -142,7 +142,7 @@ static inline int mmu_get_tsize(int psize) /* This isn't used on !Book3E for now */ return 0; } -#endif /* CONFIG_PPC_BOOK3E_MMU */ +#endif /* CONFIG_PPC_E500 */ /* The variables below are currently only used on 64-bit Book3E * though this will probably be made common with other nohash diff --git a/arch/powerpc/mm/ptdump/Makefile b/arch/powerpc/mm/ptdump/Makefile index b533caaf0910..dc896d2874f3 100644 --- a/arch/powerpc/mm/ptdump/Makefile +++ b/arch/powerpc/mm/ptdump/Makefile @@ -4,7 +4,7 @@ obj-y += ptdump.o obj-$(CONFIG_4xx) += shared.o obj-$(CONFIG_PPC_8xx) += 8xx.o -obj-$(CONFIG_PPC_BOOK3E_MMU) += shared.o +obj-$(CONFIG_PPC_E500) += shared.o obj-$(CONFIG_PPC_BOOK3S_32) += shared.o obj-$(CONFIG_PPC_BOOK3S_64) += book3s64.o diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 32c60ad8f45d..1746d19d058f 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -466,10 +466,6 @@ config PPC_MMU_NOHASH def_bool y depends on !PPC_BOOK3S -config PPC_BOOK3E_MMU - def_bool y - depends on PPC_85xx || PPC_BOOK3E_64 - config PPC_HAVE_PMU_SUPPORT bool -- cgit v1.2.3-70-g09d2 From 6556fd1a1e9fcd180348c4368d2387bdc6a17613 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:42 +0200 Subject: powerpc: Cleanup idle for e500 e500 idle setup is a bit messy. e500_idle() is used for PPC32 while book3e_idle() is used for PPC64. As they are mutually exclusive, call them all e500_idle(). Use CONFIG_PPC_85xx instead of PPC32 + E500 in Makefile and rename idle_e500.S to idle_85xx.S. Rename idle_book3e.S to idle_64e.S and remove the #ifdef PPC64 from it, as it's only built on PPC64.
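For orientation before the assembly below: the 64-bit idle path hard-disables interrupts, bails out if one is already pending in PACAIRQHAPPENED, sets _TLF_NAPPING so the interrupt exit path returns to the idle caller, and only then re-enables interrupts and waits. A rough C rendering of that flow, using stand-in names rather than kernel identifiers:

	#include <stdbool.h>

	static bool irq_happened;		/* stands in for PACAIRQHAPPENED */
	static unsigned long local_flags;	/* stands in for TI_LOCAL_FLAGS  */
	#define TLF_NAPPING	0x1		/* stands in for _TLF_NAPPING    */

	static void wrteei(int enable) { (void)enable; }	/* interrupt enable stub */
	static void ppc_wait(void) { }				/* PPC_WAIT stub         */

	/* Illustrative model of the BOOK3E_IDLE macro's control flow. */
	static void idle_64e_sketch(void)
	{
		wrteei(0);			/* hard-disable interrupts          */
		if (irq_happened)		/* interrupt arrived while masked?  */
			return;			/* bail out so it gets serviced     */
		local_flags |= TLF_NAPPING;	/* interrupt exit returns to caller */
		wrteei(1);			/* re-enable, then sleep            */
		while (local_flags & TLF_NAPPING)
			ppc_wait();		/* a real interrupt would clear the
						 * flag and redirect us out of here */
	}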
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/8039301334e948974c85ec5ef2db37751075185b.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/machdep.h | 1 - arch/powerpc/kernel/Makefile | 6 +- arch/powerpc/kernel/idle_64e.S | 99 +++++++++++++++++++++++++ arch/powerpc/kernel/idle_85xx.S | 85 +++++++++++++++++++++ arch/powerpc/kernel/idle_book3e.S | 103 -------------------------- arch/powerpc/kernel/idle_e500.S | 85 --------------------- arch/powerpc/platforms/85xx/corenet_generic.c | 4 - arch/powerpc/platforms/85xx/qemu_e500.c | 4 - 8 files changed, 186 insertions(+), 201 deletions(-) create mode 100644 arch/powerpc/kernel/idle_64e.S create mode 100644 arch/powerpc/kernel/idle_85xx.S delete mode 100644 arch/powerpc/kernel/idle_book3e.S delete mode 100644 arch/powerpc/kernel/idle_e500.S (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 8cb83600c434..378b8d5836a7 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -204,7 +204,6 @@ struct machdep_calls { extern void e500_idle(void); extern void power4_idle(void); extern void ppc6xx_idle(void); -extern void book3e_idle(void); /* * ppc_md contains a copy of the machine description structure for the diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 658c4dffaa56..1f121c188805 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -81,7 +81,7 @@ obj-$(CONFIG_PPC_DAWR) += dawr.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o -obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o +obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_64e.o obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o obj-$(CONFIG_PPC64) += vdso64_wrapper.o obj-$(CONFIG_ALTIVEC) += vecemu.o @@ -100,9 +100,7 @@ obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_FA_DUMP) += fadump.o obj-$(CONFIG_PRESERVE_FA_DUMP) += fadump.o -ifdef CONFIG_PPC32 -obj-$(CONFIG_PPC_E500) += idle_e500.o -endif +obj-$(CONFIG_PPC_85xx) += idle_85xx.o obj-$(CONFIG_PPC_BOOK3S_32) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o obj-$(CONFIG_TAU) += tau_6xx.o obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o diff --git a/arch/powerpc/kernel/idle_64e.S b/arch/powerpc/kernel/idle_64e.S new file mode 100644 index 000000000000..1736aad2afe9 --- /dev/null +++ b/arch/powerpc/kernel/idle_64e.S @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2010 IBM Corp, Benjamin Herrenschmidt + * + * Generic idle routine for 64 bits e500 processors + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* 64-bit version only for now */ +.macro BOOK3E_IDLE name loop +_GLOBAL(\name) + /* Save LR for later */ + mflr r0 + std r0,16(r1) + + /* Hard disable interrupts */ + wrteei 0 + + /* Now check if an interrupt came in while we were soft disabled + * since we may otherwise lose it (doorbells etc...). + */ + lbz r3,PACAIRQHAPPENED(r13) + cmpwi cr0,r3,0 + bne 2f + + /* Now we are going to mark ourselves as soft and hard enabled in + * order to be able to take interrupts while asleep. We inform lockdep + * of that. We don't actually turn interrupts on just yet tho. 
+ */ +#ifdef CONFIG_TRACE_IRQFLAGS + stdu r1,-128(r1) + bl trace_hardirqs_on + addi r1,r1,128 +#endif + li r0,IRQS_ENABLED + stb r0,PACAIRQSOFTMASK(r13) + + /* Interrupts will make use return to LR, so get something we want + * in there + */ + bl 1f + + /* And return (interrupts are on) */ + ld r0,16(r1) + mtlr r0 + blr + +1: /* Let's set the _TLF_NAPPING flag so interrupts make us return + * to the right spot + */ + ld r11, PACACURRENT(r13) + ld r10,TI_LOCAL_FLAGS(r11) + ori r10,r10,_TLF_NAPPING + std r10,TI_LOCAL_FLAGS(r11) + + /* We can now re-enable hard interrupts and go to sleep */ + wrteei 1 + \loop + +2: + lbz r10,PACAIRQHAPPENED(r13) + ori r10,r10,PACA_IRQ_HARD_DIS + stb r10,PACAIRQHAPPENED(r13) + blr +.endm + +.macro BOOK3E_IDLE_LOOP +1: + PPC_WAIT(0) + b 1b +.endm + +/* epapr_ev_idle_start below is patched with the proper hcall + opcodes during kernel initialization */ +.macro EPAPR_EV_IDLE_LOOP +idle_loop: + LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE)) + +.global epapr_ev_idle_start +epapr_ev_idle_start: + li r3, -1 + nop + nop + nop + b idle_loop +.endm + +BOOK3E_IDLE epapr_ev_idle EPAPR_EV_IDLE_LOOP + +BOOK3E_IDLE e500_idle BOOK3E_IDLE_LOOP diff --git a/arch/powerpc/kernel/idle_85xx.S b/arch/powerpc/kernel/idle_85xx.S new file mode 100644 index 000000000000..9e1bc4502c50 --- /dev/null +++ b/arch/powerpc/kernel/idle_85xx.S @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. + * Dave Liu + * copy from idle_6xx.S and modify for e500 based processor, + * implement the power_save function in idle. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + .text + +_GLOBAL(e500_idle) + lwz r4,TI_LOCAL_FLAGS(r2) /* set napping bit */ + ori r4,r4,_TLF_NAPPING /* so when we take an exception */ + stw r4,TI_LOCAL_FLAGS(r2) /* it will return to our caller */ + +#ifdef CONFIG_PPC_E500MC + wrteei 1 +1: wait + + /* + * Guard against spurious wakeups (e.g. from a hypervisor) -- + * any real interrupt will cause us to return to LR due to + * _TLF_NAPPING. + */ + b 1b +#else + /* Check if we can nap or doze, put HID0 mask in r3 */ + lis r3,0 +BEGIN_FTR_SECTION + lis r3,HID0_DOZE@h +END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) + +BEGIN_FTR_SECTION + /* Now check if user enabled NAP mode */ + lis r4,powersave_nap@ha + lwz r4,powersave_nap@l(r4) + cmpwi 0,r4,0 + beq 1f + stwu r1,-16(r1) + mflr r0 + stw r0,20(r1) + bl flush_dcache_L1 + lwz r0,20(r1) + addi r1,r1,16 + mtlr r0 + lis r3,HID0_NAP@h +END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) +1: + /* Go to NAP or DOZE now */ + mfspr r4,SPRN_HID0 + rlwinm r4,r4,0,~(HID0_DOZE|HID0_NAP|HID0_SLEEP) + or r4,r4,r3 + isync + mtspr SPRN_HID0,r4 + isync + + mfmsr r7 + oris r7,r7,MSR_WE@h + ori r7,r7,MSR_EE + msync + mtmsr r7 + isync +2: b 2b +#endif /* !E500MC */ + +/* + * Return from NAP/DOZE mode, restore some CPU specific registers, + * r2 containing address of current. + * r11 points to the exception frame. + * We have to preserve r10. 
+ */ +_GLOBAL(power_save_ppc32_restore) + lwz r9,_LINK(r11) /* interrupted in e500_idle */ + stw r9,_NIP(r11) /* make it do a blr */ + blr +_ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore) diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S deleted file mode 100644 index cc008de58b05..000000000000 --- a/arch/powerpc/kernel/idle_book3e.S +++ /dev/null @@ -1,103 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright 2010 IBM Corp, Benjamin Herrenschmidt - * - * Generic idle routine for Book3E processors - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* 64-bit version only for now */ -#ifdef CONFIG_PPC64 - -.macro BOOK3E_IDLE name loop -_GLOBAL(\name) - /* Save LR for later */ - mflr r0 - std r0,16(r1) - - /* Hard disable interrupts */ - wrteei 0 - - /* Now check if an interrupt came in while we were soft disabled - * since we may otherwise lose it (doorbells etc...). - */ - lbz r3,PACAIRQHAPPENED(r13) - cmpwi cr0,r3,0 - bne 2f - - /* Now we are going to mark ourselves as soft and hard enabled in - * order to be able to take interrupts while asleep. We inform lockdep - * of that. We don't actually turn interrupts on just yet tho. - */ -#ifdef CONFIG_TRACE_IRQFLAGS - stdu r1,-128(r1) - bl trace_hardirqs_on - addi r1,r1,128 -#endif - li r0,IRQS_ENABLED - stb r0,PACAIRQSOFTMASK(r13) - - /* Interrupts will make use return to LR, so get something we want - * in there - */ - bl 1f - - /* And return (interrupts are on) */ - ld r0,16(r1) - mtlr r0 - blr - -1: /* Let's set the _TLF_NAPPING flag so interrupts make us return - * to the right spot - */ - ld r11, PACACURRENT(r13) - ld r10,TI_LOCAL_FLAGS(r11) - ori r10,r10,_TLF_NAPPING - std r10,TI_LOCAL_FLAGS(r11) - - /* We can now re-enable hard interrupts and go to sleep */ - wrteei 1 - \loop - -2: - lbz r10,PACAIRQHAPPENED(r13) - ori r10,r10,PACA_IRQ_HARD_DIS - stb r10,PACAIRQHAPPENED(r13) - blr -.endm - -.macro BOOK3E_IDLE_LOOP -1: - PPC_WAIT(0) - b 1b -.endm - -/* epapr_ev_idle_start below is patched with the proper hcall - opcodes during kernel initialization */ -.macro EPAPR_EV_IDLE_LOOP -idle_loop: - LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE)) - -.global epapr_ev_idle_start -epapr_ev_idle_start: - li r3, -1 - nop - nop - nop - b idle_loop -.endm - -BOOK3E_IDLE epapr_ev_idle EPAPR_EV_IDLE_LOOP - -BOOK3E_IDLE book3e_idle BOOK3E_IDLE_LOOP - -#endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S deleted file mode 100644 index 9e1bc4502c50..000000000000 --- a/arch/powerpc/kernel/idle_e500.S +++ /dev/null @@ -1,85 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. - * Dave Liu - * copy from idle_6xx.S and modify for e500 based processor, - * implement the power_save function in idle. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - - .text - -_GLOBAL(e500_idle) - lwz r4,TI_LOCAL_FLAGS(r2) /* set napping bit */ - ori r4,r4,_TLF_NAPPING /* so when we take an exception */ - stw r4,TI_LOCAL_FLAGS(r2) /* it will return to our caller */ - -#ifdef CONFIG_PPC_E500MC - wrteei 1 -1: wait - - /* - * Guard against spurious wakeups (e.g. from a hypervisor) -- - * any real interrupt will cause us to return to LR due to - * _TLF_NAPPING. 
- */ - b 1b -#else - /* Check if we can nap or doze, put HID0 mask in r3 */ - lis r3,0 -BEGIN_FTR_SECTION - lis r3,HID0_DOZE@h -END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) - -BEGIN_FTR_SECTION - /* Now check if user enabled NAP mode */ - lis r4,powersave_nap@ha - lwz r4,powersave_nap@l(r4) - cmpwi 0,r4,0 - beq 1f - stwu r1,-16(r1) - mflr r0 - stw r0,20(r1) - bl flush_dcache_L1 - lwz r0,20(r1) - addi r1,r1,16 - mtlr r0 - lis r3,HID0_NAP@h -END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) -1: - /* Go to NAP or DOZE now */ - mfspr r4,SPRN_HID0 - rlwinm r4,r4,0,~(HID0_DOZE|HID0_NAP|HID0_SLEEP) - or r4,r4,r3 - isync - mtspr SPRN_HID0,r4 - isync - - mfmsr r7 - oris r7,r7,MSR_WE@h - ori r7,r7,MSR_EE - msync - mtmsr r7 - isync -2: b 2b -#endif /* !E500MC */ - -/* - * Return from NAP/DOZE mode, restore some CPU specific registers, - * r2 containing address of current. - * r11 points to the exception frame. - * We have to preserve r10. - */ -_GLOBAL(power_save_ppc32_restore) - lwz r9,_LINK(r11) /* interrupted in e500_idle */ - stw r9,_NIP(r11) /* make it do a blr */ - blr -_ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore) diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c index 28d6b36f1ccd..2c539de2d629 100644 --- a/arch/powerpc/platforms/85xx/corenet_generic.c +++ b/arch/powerpc/platforms/85xx/corenet_generic.c @@ -200,9 +200,5 @@ define_machine(corenet_generic) { #endif .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, -#ifdef CONFIG_PPC64 - .power_save = book3e_idle, -#else .power_save = e500_idle, -#endif }; diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c index 64109ad6736c..1639e222cc33 100644 --- a/arch/powerpc/platforms/85xx/qemu_e500.c +++ b/arch/powerpc/platforms/85xx/qemu_e500.c @@ -68,9 +68,5 @@ define_machine(qemu_e500) { .get_irq = mpic_get_coreint_irq, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, -#ifdef CONFIG_PPC64 - .power_save = book3e_idle, -#else .power_save = e500_idle, -#endif }; -- cgit v1.2.3-70-g09d2 From 4af83545538a4fa80d14b9247ffc0db556e6a556 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 20 Sep 2022 08:41:08 +0200 Subject: powerpc/irq: Refactor irq_soft_mask_{set,or}_return() This partially reapplies commit ef5b570d3700 ("powerpc/irq: Don't open code irq_soft_mask helpers"), which was reverted by commit 684c68d92e2e ("Revert "powerpc/irq: Don't open code irq_soft_mask helpers""). irq_soft_mask_set_return() and irq_soft_mask_or_return() are supersets of irq_soft_mask_set(). Have them use irq_soft_mask_set() instead of duplicating it.
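As an aside, the refactor is easy to model outside the kernel. The sketch below is a plain user-space C analogue, grounded in the diff that follows: a global variable stands in for the PACA's irq_soft_mask field, and no atomicity, lockdep or debug-WARN concerns are modelled. It shows how the two _return variants become thin wrappers around the base setter.

#include <stdio.h>

/* Stand-in for paca->irq_soft_mask; illustrative only. */
static unsigned long soft_mask;

static unsigned long irq_soft_mask_return(void)
{
	return soft_mask;
}

static void irq_soft_mask_set(unsigned long mask)
{
	soft_mask = mask;
}

/* Refactored helpers: read the old value, then reuse the setter. */
static unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(mask);
	return flags;
}

static unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(flags | mask);
	return flags;
}

int main(void)
{
	unsigned long old;

	irq_soft_mask_set(0x1);
	old = irq_soft_mask_set_return(0x2);
	printf("old=%lx new=%lx\n", old, irq_soft_mask_return());	/* old=1 new=2 */
	old = irq_soft_mask_or_return(0x4);
	printf("old=%lx new=%lx\n", old, irq_soft_mask_return());	/* old=2 new=6 */
	return 0;
}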
Signed-off-by: Christophe Leroy Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/18473da42362ee8f07bce36b9caef8ba77d7633f.1663656054.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/hw_irq.h | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 983551859891..e8de249339d8 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -157,36 +157,18 @@ static inline notrace void irq_soft_mask_set(unsigned long mask) static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask) { - unsigned long flags; - -#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG - WARN_ON(mask && !(mask & IRQS_DISABLED)); -#endif + unsigned long flags = irq_soft_mask_return(); - asm volatile( - "lbz %0,%1(13); stb %2,%1(13)" - : "=&r" (flags) - : "i" (offsetof(struct paca_struct, irq_soft_mask)), - "r" (mask) - : "memory"); + irq_soft_mask_set(mask); return flags; } static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask) { - unsigned long flags, tmp; + unsigned long flags = irq_soft_mask_return(); - asm volatile( - "lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)" - : "=&r" (flags), "=r" (tmp) - : "i" (offsetof(struct paca_struct, irq_soft_mask)), - "r" (mask) - : "memory"); - -#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG - WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED)); -#endif + irq_soft_mask_set(flags | mask); return flags; } -- cgit v1.2.3-70-g09d2 From 5ba6c9a912fe4c60f84d6617ad10d2b8d7910990 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:41 +1000 Subject: powerpc: Remove asmlinkage from syscall handler definitions The asmlinkage macro has no special meaning in powerpc, and prior to this patch is used sporadically on some syscall handler definitions. On architectures that do not define asmlinkage, it resolves to extern "C" for C++ compilers and a nop otherwise. The current invocations of asmlinkage provide far from complete support for C++ toolchains, and so the macro serves no purpose in powerpc. Remove all invocations of asmlinkage in arch/powerpc. These incidentally only occur in syscall definitions and prototypes. 
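For readers unfamiliar with the macro, the generic fallback that makes asmlinkage a no-op outside C++ looks roughly like the sketch below. The spelling follows the usual include/linux/linkage.h pattern alluded to in the message; treat it as illustrative rather than a quote of the header.

/*
 * Assumed shape of the generic asmlinkage fallback: extern "C" under
 * a C++ compiler, nothing otherwise. An architecture may override it.
 */
#ifdef __cplusplus
#define CPP_ASMLINKAGE extern "C"
#else
#define CPP_ASMLINKAGE
#endif

#ifndef asmlinkage
#define asmlinkage CPP_ASMLINKAGE
#endif

/* So on powerpc, built as C, these two declarations are identical: */
asmlinkage long sys_example(int fd);
long sys_example(int fd);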
Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Reviewed-by: Andrew Donnellan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-2-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 16 ++++++++-------- arch/powerpc/kernel/sys_ppc32.c | 8 ++++---- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index a2b13e55254f..21c2faaa2957 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -10,14 +10,14 @@ struct rtas_args; -asmlinkage long sys_mmap(unsigned long addr, size_t len, - unsigned long prot, unsigned long flags, - unsigned long fd, off_t offset); -asmlinkage long sys_mmap2(unsigned long addr, size_t len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff); -asmlinkage long ppc64_personality(unsigned long personality); -asmlinkage long sys_rtas(struct rtas_args __user *uargs); +long sys_mmap(unsigned long addr, size_t len, + unsigned long prot, unsigned long flags, + unsigned long fd, off_t offset); +long sys_mmap2(unsigned long addr, size_t len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +long ppc64_personality(unsigned long personality); +long sys_rtas(struct rtas_args __user *uargs); int ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct __kernel_old_timeval __user *tvp); long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index 16ff0399a257..f4edcc9489fb 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -85,20 +85,20 @@ compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offset1, u32 offset2, u3 return ksys_readahead(fd, merge_64(offset1, offset2), count); } -asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4, +int compat_sys_truncate64(const char __user * path, u32 reg4, unsigned long len1, unsigned long len2) { return ksys_truncate(path, merge_64(len1, len2)); } -asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, +long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, u32 len1, u32 len2) { return ksys_fallocate(fd, mode, ((loff_t)offset1 << 32) | offset2, merge_64(len1, len2)); } -asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, +int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, unsigned long len2) { return ksys_ftruncate(fd, merge_64(len1, len2)); @@ -111,7 +111,7 @@ long ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2, advice); } -asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags, +long compat_sys_sync_file_range2(int fd, unsigned int flags, unsigned offset1, unsigned offset2, unsigned nbytes1, unsigned nbytes2) { -- cgit v1.2.3-70-g09d2 From 9d54a5ce3aa87810f13cd33b314097ac6d28c350 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:41 +1000 Subject: powerpc: Add ZEROIZE_GPRS macros for register clears Provide register zeroing macros, following the same convention as the existing register stack save/restore macros, to be used in a later change to concisely zero a sequence of consecutive GPRs.
The resulting macros are called ZEROIZE_GPRS and ZEROIZE_NVGPRS, in keeping with the naming of the accompanying restore and save macros, and with the usage of "zeroize" to describe this operation elsewhere in the kernel. Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-4-rmclure@linux.ibm.com --- arch/powerpc/include/asm/ppc_asm.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 7e4fe766e247..eeb7dc8cd45f 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -33,6 +33,20 @@ .endr .endm +/* + * This expands to a sequence of register clears for regs start to end + * inclusive, of the form: + * + * li rN, 0 + */ +.macro ZEROIZE_REGS start, end + .Lreg=\start + .rept (\end - \start + 1) + li .Lreg, 0 + .Lreg=.Lreg+1 + .endr +.endm + /* * Macros for storing registers into and loading registers from * exception frames. */ @@ -49,6 +63,14 @@ #define REST_NVGPRS(base) REST_GPRS(13, 31, base) #endif +#define ZEROIZE_GPRS(start, end) ZEROIZE_REGS start, end +#ifdef __powerpc64__ +#define ZEROIZE_NVGPRS() ZEROIZE_GPRS(14, 31) +#else +#define ZEROIZE_NVGPRS() ZEROIZE_GPRS(13, 31) +#endif +#define ZEROIZE_GPR(n) ZEROIZE_GPRS(n, n) + #define SAVE_GPR(n, base) SAVE_GPRS(n, n, base) #define REST_GPR(n, base) REST_GPRS(n, n, base) -- cgit v1.2.3-70-g09d2 From 016ff72bd2090903715c0f9422a44afbb966f4ee Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:48 +1000 Subject: powerpc: Fix fallocate and fadvise64_64 compat parameter combination As reported[1] by Arnd, the arch-specific fadvise64_64 and fallocate compatibility handlers assume parameters are passed with 32-bit big-endian ABI. This affects the assignment of odd-even parameter pairs to the high or low words of a 64-bit syscall parameter. Fix the fadvise64_64 and fallocate compat handlers to correctly swap upper/lower 32 bits conditioned on endianness. A future patch will replace the arch-specific compat fallocate with an asm-generic implementation. This patch is intended for ease of back-port. [1]: https://lore.kernel.org/all/be29926f-226e-48dc-871a-e29a54e80583@www.fastmail.com/ Fixes: 57f48b4b74e7 ("powerpc/compat_sys: swap hi/lo parts of 64-bit syscall args in LE mode") Reported-by: Arnd Bergmann Signed-off-by: Rohan McLure Reviewed-by: Arnd Bergmann Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-9-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 12 ++++++++++++ arch/powerpc/kernel/sys_ppc32.c | 14 +------------- arch/powerpc/kernel/syscalls.c | 4 ++-- 3 files changed, 15 insertions(+), 15 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 21c2faaa2957..ae7ca59f6267 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -8,6 +8,18 @@ #include #include +/* + * long long munging: + * The 32 bit ABI passes long longs in an odd even register pair. + * High and low parts are swapped depending on endian mode, + * so define a macro (similar to mips linux32) to handle that.
+ */ +#ifdef __LITTLE_ENDIAN__ +#define merge_64(low, high) (((u64)high << 32) | low) +#else +#define merge_64(high, low) (((u64)high << 32) | low) +#endif + struct rtas_args; long sys_mmap(unsigned long addr, size_t len, diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index f4edcc9489fb..ba363328da2b 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -56,18 +56,6 @@ unsigned long compat_sys_mmap2(unsigned long addr, size_t len, return sys_mmap(addr, len, prot, flags, fd, pgoff << 12); } -/* - * long long munging: - * The 32 bit ABI passes long longs in an odd even register pair. - * High and low parts are swapped depending on endian mode, - * so define a macro (similar to mips linux32) to handle that. - */ -#ifdef __LITTLE_ENDIAN__ -#define merge_64(low, high) ((u64)high << 32) | low -#else -#define merge_64(high, low) ((u64)high << 32) | low -#endif - compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, u32 reg6, u32 pos1, u32 pos2) { @@ -94,7 +82,7 @@ int compat_sys_truncate64(const char __user * path, u32 reg4, long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, u32 len1, u32 len2) { - return ksys_fallocate(fd, mode, ((loff_t)offset1 << 32) | offset2, + return ksys_fallocate(fd, mode, merge_64(offset1, offset2), merge_64(len1, len2)); } diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index fc999140bc27..abc3fbb3c490 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -98,8 +98,8 @@ long ppc64_personality(unsigned long personality) long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, u32 len_high, u32 len_low) { - return ksys_fadvise64_64(fd, (u64)offset_high << 32 | offset_low, - (u64)len_high << 32 | len_low, advice); + return ksys_fadvise64_64(fd, merge_64(offset_high, offset_low), + merge_64(len_high, len_low), advice); } SYSCALL_DEFINE0(switch_endian) -- cgit v1.2.3-70-g09d2 From c2e7a19827eec443a7cbe85e8d959052412d6dc3 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:50 +1000 Subject: powerpc: Use generic fallocate compatibility syscall The powerpc fallocate compat syscall handler is identical to the generic implementation provided by commit 59c10c52f573f ("riscv: compat: syscall: Add compat_sys_call_table implementation"), and as such can be removed in favour of the generic implementation. A future patch series will replace more architecture-defined syscall handlers with generic implementations, dependent on introducing generic implementations that are compatible with powerpc and arm's parameter reorderings. 
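The merge_64() convention added above can be exercised in isolation. The following minimal user-space C sketch mirrors the macro; __BYTE_ORDER__ is used here as a portable stand-in for the __LITTLE_ENDIAN__ test that powerpc compilers predefine, so the example also builds on other hosts.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Mirror of the kernel macro; the parameter names swap by endianness. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define merge_64(low, high) (((u64)(high) << 32) | (low))
#else
#define merge_64(high, low) (((u64)(high) << 32) | (low))
#endif

int main(void)
{
	/*
	 * A compat caller passes the 64-bit offset 0x100001000 in a
	 * register pair: the high word sits in the first register of
	 * the pair on big-endian and in the second on little-endian.
	 * merge_64() names its parameters so one call site handles both.
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	u32 reg_a = 0x00001000, reg_b = 0x00000001;	/* low, high */
#else
	u32 reg_a = 0x00000001, reg_b = 0x00001000;	/* high, low */
#endif
	/* Prints offset = 0x100001000 on either byte order. */
	printf("offset = 0x%llx\n", (unsigned long long)merge_64(reg_a, reg_b));
	return 0;
}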
Reported-by: Arnd Bergmann Signed-off-by: Rohan McLure Reviewed-by: Arnd Bergmann Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-11-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 2 -- arch/powerpc/include/asm/unistd.h | 1 + arch/powerpc/kernel/sys_ppc32.c | 7 ------- 3 files changed, 1 insertion(+), 9 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index ae7ca59f6267..52f5e1985989 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -51,8 +51,6 @@ compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offset1, u32 offset2, u3 int compat_sys_truncate64(const char __user *path, u32 reg4, unsigned long len1, unsigned long len2); -long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, u32 len1, u32 len2); - int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, unsigned long len2); diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index b1129b4ef57d..659a996c75aa 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -45,6 +45,7 @@ #define __ARCH_WANT_SYS_UTIME #define __ARCH_WANT_SYS_NEWFSTATAT #define __ARCH_WANT_COMPAT_STAT +#define __ARCH_WANT_COMPAT_FALLOCATE #define __ARCH_WANT_COMPAT_SYS_SENDFILE #endif #define __ARCH_WANT_SYS_FORK diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index ba363328da2b..d961634976d8 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -79,13 +79,6 @@ int compat_sys_truncate64(const char __user * path, u32 reg4, return ksys_truncate(path, merge_64(len1, len2)); } -long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, - u32 len1, u32 len2) -{ - return ksys_fallocate(fd, mode, merge_64(offset1, offset2), - merge_64(len1, len2)); -} - int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, unsigned long len2) { -- cgit v1.2.3-70-g09d2 From b6b1334c9510e162bd8ca0ae58403cafad9572f1 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:51 +1000 Subject: powerpc/32: Remove powerpc select specialisation Syscall #82 has been implemented for 32-bit platforms in a unique way on powerpc systems. This hack in effect guesses whether the caller is expecting new select semantics or old select semantics, based on the value of the first parameter. In new select, this parameter is the length of a user-memory array of file descriptors; in old select it is a pointer to an arguments structure. The heuristic simply interprets sufficiently large values of the first parameter as being a call to old select. The following is a discussion on how this syscall should be handled. As discussed in this thread, the existence of such a hack suggests that whatever powerpc binaries may predate glibc most likely made use of the old select semantics. x86 and arm64 both implement this syscall with old-select semantics. Remove the powerpc implementation, and update syscall.tbl to emit a reference to sys_old_select and compat_sys_old_select for 32-bit binaries, in keeping with how other architectures support syscall #82.
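The heuristic being removed is simple to demonstrate. Below is a hedged user-space C sketch; old_select_args is a stand-in modelled on the historical sel_arg_struct layout, an assumption for illustration rather than the kernel's exact definition.

#include <stdio.h>

/* Assumed layout of the old-select argument block, for illustration. */
struct old_select_args {
	unsigned long n;
	void *inp, *outp, *exp;
	void *tvp;
};

/*
 * The removed heuristic: a file-descriptor count is small, while a
 * valid user pointer never lands in the first page (< 4096), so a
 * "large" first argument is taken to be a pointer to an old-select
 * argument block.
 */
static const char *guess_select_flavour(unsigned long first_arg)
{
	if (first_arg >= 4096)
		return "old select (pointer to args struct)";
	return "new select (fd count)";
}

int main(void)
{
	struct old_select_args args = { 0 };

	printf("%lu -> %s\n", 64UL, guess_select_flavour(64));
	printf("%p -> %s\n", (void *)&args,
	       guess_select_flavour((unsigned long)&args));
	return 0;
}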
Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/lkml/13737de5-0eb7-e881-9af0-163b0d29a1a0@csgroup.eu/ Link: https://lore.kernel.org/r/20220921065605.1051927-12-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 2 -- arch/powerpc/kernel/syscalls.c | 17 ----------------- arch/powerpc/kernel/syscalls/syscall.tbl | 2 +- tools/perf/arch/powerpc/entry/syscalls/syscall.tbl | 2 +- 4 files changed, 2 insertions(+), 21 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 52f5e1985989..565b4b1d7d41 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -30,8 +30,6 @@ long sys_mmap2(unsigned long addr, size_t len, unsigned long fd, unsigned long pgoff); long ppc64_personality(unsigned long personality); long sys_rtas(struct rtas_args __user *uargs); -int ppc_select(int n, fd_set __user *inp, fd_set __user *outp, - fd_set __user *exp, struct __kernel_old_timeval __user *tvp); long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, u32 len_high, u32 len_low); diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index abc3fbb3c490..34e1ae88e15b 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -63,23 +63,6 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len, return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT); } -#ifdef CONFIG_PPC32 -/* - * Due to some executables calling the wrong select we sometimes - * get wrong args. This determines how the args are being passed - * (a single ptr to them all args passed) then calls - * sys_select() with the appropriate args. 
-- Cort - */ -int -ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct __kernel_old_timeval __user *tvp) -{ - if ((unsigned long)n >= 4096) - return sys_old_select((void __user *)n); - - return sys_select(n, inp, outp, exp, tvp); -} -#endif - #ifdef CONFIG_PPC64 long ppc64_personality(unsigned long personality) { diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index 2600b4237292..64f27cbbdd2c 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -110,7 +110,7 @@ 79 common settimeofday sys_settimeofday compat_sys_settimeofday 80 common getgroups sys_getgroups 81 common setgroups sys_setgroups -82 32 select ppc_select sys_ni_syscall +82 32 select sys_old_select compat_sys_old_select 82 64 select sys_ni_syscall 82 spu select sys_ni_syscall 83 common symlink sys_symlink diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index 2600b4237292..64f27cbbdd2c 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -110,7 +110,7 @@ 79 common settimeofday sys_settimeofday compat_sys_settimeofday 80 common getgroups sys_getgroups 81 common setgroups sys_setgroups -82 32 select ppc_select sys_ni_syscall +82 32 select sys_old_select compat_sys_old_select 82 64 select sys_ni_syscall 82 spu select sys_ni_syscall 83 common symlink sys_symlink -- cgit v1.2.3-70-g09d2 From b7fa9ce86d32baf2a3a8bf8fdaa44870084edd85 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:53 +1000 Subject: powerpc: Remove direct call to mmap2 syscall handlers Syscall handlers should not be invoked internally by their symbol names, as these symbols are defined by the architecture-defined SYSCALL_DEFINE macro. Move the compatibility syscall definition for mmap2 to syscalls.c, so that all mmap implementations can share a helper function. Remove 'inline' on the static mmap helper.
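The shared helper works because mmap2 takes its offset in fixed 4 KiB units while mmap takes a byte offset, and both reduce to a page-number offset for the common implementation. The sketch below models only that conversion; offset_to_pgoff and the 64 KiB PAGE_SHIFT are illustrative assumptions, not the kernel's code.

#include <stdio.h>
#include <errno.h>

#define PAGE_SHIFT 16	/* example: 64K pages, where the 4K/page gap matters */

/*
 * mmap passes a byte offset with shift = PAGE_SHIFT; mmap2 and its
 * compat variant pass a 4K-unit offset with shift = PAGE_SHIFT - 12
 * (the "should remain 12 even if PAGE_SIZE changes" convention).
 */
static long offset_to_pgoff(unsigned long off, int shift, unsigned long *pgoff)
{
	/* Reject offsets that are not page aligned after scaling. */
	if (off & ((1UL << shift) - 1))
		return -EINVAL;
	*pgoff = off >> shift;
	return 0;
}

int main(void)
{
	unsigned long pgoff;

	/* mmap2: offset 32 in 4K units == 128K == page 2 with 64K pages */
	if (offset_to_pgoff(32, PAGE_SHIFT - 12, &pgoff) == 0)
		printf("mmap2 off 32 -> pgoff %lu\n", pgoff);

	/* mmap: byte offset 0x20000 == page 2 */
	if (offset_to_pgoff(0x20000, PAGE_SHIFT, &pgoff) == 0)
		printf("mmap off 0x20000 -> pgoff %lu\n", pgoff);

	/* A 4K-unit offset that is not 64K aligned is rejected. */
	if (offset_to_pgoff(3, PAGE_SHIFT - 12, &pgoff) != 0)
		printf("mmap2 off 3 -> -EINVAL\n");
	return 0;
}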
Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin [mpe: Fix compat_sys_mmap2() prototype and offset handling] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-14-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 2 +- arch/powerpc/kernel/sys_ppc32.c | 9 --------- arch/powerpc/kernel/syscalls.c | 16 +++++++++++++--- 3 files changed, 14 insertions(+), 13 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 565b4b1d7d41..6610dc8d078a 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -34,7 +34,7 @@ long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, u32 len_high, u32 len_low); #ifdef CONFIG_COMPAT -unsigned long compat_sys_mmap2(unsigned long addr, size_t len, +long compat_sys_mmap2(unsigned long addr, size_t len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index d961634976d8..776ae7565fc5 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -48,14 +47,6 @@ #include #include -unsigned long compat_sys_mmap2(unsigned long addr, size_t len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - /* This should remain 12 even if PAGE_SIZE changes */ - return sys_mmap(addr, len, prot, flags, fd, pgoff << 12); -} - compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, u32 reg6, u32 pos1, u32 pos2) { diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index a04c97faa21a..e84a523cd65e 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -36,9 +36,9 @@ #include #include -static inline long do_mmap2(unsigned long addr, size_t len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long off, int shift) +static long do_mmap2(unsigned long addr, size_t len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long off, int shift) { if (!arch_validate_prot(prot, addr)) return -EINVAL; @@ -56,6 +56,16 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len, return do_mmap2(addr, len, prot, flags, fd, pgoff, PAGE_SHIFT-12); } +#ifdef CONFIG_COMPAT +COMPAT_SYSCALL_DEFINE6(mmap2, + unsigned long, addr, size_t, len, + unsigned long, prot, unsigned long, flags, + unsigned long, fd, unsigned long, off_4k) +{ + return do_mmap2(addr, len, prot, flags, fd, off_4k, PAGE_SHIFT-12); +} +#endif + SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, off_t, offset) -- cgit v1.2.3-70-g09d2 From dec20c50df79cadaff17e964ef7f622491a52134 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:55 +1000 Subject: powerpc: Adopt SYSCALL_DEFINE for arch-specific syscall handlers Arch-specific implementations of syscall handlers are currently used over generic implementations for the following reasons: 1. Semantics unique to powerpc 2. Compatibility syscalls require 'argument padding' to comply with 64-bit argument convention in ELF32 abi. 3. Parameter types or order is different in other architectures. 
These syscall handlers have been defined prior to this patch series without invoking the SYSCALL_DEFINE or COMPAT_SYSCALL_DEFINE macros, with custom input and output types. We remove every such direct definition in favour of the aforementioned macros. Also update syscall.tbl to refer to the symbol names generated by each of these macros. Since ppc64_personality can be called by both 64-bit and 32-bit binaries through compatibility, we must generate both compat_sys_ and sys_ symbols for this handler. As an aside, a number of architectures including arm and powerpc agree on an alternative argument order and numbering for most of these arch-specific handlers. A future patch series may allow asm/unistd.h to signal through its defines that a generic implementation of these syscall handlers with the correct calling convention be emitted, through the __ARCH_WANT_COMPAT_SYS_... convention. Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-16-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 10 +++--- arch/powerpc/kernel/sys_ppc32.c | 38 ++++++++++++++-------- arch/powerpc/kernel/syscalls.c | 17 +++++++--- arch/powerpc/kernel/syscalls/syscall.tbl | 22 ++++++------- tools/perf/arch/powerpc/entry/syscalls/syscall.tbl | 22 ++++++------- 5 files changed, 64 insertions(+), 45 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 6610dc8d078a..8e1787ff6c93 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -28,10 +28,10 @@ long sys_mmap(unsigned long addr, size_t len, long sys_mmap2(unsigned long addr, size_t len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); -long ppc64_personality(unsigned long personality); +long sys_ppc64_personality(unsigned long personality); long sys_rtas(struct rtas_args __user *uargs); -long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, - u32 len_high, u32 len_low); +long sys_ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, + u32 len_high, u32 len_low); #ifdef CONFIG_COMPAT long compat_sys_mmap2(unsigned long addr, size_t len, @@ -52,8 +52,8 @@ int compat_sys_truncate64(const char __user *path, u32 reg4, int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, unsigned long len2); -long ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2, - size_t len, int advice); +long compat_sys_ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2, + size_t len, int advice); long compat_sys_sync_file_range2(int fd, unsigned int flags, unsigned int offset1, unsigned int offset2, diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index 776ae7565fc5..dcc3c9fd4cfd 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -47,45 +47,55 @@ #include #include -compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, - u32 reg6, u32 pos1, u32 pos2) +COMPAT_SYSCALL_DEFINE6(ppc_pread64, + unsigned int, fd, + char __user *, ubuf, compat_size_t, count, + u32, reg6, u32, pos1, u32, pos2) { return ksys_pread64(fd, ubuf, count, merge_64(pos1, pos2)); } -compat_ssize_t compat_sys_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count, - u32 reg6, u32 pos1, u32 pos2) +COMPAT_SYSCALL_DEFINE6(ppc_pwrite64, + unsigned int, fd, + const char
__user *, ubuf, compat_size_t, count, + u32, reg6, u32, pos1, u32, pos2) { return ksys_pwrite64(fd, ubuf, count, merge_64(pos1, pos2)); } -compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offset1, u32 offset2, u32 count) +COMPAT_SYSCALL_DEFINE5(ppc_readahead, + int, fd, u32, r4, + u32, offset1, u32, offset2, u32, count) { return ksys_readahead(fd, merge_64(offset1, offset2), count); } -int compat_sys_truncate64(const char __user * path, u32 reg4, - unsigned long len1, unsigned long len2) +COMPAT_SYSCALL_DEFINE4(ppc_truncate64, + const char __user *, path, u32, reg4, + unsigned long, len1, unsigned long, len2) { return ksys_truncate(path, merge_64(len1, len2)); } -int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, - unsigned long len2) +COMPAT_SYSCALL_DEFINE4(ppc_ftruncate64, + unsigned int, fd, u32, reg4, + unsigned long, len1, unsigned long, len2) { return ksys_ftruncate(fd, merge_64(len1, len2)); } -long ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2, - size_t len, int advice) +COMPAT_SYSCALL_DEFINE6(ppc32_fadvise64, + int, fd, u32, unused, u32, offset1, u32, offset2, + size_t, len, int, advice) { return ksys_fadvise64_64(fd, merge_64(offset1, offset2), len, advice); } -long compat_sys_sync_file_range2(int fd, unsigned int flags, - unsigned offset1, unsigned offset2, - unsigned nbytes1, unsigned nbytes2) +COMPAT_SYSCALL_DEFINE6(ppc_sync_file_range2, + int, fd, unsigned int, flags, + unsigned int, offset1, unsigned int, offset2, + unsigned int, nbytes1, unsigned int, nbytes2) { loff_t offset = merge_64(offset1, offset2); loff_t nbytes = merge_64(nbytes1, nbytes2); diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index 8d4b7f6d7cf3..68ebb23a5af4 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -86,14 +86,23 @@ static long do_ppc64_personality(unsigned long personality) ret = (ret & ~PER_MASK) | PER_LINUX; return ret; } -long ppc64_personality(unsigned long personality) + +SYSCALL_DEFINE1(ppc64_personality, unsigned long, personality) { return do_ppc64_personality(personality); } -#endif -long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, - u32 len_high, u32 len_low) +#ifdef CONFIG_COMPAT +COMPAT_SYSCALL_DEFINE1(ppc64_personality, unsigned long, personality) +{ + return do_ppc64_personality(personality); +} +#endif /* CONFIG_COMPAT */ +#endif /* CONFIG_PPC64 */ + +SYSCALL_DEFINE6(ppc_fadvise64_64, + int, fd, int, advice, u32, offset_high, u32, offset_low, + u32, len_high, u32, len_low) { return ksys_fadvise64_64(fd, merge_64(offset_high, offset_low), merge_64(len_high, len_low), advice); diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index 64f27cbbdd2c..2bca64f96164 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -178,9 +178,9 @@ 133 common fchdir sys_fchdir 134 common bdflush sys_ni_syscall 135 common sysfs sys_sysfs -136 32 personality sys_personality ppc64_personality -136 64 personality ppc64_personality -136 spu personality ppc64_personality +136 32 personality sys_personality compat_sys_ppc64_personality +136 64 personality sys_ppc64_personality +136 spu personality sys_ppc64_personality 137 common afs_syscall sys_ni_syscall 138 common setfsuid sys_setfsuid 139 common setfsgid sys_setfsgid @@ -228,8 +228,8 @@ 176 64 rt_sigtimedwait sys_rt_sigtimedwait 177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 178 nospu rt_sigsuspend 
sys_rt_sigsuspend compat_sys_rt_sigsuspend -179 common pread64 sys_pread64 compat_sys_pread64 -180 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +179 common pread64 sys_pread64 compat_sys_ppc_pread64 +180 common pwrite64 sys_pwrite64 compat_sys_ppc_pwrite64 181 common chown sys_chown 182 common getcwd sys_getcwd 183 common capget sys_capget @@ -242,10 +242,10 @@ 188 common putpmsg sys_ni_syscall 189 nospu vfork sys_vfork 190 common ugetrlimit sys_getrlimit compat_sys_getrlimit -191 common readahead sys_readahead compat_sys_readahead +191 common readahead sys_readahead compat_sys_ppc_readahead 192 32 mmap2 sys_mmap2 compat_sys_mmap2 -193 32 truncate64 sys_truncate64 compat_sys_truncate64 -194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +193 32 truncate64 sys_truncate64 compat_sys_ppc_truncate64 +194 32 ftruncate64 sys_ftruncate64 compat_sys_ppc_ftruncate64 195 32 stat64 sys_stat64 196 32 lstat64 sys_lstat64 197 32 fstat64 sys_fstat64 @@ -288,7 +288,7 @@ 230 common io_submit sys_io_submit compat_sys_io_submit 231 common io_cancel sys_io_cancel 232 nospu set_tid_address sys_set_tid_address -233 common fadvise64 sys_fadvise64 ppc32_fadvise64 +233 common fadvise64 sys_fadvise64 compat_sys_ppc32_fadvise64 234 nospu exit_group sys_exit_group 235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie 236 common epoll_create sys_epoll_create @@ -323,7 +323,7 @@ 251 spu utimes sys_utimes 252 common statfs64 sys_statfs64 compat_sys_statfs64 253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 -254 32 fadvise64_64 ppc_fadvise64_64 +254 32 fadvise64_64 sys_ppc_fadvise64_64 254 spu fadvise64_64 sys_ni_syscall 255 common rtas sys_rtas 256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall @@ -390,7 +390,7 @@ 305 common signalfd sys_signalfd compat_sys_signalfd 306 common timerfd_create sys_timerfd_create 307 common eventfd sys_eventfd -308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2 +308 common sync_file_range2 sys_sync_file_range2 compat_sys_ppc_sync_file_range2 309 nospu fallocate sys_fallocate compat_sys_fallocate 310 nospu subpage_prot sys_subpage_prot 311 32 timerfd_settime sys_timerfd_settime32 diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index 64f27cbbdd2c..2bca64f96164 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -178,9 +178,9 @@ 133 common fchdir sys_fchdir 134 common bdflush sys_ni_syscall 135 common sysfs sys_sysfs -136 32 personality sys_personality ppc64_personality -136 64 personality ppc64_personality -136 spu personality ppc64_personality +136 32 personality sys_personality compat_sys_ppc64_personality +136 64 personality sys_ppc64_personality +136 spu personality sys_ppc64_personality 137 common afs_syscall sys_ni_syscall 138 common setfsuid sys_setfsuid 139 common setfsgid sys_setfsgid @@ -228,8 +228,8 @@ 176 64 rt_sigtimedwait sys_rt_sigtimedwait 177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend -179 common pread64 sys_pread64 compat_sys_pread64 -180 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +179 common pread64 sys_pread64 compat_sys_ppc_pread64 +180 common pwrite64 sys_pwrite64 compat_sys_ppc_pwrite64 181 common chown sys_chown 182 common getcwd sys_getcwd 183 common capget sys_capget @@ -242,10 +242,10 @@ 188 common putpmsg sys_ni_syscall 189 nospu vfork sys_vfork 190 common 
ugetrlimit sys_getrlimit compat_sys_getrlimit -191 common readahead sys_readahead compat_sys_readahead +191 common readahead sys_readahead compat_sys_ppc_readahead 192 32 mmap2 sys_mmap2 compat_sys_mmap2 -193 32 truncate64 sys_truncate64 compat_sys_truncate64 -194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +193 32 truncate64 sys_truncate64 compat_sys_ppc_truncate64 +194 32 ftruncate64 sys_ftruncate64 compat_sys_ppc_ftruncate64 195 32 stat64 sys_stat64 196 32 lstat64 sys_lstat64 197 32 fstat64 sys_fstat64 @@ -288,7 +288,7 @@ 230 common io_submit sys_io_submit compat_sys_io_submit 231 common io_cancel sys_io_cancel 232 nospu set_tid_address sys_set_tid_address -233 common fadvise64 sys_fadvise64 ppc32_fadvise64 +233 common fadvise64 sys_fadvise64 compat_sys_ppc32_fadvise64 234 nospu exit_group sys_exit_group 235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie 236 common epoll_create sys_epoll_create @@ -323,7 +323,7 @@ 251 spu utimes sys_utimes 252 common statfs64 sys_statfs64 compat_sys_statfs64 253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 -254 32 fadvise64_64 ppc_fadvise64_64 +254 32 fadvise64_64 sys_ppc_fadvise64_64 254 spu fadvise64_64 sys_ni_syscall 255 common rtas sys_rtas 256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall @@ -390,7 +390,7 @@ 305 common signalfd sys_signalfd compat_sys_signalfd 306 common timerfd_create sys_timerfd_create 307 common eventfd sys_eventfd -308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2 +308 common sync_file_range2 sys_sync_file_range2 compat_sys_ppc_sync_file_range2 309 nospu fallocate sys_fallocate compat_sys_fallocate 310 nospu subpage_prot sys_subpage_prot 311 32 timerfd_settime sys_timerfd_settime32 -- cgit v1.2.3-70-g09d2 From 8cd1def4b8e4a592949509fac443e850da8428d0 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:56 +1000 Subject: powerpc: Include all arch-specific syscall prototypes Forward declare all syscall handler prototypes where a generic prototype is not provided in either linux/syscalls.h or linux/compat.h in asm/syscalls.h. This is required for compile-time type-checking for syscall handlers, which is implemented later in this series. 32-bit compatibility syscall handlers are expressed in terms of types in ppc32.h. Expose this header globally. Signed-off-by: Rohan McLure Acked-by: Nicholas Piggin [mpe: Use standard include guard naming for syscalls_32.h] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-17-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 97 ++++++++++++++++++++++++++-------- arch/powerpc/include/asm/syscalls_32.h | 60 +++++++++++++++++++++ arch/powerpc/kernel/ppc32.h | 60 --------------------- arch/powerpc/kernel/signal_32.c | 2 +- arch/powerpc/perf/callchain_32.c | 2 +- 5 files changed, 137 insertions(+), 84 deletions(-) create mode 100644 arch/powerpc/include/asm/syscalls_32.h delete mode 100644 arch/powerpc/kernel/ppc32.h (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 8e1787ff6c93..7911738ad4a9 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -8,6 +8,14 @@ #include #include +#ifdef CONFIG_PPC64 +#include +#endif +#include +#include + +struct rtas_args; + /* * long long munging: * The 32 bit ABI passes long longs in an odd even register pair. 
@@ -20,44 +28,89 @@ #define merge_64(high, low) (((u64)high << 32) | low) #endif -struct rtas_args; +long sys_ni_syscall(void); + +/* + * PowerPC architecture-specific syscalls + */ + +long sys_rtas(struct rtas_args __user *uargs); +#ifdef CONFIG_PPC64 +long sys_ppc64_personality(unsigned long personality); +#ifdef CONFIG_COMPAT +long compat_sys_ppc64_personality(unsigned long personality); +#endif /* CONFIG_COMPAT */ +#endif /* CONFIG_PPC64 */ + +long sys_swapcontext(struct ucontext __user *old_ctx, + struct ucontext __user *new_ctx, long ctx_size); long sys_mmap(unsigned long addr, size_t len, unsigned long prot, unsigned long flags, unsigned long fd, off_t offset); long sys_mmap2(unsigned long addr, size_t len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); -long sys_ppc64_personality(unsigned long personality); -long sys_rtas(struct rtas_args __user *uargs); -long sys_ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, - u32 len_high, u32 len_low); +long sys_switch_endian(void); -#ifdef CONFIG_COMPAT -long compat_sys_mmap2(unsigned long addr, size_t len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff); - -compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, - u32 reg6, u32 pos1, u32 pos2); +#ifdef CONFIG_PPC32 +long sys_sigreturn(void); +long sys_debug_setcontext(struct ucontext __user *ctx, int ndbg, + struct sig_dbg_op __user *dbg); +#endif -compat_ssize_t compat_sys_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count, - u32 reg6, u32 pos1, u32 pos2); +long sys_rt_sigreturn(void); -compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offset1, u32 offset2, u32 count); +long sys_subpage_prot(unsigned long addr, + unsigned long len, u32 __user *map); -int compat_sys_truncate64(const char __user *path, u32 reg4, - unsigned long len1, unsigned long len2); +#ifdef CONFIG_COMPAT +long compat_sys_swapcontext(struct ucontext32 __user *old_ctx, + struct ucontext32 __user *new_ctx, + int ctx_size); +long compat_sys_old_getrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +long compat_sys_sigreturn(void); +long compat_sys_rt_sigreturn(void); +#endif /* CONFIG_COMPAT */ -int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, - unsigned long len2); +/* + * Architecture specific signatures required by long long munging: + * The 32 bit ABI passes long longs in an odd even register pair. + * The following signatures provide a machine long parameter for + * each register that will be supplied. The implementation is + * responsible for combining parameter pairs. 
+ */ +#ifdef CONFIG_COMPAT +long compat_sys_mmap2(unsigned long addr, size_t len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +long compat_sys_ppc_pread64(unsigned int fd, + char __user *ubuf, compat_size_t count, + u32 reg6, u32 pos1, u32 pos2); +long compat_sys_ppc_pwrite64(unsigned int fd, + const char __user *ubuf, compat_size_t count, + u32 reg6, u32 pos1, u32 pos2); +long compat_sys_ppc_readahead(int fd, u32 r4, + u32 offset1, u32 offset2, u32 count); +long compat_sys_ppc_truncate64(const char __user *path, u32 reg4, + unsigned long len1, unsigned long len2); +long compat_sys_ppc_ftruncate64(unsigned int fd, u32 reg4, + unsigned long len1, unsigned long len2); long compat_sys_ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2, size_t len, int advice); +long compat_sys_ppc_sync_file_range2(int fd, unsigned int flags, + unsigned int offset1, + unsigned int offset2, + unsigned int nbytes1, + unsigned int nbytes2); +#endif /* CONFIG_COMPAT */ -long compat_sys_sync_file_range2(int fd, unsigned int flags, - unsigned int offset1, unsigned int offset2, - unsigned int nbytes1, unsigned int nbytes2); +#if defined(CONFIG_PPC32) || defined(CONFIG_COMPAT) +long sys_ppc_fadvise64_64(int fd, int advice, + u32 offset_high, u32 offset_low, + u32 len_high, u32 len_low); #endif #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/syscalls_32.h b/arch/powerpc/include/asm/syscalls_32.h new file mode 100644 index 000000000000..749255568be9 --- /dev/null +++ b/arch/powerpc/include/asm/syscalls_32.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ASM_POWERPC_SYSCALLS_32_H +#define _ASM_POWERPC_SYSCALLS_32_H + +#include +#include +#include + +/* + * Data types and macros for providing 32b PowerPC support. + */ + +/* These are here to support 32-bit syscalls on a 64-bit kernel. */ + +struct pt_regs32 { + unsigned int gpr[32]; + unsigned int nip; + unsigned int msr; + unsigned int orig_gpr3; /* Used for restarting system calls */ + unsigned int ctr; + unsigned int link; + unsigned int xer; + unsigned int ccr; + unsigned int mq; /* 601 only (not used at present) */ + unsigned int trap; /* Reason for being here */ + unsigned int dar; /* Fault registers */ + unsigned int dsisr; + unsigned int result; /* Result of a system call */ +}; + +struct sigcontext32 { + unsigned int _unused[4]; + int signal; + compat_uptr_t handler; + unsigned int oldmask; + compat_uptr_t regs; /* 4 byte pointer to the pt_regs32 structure. 
*/ +}; + +struct mcontext32 { + elf_gregset_t32 mc_gregs; + elf_fpregset_t mc_fregs; + unsigned int mc_pad[2]; + elf_vrregset_t32 mc_vregs __attribute__((__aligned__(16))); + elf_vsrreghalf_t32 mc_vsregs __attribute__((__aligned__(16))); +}; + +struct ucontext32 { + unsigned int uc_flags; + unsigned int uc_link; + compat_stack_t uc_stack; + int uc_pad[7]; + compat_uptr_t uc_regs; /* points to uc_mcontext field */ + compat_sigset_t uc_sigmask; /* mask last for extensibility */ + /* glibc has 1024-bit signal masks, ours are 64-bit */ + int uc_maskext[30]; + int uc_pad2[3]; + struct mcontext32 uc_mcontext; +}; + +#endif // _ASM_POWERPC_SYSCALLS_32_H diff --git a/arch/powerpc/kernel/ppc32.h b/arch/powerpc/kernel/ppc32.h deleted file mode 100644 index 2346f8c7ff2e..000000000000 --- a/arch/powerpc/kernel/ppc32.h +++ /dev/null @@ -1,60 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef _PPC64_PPC32_H -#define _PPC64_PPC32_H - -#include -#include -#include - -/* - * Data types and macros for providing 32b PowerPC support. - */ - -/* These are here to support 32-bit syscalls on a 64-bit kernel. */ - -struct pt_regs32 { - unsigned int gpr[32]; - unsigned int nip; - unsigned int msr; - unsigned int orig_gpr3; /* Used for restarting system calls */ - unsigned int ctr; - unsigned int link; - unsigned int xer; - unsigned int ccr; - unsigned int mq; /* 601 only (not used at present) */ - unsigned int trap; /* Reason for being here */ - unsigned int dar; /* Fault registers */ - unsigned int dsisr; - unsigned int result; /* Result of a system call */ -}; - -struct sigcontext32 { - unsigned int _unused[4]; - int signal; - compat_uptr_t handler; - unsigned int oldmask; - compat_uptr_t regs; /* 4 byte pointer to the pt_regs32 structure. */ -}; - -struct mcontext32 { - elf_gregset_t32 mc_gregs; - elf_fpregset_t mc_fregs; - unsigned int mc_pad[2]; - elf_vrregset_t32 mc_vregs __attribute__((__aligned__(16))); - elf_vsrreghalf_t32 mc_vsregs __attribute__((__aligned__(16))); -}; - -struct ucontext32 { - unsigned int uc_flags; - unsigned int uc_link; - compat_stack_t uc_stack; - int uc_pad[7]; - compat_uptr_t uc_regs; /* points to uc_mcontext field */ - compat_sigset_t uc_sigmask; /* mask last for extensibility */ - /* glibc has 1024-bit signal masks, ours are 64-bit */ - int uc_maskext[30]; - int uc_pad2[3]; - struct mcontext32 uc_mcontext; -}; - -#endif /* _PPC64_PPC32_H */ diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 157a7403e3eb..c114c7f25645 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -43,7 +43,7 @@ #include #include #ifdef CONFIG_PPC64 -#include "ppc32.h" +#include #include #else #include diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c index b83c47b7947f..ea8cfe3806dc 100644 --- a/arch/powerpc/perf/callchain_32.c +++ b/arch/powerpc/perf/callchain_32.c @@ -19,7 +19,7 @@ #include "callchain.h" #ifdef CONFIG_PPC64 -#include "../kernel/ppc32.h" +#include #else /* CONFIG_PPC64 */ #define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE -- cgit v1.2.3-70-g09d2 From 8640de0dee49cec50040d9845a2bc96fd15adc9e Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:58 +1000 Subject: powerpc: Use common syscall handler type Cause syscall handlers to be typed as follows when called indirectly throughout the kernel. This is to allow for better type checking. 
typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); Since both 32 and 64-bit abis allow for at least the first six machine-word length parameters to a function to be passed by registers, even handlers which admit fewer than six parameters may be viewed as having the above type. Coercing syscalls to syscall_fn requires a cast to void* to avoid -Wcast-function-type. Fixup comparisons in VDSO to avoid pointer-integer comparison. Introduce explicit cast on systems with SPUs. Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-19-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscall.h | 7 +++++-- arch/powerpc/include/asm/syscalls.h | 1 + arch/powerpc/kernel/syscall.c | 2 -- arch/powerpc/kernel/systbl.c | 11 ++++++++--- arch/powerpc/kernel/vdso.c | 4 ++-- arch/powerpc/platforms/cell/spu_callbacks.c | 6 +++--- 6 files changed, 19 insertions(+), 12 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index 25fc8ad9a27a..d2a8dfd5de33 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h @@ -14,9 +14,12 @@ #include #include +typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long); + /* ftrace syscalls requires exporting the sys_call_table */ -extern const unsigned long sys_call_table[]; -extern const unsigned long compat_sys_call_table[]; +extern const syscall_fn sys_call_table[]; +extern const syscall_fn compat_sys_call_table[]; static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 7911738ad4a9..211bb8393dee 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -8,6 +8,7 @@ #include #include +#include #ifdef CONFIG_PPC64 #include #endif diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c index 64102a64fd84..9875486f6168 100644 --- a/arch/powerpc/kernel/syscall.c +++ b/arch/powerpc/kernel/syscall.c @@ -12,8 +12,6 @@ #include -typedef long (*syscall_fn)(long, long, long, long, long, long); - /* Has to run notrace because it is entered not completely "reconciled" */ notrace long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, diff --git a/arch/powerpc/kernel/systbl.c b/arch/powerpc/kernel/systbl.c index ce52bd2ec292..d64dfafc2e5e 100644 --- a/arch/powerpc/kernel/systbl.c +++ b/arch/powerpc/kernel/systbl.c @@ -16,9 +16,14 @@ #include #define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry) -#define __SYSCALL(nr, entry) [nr] = (unsigned long) &entry, -const unsigned long sys_call_table[] = { +/* + * Coerce syscall handlers with arbitrary parameters to common type + * requires cast to void* to avoid -Wcast-function-type. 
+ */ +#define __SYSCALL(nr, entry) [nr] = (void *) entry, + +const syscall_fn sys_call_table[] = { #ifdef CONFIG_PPC64 #include #else @@ -29,7 +34,7 @@ const unsigned long sys_call_table[] = { #ifdef CONFIG_COMPAT #undef __SYSCALL_WITH_COMPAT #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) -const unsigned long compat_sys_call_table[] = { +const syscall_fn compat_sys_call_table[] = { #include }; #endif /* CONFIG_COMPAT */ diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index bf9574ec26ce..fcca06d200d3 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -304,10 +304,10 @@ static void __init vdso_setup_syscall_map(void) unsigned int i; for (i = 0; i < NR_syscalls; i++) { - if (sys_call_table[i] != (unsigned long)&sys_ni_syscall) + if (sys_call_table[i] != (void *)&sys_ni_syscall) vdso_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f); if (IS_ENABLED(CONFIG_COMPAT) && - compat_sys_call_table[i] != (unsigned long)&sys_ni_syscall) + compat_sys_call_table[i] != (void *)&sys_ni_syscall) vdso_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f); } } diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c index fe0d8797a00a..e780c14c5733 100644 --- a/arch/powerpc/platforms/cell/spu_callbacks.c +++ b/arch/powerpc/platforms/cell/spu_callbacks.c @@ -34,15 +34,15 @@ * mbind, mq_open, ipc, ... */ -static void *spu_syscall_table[] = { +static const syscall_fn spu_syscall_table[] = { #define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry) -#define __SYSCALL(nr, entry) [nr] = entry, +#define __SYSCALL(nr, entry) [nr] = (void *) entry, #include }; long spu_sys_callback(struct spu_syscall_block *s) { - long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6); + syscall_fn syscall; if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) { pr_debug("%s: invalid syscall #%lld", __func__, s->nr_ret); -- cgit v1.2.3-70-g09d2 From f8971c627b14040e533768985a99f4fd6ffa420f Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:56:00 +1000 Subject: powerpc: Change system_call_exception calling convention Change system_call_exception arguments to pass a pointer to a stack frame containing caller state, as well as the original r0, which determines the number of the syscall. This has been observed to yield improved performance compared to passing them by registers, circumventing the need to allocate a stack frame.
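A rough sketch of the new shape in C (illustrative only, with simplified names; not the kernel code itself):

	struct pt_regs_sketch {
		unsigned long gpr[32];		/* saved user GPRs */
	};

	typedef long (*handler_fn)(unsigned long, unsigned long);
	extern const handler_fn handlers[];	/* hypothetical dispatch table */

	static long handle_syscall(struct pt_regs_sketch *regs, unsigned long r0)
	{
		/* r0 selects the handler; arguments are read from the
		 * register save area instead of arriving in live GPRs. */
		return handlers[r0](regs->gpr[3], regs->gpr[4]);
	}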
Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin [mpe: Retain clearing of high bits of args for compat tasks] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-21-rmclure@linux.ibm.com --- arch/powerpc/include/asm/interrupt.h | 3 +-- arch/powerpc/kernel/entry_32.S | 6 +++--- arch/powerpc/kernel/interrupt_64.S | 20 ++++++++++---------- arch/powerpc/kernel/syscall.c | 18 +++++++++--------- 4 files changed, 23 insertions(+), 24 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index 84a1cdc3204c..0d3405bd55c4 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -665,8 +665,7 @@ static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs) local_irq_enable(); } -long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, - unsigned long r0, struct pt_regs *regs); +long system_call_exception(struct pt_regs *regs, unsigned long r0); notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv); notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs); notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs); diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index e3d43b6e3197..55fcc70f2cc0 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -122,9 +122,9 @@ transfer_to_syscall: SAVE_NVGPRS(r1) kuep_lock - /* Calling convention has r9 = orig r0, r10 = regs */ - addi r10,r1,STACK_FRAME_OVERHEAD - mr r9,r0 + /* Calling convention has r3 = regs, r4 = orig r0 */ + addi r3,r1,STACK_FRAME_OVERHEAD + mr r4,r0 bl system_call_exception ret_from_syscall: diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index 931f984b02b1..90dbd6a8d4da 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -60,7 +60,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) ld r2,PACATOC(r13) mfcr r12 li r11,0 - /* Can we avoid saving r3-r8 in common case? */ + /* Save syscall parameters in r3-r8 */ SAVE_GPRS(3, 8, r1) /* Zero r9-r12, this should only be required when restoring all GPRs */ std r11,GPR9(r1) @@ -77,9 +77,11 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) std r11,_TRAP(r1) std r12,_CCR(r1) std r3,ORIG_GPR3(r1) - addi r10,r1,STACK_FRAME_OVERHEAD + /* Calling convention has r3 = regs, r4 = orig r0 */ + addi r3,r1,STACK_FRAME_OVERHEAD + mr r4,r0 ld r11,exception_marker@toc(r2) - std r11,-16(r10) /* "regshere" marker */ + std r11,-16(r3) /* "regshere" marker */ BEGIN_FTR_SECTION HMT_MEDIUM @@ -94,8 +96,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) * but this is the best we can do. */ - /* Calling convention has r9 = orig r0, r10 = regs */ - mr r9,r0 bl system_call_exception .Lsyscall_vectored_\name\()_exit: @@ -227,7 +227,7 @@ END_BTB_FLUSH_SECTION ld r2,PACATOC(r13) mfcr r12 li r11,0 - /* Can we avoid saving r3-r8 in common case? 
*/ + /* Save syscall parameters in r3-r8 */ SAVE_GPRS(3, 8, r1) /* Zero r9-r12, this should only be required when restoring all GPRs */ std r11,GPR9(r1) @@ -250,9 +250,11 @@ END_BTB_FLUSH_SECTION std r11,_TRAP(r1) std r12,_CCR(r1) std r3,ORIG_GPR3(r1) - addi r10,r1,STACK_FRAME_OVERHEAD + /* Calling convention has r3 = regs, r4 = orig r0 */ + addi r3,r1,STACK_FRAME_OVERHEAD + mr r4,r0 ld r11,exception_marker@toc(r2) - std r11,-16(r10) /* "regshere" marker */ + std r11,-16(r3) /* "regshere" marker */ #ifdef CONFIG_PPC_BOOK3S li r11,1 @@ -273,8 +275,6 @@ END_BTB_FLUSH_SECTION wrteei 1 #endif - /* Calling convention has r9 = orig r0, r10 = regs */ - mr r9,r0 bl system_call_exception .Lsyscall_exit: diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c index 9875486f6168..2c002cbbc676 100644 --- a/arch/powerpc/kernel/syscall.c +++ b/arch/powerpc/kernel/syscall.c @@ -13,10 +13,9 @@ /* Has to run notrace because it is entered not completely "reconciled" */ -notrace long system_call_exception(long r3, long r4, long r5, - long r6, long r7, long r8, - unsigned long r0, struct pt_regs *regs) +notrace long system_call_exception(struct pt_regs *regs, unsigned long r0) { + unsigned long r3, r4, r5, r6, r7, r8; long ret; syscall_fn f; @@ -136,12 +135,6 @@ notrace long system_call_exception(long r3, long r4, long r5, r0 = do_syscall_trace_enter(regs); if (unlikely(r0 >= NR_syscalls)) return regs->gpr[3]; - r3 = regs->gpr[3]; - r4 = regs->gpr[4]; - r5 = regs->gpr[5]; - r6 = regs->gpr[6]; - r7 = regs->gpr[7]; - r8 = regs->gpr[8]; } else if (unlikely(r0 >= NR_syscalls)) { if (unlikely(trap_is_unsupported_scv(regs))) { @@ -152,6 +145,13 @@ notrace long system_call_exception(long r3, long r4, long r5, return -ENOSYS; } + r3 = regs->gpr[3]; + r4 = regs->gpr[4]; + r5 = regs->gpr[5]; + r6 = regs->gpr[6]; + r7 = regs->gpr[7]; + r8 = regs->gpr[8]; + /* May be faster to do array_index_nospec? */ barrier_nospec(); -- cgit v1.2.3-70-g09d2 From 7e92e01b724526b98cbc7f03dd4afa0295780d56 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:56:01 +1000 Subject: powerpc: Provide syscall wrapper Implement syscall wrapper as per s390, x86, arm64. When enabled cause handlers to accept parameters from a stack frame rather than from user scratch register state. This allows for user registers to be safely cleared in order to reduce caller influence on speculation within syscall routine. The wrapper is a macro that emits syscall handler symbols that call into the target handler, obtaining its parameters from a struct pt_regs on the stack. As registers are already saved to the stack prior to calling system_call_exception, it appears that this function is executed more efficiently with the new stack-pointer convention than with parameters passed by registers, avoiding the allocation of a stack frame for this method. On a 32-bit system, we see >20% performance increases on the null_syscall microbenchmark, and on a Power 8 the performance gains amortise the cost of clearing and restoring registers which is implemented at the end of this series, seeing final result of ~5.6% performance improvement on null_syscall. Syscalls are wrapped in this fashion on all platforms except for the Cell processor as this commit does not provide SPU support. This can be quickly fixed in a successive patch, but requires spu_sys_callback to allocate a pt_regs structure to satisfy the wrapped calling convention. 
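A sketch of what the wrapper emits for a hypothetical two-argument syscall (simplified; the real expansion is the __SYSCALL_DEFINEx machinery in syscall_wrapper.h in the diff below):

	#include <asm/ptrace.h>		/* struct pt_regs */

	static long __do_sys_foo(int fd, unsigned long arg);	/* the DEFINE body */

	long __powerpc_sys_foo(const struct pt_regs *regs)
	{
		/* Parameters come from the saved frame, so entry code may
		 * clear the caller's scratch registers before the call. */
		return __do_sys_foo((int)regs->gpr[3], regs->gpr[4]);
	}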
Co-developed-by: Andrew Donnellan Signed-off-by: Andrew Donnellan Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin [mpe: Make incompatible with COMPAT to retain clearing of high bits of args] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-22-rmclure@linux.ibm.com --- arch/powerpc/Kconfig | 1 + arch/powerpc/include/asm/syscall.h | 4 +++ arch/powerpc/include/asm/syscall_wrapper.h | 51 ++++++++++++++++++++++++++++++ arch/powerpc/include/asm/syscalls.h | 24 ++++++++++++-- arch/powerpc/kernel/syscall.c | 34 ++++++++++---------- arch/powerpc/kernel/systbl.c | 7 ++++ arch/powerpc/kernel/vdso.c | 2 ++ 7 files changed, 105 insertions(+), 18 deletions(-) create mode 100644 arch/powerpc/include/asm/syscall_wrapper.h (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 44d98d32e3bf..237c0e071e5e 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -137,6 +137,7 @@ config PPC select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S || PPC_8xx || 40x) && !HIBERNATION select ARCH_HAS_STRICT_KERNEL_RWX if PPC_85xx && !HIBERNATION && !RANDOMIZE_BASE select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX + select ARCH_HAS_SYSCALL_WRAPPER if !SPU_BASE && !COMPAT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE select ARCH_HAS_UBSAN_SANITIZE_ALL diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index d2a8dfd5de33..3dd36c5e334a 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h @@ -14,8 +14,12 @@ #include #include +#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +typedef long (*syscall_fn)(const struct pt_regs *); +#else typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); +#endif /* ftrace syscalls requires exporting the sys_call_table */ extern const syscall_fn sys_call_table[]; diff --git a/arch/powerpc/include/asm/syscall_wrapper.h b/arch/powerpc/include/asm/syscall_wrapper.h new file mode 100644 index 000000000000..75b41b173f7a --- /dev/null +++ b/arch/powerpc/include/asm/syscall_wrapper.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * syscall_wrapper.h - powerpc specific wrappers to syscall definitions + * + * Based on arch/{x86,arm64}/include/asm/syscall_wrapper.h + */ + +#ifndef __ASM_POWERPC_SYSCALL_WRAPPER_H +#define __ASM_POWERPC_SYSCALL_WRAPPER_H + +struct pt_regs; + +#define SC_POWERPC_REGS_TO_ARGS(x, ...) \ + __MAP(x,__SC_ARGS \ + ,,regs->gpr[3],,regs->gpr[4],,regs->gpr[5] \ + ,,regs->gpr[6],,regs->gpr[7],,regs->gpr[8]) + +#define __SYSCALL_DEFINEx(x, name, ...) 
\ + long __powerpc_sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__powerpc_sys##name, ERRNO); \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + long __powerpc_sys##name(const struct pt_regs *regs) \ + { \ + return __se_sys##name(SC_POWERPC_REGS_TO_ARGS(x,__VA_ARGS__)); \ + } \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ + return ret; \ + } \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + long __powerpc_sys_##sname(const struct pt_regs *__unused); \ + ALLOW_ERROR_INJECTION(__powerpc_sys_##sname, ERRNO); \ + long __powerpc_sys_##sname(const struct pt_regs *__unused) + +#define COND_SYSCALL(name) \ + long __powerpc_sys_##name(const struct pt_regs *regs); \ + long __weak __powerpc_sys_##name(const struct pt_regs *regs) \ + { \ + return sys_ni_syscall(); \ + } + +#define SYS_NI(name) SYSCALL_ALIAS(__powerpc_sys_##name, sys_ni_posix_timers); + +#endif // __ASM_POWERPC_SYSCALL_WRAPPER_H diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 211bb8393dee..49bbc3e0733d 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -15,6 +15,12 @@ #include #include +#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +long sys_ni_syscall(void); +#else +long sys_ni_syscall(const struct pt_regs *regs); +#endif + struct rtas_args; /* @@ -29,12 +35,12 @@ struct rtas_args; #define merge_64(high, low) (((u64)high << 32) | low) #endif -long sys_ni_syscall(void); - /* * PowerPC architecture-specific syscalls */ +#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER + long sys_rtas(struct rtas_args __user *uargs); #ifdef CONFIG_PPC64 @@ -114,5 +120,19 @@ long sys_ppc_fadvise64_64(int fd, int advice, u32 len_high, u32 len_low); #endif +#else + +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#define __SYSCALL(nr, entry) \ + long __powerpc_##entry(const struct pt_regs *regs); + +#ifdef CONFIG_PPC64 +#include +#else +#include +#endif /* CONFIG_PPC64 */ + +#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ + #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_SYSCALLS_H */ diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c index 2c002cbbc676..18b9d325395f 100644 --- a/arch/powerpc/kernel/syscall.c +++ b/arch/powerpc/kernel/syscall.c @@ -15,7 +15,6 @@ /* Has to run notrace because it is entered not completely "reconciled" */ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0) { - unsigned long r3, r4, r5, r6, r7, r8; long ret; syscall_fn f; @@ -145,31 +144,34 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0) return -ENOSYS; } - r3 = regs->gpr[3]; - r4 = regs->gpr[4]; - r5 = regs->gpr[5]; - r6 = regs->gpr[6]; - r7 = regs->gpr[7]; - r8 = regs->gpr[8]; - /* May be faster to do array_index_nospec? 
*/ barrier_nospec(); +#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER + // No COMPAT if we have SYSCALL_WRAPPER, see Kconfig + f = (void *)sys_call_table[r0]; + ret = f(regs); +#else if (unlikely(is_compat_task())) { + unsigned long r3, r4, r5, r6, r7, r8; + f = (void *)compat_sys_call_table[r0]; - r3 &= 0x00000000ffffffffULL; - r4 &= 0x00000000ffffffffULL; - r5 &= 0x00000000ffffffffULL; - r6 &= 0x00000000ffffffffULL; - r7 &= 0x00000000ffffffffULL; - r8 &= 0x00000000ffffffffULL; + r3 = regs->gpr[3] & 0x00000000ffffffffULL; + r4 = regs->gpr[4] & 0x00000000ffffffffULL; + r5 = regs->gpr[5] & 0x00000000ffffffffULL; + r6 = regs->gpr[6] & 0x00000000ffffffffULL; + r7 = regs->gpr[7] & 0x00000000ffffffffULL; + r8 = regs->gpr[8] & 0x00000000ffffffffULL; + ret = f(r3, r4, r5, r6, r7, r8); } else { f = (void *)sys_call_table[r0]; - } - ret = f(r3, r4, r5, r6, r7, r8); + ret = f(regs->gpr[3], regs->gpr[4], regs->gpr[5], + regs->gpr[6], regs->gpr[7], regs->gpr[8]); + } +#endif /* * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(), diff --git a/arch/powerpc/kernel/systbl.c b/arch/powerpc/kernel/systbl.c index d64dfafc2e5e..9d7c5a596171 100644 --- a/arch/powerpc/kernel/systbl.c +++ b/arch/powerpc/kernel/systbl.c @@ -15,13 +15,20 @@ #include #include +#undef __SYSCALL_WITH_COMPAT #define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry) +#undef __SYSCALL +#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +#define __SYSCALL(nr, entry) [nr] = __powerpc_##entry, +#define __powerpc_sys_ni_syscall sys_ni_syscall +#else /* * Coerce syscall handlers with arbitrary parameters to common type * requires cast to void* to avoid -Wcast-function-type. */ #define __SYSCALL(nr, entry) [nr] = (void *) entry, +#endif const syscall_fn sys_call_table[] = { #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index fcca06d200d3..e1f36fd61db3 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -39,6 +39,8 @@ extern char vdso32_start, vdso32_end; extern char vdso64_start, vdso64_end; +long sys_ni_syscall(void); + /* * The vdso data page (aka. systemcfg for old ppc64 fans) is here. * Once the early boot kernel code no longer needs to muck around -- cgit v1.2.3-70-g09d2 From b19448fe846baad689ff51a991ebfc74b4b5e0a8 Mon Sep 17 00:00:00 2001 From: Pali Rohár Date: Tue, 23 Aug 2022 01:15:01 +0200 Subject: powerpc: Add support for early debugging via Serial 16550 console MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently powerpc early debugging provides a lot of platform-specific options, but does not support a standard UART / serial 16550 console. The legacy_serial.c code can later register a UART from the device tree as an early debug console, but that happens only after the machine description code has finished, not early in boot, so the current code is unsuitable for truly early debugging via UART. Add support for a new early debugging option, CONFIG_PPC_EARLY_DEBUG_16550, which enables a Serial 16550 console at the address given by the new option CONFIG_PPC_EARLY_DEBUG_16550_PHYSADDR, with the register stride given by CONFIG_PPC_EARLY_DEBUG_16550_STRIDE. With this change it is possible to debug the powerpc machine description code. For example, this early debugging code can also print the "No suitable machine description found" error on the serial console, which occurs before the legacy_serial.c code runs.
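As a usage sketch, a hypothetical .config fragment for a board whose 16550 UART sits at physical address 0xffe04500 (the address and stride here are illustrative and must match the real hardware):

	CONFIG_PPC_UDBG_16550=y
	CONFIG_PPC_EARLY_DEBUG=y
	CONFIG_PPC_EARLY_DEBUG_16550=y
	CONFIG_PPC_EARLY_DEBUG_16550_PHYSADDR=0xffe04500
	CONFIG_PPC_EARLY_DEBUG_16550_STRIDE=1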
Signed-off-by: Pali Rohár Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220822231501.16827-1-pali@kernel.org --- arch/powerpc/Kconfig.debug | 15 +++++++++++++++ arch/powerpc/include/asm/udbg.h | 1 + arch/powerpc/kernel/udbg.c | 2 ++ arch/powerpc/kernel/udbg_16550.c | 33 +++++++++++++++++++++++++++++++++ 4 files changed, 51 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index ae727d4218b9..6aaf8dc60610 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -283,6 +283,12 @@ config PPC_EARLY_DEBUG_MEMCONS This console provides input and output buffers stored within the kernel BSS and should be safe to select on any system. A debugger can then be used to read kernel output or send input to the console. + +config PPC_EARLY_DEBUG_16550 + bool "Serial 16550" + depends on PPC_UDBG_16550 + help + Select this to enable early debugging via Serial 16550 console endchoice config PPC_MEMCONS_OUTPUT_SIZE @@ -354,6 +360,15 @@ config PPC_EARLY_DEBUG_CPM_ADDR platform probing is done, all platforms selected must share the same address. +config PPC_EARLY_DEBUG_16550_PHYSADDR + hex "Early debug Serial 16550 physical address" + depends on PPC_EARLY_DEBUG_16550 + +config PPC_EARLY_DEBUG_16550_STRIDE + int "Early debug Serial 16550 stride" + depends on PPC_EARLY_DEBUG_16550 + default 1 + config FAIL_IOMMU bool "Fault-injection capability for IOMMU" depends on FAULT_INJECTION diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index 524c2085070f..88add5593e6f 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h @@ -52,6 +52,7 @@ extern void __init udbg_init_ehv_bc(void); extern void __init udbg_init_ps3gelic(void); extern void __init udbg_init_debug_opal_raw(void); extern void __init udbg_init_debug_opal_hvsi(void); +extern void __init udbg_init_debug_16550(void); #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_UDBG_H */ diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index b1544b2f6321..92b3fc258d11 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -67,6 +67,8 @@ void __init udbg_early_init(void) udbg_init_debug_opal_raw(); #elif defined(CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI) udbg_init_debug_opal_hvsi(); +#elif defined(CONFIG_PPC_EARLY_DEBUG_16550) + udbg_init_debug_16550(); #endif #ifdef CONFIG_PPC_EARLY_DEBUG diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index ddfbc74bf85f..74ddf836f7a2 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -8,6 +8,7 @@ #include #include #include +#include extern u8 real_readb(volatile u8 __iomem *addr); extern void real_writeb(u8 data, volatile u8 __iomem *addr); @@ -296,3 +297,35 @@ void __init udbg_init_40x_realmode(void) } #endif /* CONFIG_PPC_EARLY_DEBUG_40x */ + +#ifdef CONFIG_PPC_EARLY_DEBUG_16550 + +static void __iomem *udbg_uart_early_addr; + +void __init udbg_init_debug_16550(void) +{ + udbg_uart_early_addr = early_ioremap(CONFIG_PPC_EARLY_DEBUG_16550_PHYSADDR, 0x1000); + udbg_uart_init_mmio(udbg_uart_early_addr, CONFIG_PPC_EARLY_DEBUG_16550_STRIDE); +} + +static int __init udbg_init_debug_16550_ioremap(void) +{ + void __iomem *addr; + + if (!udbg_uart_early_addr) + return 0; + + addr = ioremap(CONFIG_PPC_EARLY_DEBUG_16550_PHYSADDR, 0x1000); + if (WARN_ON(!addr)) + return -ENOMEM; + + udbg_uart_init_mmio(addr, CONFIG_PPC_EARLY_DEBUG_16550_STRIDE); + early_iounmap(udbg_uart_early_addr, 
0x1000); + udbg_uart_early_addr = NULL; + + return 0; +} + +early_initcall(udbg_init_debug_16550_ioremap); + +#endif /* CONFIG_PPC_EARLY_DEBUG_16550 */ -- cgit v1.2.3-70-g09d2 From dabeb572adf24bbd7cb21d1cc4d118bdf2c2ab74 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 20 Sep 2022 22:22:58 +1000 Subject: powerpc: add ISA v3.0 / v3.1 wait opcode macro The wait instruction encoding changed between ISA v2.07 and ISA v3.0. In v3.1 the instruction gained a new field. Update the PPC_WAIT macro to the current encoding. Rename the older incompatible one with a _v203 suffix as it was introduced in v2.03 (the WC field was introduced in v2.07 but the kernel only uses WC=0). Reviewed-by: Segher Boessenkool Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220920122259.363092-1-npiggin@gmail.com --- arch/powerpc/include/asm/ppc-opcode.h | 7 +++++-- arch/powerpc/kernel/idle_64e.S | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index c6d724104ed1..21e33e46f4b8 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -330,6 +330,7 @@ #define __PPC_XSP(s) ((((s) & 0x1e) | (((s) >> 5) & 0x1)) << 21) #define __PPC_XTP(s) __PPC_XSP(s) #define __PPC_T_TLB(t) (((t) & 0x3) << 21) +#define __PPC_PL(p) (((p) & 0x3) << 16) #define __PPC_WC(w) (((w) & 0x3) << 21) #define __PPC_WS(w) (((w) & 0x1f) << 11) #define __PPC_SH(s) __PPC_WS(s) @@ -388,7 +389,8 @@ #define PPC_RAW_RFDI (0x4c00004e) #define PPC_RAW_RFMCI (0x4c00004c) #define PPC_RAW_TLBILX(t, a, b) (0x7c000024 | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_RAW_WAIT(w) (0x7c00007c | __PPC_WC(w)) +#define PPC_RAW_WAIT_v203 (0x7c00007c) +#define PPC_RAW_WAIT(w, p) (0x7c00003c | __PPC_WC(w) | __PPC_PL(p)) #define PPC_RAW_TLBIE(lp, a) (0x7c000264 | ___PPC_RB(a) | ___PPC_RS(lp)) #define PPC_RAW_TLBIE_5(rb, rs, ric, prs, r) \ (0x7c000264 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r)) @@ -606,7 +608,8 @@ #define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b) #define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b) #define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b) -#define PPC_WAIT(w) stringify_in_c(.long PPC_RAW_WAIT(w)) +#define PPC_WAIT_v203 stringify_in_c(.long PPC_RAW_WAIT_v203) +#define PPC_WAIT(w, p) stringify_in_c(.long PPC_RAW_WAIT(w, p)) #define PPC_TLBIE(lp, a) stringify_in_c(.long PPC_RAW_TLBIE(lp, a)) #define PPC_TLBIE_5(rb, rs, ric, prs, r) \ stringify_in_c(.long PPC_RAW_TLBIE_5(rb, rs, ric, prs, r)) diff --git a/arch/powerpc/kernel/idle_64e.S b/arch/powerpc/kernel/idle_64e.S index 1736aad2afe9..0fc680e03dee 100644 --- a/arch/powerpc/kernel/idle_64e.S +++ b/arch/powerpc/kernel/idle_64e.S @@ -75,7 +75,7 @@ _GLOBAL(\name) .macro BOOK3E_IDLE_LOOP 1: - PPC_WAIT(0) + PPC_WAIT_v203 b 1b .endm -- cgit v1.2.3-70-g09d2 From 9c7bfc2dc21e737e8e4a753630bce675e1e7c0ad Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 20 Sep 2022 22:22:59 +1000 Subject: powerpc/64s: Make POWER10 and later use pause_short in cpu_relax loops We want to move away from using SMT priority updates for cpu_relax, and use a 'wait' instruction which is similar to x86. 
As well as being a much better fit for what everybody else uses and tests with, priority nops are stateful, which is nasty (interrupts have to consider they might be taken at a different priority), and they're expensive to execute, similar to a mtSPR which can affect other threads in the pipe. This has been shown to give results that are less affected by code alignment on benchmarks that cause a lot of spin waiting (e.g., rwsem contention on unixbench filesystem benchmarks) on POWER10. QEMU TCG only supports this instruction correctly since v7.1; versions without the fix may cause hangs when running POWER10 CPUs. Reviewed-by: Segher Boessenkool Signed-off-by: Nicholas Piggin [mpe: Fix checkpatch warnings RE the macros] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220920122259.363092-2-npiggin@gmail.com --- arch/powerpc/include/asm/processor.h | 22 +++++++++++++++++----- arch/powerpc/include/asm/vdso/processor.h | 8 +++++++- 2 files changed, 24 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 97a77b37daa3..9bff4ab8242d 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -355,11 +355,23 @@ static inline unsigned long __pack_fe01(unsigned int fpmode) #ifdef CONFIG_PPC64 -#define spin_begin() HMT_low() - -#define spin_cpu_relax() barrier() - -#define spin_end() HMT_medium() +#define spin_begin() \ + asm volatile(ASM_FTR_IFCLR( \ + "or 1,1,1", /* HMT_LOW */ \ + "nop", /* v3.1 uses pause_short in cpu_relax instead */ \ + %0) :: "i" (CPU_FTR_ARCH_31) : "memory") + +#define spin_cpu_relax() \ + asm volatile(ASM_FTR_IFCLR( \ + "nop", /* Before v3.1 use priority nops in spin_begin/end */ \ + PPC_WAIT(2, 0), /* aka pause_short */ \ + %0) :: "i" (CPU_FTR_ARCH_31) : "memory") + +#define spin_end() \ + asm volatile(ASM_FTR_IFCLR( \ + "or 2,2,2", /* HMT_MEDIUM */ \ + "nop", \ + %0) :: "i" (CPU_FTR_ARCH_31) : "memory") #endif diff --git a/arch/powerpc/include/asm/vdso/processor.h b/arch/powerpc/include/asm/vdso/processor.h index 8d79f994b4aa..80d13207c568 100644 --- a/arch/powerpc/include/asm/vdso/processor.h +++ b/arch/powerpc/include/asm/vdso/processor.h @@ -22,7 +22,13 @@ #endif #ifdef CONFIG_PPC64 -#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0) +#define cpu_relax() \ + asm volatile(ASM_FTR_IFCLR( \ + /* Pre-POWER10 uses low ; medium priority nops */ \ + "or 1,1,1 ; or 2,2,2", \ + /* POWER10 onward uses pause_short (wait 2,0) */ \ + PPC_WAIT(2, 0), \ + %0) :: "i" (CPU_FTR_ARCH_31) : "memory") #else #define cpu_relax() barrier() #endif -- cgit v1.2.3-70-g09d2 From a5edf9815dd739fce660b4c8658f61b7d2517042 Mon Sep 17 00:00:00 2001 From: Nicholas Miehlbradt Date: Mon, 26 Sep 2022 07:57:26 +0000 Subject: powerpc/64s: Enable KFENCE on book3s64 KFENCE support was added for ppc32 in commit 90cbac0e995d ("powerpc: Enable KFENCE for PPC32"). Enable KFENCE on the ppc64 architecture with hash and radix MMUs. It uses the same mechanism as debug pagealloc to protect/unprotect pages. All KFENCE kunit tests pass on both MMUs. KFENCE memory is initially allocated using memblock but is later marked as SLAB allocated. This necessitates the change to __pud_free to ensure that the KFENCE pages are freed appropriately. Based on previous work by Christophe Leroy and Jordan Niethe.
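A usage sketch (the sampling options come from the generic KFENCE code, not this patch; the values are illustrative):

	CONFIG_KFENCE=y
	CONFIG_KFENCE_SAMPLE_INTERVAL=100

and, on the kernel command line, kfence.sample_interval=100 to adjust the sampling at boot.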
Signed-off-by: Nicholas Miehlbradt Reviewed-by: Russell Currey Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926075726.2846-4-nicholas@linux.ibm.com --- arch/powerpc/Kconfig | 2 +- arch/powerpc/include/asm/book3s/64/pgalloc.h | 6 ++++-- arch/powerpc/include/asm/book3s/64/pgtable.h | 2 +- arch/powerpc/include/asm/kfence.h | 15 +++++++++++++++ arch/powerpc/mm/book3s64/hash_utils.c | 10 +++++----- arch/powerpc/mm/book3s64/radix_pgtable.c | 6 ++++-- 6 files changed, 30 insertions(+), 11 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 237c0e071e5e..81c9f895d690 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -195,7 +195,7 @@ config PPC select HAVE_ARCH_KASAN if PPC_RADIX_MMU select HAVE_ARCH_KASAN if PPC_BOOK3E_64 select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN - select HAVE_ARCH_KFENCE if PPC_BOOK3S_32 || PPC_8xx || 40x + select HAVE_ARCH_KFENCE if ARCH_SUPPORTS_DEBUG_PAGEALLOC select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index e1af0b394ceb..dd2cff53a111 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -113,9 +113,11 @@ static inline void __pud_free(pud_t *pud) /* * Early pud pages allocated via memblock allocator - * can't be directly freed to slab + * can't be directly freed to slab. KFENCE pages have + * both reserved and slab flags set so need to be freed + * kmem_cache_free. */ - if (PageReserved(page)) + if (PageReserved(page) && !PageSlab(page)) free_reserved_page(page); else kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud); diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index b5f8264cc980..c436d8422654 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1106,7 +1106,7 @@ static inline void vmemmap_remove_mapping(unsigned long start, } #endif -#ifdef CONFIG_DEBUG_PAGEALLOC +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE) static inline void __kernel_map_pages(struct page *page, int numpages, int enable) { if (radix_enabled()) diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h index a9846b68c6b9..6fd2b4d486c5 100644 --- a/arch/powerpc/include/asm/kfence.h +++ b/arch/powerpc/include/asm/kfence.h @@ -11,11 +11,25 @@ #include #include +#ifdef CONFIG_PPC64_ELF_ABI_V1 +#define ARCH_FUNC_PREFIX "." 
+#endif + static inline bool arch_kfence_init_pool(void) { return true; } +#ifdef CONFIG_PPC64 +static inline bool kfence_protect_page(unsigned long addr, bool protect) +{ + struct page *page = virt_to_page(addr); + + __kernel_map_pages(page, 1, !protect); + + return true; +} +#else static inline bool kfence_protect_page(unsigned long addr, bool protect) { pte_t *kpte = virt_to_kpte(addr); @@ -29,5 +43,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) return true; } +#endif #endif /* __ASM_POWERPC_KFENCE_H */ diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 6d985acb1d39..df008edf7be0 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -424,7 +424,7 @@ repeat: break; cond_resched(); - if (debug_pagealloc_enabled() && + if (debug_pagealloc_enabled_or_kfence() && (paddr >> PAGE_SHIFT) < linear_map_hash_count) linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80; } @@ -773,7 +773,7 @@ static void __init htab_init_page_sizes(void) bool aligned = true; init_hpte_page_sizes(); - if (!debug_pagealloc_enabled()) { + if (!debug_pagealloc_enabled_or_kfence()) { /* * Pick a size for the linear mapping. Currently, we only * support 16M, 1M and 4K which is the default @@ -1061,7 +1061,7 @@ static void __init htab_initialize(void) prot = pgprot_val(PAGE_KERNEL); - if (debug_pagealloc_enabled()) { + if (debug_pagealloc_enabled_or_kfence()) { linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; linear_map_hash_slots = memblock_alloc_try_nid( linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT, @@ -1980,7 +1980,7 @@ repeat: return slot; } -#ifdef CONFIG_DEBUG_PAGEALLOC +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE) static DEFINE_SPINLOCK(linear_map_hash_lock); static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) @@ -2053,7 +2053,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable) } local_irq_restore(flags); } -#endif /* CONFIG_DEBUG_PAGEALLOC */ +#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 94a05c66be95..70f1580afc3a 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -34,6 +34,8 @@ #include +#include + unsigned int mmu_base_pid; unsigned long radix_mem_block_size __ro_after_init; @@ -276,7 +278,7 @@ static int __meminit create_physical_mapping(unsigned long start, int psize; unsigned long max_mapping_size = radix_mem_block_size; - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_or_kfence()) max_mapping_size = PAGE_SIZE; start = ALIGN(start, PAGE_SIZE); @@ -899,7 +901,7 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long #endif #endif -#ifdef CONFIG_DEBUG_PAGEALLOC +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE) void radix__kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long addr; -- cgit v1.2.3-70-g09d2 From 56adbb7a8b6cc7fc9b940829c38494e53c9e57d1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:42:59 +1000 Subject: powerpc/64/interrupt: Fix false warning in context tracking due to idle state Commit 171476775d32 ("context_tracking: Convert state to atomic_t") added a CONTEXT_IDLE state which can be encountered by interrupts from kernel mode in the 
idle thread, causing a false positive warning. Fixes: 171476775d32 ("context_tracking: Convert state to atomic_t") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926054305.2671436-2-npiggin@gmail.com --- arch/powerpc/include/asm/interrupt.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index 0d3405bd55c4..73031783bb1e 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -195,7 +195,8 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs) * so avoid recursion. */ if (TRAP(regs) != INTERRUPT_PROGRAM) { - CT_WARN_ON(ct_state() != CONTEXT_KERNEL); + CT_WARN_ON(ct_state() != CONTEXT_KERNEL && + ct_state() != CONTEXT_IDLE); if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) BUG_ON(is_implicit_soft_masked(regs)); } -- cgit v1.2.3-70-g09d2 From 9524f2278f2e6925f147d9140c83f658e7a7c84f Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:43:02 +1000 Subject: powerpc/64s: Fix irq state management in runlatch functions When irqs are soft-disabled, MSR[EE] is volatile and can change from 1 to 0 asynchronously (if a PACA_IRQ_MUST_HARD_MASK interrupt hits). So it can not be used to check hard IRQ enabled status, except to confirm it is disabled. ppc64_runlatch_on/off functions use MSR this way to decide whether to re-enable MSR[EE] after disabling it, which leads to MSR[EE] being enabled when it shouldn't be (when a PACA_IRQ_MUST_HARD_MASK had disabled it between reading the MSR and clearing EE). This has been tolerated in the kernel previously, and it doesn't seem to cause a problem, but it is unexpected and may trip warnings or cause other problems as we tighten up this state management. Fix this by only re-enabling if PACA_IRQ_HARD_DIS is clear. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926054305.2671436-5-npiggin@gmail.com --- arch/powerpc/include/asm/runlatch.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/runlatch.h b/arch/powerpc/include/asm/runlatch.h index cfb390edf7d0..ceb66d761fe1 100644 --- a/arch/powerpc/include/asm/runlatch.h +++ b/arch/powerpc/include/asm/runlatch.h @@ -19,10 +19,9 @@ extern void __ppc64_runlatch_off(void); do { \ if (cpu_has_feature(CPU_FTR_CTRL) && \ test_thread_local_flags(_TLF_RUNLATCH)) { \ - unsigned long msr = mfmsr(); \ __hard_irq_disable(); \ __ppc64_runlatch_off(); \ - if (msr & MSR_EE) \ + if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) \ __hard_irq_enable(); \ } \ } while (0) @@ -31,10 +30,9 @@ extern void __ppc64_runlatch_off(void); do { \ if (cpu_has_feature(CPU_FTR_CTRL) && \ !test_thread_local_flags(_TLF_RUNLATCH)) { \ - unsigned long msr = mfmsr(); \ __hard_irq_disable(); \ __ppc64_runlatch_on(); \ - if (msr & MSR_EE) \ + if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) \ __hard_irq_enable(); \ } \ } while (0) -- cgit v1.2.3-70-g09d2 From f7bff6e7759b1abb59334f6448f9ef3172c4c04a Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:43:04 +1000 Subject: powerpc/64/interrupt: avoid BUG/WARN recursion in interrupt entry BUG/WARN are handled with a program interrupt which can turn into an infinite recursion when there are bugs in interrupt handler entry (which can be irritated by bugs in other parts of the code). 
There is one feeble attempt to avoid this recursion, but it misses several cases. Make a tidier macro for this and switch most bugs in the interrupt entry wrapper over to use it. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926054305.2671436-7-npiggin@gmail.com --- arch/powerpc/include/asm/interrupt.h | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index 73031783bb1e..4745bb9998bd 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -74,6 +74,19 @@ #include #include +#ifdef CONFIG_PPC64 +/* + * WARN/BUG is handled with a program interrupt so minimise checks here to + * avoid recursion and maximise the chance of getting the first oops handled. + */ +#define INT_SOFT_MASK_BUG_ON(regs, cond) \ +do { \ + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && \ + (user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM))) \ + BUG_ON(cond); \ +} while (0) +#endif + #ifdef CONFIG_PPC_BOOK3S_64 extern char __end_soft_masked[]; bool search_kernel_soft_mask_table(unsigned long addr); @@ -170,8 +183,7 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs) * context. */ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) { - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) - BUG_ON(!(regs->msr & MSR_EE)); + INT_SOFT_MASK_BUG_ON(regs, !(regs->msr & MSR_EE)); __hard_irq_enable(); } else { __hard_RI_enable(); @@ -194,20 +206,15 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs) * CT_WARN_ON comes here via program_check_exception, * so avoid recursion. */ - if (TRAP(regs) != INTERRUPT_PROGRAM) { + if (TRAP(regs) != INTERRUPT_PROGRAM) CT_WARN_ON(ct_state() != CONTEXT_KERNEL && ct_state() != CONTEXT_IDLE); - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) - BUG_ON(is_implicit_soft_masked(regs)); - } - - /* Move this under a debugging check */ - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && - arch_irq_disabled_regs(regs)) - BUG_ON(search_kernel_restart_table(regs->nip)); + INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs)); + INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) && + search_kernel_restart_table(regs->nip)); } - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) - BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE)); + INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) && + !(regs->msr & MSR_EE)); #endif booke_restore_dbcr0(); -- cgit v1.2.3-70-g09d2 From 17773afdcd1589c5925a984f512330410cb2ba4f Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 13:40:53 +1000 Subject: powerpc/64: use 32-bit immediate for STACK_FRAME_REGS_MARKER Using a 32-bit constant for this marker allows it to be loaded with two ALU instructions, like 32-bit. This avoids a TOC entry and a TOC load that depends on the r2 value that has just been loaded from the PACA. This changes the value for 32-bit as well, so both have the same value in the low 4 bytes and 64-bit has 0 in the top bytes. 
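Concretely, LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) can now expand to two ALU instructions (0x72656773 is ASCII "regs"):

	lis	r11,0x7265	/* r11 = 0x72650000 */
	ori	r11,r11,0x6773	/* r11 = 0x72656773 */

whereas the old 64-bit marker required a TOC entry and a load that depends on r2:

	ld	r11,exception_marker@toc(r2)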
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926034057.2360083-2-npiggin@gmail.com --- arch/powerpc/include/asm/ptrace.h | 4 ++-- arch/powerpc/kernel/entry_32.S | 6 +++--- arch/powerpc/kernel/exceptions-64e.S | 8 +------- arch/powerpc/kernel/exceptions-64s.S | 2 +- arch/powerpc/kernel/head_64.S | 7 ------- arch/powerpc/kernel/interrupt_64.S | 6 +++--- 6 files changed, 10 insertions(+), 23 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index a03403695cd4..5b496e589d54 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -99,6 +99,8 @@ struct pt_regs #define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)) +#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) + #ifdef __powerpc64__ /* @@ -115,7 +117,6 @@ struct pt_regs #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */ #define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */ -#define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265) #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \ STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) #define STACK_FRAME_MARKER 12 @@ -136,7 +137,6 @@ struct pt_regs #define KERNEL_REDZONE_SIZE 0 #define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */ #define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */ -#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD) #define STACK_FRAME_MARKER 2 #define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 55fcc70f2cc0..3fc7c9886bb7 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -263,7 +263,7 @@ fast_exception_return: mtcr r10 lwz r10,_LINK(r11) mtlr r10 - /* Clear the exception_marker on the stack to avoid confusing stacktrace */ + /* Clear the exception marker on the stack to avoid confusing stacktrace */ li r10, 0 stw r10, 8(r11) REST_GPR(10, r11) @@ -320,7 +320,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) li r0,0 /* - * Leaving a stale exception_marker on the stack can confuse + * Leaving a stale exception marker on the stack can confuse * the reliable stack unwinder later on. Clear it. */ stw r0,8(r1) @@ -372,7 +372,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) mtspr SPRN_XER,r5 /* - * Leaving a stale exception_marker on the stack can confuse + * Leaving a stale exception marker on the stack can confuse * the reliable stack unwinder later on. Clear it. */ stw r0,8(r1) diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index d6b7aa0229be..478127153529 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -378,7 +378,7 @@ exc_##n##_common: \ ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \ lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \ lbz r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */ \ - ld r12,exception_marker@toc(r2); \ + LOAD_REG_IMMEDIATE(r12, STACK_FRAME_REGS_MARKER); \ ZEROIZE_GPR(0); \ std r3,GPR10(r1); /* save r10 to stackframe */ \ std r4,GPR11(r1); /* save r11 to stackframe */ \ @@ -459,12 +459,6 @@ exc_##n##_bad_stack: \ bl hdlr; \ b interrupt_return -/* This value is used to mark exception frames on the stack. 
*/ - .section ".toc","aw" -exception_marker: - .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER - - /* * And here we have the exception vectors ! */ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 43e538502f52..3b56fc689a04 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -589,7 +589,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) li r9,IVEC std r9,_TRAP(r1) /* set trap number */ li r10,0 - ld r11,exception_marker@toc(r2) + LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) std r10,RESULT(r1) /* clear regs->result */ std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */ .endm diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index da544ea0ce01..c79da95dfd2e 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -192,13 +192,6 @@ __secondary_hold: #endif CLOSE_FIXED_SECTION(first_256B) -/* This value is used to mark exception frames on the stack. */ - .section ".toc","aw" -/* This value is used to mark exception frames on the stack. */ -exception_marker: - .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER - .previous - /* * On server, we include the exception vectors code here as it * relies on absolute addressing which is only possible within diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index 1dfa67e9199a..f02c55930b5c 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -80,7 +80,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) /* Calling convention has r3 = regs, r4 = orig r0 */ addi r3,r1,STACK_FRAME_OVERHEAD mr r4,r0 - ld r11,exception_marker@toc(r2) + LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) std r11,-16(r3) /* "regshere" marker */ BEGIN_FTR_SECTION @@ -253,7 +253,7 @@ END_BTB_FLUSH_SECTION /* Calling convention has r3 = regs, r4 = orig r0 */ addi r3,r1,STACK_FRAME_OVERHEAD mr r4,r0 - ld r11,exception_marker@toc(r2) + LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) std r11,-16(r3) /* "regshere" marker */ #ifdef CONFIG_PPC_BOOK3S @@ -614,7 +614,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) mtspr SPRN_XER,r5 /* - * Leaving a stale exception_marker on the stack can confuse + * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse * the reliable stack unwinder later on. Clear it. */ std r0,STACK_FRAME_OVERHEAD-16(r1) -- cgit v1.2.3-70-g09d2 From 754f611774e4b9357a944f5b703dd291c85161cf Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 13:40:55 +1000 Subject: powerpc/64: switch asm helpers from GOT to TOC relative addressing There is no need to use GOT addressing within the kernel. 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926034057.2360083-4-npiggin@gmail.com --- arch/powerpc/boot/ppc_asm.h | 3 ++- arch/powerpc/include/asm/ppc_asm.h | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/boot/ppc_asm.h b/arch/powerpc/boot/ppc_asm.h index 2824a3e32aab..a66cfd76fa4d 100644 --- a/arch/powerpc/boot/ppc_asm.h +++ b/arch/powerpc/boot/ppc_asm.h @@ -86,7 +86,8 @@ #ifdef CONFIG_PPC64_BOOT_WRAPPER #define LOAD_REG_ADDR(reg,name) \ - ld reg,name@got(r2) + addis reg,r2,name@toc@ha; \ + addi reg,reg,name@toc@l #else #define LOAD_REG_ADDR(reg,name) \ lis reg,name@ha; \ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index eeb7dc8cd45f..9972f3e90c9d 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -337,7 +337,8 @@ n: rldimi reg, tmp, 32, 0 #define LOAD_REG_ADDR(reg,name) \ - ld reg,name@got(r2) + addis reg,r2,name@toc@ha; \ + addi reg,reg,name@toc@l #define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name) #define ADDROFF(name) 0 -- cgit v1.2.3-70-g09d2 From 8e93fb33c84f68db20c0bc2821334a4c54c3e251 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 13:40:56 +1000 Subject: powerpc/64: provide a helper macro to load r2 with the kernel TOC A later change stops the kernel using r2 and loads it with a poison value. Provide a PACATOC loading abstraction which can hide this detail. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926034057.2360083-5-npiggin@gmail.com --- arch/powerpc/include/asm/ppc_asm.h | 6 ++++++ arch/powerpc/kernel/exceptions-64e.S | 12 ++++++------ arch/powerpc/kernel/exceptions-64s.S | 6 +++--- arch/powerpc/kernel/head_64.S | 4 ++-- arch/powerpc/kernel/interrupt_64.S | 12 ++++++------ arch/powerpc/kernel/optprobes_head.S | 2 +- arch/powerpc/kernel/trace/ftrace_low.S | 2 +- arch/powerpc/kernel/trace/ftrace_mprofile.S | 3 +-- arch/powerpc/kvm/book3s_64_entry.S | 2 +- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 4 ++-- arch/powerpc/kvm/tm.S | 2 +- arch/powerpc/mm/nohash/tlb_low_64e.S | 2 +- arch/powerpc/platforms/powernv/opal-wrappers.S | 2 +- 13 files changed, 32 insertions(+), 27 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 9972f3e90c9d..cf6bec9770d6 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -327,6 +327,12 @@ n: #ifdef __powerpc64__ +#define __LOAD_PACA_TOC(reg) \ + ld reg,PACATOC(r13) + +#define LOAD_PACA_TOC() \ + __LOAD_PACA_TOC(r2) + #define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE reg, expr #define LOAD_REG_IMMEDIATE_SYM(reg, tmp, expr) \ diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 478127153529..ece1bd2a4a39 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -371,7 +371,7 @@ exc_##n##_common: \ ld r4,excf+EX_R11(r13); /* get back r11 */ \ mfspr r5,scratch; /* get back r13 */ \ SAVE_GPR(12, r1); /* save r12 in stackframe */ \ - ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ + LOAD_PACA_TOC(); /* get kernel TOC into r2 */ \ mflr r6; /* save LR in stackframe */ \ mfctr r7; /* save CTR in stackframe */ \ mfspr r8,SPRN_XER; /* save XER in stackframe */ \ @@ -687,7 +687,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) beq+ 1f #ifdef CONFIG_RELOCATABLE - ld 
r15,PACATOC(r13) + __LOAD_PACA_TOC(r15) ld r14,interrupt_base_book3e@got(r15) ld r15,__end_interrupts@got(r15) cmpld cr0,r10,r14 @@ -758,7 +758,7 @@ kernel_dbg_exc: beq+ 1f #ifdef CONFIG_RELOCATABLE - ld r15,PACATOC(r13) + __LOAD_PACA_TOC(r15) ld r14,interrupt_base_book3e@got(r15) ld r15,__end_interrupts@got(r15) cmpld cr0,r10,r14 @@ -883,7 +883,7 @@ kernel_dbg_exc: .macro SEARCH_RESTART_TABLE #ifdef CONFIG_RELOCATABLE - ld r11,PACATOC(r13) + __LOAD_PACA_TOC(r11) ld r14,__start___restart_table@got(r11) ld r15,__stop___restart_table@got(r11) #else @@ -1061,7 +1061,7 @@ bad_stack_book3e: std r11,0(r1) ZEROIZE_GPR(12) std r12,0(r11) - ld r2,PACATOC(r13) + LOAD_PACA_TOC() 1: addi r3,r1,STACK_FRAME_OVERHEAD bl kernel_bad_stack b 1b @@ -1302,7 +1302,7 @@ a2_tlbinit_after_linear_map: /* Now we branch the new virtual address mapped by this entry */ #ifdef CONFIG_RELOCATABLE - ld r5,PACATOC(r13) + __LOAD_PACA_TOC(r5) ld r3,1f@got(r5) #else LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 3b56fc689a04..5956ad47a8a0 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -580,7 +580,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) std r2,GPR2(r1) /* save r2 in stackframe */ SAVE_GPRS(3, 8, r1) /* save r3 - r8 in stackframe */ mflr r9 /* Get LR, later save to stack */ - ld r2,PACATOC(r13) /* get kernel TOC into r2 */ + LOAD_PACA_TOC() /* get kernel TOC into r2 */ std r9,_LINK(r1) lbz r10,PACAIRQSOFTMASK(r13) mfspr r11,SPRN_XER /* save XER in stackframe */ @@ -610,7 +610,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) .macro SEARCH_RESTART_TABLE #ifdef CONFIG_RELOCATABLE mr r12,r2 - ld r2,PACATOC(r13) + LOAD_PACA_TOC() LOAD_REG_ADDR(r9, __start___restart_table) LOAD_REG_ADDR(r10, __stop___restart_table) mr r2,r12 @@ -640,7 +640,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) .macro SEARCH_SOFT_MASK_TABLE #ifdef CONFIG_RELOCATABLE mr r12,r2 - ld r2,PACATOC(r13) + LOAD_PACA_TOC() LOAD_REG_ADDR(r9, __start___soft_mask_table) LOAD_REG_ADDR(r10, __stop___soft_mask_table) mr r2,r12 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index c79da95dfd2e..c63e0c086f03 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -841,7 +841,7 @@ __secondary_start: * before going into C code. 
*/ start_secondary_prolog: - ld r2,PACATOC(r13) + LOAD_PACA_TOC() li r3,0 std r3,0(r1) /* Zero the stack frame pointer */ bl start_secondary @@ -981,7 +981,7 @@ start_here_common: std r1,PACAKSAVE(r13) /* Load the TOC (virtual address) */ - ld r2,PACATOC(r13) + LOAD_PACA_TOC() /* Mark interrupts soft and hard disabled (they might be enabled * in the PACA when doing hotplug) diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index f02c55930b5c..904a5608cbe3 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -57,7 +57,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) std r0,GPR0(r1) std r10,GPR1(r1) std r2,GPR2(r1) - ld r2,PACATOC(r13) + LOAD_PACA_TOC() mfcr r12 li r11,0 /* Save syscall parameters in r3-r8 */ @@ -174,7 +174,7 @@ syscall_vectored_\name\()_restart: _ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) - ld r2,PACATOC(r13) + LOAD_PACA_TOC() ld r3,RESULT(r1) addi r4,r1,STACK_FRAME_OVERHEAD li r11,IRQS_ALL_DISABLED @@ -224,7 +224,7 @@ START_BTB_FLUSH_SECTION BTB_FLUSH(r10) END_BTB_FLUSH_SECTION #endif - ld r2,PACATOC(r13) + LOAD_PACA_TOC() mfcr r12 li r11,0 /* Save syscall parameters in r3-r8 */ @@ -355,7 +355,7 @@ syscall_restart: _ASM_NOKPROBE_SYMBOL(syscall_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) - ld r2,PACATOC(r13) + LOAD_PACA_TOC() ld r3,RESULT(r1) addi r4,r1,STACK_FRAME_OVERHEAD li r11,IRQS_ALL_DISABLED @@ -502,7 +502,7 @@ interrupt_return_\srr\()_user_restart: _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) - ld r2,PACATOC(r13) + LOAD_PACA_TOC() addi r3,r1,STACK_FRAME_OVERHEAD li r11,IRQS_ALL_DISABLED stb r11,PACAIRQSOFTMASK(r13) @@ -663,7 +663,7 @@ interrupt_return_\srr\()_kernel_restart: _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) - ld r2,PACATOC(r13) + LOAD_PACA_TOC() addi r3,r1,STACK_FRAME_OVERHEAD li r11,IRQS_ALL_DISABLED stb r11,PACAIRQSOFTMASK(r13) diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S index 5c7f0b4b784b..cd4e7bc32609 100644 --- a/arch/powerpc/kernel/optprobes_head.S +++ b/arch/powerpc/kernel/optprobes_head.S @@ -73,7 +73,7 @@ optprobe_template_entry: * further below. */ #ifdef CONFIG_PPC64 - ld r2,PACATOC(r13) + LOAD_PACA_TOC() #endif .global optprobe_template_op_address diff --git a/arch/powerpc/kernel/trace/ftrace_low.S b/arch/powerpc/kernel/trace/ftrace_low.S index 0bddf1fa6636..294d1e05958a 100644 --- a/arch/powerpc/kernel/trace/ftrace_low.S +++ b/arch/powerpc/kernel/trace/ftrace_low.S @@ -48,7 +48,7 @@ _GLOBAL(return_to_handler) * We might be called from a module. * Switch to our TOC to run inside the core kernel. 
*/ - ld r2, PACATOC(r13) + LOAD_PACA_TOC() #else stwu r1, -16(r1) stw r3, 8(r1) diff --git a/arch/powerpc/kernel/trace/ftrace_mprofile.S b/arch/powerpc/kernel/trace/ftrace_mprofile.S index 33fcfb2eaded..d031093bc436 100644 --- a/arch/powerpc/kernel/trace/ftrace_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_mprofile.S @@ -83,8 +83,7 @@ #ifdef CONFIG_PPC64 /* Save callee's TOC in the ABI compliant location */ std r2, STK_GOT(r1) - ld r2,PACATOC(r13) /* get kernel TOC in r2 */ - + LOAD_PACA_TOC() /* get kernel TOC in r2 */ LOAD_REG_ADDR(r3, function_trace_op) ld r5,0(r3) #else diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S index e43704547a1e..6c2b1d17cb63 100644 --- a/arch/powerpc/kvm/book3s_64_entry.S +++ b/arch/powerpc/kvm/book3s_64_entry.S @@ -315,7 +315,7 @@ kvmppc_p9_exit_interrupt: reg = reg + 1 .endr - ld r2,PACATOC(r13) + LOAD_PACA_TOC() mflr r4 std r4,VCPU_LR(r3) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 7ded202bf995..c984021e62c8 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1024,7 +1024,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) /* Restore R1/R2 so we can handle faults */ ld r1, HSTATE_HOST_R1(r13) - ld r2, PACATOC(r13) + LOAD_PACA_TOC() mfspr r10, SPRN_SRR0 mfspr r11, SPRN_SRR1 @@ -2727,7 +2727,7 @@ kvmppc_bad_host_intr: std r4, _CTR(r1) std r5, _XER(r1) std r6, SOFTE(r1) - ld r2, PACATOC(r13) + LOAD_PACA_TOC() LOAD_REG_IMMEDIATE(3, 0x7265677368657265) std r3, STACK_FRAME_OVERHEAD-16(r1) diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S index 3bf17c854be4..2158f61e317f 100644 --- a/arch/powerpc/kvm/tm.S +++ b/arch/powerpc/kvm/tm.S @@ -110,7 +110,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) mtmsrd r2, 1 /* Reload TOC pointer. */ - ld r2, PACATOC(r13) + LOAD_PACA_TOC() /* Save all but r0-r2, r9 & r13 */ reg = 3 diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S index be26f33a6ac0..b3b3dfeec8f5 100644 --- a/arch/powerpc/mm/nohash/tlb_low_64e.S +++ b/arch/powerpc/mm/nohash/tlb_low_64e.S @@ -1118,7 +1118,7 @@ tlb_load_linear: * we only use 1G pages for now. That might have to be changed in a * final implementation, especially when dealing with hypervisors */ - ld r11,PACATOC(r13) + __LOAD_PACA_TOC(r11) ld r11,linear_map_top@got(r11) ld r10,0(r11) tovirt(10,10) diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index e5acc33b3b20..0ed95f753416 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -57,7 +57,7 @@ opal_return: .long 0xa64b7b7d /* mthsrr1 r11 */ .long 0x2402004c /* hrfid */ #endif - ld r2,PACATOC(r13) + LOAD_PACA_TOC() ld r0,PPC_LR_STKOFF(r1) mtlr r0 blr -- cgit v1.2.3-70-g09d2 From 3569d84bb26f6f07d426446da3d2c836180f1565 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 13:40:57 +1000 Subject: powerpc/64e: provide an addressing macro for use with TOC in alternate register The interrupt entry code carefully saves a minimal number of registers, so in some places the TOC is required, it is loaded into a different register, so provide a macro that can supply an alternate TOC register. This continues to use got addressing because TOC-relative results in "got/toc optimization is not supported" messages by the linker. 
Having r2 be one of the saved registers and using that for TOC addressing may be the best way to avoid that and switch this to TOC addressing. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926034057.2360083-6-npiggin@gmail.com --- arch/powerpc/include/asm/ppc_asm.h | 11 +++++++++++ arch/powerpc/kernel/exceptions-64e.S | 14 +++++++------- arch/powerpc/mm/nohash/tlb_low_64e.S | 2 +- 3 files changed, 19 insertions(+), 8 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index cf6bec9770d6..753a2757bcd4 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -346,6 +346,17 @@ n: addis reg,r2,name@toc@ha; \ addi reg,reg,name@toc@l +#ifdef CONFIG_PPC_BOOK3E_64 +/* + * This is used in register-constrained interrupt handlers. Not to be used + * by BOOK3S. ld complains with "got/toc optimization is not supported" if r2 + * is not used for the TOC offset, so use @got(tocreg). If the interrupt + * handlers saved r2 instead, LOAD_REG_ADDR could be used. + */ +#define LOAD_REG_ADDR_ALTTOC(reg,tocreg,name) \ + ld reg,name@got(tocreg) +#endif + #define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name) #define ADDROFF(name) 0 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index ece1bd2a4a39..930e36099015 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -688,8 +688,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #ifdef CONFIG_RELOCATABLE __LOAD_PACA_TOC(r15) - ld r14,interrupt_base_book3e@got(r15) - ld r15,__end_interrupts@got(r15) + LOAD_REG_ADDR_ALTTOC(r14, r15, interrupt_base_book3e) + LOAD_REG_ADDR_ALTTOC(r15, r15, __end_interrupts) cmpld cr0,r10,r14 cmpld cr1,r10,r15 #else @@ -759,8 +759,8 @@ kernel_dbg_exc: #ifdef CONFIG_RELOCATABLE __LOAD_PACA_TOC(r15) - ld r14,interrupt_base_book3e@got(r15) - ld r15,__end_interrupts@got(r15) + LOAD_REG_ADDR_ALTTOC(r14, r15, interrupt_base_book3e) + LOAD_REG_ADDR_ALTTOC(r15, r15, __end_interrupts) cmpld cr0,r10,r14 cmpld cr1,r10,r15 #else @@ -884,8 +884,8 @@ kernel_dbg_exc: .macro SEARCH_RESTART_TABLE #ifdef CONFIG_RELOCATABLE __LOAD_PACA_TOC(r11) - ld r14,__start___restart_table@got(r11) - ld r15,__stop___restart_table@got(r11) + LOAD_REG_ADDR_ALTTOC(r14, r11, __start___restart_table) + LOAD_REG_ADDR_ALTTOC(r15, r11, __stop___restart_table) #else LOAD_REG_IMMEDIATE_SYM(r14, r11, __start___restart_table) LOAD_REG_IMMEDIATE_SYM(r15, r11, __stop___restart_table) @@ -1303,7 +1303,7 @@ a2_tlbinit_after_linear_map: /* Now we branch the new virtual address mapped by this entry */ #ifdef CONFIG_RELOCATABLE __LOAD_PACA_TOC(r5) - ld r3,1f@got(r5) + LOAD_REG_ADDR_ALTTOC(r3, r5, 1f) #else LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f) #endif diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S index b3b3dfeec8f5..76cf456d7976 100644 --- a/arch/powerpc/mm/nohash/tlb_low_64e.S +++ b/arch/powerpc/mm/nohash/tlb_low_64e.S @@ -1119,7 +1119,7 @@ tlb_load_linear: * final implementation, especially when dealing with hypervisors */ __LOAD_PACA_TOC(r11) - ld r11,linear_map_top@got(r11) + LOAD_REG_ADDR_ALTTOC(r11, r11, linear_map_top) ld r10,0(r11) tovirt(10,10) cmpld cr0,r16,r10 -- cgit v1.2.3-70-g09d2 From 2f5182cffa43f31c241131a2c10a4ecd8e90fb3e Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:56:17 +1000 Subject: powerpc/64s: early boot machine check handler Use the early boot 
interrupt fixup in the machine check handler to allow the machine check handler to run before interrupt endian is set up. Branch to an early boot handler that just does a basic crash, which allows it to run before ppc_md is set up. MSR[ME] is enabled on the boot CPU earlier, and the machine check stack is temporarily set to the middle of the init task stack. This allows machine checks (e.g., due to invalid data access in real mode) to print something useful earlier in boot (as soon as udbg is set up, if CONFIG_PPC_EARLY_DEBUG=y). Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926055620.2676869-3-npiggin@gmail.com --- arch/powerpc/include/asm/asm-prototypes.h | 1 + arch/powerpc/kernel/exceptions-64s.S | 6 +++++- arch/powerpc/kernel/setup_64.c | 14 ++++++++++++++ arch/powerpc/kernel/traps.c | 14 ++++++++++++++ 4 files changed, 34 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index e1b3e90eec94..274bce76f5da 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -36,6 +36,7 @@ int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3, int64_t opcode, uint64_t msr); /* misc runtime */ +void enable_machine_check(void); extern u64 __bswapdi2(u64); extern s64 __lshrdi3(s64, int); extern s64 __ashldi3(s64, int); diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index c1ce7deeeb6e..8d658c740db8 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1134,6 +1134,7 @@ INT_DEFINE_BEGIN(machine_check) INT_DEFINE_END(machine_check) EXC_REAL_BEGIN(machine_check, 0x200, 0x100) + EARLY_BOOT_FIXUP GEN_INT_ENTRY machine_check_early, virt=0 EXC_REAL_END(machine_check, 0x200, 0x100) EXC_VIRT_NONE(0x4200, 0x100) @@ -1198,6 +1199,9 @@ BEGIN_FTR_SECTION bl enable_machine_check END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) addi r3,r1,STACK_FRAME_OVERHEAD +BEGIN_FTR_SECTION + bl machine_check_early_boot +END_FTR_SECTION(0, 1) // nop out after boot bl machine_check_early std r3,RESULT(r1) /* Save result */ ld r12,_MSR(r1) @@ -3096,7 +3100,7 @@ CLOSE_FIXED_SECTION(virt_trampolines); USE_TEXT_SECTION() /* MSR[RI] should be clear because this uses SRR[01] */ -enable_machine_check: +_GLOBAL(enable_machine_check) mflr r0 bcl 20,31,$+4 0: mflr r3 diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index e1ac803c9046..f50476da2f2c 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -34,6 +34,7 @@ #include #include +#include #include #include #include @@ -180,6 +181,16 @@ static void __init fixup_boot_paca(void) { /* The boot cpu is started */ get_paca()->cpu_start = 1; +#ifdef CONFIG_PPC_BOOK3S_64 + /* + * Give the early boot machine check stack somewhere to use, use + * half of the init stack. This is a bit hacky but there should not be + * deep stack usage in early init so shouldn't overflow it or overwrite + * things. 
+ */ + get_paca()->mc_emergency_sp = (void *)&init_thread_union + + (THREAD_SIZE/2); +#endif /* Allow percpu accesses to work until we setup percpu data */ get_paca()->data_offset = 0; /* Mark interrupts soft and hard disabled in PACA */ @@ -357,6 +368,9 @@ void __init early_setup(unsigned long dt_ptr) /* -------- printk is now safe to use ------- */ + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && (mfmsr() & MSR_HV)) + enable_machine_check(); + /* Try new device tree based feature discovery ... */ if (!dt_cpu_ftrs_init(__va(dt_ptr))) /* Otherwise use the old style CPU table */ diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 62ec50a7a8ef..9bdd79aa51cf 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -68,6 +68,7 @@ #include #include #include +#include #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE) int (*__debugger)(struct pt_regs *regs) __read_mostly; @@ -850,6 +851,19 @@ bail: } #ifdef CONFIG_PPC_BOOK3S_64 +DEFINE_INTERRUPT_HANDLER_RAW(machine_check_early_boot) +{ + udbg_printf("Machine check (early boot)\n"); + udbg_printf("SRR0=0x%016lx SRR1=0x%016lx\n", regs->nip, regs->msr); + udbg_printf(" DAR=0x%016lx DSISR=0x%08lx\n", regs->dar, regs->dsisr); + udbg_printf(" LR=0x%016lx R1=0x%08lx\n", regs->link, regs->gpr[1]); + udbg_printf("------\n"); + die("Machine check (early boot)", regs, SIGBUS); + for (;;) + ; + return 0; +} + DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async) { __machine_check_exception(regs); -- cgit v1.2.3-70-g09d2 From 6bd7ff497b4af13ea3d53781ffca7dc744dbb4da Mon Sep 17 00:00:00 2001 From: Pali Rohár Date: Tue, 23 Aug 2022 01:17:51 +0200 Subject: powerpc/udbg: Remove extern function prototypes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 'extern' keyword is pointless and deprecated for function prototypes. Signed-off-by: Pali Rohár Suggested-by: Gabriel Paubert Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220822231751.16973-1-pali@kernel.org --- arch/powerpc/include/asm/udbg.h | 54 ++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 27 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index 88add5593e6f..b1f094728b35 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h @@ -15,13 +15,13 @@ extern void (*udbg_flush)(void); extern int (*udbg_getc)(void); extern int (*udbg_getc_poll)(void); -extern void udbg_puts(const char *s); -extern int udbg_write(const char *s, int n); +void udbg_puts(const char *s); +int udbg_write(const char *s, int n); -extern void register_early_udbg_console(void); -extern void udbg_printf(const char *fmt, ...) +void register_early_udbg_console(void); +void udbg_printf(const char *fmt, ...) 
__attribute__ ((format (printf, 1, 2))); -extern void udbg_progress(char *s, unsigned short hex); +void udbg_progress(char *s, unsigned short hex); void __init udbg_uart_init_mmio(void __iomem *addr, unsigned int stride); void __init udbg_uart_init_pio(unsigned long port, unsigned int stride); @@ -31,28 +31,28 @@ unsigned int __init udbg_probe_uart_speed(unsigned int clock); struct device_node; void __init udbg_scc_init(int force_scc); -extern int udbg_adb_init(int force_btext); -extern void udbg_adb_init_early(void); - -extern void __init udbg_early_init(void); -extern void __init udbg_init_debug_lpar(void); -extern void __init udbg_init_debug_lpar_hvsi(void); -extern void __init udbg_init_pmac_realmode(void); -extern void __init udbg_init_maple_realmode(void); -extern void __init udbg_init_pas_realmode(void); -extern void __init udbg_init_rtas_panel(void); -extern void __init udbg_init_rtas_console(void); -extern void __init udbg_init_btext(void); -extern void __init udbg_init_44x_as1(void); -extern void __init udbg_init_40x_realmode(void); -extern void __init udbg_init_cpm(void); -extern void __init udbg_init_usbgecko(void); -extern void __init udbg_init_memcons(void); -extern void __init udbg_init_ehv_bc(void); -extern void __init udbg_init_ps3gelic(void); -extern void __init udbg_init_debug_opal_raw(void); -extern void __init udbg_init_debug_opal_hvsi(void); -extern void __init udbg_init_debug_16550(void); +int udbg_adb_init(int force_btext); +void udbg_adb_init_early(void); + +void __init udbg_early_init(void); +void __init udbg_init_debug_lpar(void); +void __init udbg_init_debug_lpar_hvsi(void); +void __init udbg_init_pmac_realmode(void); +void __init udbg_init_maple_realmode(void); +void __init udbg_init_pas_realmode(void); +void __init udbg_init_rtas_panel(void); +void __init udbg_init_rtas_console(void); +void __init udbg_init_btext(void); +void __init udbg_init_44x_as1(void); +void __init udbg_init_40x_realmode(void); +void __init udbg_init_cpm(void); +void __init udbg_init_usbgecko(void); +void __init udbg_init_memcons(void); +void __init udbg_init_ehv_bc(void); +void __init udbg_init_ps3gelic(void); +void __init udbg_init_debug_opal_raw(void); +void __init udbg_init_debug_opal_hvsi(void); +void __init udbg_init_debug_16550(void); #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_UDBG_H */
-- cgit v1.2.3-70-g09d2
From bbd71709087a9d486d1da42399eec14e106072f2 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 28 Sep 2022 01:04:18 +1000 Subject: powerpc: Make stack frame marker upper case
Now that the stack frame regs marker is only 32 bits, it is not as obvious in memory dumps and is easier to miss, eg:
c000000004733e40 0000000000000000 0000000000000000 |................|
c000000004733e50 0000000000000000 0000000000000000 |................|
c000000004733e60 0000000000000000 0000000000000000 |................|
c000000004733e70 7367657200000000 0000000000000000 |sger............|
c000000004733e80 a700000000000000 708897f7ff7f0000 |........p.......|
c000000004733e90 0073428fff7f0000 208997f7ff7f0000 |.sB..... 
.......| c000000004733ea0 0100000000000000 ffffffffffffffff |................| c000000004733eb0 0000000000000000 0000000000000000 |................| So make it upper case to make it stand out a bit more: c000000004733e70 5347455200000000 0000000000000000 |SGER............| Acked-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220927150419.1503001-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/ptrace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 5b496e589d54..6c23d1d25dc7 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -99,7 +99,7 @@ struct pt_regs #define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)) -#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) +#define STACK_FRAME_REGS_MARKER ASM_CONST(0x52454753) #ifdef __powerpc64__ -- cgit v1.2.3-70-g09d2 From 19c95df1277c48e3ef8cc7d9f1d315dce949f203 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 28 Sep 2022 01:04:19 +1000 Subject: powerpc: Reverse stack frame marker on little endian On little endian the stack frame marker appears reversed when dumping memory sequentially, as is typical in xmon or gdb, eg: c000000004733e40 0000000000000000 0000000000000000 |................| c000000004733e50 0000000000000000 0000000000000000 |................| c000000004733e60 0000000000000000 0000000000000000 |................| c000000004733e70 5347455200000000 0000000000000000 |SGER............| c000000004733e80 a700000000000000 708897f7ff7f0000 |........p.......| c000000004733e90 0073428fff7f0000 208997f7ff7f0000 |.sB..... .......| c000000004733ea0 0100000000000000 ffffffffffffffff |................| c000000004733eb0 0000000000000000 0000000000000000 |................| To make it easier to recognise, reverse the value on little endian, so it always appears as "REGS", eg: c000000004733e70 5245475300000000 0000000000000000 |REGS............| Acked-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220927150419.1503001-2-mpe@ellerman.id.au --- arch/powerpc/include/asm/ptrace.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 6c23d1d25dc7..2efec6d87049 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -99,7 +99,12 @@ struct pt_regs #define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)) +// Always displays as "REGS" in memory dumps +#ifdef CONFIG_CPU_BIG_ENDIAN #define STACK_FRAME_REGS_MARKER ASM_CONST(0x52454753) +#else +#define STACK_FRAME_REGS_MARKER ASM_CONST(0x53474552) +#endif #ifdef __powerpc64__ -- cgit v1.2.3-70-g09d2 From 335e1a91042764629fbbcd8c7e40379fa3762d35 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Tue, 27 Sep 2022 18:29:27 -0700 Subject: powerpc: Ignore DSI error caused by the copy/paste instruction MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The data storage interrupt (DSI) error will be generated when the paste operation is issued on the suspended Nest Accelerator (NX) window due to NX state changes. The hypervisor expects the partition to ignore this error during page fault handling. To differentiate DSI caused by an actual HW configuration or by the NX window, a new “ibm,pi-features” type value is defined. 
Byte 0, bit 3 of pi-attribute-specifier-type is now defined to indicate this DSI error. If this error is not ignored, user space can get a SIGBUS when the NX request is issued. This patch adds changes to read the ibm,pi-features property and to ignore the DSI error during page fault handling if the MMU_FTR_NX_DSI feature is set.
Signed-off-by: Haren Myneni [mpe: Mention PAPR version in comment] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/b9cd844b85eb8f70459109ce1b14e44c4cc85fa7.camel@linux.ibm.com --- arch/powerpc/include/asm/mmu.h | 5 ++++- arch/powerpc/kernel/prom.c | 36 ++++++++++++++++++++++++++---------- arch/powerpc/mm/fault.c | 17 ++++++++++++++++- 3 files changed, 46 insertions(+), 12 deletions(-) (limited to 'arch/powerpc/include')
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 39057320e436..94b981152667 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -120,6 +120,9 @@ */ #define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000) +// NX paste RMA reject in DSI +#define MMU_FTR_NX_DSI ASM_CONST(0x80000000) + /* MMU feature bit sets for various CPUs */ #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 (MMU_FTR_HPTE_TABLE | MMU_FTR_TLBIEL | MMU_FTR_16M_PAGE) #define MMU_FTRS_POWER MMU_FTRS_DEFAULT_HPTE_ARCH_V2 @@ -181,7 +184,7 @@ enum { #endif #ifdef CONFIG_PPC_RADIX_MMU MMU_FTR_TYPE_RADIX | - MMU_FTR_GTSE | + MMU_FTR_GTSE | MMU_FTR_NX_DSI | #endif /* CONFIG_PPC_RADIX_MMU */ #endif #ifdef CONFIG_PPC_KUAP
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index a730b951b64b..2e7a04dab2f7 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -137,7 +137,7 @@ static void __init move_device_tree(void) } /* - * ibm,pa-features is a per-cpu property that contains a string of + * ibm,pa/pi-features is a per-cpu property that contains a string of * attribute descriptors, each of which has a 2 byte header plus up * to 254 bytes worth of processor attribute bits. First header * byte specifies the number of bytes following the header. @@ -149,15 +149,17 @@ static void __init move_device_tree(void) * is supported/not supported. Note that the bit numbers are * big-endian to match the definition in PAPR. */ -static struct ibm_pa_feature { +struct ibm_feature { unsigned long cpu_features; /* CPU_FTR_xxx bit */ unsigned long mmu_features; /* MMU_FTR_xxx bit */ unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */ - unsigned char pabyte; /* byte number in ibm,pa-features */ + unsigned char pabyte; /* byte number in ibm,pa/pi-features */ unsigned char pabit; /* bit number (big-endian) */ unsigned char invert; /* if 1, pa bit set => clear feature */ -} ibm_pa_features[] __initdata = { +}; + +static struct ibm_feature ibm_pa_features[] __initdata = { { .pabyte = 0, .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU }, { .pabyte = 0, .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU }, { .pabyte = 0, .pabit = 3, .cpu_features = CPU_FTR_CTRL }, @@ -179,9 +181,19 @@ static struct ibm_pa_feature { { .pabyte = 64, .pabit = 0, .cpu_features = CPU_FTR_DAWR1 }, }; +/* + * The ibm,pi-features property provides support for processor-specific + * options not described in ibm,pa-features. Right now use byte 0, bit 3, + * which indicates the occurrence of a DSI interrupt when a paste operation + * is issued on the suspended NX window.
+ */ +static struct ibm_feature ibm_pi_features[] __initdata = { + { .pabyte = 0, .pabit = 3, .mmu_features = MMU_FTR_NX_DSI }, +}; + static void __init scan_features(unsigned long node, const unsigned char *ftrs, unsigned long tablelen, - struct ibm_pa_feature *fp, + struct ibm_feature *fp, unsigned long ft_size) { unsigned long i, len, bit; @@ -218,17 +230,18 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs, } } -static void __init check_cpu_pa_features(unsigned long node) +static void __init check_cpu_features(unsigned long node, char *name, + struct ibm_feature *fp, + unsigned long size) { const unsigned char *pa_ftrs; int tablelen; - pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen); + pa_ftrs = of_get_flat_dt_prop(node, name, &tablelen); if (pa_ftrs == NULL) return; - scan_features(node, pa_ftrs, tablelen, - ibm_pa_features, ARRAY_SIZE(ibm_pa_features)); + scan_features(node, pa_ftrs, tablelen, fp, size); } #ifdef CONFIG_PPC_64S_HASH_MMU @@ -380,7 +393,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node, identify_cpu(0, be32_to_cpup(prop)); check_cpu_feature_properties(node); - check_cpu_pa_features(node); + check_cpu_features(node, "ibm,pa-features", ibm_pa_features, + ARRAY_SIZE(ibm_pa_features)); + check_cpu_features(node, "ibm,pi-features", ibm_pi_features, + ARRAY_SIZE(ibm_pi_features)); } identical_pvr_fixup(node); diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 1566804e4b3d..2bef19cc1b98 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -371,7 +371,22 @@ static void sanity_check_fault(bool is_write, bool is_user, #elif defined(CONFIG_PPC_8xx) #define page_fault_is_bad(__err) ((__err) & DSISR_NOEXEC_OR_G) #elif defined(CONFIG_PPC64) -#define page_fault_is_bad(__err) ((__err) & DSISR_BAD_FAULT_64S) +static int page_fault_is_bad(unsigned long err) +{ + unsigned long flag = DSISR_BAD_FAULT_64S; + + /* + * PAPR+ v2.11 § 14.15.3.4.1 (unreleased) + * If byte 0, bit 3 of pi-attribute-specifier-type in + * ibm,pi-features property is defined, ignore the DSI error + * which is caused by the paste instruction on the + * suspended NX window. + */ + if (mmu_has_feature(MMU_FTR_NX_DSI)) + flag &= ~DSISR_BAD_COPYPASTE; + + return err & flag; +} #else #define page_fault_is_bad(__err) ((__err) & DSISR_BAD_FAULT_32S) #endif -- cgit v1.2.3-70-g09d2 From d368e0c478a628f36680650f8d1d1634037b046e Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 7 Sep 2022 13:49:41 +0530 Subject: powerpc/mm/book3s/hash: Rename flush_tlb_pmd_range This function does the hash page table update. 
Hence rename it to indicate this better to avoid confusion with flush_pmd_tlb_range() Signed-off-by: Aneesh Kumar K.V [mpe: Drop unnecessary extern] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220907081941.209501-1-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 4 +--- arch/powerpc/mm/book3s64/hash_pgtable.c | 2 +- arch/powerpc/mm/book3s64/hash_tlb.c | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h index 8b762f282190..fab8332fe1ad 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h @@ -112,13 +112,11 @@ static inline void hash__flush_tlb_kernel_range(unsigned long start, struct mmu_gather; extern void hash__tlb_flush(struct mmu_gather *tlb); -void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr); #ifdef CONFIG_PPC_64S_HASH_MMU /* Private function for use by PCI IO mapping code */ extern void __flush_hash_table_range(unsigned long start, unsigned long end); -extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, - unsigned long addr); +void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr); #else static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { } #endif diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c index 28332001bd87..747492edb75a 100644 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c @@ -256,7 +256,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres * the __collapse_huge_page_copy can result in copying * the old content. */ - flush_tlb_pmd_range(vma->vm_mm, &pmd, address); + flush_hash_table_pmd_range(vma->vm_mm, &pmd, address); return pmd; } diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c index eb0bccaf221e..a64ea0a7ef96 100644 --- a/arch/powerpc/mm/book3s64/hash_tlb.c +++ b/arch/powerpc/mm/book3s64/hash_tlb.c @@ -221,7 +221,7 @@ void __flush_hash_table_range(unsigned long start, unsigned long end) local_irq_restore(flags); } -void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr) +void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr) { pte_t *pte; pte_t *start_pte; -- cgit v1.2.3-70-g09d2 From 41dc056391b334fae646b55ee020bfa8f67b60c8 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 30 Sep 2022 18:27:04 +1000 Subject: powerpc: Add hardware description string Create a hardware description string, which we will use to record various details of the hardware platform we are running on. Print the accumulated description at boot, and use it to set the generic description which is printed in oopses. To begin with add ppc_md.name, aka the "machine description". 
Example output at boot with the full series applied: Linux version 6.0.0-rc2-gcc-11.1.0-00199-g893f9007a5ce-dirty (michael@alpine1-p1) (powerpc64-linux-gcc (GCC) 11.1.0, GNU ld (GNU Binutils) 2.36.1) #844 SMP Thu Sep 29 22:29:53 AEST 2022 Hardware name: IBM pSeries (emulated by qemu) POWER9 (raw) 0x4e1200 0xf000005 of:SLOF,git-5b4c5a pSeries printk: bootconsole [udbg0] enabled Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220930082709.55830-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/setup.h | 2 ++ arch/powerpc/kernel/setup-common.c | 19 ++++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 85143849a586..e29e83f8a89c 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -88,6 +88,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long pp, unsigned long r6, unsigned long r7, unsigned long kbase); +extern struct seq_buf ppc_hw_desc; + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index dd98f43bd685..6d041993a45d 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -25,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -588,6 +590,15 @@ static __init int add_pcspkr(void) device_initcall(add_pcspkr); #endif /* CONFIG_PCSPKR_PLATFORM */ +static char ppc_hw_desc_buf[128] __initdata; + +struct seq_buf ppc_hw_desc __initdata = { + .buffer = ppc_hw_desc_buf, + .size = sizeof(ppc_hw_desc_buf), + .len = 0, + .readpos = 0, +}; + static __init void probe_machine(void) { extern struct machdep_calls __machine_desc_start; @@ -628,7 +639,13 @@ static __init void probe_machine(void) for (;;); } - printk(KERN_INFO "Using %s machine description\n", ppc_md.name); + // Append the machine name to other info we've gathered + seq_buf_puts(&ppc_hw_desc, ppc_md.name); + + // Set the generic hardware description shown in oopses + dump_stack_set_arch_desc(ppc_hw_desc.buffer); + + pr_info("Hardware name: %s\n", ppc_hw_desc.buffer); } /* Match a class of boards, not a specific device configuration. */ -- cgit v1.2.3-70-g09d2 From 0fa6831811f62cfc10415d731bcf9fde2647ad81 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 4 Oct 2022 15:11:57 +1000 Subject: powerpc/64: Fix msr_check_and_set/clear MSR[EE] race irq soft-masking means that when Linux irqs are disabled, the MSR[EE] value can change from 1 to 0 asynchronously: if a masked interrupt of the PACA_IRQ_MUST_HARD_MASK variety fires while irqs are disabled, the masked handler will return with MSR[EE]=0. This means a sequence like mtmsr(mfmsr() | MSR_FP) is racy if it can be called with local irqs disabled, unless a hard_irq_disable has been done. 
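As a rough sketch (not the exact kernel code), the race and the fix added below look like this:

	/*
	 * Racy when irqs are soft-disabled: a masked interrupt can fire
	 * between the mfmsr() and the mtmsr(), returning with MSR[EE]=0,
	 * which the mtmsr() would then incorrectly set back to 1.
	 */
	mtmsr(mfmsr() | MSR_FP);

	/*
	 * Race-free (see mtmsr_isync_irqsafe() below): when irqs are
	 * disabled, keep EE clear and record the hard disable so the
	 * soft-mask state stays consistent.
	 */
	if (arch_irqs_disabled()) {
		msr &= ~MSR_EE;
		mtmsr_isync(msr);
		irq_soft_mask_set(IRQS_ALL_DISABLED);
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else {
		mtmsr_isync(msr);
	}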
Reported-by: Sachin Sant Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20221004051157.308999-2-npiggin@gmail.com --- arch/powerpc/include/asm/hw_irq.h | 24 ++++++++++++++++++++++++ arch/powerpc/kernel/process.c | 4 ++-- 2 files changed, 26 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index e8de249339d8..77fa88c2aed0 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -471,6 +471,30 @@ static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned l } #endif /* CONFIG_PPC64 */ +static inline unsigned long mtmsr_isync_irqsafe(unsigned long msr) +{ +#ifdef CONFIG_PPC64 + if (arch_irqs_disabled()) { + /* + * With soft-masking, MSR[EE] can change from 1 to 0 + * asynchronously when irqs are disabled, and we don't want to + * set MSR[EE] back to 1 here if that has happened. A race-free + * way to do this is ensure EE is already 0. Another way it + * could be done is with a RESTART_TABLE handler, but that's + * probably overkill here. + */ + msr &= ~MSR_EE; + mtmsr_isync(msr); + irq_soft_mask_set(IRQS_ALL_DISABLED); + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; + } else +#endif + mtmsr_isync(msr); + + return msr; +} + + #define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 0fbda89cd1bb..37df0428e4fb 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -127,7 +127,7 @@ unsigned long notrace msr_check_and_set(unsigned long bits) newmsr |= MSR_VSX; if (oldmsr != newmsr) - mtmsr_isync(newmsr); + newmsr = mtmsr_isync_irqsafe(newmsr); return newmsr; } @@ -145,7 +145,7 @@ void notrace __msr_check_and_clear(unsigned long bits) newmsr &= ~MSR_VSX; if (oldmsr != newmsr) - mtmsr_isync(newmsr); + mtmsr_isync_irqsafe(newmsr); } EXPORT_SYMBOL(__msr_check_and_clear); -- cgit v1.2.3-70-g09d2 From 94746890202cf18e5266b4de77895243e55b0a79 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 6 Oct 2022 23:34:17 +1100 Subject: powerpc: Don't add __powerpc_ prefix to syscall entry points When using syscall wrappers the __SYSCALL_DEFINEx() and related macros add a "__powerpc_" prefix to all syscall entry points. So for example sys_mmap becomes __powerpc_sys_mmap. This risks breaking workflows and tools that expect the old naming scheme. At a minimum setting a breakpoint on eg. sys_mmap with gdb no longer works. There seems to be no compelling reason to add the "__powerpc_" prefix, other than that it follows what some other arches do (x86, arm64, s390). But unlike other arches powerpc doesn't always enable syscall wrappers, so the syscall entry points can change name depending on CONFIG options. For those reasons drop the "__powerpc_" prefix, reverting to the existing naming. Doing so reveals two prototypes in signal.h that have the incorrect type when syscall wrappers are enabled. There are already prototypes for both functions in syscalls.h, so drop the ones from signal.h. 
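For illustration, a simplified sketch of the resulting expansion (close used as a hypothetical example; the real macros also coerce the argument types):

	/* SYSCALL_DEFINE1(close, unsigned int, fd) now emits roughly: */
	long sys_close(const struct pt_regs *regs)	/* was __powerpc_sys_close */
	{
		return __se_sys_close(regs->gpr[3]);
	}

so breakpoints on sys_* names behave the same whether or not syscall wrappers are enabled.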
Fixes: 7e92e01b7245 ("powerpc: Provide syscall wrapper") Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20221006135940.1223988-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/syscall_wrapper.h | 18 ++++++++---------- arch/powerpc/include/asm/syscalls.h | 2 +- arch/powerpc/kernel/signal.h | 3 --- arch/powerpc/kernel/systbl.c | 3 +-- 4 files changed, 10 insertions(+), 16 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/syscall_wrapper.h b/arch/powerpc/include/asm/syscall_wrapper.h index 75b41b173f7a..67486c67e8a2 100644 --- a/arch/powerpc/include/asm/syscall_wrapper.h +++ b/arch/powerpc/include/asm/syscall_wrapper.h @@ -16,11 +16,11 @@ struct pt_regs; ,,regs->gpr[6],,regs->gpr[7],,regs->gpr[8]) #define __SYSCALL_DEFINEx(x, name, ...) \ - long __powerpc_sys##name(const struct pt_regs *regs); \ - ALLOW_ERROR_INJECTION(__powerpc_sys##name, ERRNO); \ + long sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(sys##name, ERRNO); \ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ - long __powerpc_sys##name(const struct pt_regs *regs) \ + long sys##name(const struct pt_regs *regs) \ { \ return __se_sys##name(SC_POWERPC_REGS_TO_ARGS(x,__VA_ARGS__)); \ } \ @@ -35,17 +35,15 @@ struct pt_regs; #define SYSCALL_DEFINE0(sname) \ SYSCALL_METADATA(_##sname, 0); \ - long __powerpc_sys_##sname(const struct pt_regs *__unused); \ - ALLOW_ERROR_INJECTION(__powerpc_sys_##sname, ERRNO); \ - long __powerpc_sys_##sname(const struct pt_regs *__unused) + long sys_##sname(const struct pt_regs *__unused); \ + ALLOW_ERROR_INJECTION(sys_##sname, ERRNO); \ + long sys_##sname(const struct pt_regs *__unused) #define COND_SYSCALL(name) \ - long __powerpc_sys_##name(const struct pt_regs *regs); \ - long __weak __powerpc_sys_##name(const struct pt_regs *regs) \ + long sys_##name(const struct pt_regs *regs); \ + long __weak sys_##name(const struct pt_regs *regs) \ { \ return sys_ni_syscall(); \ } -#define SYS_NI(name) SYSCALL_ALIAS(__powerpc_sys_##name, sys_ni_posix_timers); - #endif // __ASM_POWERPC_SYSCALL_WRAPPER_H diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 49bbc3e0733d..9840d572da55 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -124,7 +124,7 @@ long sys_ppc_fadvise64_64(int fd, int advice, #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) #define __SYSCALL(nr, entry) \ - long __powerpc_##entry(const struct pt_regs *regs); + long entry(const struct pt_regs *regs); #ifdef CONFIG_PPC64 #include diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h index 618aeccdf691..a429c57ed433 100644 --- a/arch/powerpc/kernel/signal.h +++ b/arch/powerpc/kernel/signal.h @@ -196,9 +196,6 @@ extern int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, #else /* CONFIG_PPC64 */ -extern long sys_rt_sigreturn(void); -extern long sys_sigreturn(void); - static inline int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct task_struct *tsk) { diff --git a/arch/powerpc/kernel/systbl.c b/arch/powerpc/kernel/systbl.c index 9d7c5a596171..4305f2a2162f 100644 --- a/arch/powerpc/kernel/systbl.c +++ b/arch/powerpc/kernel/systbl.c @@ -20,8 +20,7 @@ #undef __SYSCALL #ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER -#define __SYSCALL(nr, entry) [nr] = __powerpc_##entry, -#define __powerpc_sys_ni_syscall sys_ni_syscall +#define __SYSCALL(nr, entry) [nr] = entry, #else /* 
* Coerce syscall handlers with arbitrary parameters to common type -- cgit v1.2.3-70-g09d2