Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/Kconfig | 20
-rw-r--r--  arch/powerpc/boot/Makefile | 2
-rw-r--r--  arch/powerpc/boot/dts/a4m072.dts | 6
-rw-r--r--  arch/powerpc/boot/dts/charon.dts | 8
-rw-r--r--  arch/powerpc/boot/dts/digsy_mtc.dts | 8
-rw-r--r--  arch/powerpc/boot/dts/lite5200.dts | 8
-rw-r--r--  arch/powerpc/boot/dts/lite5200b.dts | 8
-rw-r--r--  arch/powerpc/boot/dts/media5200.dts | 8
-rw-r--r--  arch/powerpc/boot/dts/mpc5200b.dtsi | 6
-rw-r--r--  arch/powerpc/boot/dts/mucmc52.dts | 6
-rw-r--r--  arch/powerpc/boot/dts/o2d.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/o2d.dtsi | 2
-rw-r--r--  arch/powerpc/boot/dts/o2dnt2.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/o3dnt.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/pcm030.dts | 6
-rw-r--r--  arch/powerpc/boot/dts/pcm032.dts | 8
-rw-r--r--  arch/powerpc/boot/dts/tqm5200.dts | 8
-rw-r--r--  arch/powerpc/boot/serial.c | 2
-rwxr-xr-x  arch/powerpc/boot/wrapper | 2
-rw-r--r--  arch/powerpc/configs/cell_defconfig | 1
-rw-r--r--  arch/powerpc/configs/g5_defconfig | 1
-rw-r--r--  arch/powerpc/configs/maple_defconfig | 1
-rw-r--r--  arch/powerpc/configs/microwatt_defconfig | 1
-rw-r--r--  arch/powerpc/configs/pasemi_defconfig | 1
-rw-r--r--  arch/powerpc/configs/powernv_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ps3_defconfig | 1
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 1
-rw-r--r--  arch/powerpc/configs/skiroot_defconfig | 1
-rw-r--r--  arch/powerpc/include/asm/asm-const.h | 2
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 8
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h | 2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 10
-rw-r--r--  arch/powerpc/include/asm/book3s/64/radix.h | 3
-rw-r--r--  arch/powerpc/include/asm/io.h | 4
-rw-r--r--  arch/powerpc/include/asm/iommu.h | 6
-rw-r--r--  arch/powerpc/include/asm/kexec.h | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 4
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 13
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h | 21
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pte-8xx.h | 22
-rw-r--r--  arch/powerpc/include/asm/nohash/64/pgtable.h | 5
-rw-r--r--  arch/powerpc/include/asm/nohash/pte-book3e.h | 18
-rw-r--r--  arch/powerpc/include/asm/nohash/tlbflush.h | 15
-rw-r--r--  arch/powerpc/include/asm/paravirt.h | 40
-rw-r--r--  arch/powerpc/include/asm/pgtable-types.h | 18
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 4
-rw-r--r--  arch/powerpc/include/asm/static_call.h | 28
-rw-r--r--  arch/powerpc/include/asm/uaccess.h | 6
-rw-r--r--  arch/powerpc/include/uapi/asm/perf_regs.h | 28
-rw-r--r--  arch/powerpc/kernel/Makefile | 2
-rw-r--r--  arch/powerpc/kernel/align.c | 1
-rw-r--r--  arch/powerpc/kernel/eeh.c | 12
-rw-r--r--  arch/powerpc/kernel/firmware.c | 7
-rw-r--r--  arch/powerpc/kernel/head_8xx.S | 2
-rw-r--r--  arch/powerpc/kernel/head_booke.h | 15
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint_constraints.c | 15
-rw-r--r--  arch/powerpc/kernel/interrupt.c | 2
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 3
-rw-r--r--  arch/powerpc/kernel/static_call.c | 37
-rw-r--r--  arch/powerpc/kernel/swsusp_64.c | 5
-rw-r--r--  arch/powerpc/kernel/swsusp_asm64.S | 1
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 3
-rw-r--r--  arch/powerpc/kernel/time.c | 22
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 12
-rw-r--r--  arch/powerpc/kexec/core.c | 13
-rw-r--r--  arch/powerpc/kexec/core_32.c | 2
-rw-r--r--  arch/powerpc/kexec/core_64.c | 2
-rw-r--r--  arch/powerpc/kexec/file_load_64.c | 1
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 30
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 9
-rw-r--r--  arch/powerpc/kvm/booke.c | 16
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 4
-rw-r--r--  arch/powerpc/lib/feature-fixups.c | 11
-rw-r--r--  arch/powerpc/lib/sstep.c | 197
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c | 2
-rw-r--r--  arch/powerpc/mm/book3s64/radix_pgtable.c | 7
-rw-r--r--  arch/powerpc/mm/mem.c | 4
-rw-r--r--  arch/powerpc/mm/mmu_decl.h | 4
-rw-r--r--  arch/powerpc/mm/nohash/Makefile | 4
-rw-r--r--  arch/powerpc/mm/nohash/fsl_book3e.c (renamed from arch/powerpc/mm/nohash/fsl_booke.c) | 76
-rw-r--r--  arch/powerpc/mm/nohash/kaslr_booke.c | 2
-rw-r--r--  arch/powerpc/mm/nohash/tlb.c | 6
-rw-r--r--  arch/powerpc/mm/nohash/tlb_low.S | 8
-rw-r--r--  arch/powerpc/mm/nohash/tlb_low_64e.S | 8
-rw-r--r--  arch/powerpc/mm/pgtable.c | 2
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 2
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c | 2
-rw-r--r--  arch/powerpc/perf/perf_regs.c | 4
-rw-r--r--  arch/powerpc/perf/power10-events-list.h | 8
-rw-r--r--  arch/powerpc/perf/power10-pmu.c | 44
-rw-r--r--  arch/powerpc/platforms/44x/fsp2.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/ppc476.c | 4
-rw-r--r--  arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c | 7
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile | 4
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c | 7
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 12
-rw-r--r--  arch/powerpc/platforms/book3s/vas-api.c | 4
-rw-r--r--  arch/powerpc/platforms/powermac/pmac.h | 1
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/opal-dump.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/opal-prd.c | 12
-rw-r--r--  arch/powerpc/platforms/powernv/pci-sriov.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 298
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 14
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c | 34
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 3
-rw-r--r--  arch/powerpc/sysdev/dcr-low.S | 2
-rw-r--r--  arch/powerpc/xmon/xmon.c | 3
109 files changed, 832 insertions, 574 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ba5b66189358..dea74d7717c0 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -138,7 +138,8 @@ config PPC
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
select ARCH_HAS_SET_MEMORY
- select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
+ select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S || PPC_8xx || 40x) && !HIBERNATION
+ select ARCH_HAS_STRICT_KERNEL_RWX if FSL_BOOKE && !HIBERNATION && !RANDOMIZE_BASE
select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX && !PPC_BOOK3S_32
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
@@ -148,9 +149,10 @@ config PPC
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
+ select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
- select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC32 || PPC_BOOK3S_64
+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_USE_MEMTEST
@@ -190,7 +192,7 @@ config PPC
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14
- select HAVE_ARCH_KFENCE if PPC32
+ select HAVE_ARCH_KFENCE if PPC_BOOK3S_32 || PPC_8xx || 40x
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
@@ -241,6 +243,7 @@ config PPC
select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
+ select HAVE_STATIC_CALL if PPC32
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select HUGETLB_PAGE_SIZE_VARIABLE if PPC_BOOK3S_64 && HUGETLB_PAGE
@@ -707,6 +710,7 @@ config ARCH_MEMORY_PROBE
choice
prompt "Page size"
+ default PPC_64K_PAGES if PPC_BOOK3S_64
default PPC_4K_PAGES
help
Select the kernel logical page size. Increasing the page size
@@ -778,7 +782,8 @@ config DATA_SHIFT_BOOL
bool "Set custom data alignment"
depends on ADVANCED_OPTIONS
depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE
- depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX)
+ depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX) || \
+ FSL_BOOKE
help
This option allows you to set the kernel data alignment. When
RAM is mapped by blocks, the alignment needs to fit the size and
@@ -791,11 +796,13 @@ config DATA_SHIFT
default 24 if STRICT_KERNEL_RWX && PPC64
range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
+ range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_FSL_BOOKE
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
default 23 if STRICT_KERNEL_RWX && PPC_8xx
default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA
default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
+ default 24 if STRICT_KERNEL_RWX && FSL_BOOKE
default PPC_PAGE_SHIFT
help
On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.
@@ -1123,7 +1130,10 @@ config LOWMEM_CAM_NUM_BOOL
config LOWMEM_CAM_NUM
depends on FSL_BOOKE
int "Number of CAMs to use to map low memory" if LOWMEM_CAM_NUM_BOOL
- default 3
+ default 3 if !STRICT_KERNEL_RWX
+ default 9 if DATA_SHIFT >= 24
+ default 12 if DATA_SHIFT >= 22
+ default 15
config DYNAMIC_MEMSTART
bool "Enable page aligned dynamic load address for kernel"
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 089ee3ea55c8..9993c6256ad2 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -123,7 +123,7 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
oflib.c ofconsole.c cuboot.c
src-wlib-$(CONFIG_PPC_MPC52xx) += mpc52xx-psc.c
-src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S opal.c
+src-wlib-$(CONFIG_PPC_POWERNV) += opal-calls.S opal.c
ifndef CONFIG_PPC64_BOOT_WRAPPER
src-wlib-y += crtsavres.S
endif
diff --git a/arch/powerpc/boot/dts/a4m072.dts b/arch/powerpc/boot/dts/a4m072.dts
index a9cef5726422..d4270a2ec6c7 100644
--- a/arch/powerpc/boot/dts/a4m072.dts
+++ b/arch/powerpc/boot/dts/a4m072.dts
@@ -140,8 +140,8 @@
clock-frequency = <0>; /* From boot loader */
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
- 0x02000000 0 0x90000000 0x90000000 0 0x10000000
- 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000>,
+ <0x02000000 0 0x90000000 0x90000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
};
};
diff --git a/arch/powerpc/boot/dts/charon.dts b/arch/powerpc/boot/dts/charon.dts
index 408b486b13df..ea6e76ae2545 100644
--- a/arch/powerpc/boot/dts/charon.dts
+++ b/arch/powerpc/boot/dts/charon.dts
@@ -35,7 +35,7 @@
};
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x00000000 0x08000000>; // 128MB
};
@@ -225,8 +225,8 @@
clock-frequency = <0>; // From boot loader
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
- 0x02000000 0 0x90000000 0x90000000 0 0x10000000
- 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000>,
+ <0x02000000 0 0x90000000 0x90000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
};
};
diff --git a/arch/powerpc/boot/dts/digsy_mtc.dts b/arch/powerpc/boot/dts/digsy_mtc.dts
index 0e5e9d3acf79..57024a4c1e7d 100644
--- a/arch/powerpc/boot/dts/digsy_mtc.dts
+++ b/arch/powerpc/boot/dts/digsy_mtc.dts
@@ -16,7 +16,7 @@
model = "intercontrol,digsy-mtc";
compatible = "intercontrol,digsy-mtc";
- memory {
+ memory@0 {
reg = <0x00000000 0x02000000>; // 32MB
};
@@ -98,9 +98,9 @@
clock-frequency = <0>; // From boot loader
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
- 0x02000000 0 0x90000000 0x90000000 0 0x10000000
- 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000>,
+ <0x02000000 0 0x90000000 0x90000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
};
localbus {
diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts
index cb2782dd6132..b9d8487813b4 100644
--- a/arch/powerpc/boot/dts/lite5200.dts
+++ b/arch/powerpc/boot/dts/lite5200.dts
@@ -32,7 +32,7 @@
};
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x00000000 0x04000000>; // 64MB
};
@@ -283,9 +283,9 @@
clock-frequency = <0>; // From boot loader
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
- 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
- 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000>,
+ <0x02000000 0 0xa0000000 0xa0000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
};
localbus {
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
index 2b86c81f9048..7e2d91c7cb66 100644
--- a/arch/powerpc/boot/dts/lite5200b.dts
+++ b/arch/powerpc/boot/dts/lite5200b.dts
@@ -31,7 +31,7 @@
led4 { gpios = <&gpio_simple 2 1>; };
};
- memory {
+ memory@0 {
reg = <0x00000000 0x10000000>; // 256MB
};
@@ -116,9 +116,9 @@
clock-frequency = <0>; // From boot loader
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
- 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
- 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000>,
+ <0x02000000 0 0xa0000000 0xa0000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
};
localbus {
diff --git a/arch/powerpc/boot/dts/media5200.dts b/arch/powerpc/boot/dts/media5200.dts
index 61cae9dcddef..96524ede16cd 100644
--- a/arch/powerpc/boot/dts/media5200.dts
+++ b/arch/powerpc/boot/dts/media5200.dts
@@ -32,7 +32,7 @@
};
};
- memory {
+ memory@0 {
reg = <0x00000000 0x08000000>; // 128MB RAM
};
@@ -96,9 +96,9 @@
0xe000 0 0 1 &media5200_fpga 0 5 // CoralIP
>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
- 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
- 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000>,
+ <0x02000000 0 0xa0000000 0xa0000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
interrupt-parent = <&mpc5200_pic>;
};
diff --git a/arch/powerpc/boot/dts/mpc5200b.dtsi b/arch/powerpc/boot/dts/mpc5200b.dtsi
index 648fe31795f4..ffa82c7e1055 100644
--- a/arch/powerpc/boot/dts/mpc5200b.dtsi
+++ b/arch/powerpc/boot/dts/mpc5200b.dtsi
@@ -33,7 +33,7 @@
};
};
- memory: memory {
+ memory: memory@0 {
device_type = "memory";
reg = <0x00000000 0x04000000>; // 64MB
};
@@ -276,7 +276,9 @@
clock-frequency = <0>; // From boot loader
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- // ranges = need to add
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000>,
+ <0x02000000 0 0x90000000 0x90000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
};
localbus: localbus {
diff --git a/arch/powerpc/boot/dts/mucmc52.dts b/arch/powerpc/boot/dts/mucmc52.dts
index c6c66306308d..e88a7bd4034d 100644
--- a/arch/powerpc/boot/dts/mucmc52.dts
+++ b/arch/powerpc/boot/dts/mucmc52.dts
@@ -106,9 +106,9 @@
0x8000 0 0 3 &mpc5200_pic 0 2 3
0x8000 0 0 4 &mpc5200_pic 0 1 3
>;
- ranges = <0x42000000 0 0x60000000 0x60000000 0 0x10000000
- 0x02000000 0 0x90000000 0x90000000 0 0x10000000
- 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x60000000 0x60000000 0 0x10000000>,
+ <0x02000000 0 0x90000000 0x90000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
};
localbus {
diff --git a/arch/powerpc/boot/dts/o2d.dts b/arch/powerpc/boot/dts/o2d.dts
index 24a46f65e529..e0a8d3034417 100644
--- a/arch/powerpc/boot/dts/o2d.dts
+++ b/arch/powerpc/boot/dts/o2d.dts
@@ -12,7 +12,7 @@
model = "ifm,o2d";
compatible = "ifm,o2d";
- memory {
+ memory@0 {
reg = <0x00000000 0x08000000>; // 128MB
};
diff --git a/arch/powerpc/boot/dts/o2d.dtsi b/arch/powerpc/boot/dts/o2d.dtsi
index 6661955a2be4..b55a9e5bd828 100644
--- a/arch/powerpc/boot/dts/o2d.dtsi
+++ b/arch/powerpc/boot/dts/o2d.dtsi
@@ -19,7 +19,7 @@
model = "ifm,o2d";
compatible = "ifm,o2d";
- memory {
+ memory@0 {
reg = <0x00000000 0x04000000>; // 64MB
};
diff --git a/arch/powerpc/boot/dts/o2dnt2.dts b/arch/powerpc/boot/dts/o2dnt2.dts
index eeba7f5507d5..c2eedbd1f5fc 100644
--- a/arch/powerpc/boot/dts/o2dnt2.dts
+++ b/arch/powerpc/boot/dts/o2dnt2.dts
@@ -12,7 +12,7 @@
model = "ifm,o2dnt2";
compatible = "ifm,o2d";
- memory {
+ memory@0 {
reg = <0x00000000 0x08000000>; // 128MB
};
diff --git a/arch/powerpc/boot/dts/o3dnt.dts b/arch/powerpc/boot/dts/o3dnt.dts
index fd00396b0593..e4c1bdd41271 100644
--- a/arch/powerpc/boot/dts/o3dnt.dts
+++ b/arch/powerpc/boot/dts/o3dnt.dts
@@ -12,7 +12,7 @@
model = "ifm,o3dnt";
compatible = "ifm,o2d";
- memory {
+ memory@0 {
reg = <0x00000000 0x04000000>; // 64MB
};
diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts
index b1bc731f7afd..5cee474dcc4c 100644
--- a/arch/powerpc/boot/dts/pcm030.dts
+++ b/arch/powerpc/boot/dts/pcm030.dts
@@ -90,9 +90,9 @@
0xc800 0 0 2 &mpc5200_pic 1 2 3
0xc800 0 0 3 &mpc5200_pic 1 3 3
0xc800 0 0 4 &mpc5200_pic 0 0 3>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
- 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
- 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000>,
+ <0x02000000 0 0xa0000000 0xa0000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
};
localbus {
diff --git a/arch/powerpc/boot/dts/pcm032.dts b/arch/powerpc/boot/dts/pcm032.dts
index 780e13d99e7b..d00f13b62510 100644
--- a/arch/powerpc/boot/dts/pcm032.dts
+++ b/arch/powerpc/boot/dts/pcm032.dts
@@ -20,7 +20,7 @@
model = "phytec,pcm032";
compatible = "phytec,pcm032";
- memory {
+ memory@0 {
reg = <0x00000000 0x08000000>; // 128MB
};
@@ -87,9 +87,9 @@
0xc800 0 0 2 &mpc5200_pic 1 2 3
0xc800 0 0 3 &mpc5200_pic 1 3 3
0xc800 0 0 4 &mpc5200_pic 0 0 3>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
- 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
- 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000>,
+ <0x02000000 0 0xa0000000 0xa0000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
};
localbus {
diff --git a/arch/powerpc/boot/dts/tqm5200.dts b/arch/powerpc/boot/dts/tqm5200.dts
index 9ed0bc78967e..372177b19e60 100644
--- a/arch/powerpc/boot/dts/tqm5200.dts
+++ b/arch/powerpc/boot/dts/tqm5200.dts
@@ -32,7 +32,7 @@
};
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x00000000 0x04000000>; // 64MB
};
@@ -200,8 +200,8 @@
clock-frequency = <0>; // From boot loader
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
- 0x02000000 0 0x90000000 0x90000000 0 0x10000000
- 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000>,
+ <0x02000000 0 0x90000000 0x90000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
};
};
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
index 9a19e5905485..54d2522be485 100644
--- a/arch/powerpc/boot/serial.c
+++ b/arch/powerpc/boot/serial.c
@@ -132,7 +132,7 @@ int serial_console_init(void)
else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart"))
rc = mpc5200_psc_console_init(devp, &serial_cd);
#endif
-#ifdef CONFIG_PPC64_BOOT_WRAPPER
+#ifdef CONFIG_PPC_POWERNV
else if (dt_is_compatible(devp, "ibm,opal-console-raw"))
rc = opal_console_init(devp, &serial_cd);
#endif
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 1cd82564c996..9184eda780fd 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -26,6 +26,8 @@
# Stop execution if any command fails
set -e
+export LC_ALL=C
+
# Allow for verbose output
if [ "$V" = 1 ]; then
set -x
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index cc2c0d51f493..7fd9e596ea33 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -36,7 +36,6 @@ CONFIG_GEN_RTC=y
CONFIG_BINFMT_MISC=m
CONFIG_IRQ_ALL_CPUS=y
CONFIG_NUMA=y
-CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_PCIEPORTBUS=y
CONFIG_NET=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 63d611cc160f..9d6212a8b195 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -26,6 +26,7 @@ CONFIG_CPU_FREQ_PMAC64=y
CONFIG_GEN_RTC=y
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
+CONFIG_PPC_4K_PAGES=y
CONFIG_PCI_MSI=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 9424c1e67e1c..c821a97f4a89 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -25,6 +25,7 @@ CONFIG_UDBG_RTAS_CONSOLE=y
CONFIG_GEN_RTC=y
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
+CONFIG_PPC_4K_PAGES=y
CONFIG_PCI_MSI=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/powerpc/configs/microwatt_defconfig b/arch/powerpc/configs/microwatt_defconfig
index 9465209b8c5b..07d87a4044b2 100644
--- a/arch/powerpc/configs/microwatt_defconfig
+++ b/arch/powerpc/configs/microwatt_defconfig
@@ -26,6 +26,7 @@ CONFIG_PPC_MICROWATT=y
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
CONFIG_CPU_FREQ=y
CONFIG_HZ_100=y
+CONFIG_PPC_4K_PAGES=y
# CONFIG_PPC_MEM_KEYS is not set
# CONFIG_SECCOMP is not set
# CONFIG_MQ_IOSCHED_KYBER is not set
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 78606b7e42df..e00a703581c3 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -22,7 +22,6 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_HZ_1000=y
-CONFIG_PPC_64K_PAGES=y
# CONFIG_SECCOMP is not set
CONFIG_PCI_MSI=y
CONFIG_PCCARD=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 8bfeea6c7de7..49f49c263935 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -62,7 +62,6 @@ CONFIG_MEMORY_FAILURE=y
CONFIG_HWPOISON_INJECT=m
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
-CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_PM=y
CONFIG_HOTPLUG_PCI=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 0ad2291337a7..203d0b7f0bb8 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -52,7 +52,6 @@ CONFIG_KEXEC_FILE=y
CONFIG_CRASH_DUMP=y
CONFIG_FA_DUMP=y
CONFIG_IRQ_ALL_CPUS=y
-CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_RPA=m
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index f300dcb937cc..7c95fab4b920 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -30,6 +30,7 @@ CONFIG_PS3_LPM=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
CONFIG_KEXEC=y
+CONFIG_PPC_4K_PAGES=y
# CONFIG_SPARSEMEM_VMEMMAP is not set
# CONFIG_COMPACTION is not set
CONFIG_SCHED_SMT=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index b183629f1bcf..de7641adb899 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -56,7 +56,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_RPA=m
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index b806a5d3a695..396d508730a1 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -43,7 +43,6 @@ CONFIG_KEXEC_FILE=y
CONFIG_PRESERVE_FA_DUMP=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_NUMA=y
-CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_CMDLINE="console=tty0 console=hvc0 ipr.fast_reboot=1 quiet"
# CONFIG_SECCOMP is not set
diff --git a/arch/powerpc/include/asm/asm-const.h b/arch/powerpc/include/asm/asm-const.h
index dbfa5e1e3198..bfb3c3534877 100644
--- a/arch/powerpc/include/asm/asm-const.h
+++ b/arch/powerpc/include/asm/asm-const.h
@@ -12,6 +12,4 @@
# define ASM_CONST(x) __ASM_CONST(x)
#endif
-#define UPD_CONSTR "<>"
-
#endif /* _ASM_POWERPC_ASM_CONST_H */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 6a53ef178bfd..fd594fdbd84d 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -27,14 +27,14 @@ static __inline__ int arch_atomic_read(const atomic_t *v)
{
int t;
- __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));
+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
return t;
}
static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
- __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
#define ATOMIC_OP(op, asm_op) \
@@ -320,14 +320,14 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
s64 t;
- __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));
+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
return t;
}
static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
- __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op) \
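
[Illustrative sketch, not part of the patch] The hunk above replaces the UPD_CONSTR indirection with the literal "m<>" constraint. A minimal example of how that constraint pairs with the %U/%X operand modifiers used in these asm templates:

static inline int read_word(const int *p)
{
	int val;

	/* "m<>" lets the compiler pick a pre-update or indexed address;
	 * %U1/%X1 then emit the matching 'u'/'x' suffix, e.g. lwzu or lwzx. */
	asm volatile("lwz%U1%X1 %0,%1" : "=r"(val) : "m<>"(*p));
	return val;
}
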
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index d959b0195ad9..674fe0e890dc 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -255,6 +255,8 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
int nid, pgprot_t prot);
int hash__remove_section_mapping(unsigned long start, unsigned long end);
+void hash__kernel_map_pages(struct page *page, int numpages, int enable);
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 5d34a8646f08..33e073d6b0c4 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1101,6 +1101,16 @@ static inline void vmemmap_remove_mapping(unsigned long start,
}
#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (radix_enabled())
+ radix__kernel_map_pages(page, numpages, enable);
+ else
+ hash__kernel_map_pages(page, numpages, enable);
+}
+#endif
+
static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte_raw(pmd_raw(pmd));
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 59cab558e2f0..d090d9612348 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -316,5 +316,8 @@ int radix__create_section_mapping(unsigned long start, unsigned long end,
int nid, pgprot_t prot);
int radix__remove_section_mapping(unsigned long start, unsigned long end);
#endif /* CONFIG_MEMORY_HOTPLUG */
+
+void radix__kernel_map_pages(struct page *page, int numpages, int enable);
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index f130783c8301..beba4979bff9 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -122,7 +122,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
{ \
u##size ret; \
__asm__ __volatile__("sync;"#insn"%U1%X1 %0,%1;twi 0,%0,0;isync"\
- : "=r" (ret) : "m"UPD_CONSTR (*addr) : "memory"); \
+ : "=r" (ret) : "m<>" (*addr) : "memory"); \
return ret; \
}
@@ -130,7 +130,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
static inline void name(volatile u##size __iomem *addr, u##size val) \
{ \
__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \
- : "=m"UPD_CONSTR (*addr) : "r" (val) : "memory"); \
+ : "=m<>" (*addr) : "r" (val) : "memory"); \
mmiowb_set_pending(); \
}
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index bf3b84128525..c361212ac160 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -280,12 +280,6 @@ extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);
#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
-static inline void iommu_save(void)
-{
- if (ppc_md.iommu_save)
- ppc_md.iommu_save();
-}
-
static inline void iommu_restore(void)
{
if (ppc_md.iommu_restore)
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 88d0d7cf3a79..c6f250eca3fb 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -79,7 +79,6 @@ extern int crash_wake_offline;
struct kimage;
struct pt_regs;
extern void default_machine_kexec(struct kimage *image);
-extern int default_machine_kexec_prepare(struct kimage *image);
extern void default_machine_crash_shutdown(struct pt_regs *regs);
extern int crash_shutdown_register(crash_shutdown_t handler);
extern int crash_shutdown_unregister(crash_shutdown_t handler);
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 19b6942c6969..fff391b9b97b 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -378,6 +378,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
rb |= 1; /* L field */
rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
}
+ /*
+ * This sets both bits of the B field in the PTE. 0b1x values are
+ * reserved, but those will have been filtered by kvmppc_do_h_enter.
+ */
rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8; /* B field */
return rb;
}
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 764f2732a821..a68311077d32 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -29,7 +29,6 @@ struct machdep_calls {
char *name;
#ifdef CONFIG_PPC64
#ifdef CONFIG_PM
- void (*iommu_save)(void);
void (*iommu_restore)(void);
#endif
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
@@ -43,7 +42,6 @@ struct machdep_calls {
void (*setup_arch)(void); /* Optional, may be NULL */
/* Optional, may be NULL. */
void (*show_cpuinfo)(struct seq_file *m);
- void (*show_percpuinfo)(struct seq_file *m, int i);
/* Returns the current operating frequency of "cpu" in Hz */
unsigned long (*get_proc_freq)(unsigned int cpu);
@@ -74,8 +72,6 @@ struct machdep_calls {
int (*set_rtc_time)(struct rtc_time *);
void (*get_rtc_time)(struct rtc_time *);
time64_t (*get_boot_time)(void);
- unsigned char (*rtc_read_val)(int addr);
- void (*rtc_write_val)(int addr, unsigned char val);
void (*calibrate_decr)(void);
@@ -141,8 +137,6 @@ struct machdep_calls {
May be NULL. */
void (*init)(void);
- void (*kgdb_map_scc)(void);
-
/*
* optional PCI "hooks"
*/
@@ -187,13 +181,6 @@ struct machdep_calls {
#ifdef CONFIG_KEXEC_CORE
void (*kexec_cpu_down)(int crash_shutdown, int secondary);
- /* Called to do what every setup is needed on image and the
- * reboot code buffer. Returns 0 on success.
- * Provide your own (maybe dummy) implementation if your platform
- * claims to support kexec.
- */
- int (*machine_kexec_prepare)(struct kimage *image);
-
/* Called to perform the _real_ kexec.
* Do NOT allocate memory or fail here. We are past the point of
* no return.
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index f06ae00f2a65..b67742e2a9b2 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -193,10 +193,12 @@ static inline pte_t pte_wrprotect(pte_t pte)
}
#endif
+#ifndef pte_mkexec
static inline pte_t pte_mkexec(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_EXEC);
}
+#endif
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
@@ -245,7 +247,7 @@ static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
unsigned long clr, unsigned long set, int huge)
{
- pte_basic_t *entry = &p->pte;
+ pte_basic_t *entry = (pte_basic_t *)p;
pte_basic_t old = pte_val(*p);
pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
int num, i;
@@ -306,30 +308,29 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#ifndef ptep_set_wrprotect
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
- unsigned long set = pte_val(pte_wrprotect(__pte(0)));
-
- pte_update(mm, addr, ptep, clr, set, 0);
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
+#endif
+#ifndef __ptep_set_access_flags
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
pte_t *ptep, pte_t entry,
unsigned long address,
int psize)
{
- pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
- pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
- unsigned long set = pte_val(entry) & pte_val(pte_set);
- unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
+ unsigned long set = pte_val(entry) &
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
int huge = psize > mmu_virtual_psize ? 1 : 0;
- pte_update(vma->vm_mm, address, ptep, clr, set, huge);
+ pte_update(vma->vm_mm, address, ptep, 0, set, huge);
flush_tlb_page(vma, address);
}
+#endif
static inline int pte_young(pte_t pte)
{
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index fcc48d590d88..1a89ebdc3acc 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -136,6 +136,28 @@ static inline pte_t pte_mkhuge(pte_t pte)
#define pte_mkhuge pte_mkhuge
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge);
+
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_update(mm, addr, ptep, 0, _PAGE_RO, 0);
+}
+#define ptep_set_wrprotect ptep_set_wrprotect
+
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+ pte_t entry, unsigned long address, int psize)
+{
+ unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_EXEC);
+ unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+ int huge = psize > mmu_virtual_psize ? 1 : 0;
+
+ pte_update(vma->vm_mm, address, ptep, clr, set, huge);
+
+ flush_tlb_page(vma, address);
+}
+#define __ptep_set_access_flags __ptep_set_access_flags
+
static inline unsigned long pgd_leaf_size(pgd_t pgd)
{
if (pgd_val(pgd) & _PMD_PAGE_8M)
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index d081704b13fb..9d2905a47410 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -118,11 +118,6 @@ static inline pte_t pte_wrprotect(pte_t pte)
return __pte(pte_val(pte) & ~_PAGE_RW);
}
-static inline pte_t pte_mkexec(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_EXEC);
-}
-
#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h
index 813918f40765..f798640422c2 100644
--- a/arch/powerpc/include/asm/nohash/pte-book3e.h
+++ b/arch/powerpc/include/asm/nohash/pte-book3e.h
@@ -48,7 +48,7 @@
#define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */
/* "Higher level" linux bit combinations */
-#define _PAGE_EXEC _PAGE_BAP_UX /* .. and was cache cleaned */
+#define _PAGE_EXEC (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */
#define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO (_PAGE_BAP_SR)
@@ -93,11 +93,11 @@
/* Permission masks used to generate the __P and __S table */
#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX)
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
#ifndef __ASSEMBLY__
static inline pte_t pte_mkprivileged(pte_t pte)
@@ -113,6 +113,16 @@ static inline pte_t pte_mkuser(pte_t pte)
}
#define pte_mkuser pte_mkuser
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ if (pte_val(pte) & _PAGE_BAP_UR)
+ return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX);
+ else
+ return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX);
+}
+#define pte_mkexec pte_mkexec
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h
index 1edb7243e515..c08d25e3e626 100644
--- a/arch/powerpc/include/asm/nohash/tlbflush.h
+++ b/arch/powerpc/include/asm/nohash/tlbflush.h
@@ -32,11 +32,26 @@ extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+#ifdef CONFIG_PPC_8xx
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ unsigned int pid = READ_ONCE(mm->context.id);
+
+ if (pid != MMU_NO_CONTEXT)
+ asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+
+static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ asm volatile ("tlbie %0; sync" : : "r" (vmaddr) : "memory");
+}
+#else
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
int tsize, int ind);
+#endif
#ifdef CONFIG_SMP
extern void flush_tlb_mm(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index bcb7b5f917be..eb7df559ae74 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -21,7 +21,7 @@ static inline bool is_shared_processor(void)
return static_branch_unlikely(&shared_processor);
}
-/* If bit 0 is set, the cpu has been preempted */
+/* If bit 0 is set, the cpu has been ceded, conferred, or preempted */
static inline u32 yield_count_of(int cpu)
{
__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
@@ -92,17 +92,47 @@ static inline void prod_cpu(int cpu)
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
+ /*
+ * The dispatch/yield bit alone is an imperfect indicator of
+ * whether the hypervisor has dispatched @cpu to run on a physical
+ * processor. When it is clear, @cpu is definitely not preempted.
+ * But when it is set, it means only that it *might* be, subject to
+ * other conditions. So we check other properties of the VM and
+ * @cpu first, resorting to the yield count last.
+ */
+
+ /*
+ * Hypervisor preemption isn't possible in dedicated processor
+ * mode by definition.
+ */
if (!is_shared_processor())
return false;
#ifdef CONFIG_PPC_SPLPAR
if (!is_kvm_guest()) {
- int first_cpu = cpu_first_thread_sibling(smp_processor_id());
+ int first_cpu;
/*
- * Preemption can only happen at core granularity. This CPU
- * is not preempted if one of the CPU of this core is not
- * preempted.
+ * The result of vcpu_is_preempted() is used in a
+ * speculative way, and is always subject to invalidation
+ * by events internal and external to Linux. While we can
+ * be called in preemptable context (in the Linux sense),
+ * we're not accessing per-cpu resources in a way that can
+ * race destructively with Linux scheduler preemption and
+ * migration, and callers can tolerate the potential for
+ * error introduced by sampling the CPU index without
+ * pinning the task to it. So it is permissible to use
+ * raw_smp_processor_id() here to defeat the preempt debug
+ * warnings that can arise from using smp_processor_id()
+ * in arbitrary contexts.
+ */
+ first_cpu = cpu_first_thread_sibling(raw_smp_processor_id());
+
+ /*
+ * The PowerVM hypervisor dispatches VMs on a whole core
+ * basis. So we know that a thread sibling of the local CPU
+ * cannot have been preempted by the hypervisor, even if it
+ * has called H_CONFER, which will set the yield bit.
*/
if (cpu_first_thread_sibling(cpu) == first_cpu)
return false;
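
[Illustrative sketch, not part of the patch] vcpu_is_preempted() is consumed by generic spin-wait paths; a hedged example of the typical caller pattern, with hypothetical names:

#include <linux/sched.h>

static bool spin_on_owner(struct task_struct *owner)
{
	while (READ_ONCE(owner->on_cpu)) {
		/* Stop busy-waiting if the owner's vCPU lost its physical CPU. */
		if (vcpu_is_preempted(task_cpu(owner)))
			return false;
		cpu_relax();
	}
	return true;
}
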
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index d11b4c61d686..efed0db7b1db 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -2,17 +2,33 @@
#ifndef _ASM_POWERPC_PGTABLE_TYPES_H
#define _ASM_POWERPC_PGTABLE_TYPES_H
+#if defined(__CHECKER__) || !defined(CONFIG_PPC32)
+#define STRICT_MM_TYPECHECKS
+#endif
+
/* PTE level */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
typedef struct { pte_basic_t pte, pte1, pte2, pte3; } pte_t;
-#else
+#elif defined(STRICT_MM_TYPECHECKS)
typedef struct { pte_basic_t pte; } pte_t;
+#else
+typedef pte_basic_t pte_t;
#endif
+
+#if defined(STRICT_MM_TYPECHECKS) || \
+ (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
#define __pte(x) ((pte_t) { (x) })
static inline pte_basic_t pte_val(pte_t x)
{
return x.pte;
}
+#else
+#define __pte(x) ((pte_t)(x))
+static inline pte_basic_t pte_val(pte_t x)
+{
+ return x;
+}
+#endif
/* PMD level */
#ifdef CONFIG_PPC64
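
[Illustrative sketch, not part of the patch] What STRICT_MM_TYPECHECKS buys: with the struct wrapper, raw values and pte_t cannot be mixed silently, so the __pte()/pte_val() accessors must be used:

static pte_t mark_accessed(pte_basic_t raw)
{
	/* pte_t pte = raw;  -- rejected by the compiler when pte_t is a struct */
	pte_t pte = __pte(raw);

	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}
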
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 1c538a9a11e0..7be24048b8d1 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -28,8 +28,8 @@
#else
#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
-#define SAVE_NVGPRS(base) stmw 13, GPR0+4*13(base)
-#define REST_NVGPRS(base) lmw 13, GPR0+4*13(base)
+#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
+#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); REST_10GPRS(22, base)
#endif
#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
diff --git a/arch/powerpc/include/asm/static_call.h b/arch/powerpc/include/asm/static_call.h
new file mode 100644
index 000000000000..0a0bc79bd1fa
--- /dev/null
+++ b/arch/powerpc/include/asm/static_call.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_STATIC_CALL_H
+#define _ASM_POWERPC_STATIC_CALL_H
+
+#define __PPC_SCT(name, inst) \
+ asm(".pushsection .text, \"ax\" \n" \
+ ".align 5 \n" \
+ ".globl " STATIC_CALL_TRAMP_STR(name) " \n" \
+ STATIC_CALL_TRAMP_STR(name) ": \n" \
+ inst " \n" \
+ " lis 12,2f@ha \n" \
+ " lwz 12,2f@l(12) \n" \
+ " mtctr 12 \n" \
+ " bctr \n" \
+ "1: li 3, 0 \n" \
+ " blr \n" \
+ "2: .long 0 \n" \
+ ".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \
+ ".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n" \
+ ".popsection \n")
+
+#define PPC_SCT_RET0 20 /* Offset of label 1 */
+#define PPC_SCT_DATA 28 /* Offset of label 2 */
+
+#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) __PPC_SCT(name, "b " #func)
+#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) __PPC_SCT(name, "blr")
+
+#endif /* _ASM_POWERPC_STATIC_CALL_H */
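
[Illustrative sketch, not part of the patch] The trampoline above backs the generic static_call API; a short usage example with hypothetical names:

#include <linux/static_call.h>

static int default_handler(int arg)
{
	return 0;
}

/* Emits the key plus the arch trampoline via ARCH_DEFINE_STATIC_CALL_TRAMP. */
DEFINE_STATIC_CALL(my_handler, default_handler);

static void example(int arg)
{
	static_call(my_handler)(arg);			/* branches through the trampoline */
	static_call_update(my_handler, default_handler);	/* re-patched by arch_static_call_transform() */
}
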
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 22c79ab40006..63316100080c 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -86,7 +86,7 @@ __pu_failed: \
"1: " op "%U1%X1 %0,%1 # put_user\n" \
EX_TABLE(1b, %l2) \
: \
- : "r" (x), "m"UPD_CONSTR (*addr) \
+ : "r" (x), "m<>" (*addr) \
: \
: label)
@@ -143,7 +143,7 @@ do { \
"1: "op"%U1%X1 %0, %1 # get_user\n" \
EX_TABLE(1b, %l2) \
: "=r" (x) \
- : "m"UPD_CONSTR (*addr) \
+ : "m<>" (*addr) \
: \
: label)
@@ -200,7 +200,7 @@ __gus_failed: \
".previous\n" \
EX_TABLE(1b, 3b) \
: "=r" (err), "=r" (x) \
- : "m"UPD_CONSTR (*addr), "i" (-EFAULT), "0" (err))
+ : "m<>" (*addr), "i" (-EFAULT), "0" (err))
#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err) \
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
index 578b3ee86105..749a2e3af89e 100644
--- a/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -61,27 +61,35 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_PMC4,
PERF_REG_POWERPC_PMC5,
PERF_REG_POWERPC_PMC6,
- /* Max regs without the extended regs */
+ PERF_REG_POWERPC_SDAR,
+ PERF_REG_POWERPC_SIAR,
+ /* Max mask value for interrupt regs w/o extended regs */
PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1,
+ /* Max mask value for interrupt regs including extended regs */
+ PERF_REG_EXTENDED_MAX = PERF_REG_POWERPC_SIAR + 1,
};
#define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1)
-/* Exclude MMCR3, SIER2, SIER3 for CPU_FTR_ARCH_300 */
-#define PERF_EXCLUDE_REG_EXT_300 (7ULL << PERF_REG_POWERPC_MMCR3)
-
/*
* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300
- * includes 9 SPRS from MMCR0 to PMC6 excluding the
- * unsupported SPRS in PERF_EXCLUDE_REG_EXT_300.
+ * includes 11 SPRS from MMCR0 to SIAR excluding the
+ * unsupported SPRS MMCR3, SIER2 and SIER3.
*/
-#define PERF_REG_PMU_MASK_300 ((0xfffULL << PERF_REG_POWERPC_MMCR0) - PERF_EXCLUDE_REG_EXT_300)
+#define PERF_REG_PMU_MASK_300 \
+ ((1ULL << PERF_REG_POWERPC_MMCR0) | (1ULL << PERF_REG_POWERPC_MMCR1) | \
+ (1ULL << PERF_REG_POWERPC_MMCR2) | (1ULL << PERF_REG_POWERPC_PMC1) | \
+ (1ULL << PERF_REG_POWERPC_PMC2) | (1ULL << PERF_REG_POWERPC_PMC3) | \
+ (1ULL << PERF_REG_POWERPC_PMC4) | (1ULL << PERF_REG_POWERPC_PMC5) | \
+ (1ULL << PERF_REG_POWERPC_PMC6) | (1ULL << PERF_REG_POWERPC_SDAR) | \
+ (1ULL << PERF_REG_POWERPC_SIAR))
/*
* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31
- * includes 12 SPRs from MMCR0 to PMC6.
+ * includes 14 SPRs from MMCR0 to SIAR.
*/
-#define PERF_REG_PMU_MASK_31 (0xfffULL << PERF_REG_POWERPC_MMCR0)
+#define PERF_REG_PMU_MASK_31 \
+ (PERF_REG_PMU_MASK_300 | (1ULL << PERF_REG_POWERPC_MMCR3) | \
+ (1ULL << PERF_REG_POWERPC_SIER2) | (1ULL << PERF_REG_POWERPC_SIER3))
-#define PERF_REG_EXTENDED_MAX (PERF_REG_POWERPC_PMC6 + 1)
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
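
[Illustrative sketch, not part of the patch] A userspace example of where these masks end up, assuming a perf tool that samples interrupt registers:

#include <linux/perf_event.h>
#include <asm/perf_regs.h>

static void request_extended_regs(struct perf_event_attr *attr)
{
	attr->sample_type |= PERF_SAMPLE_REGS_INTR;
	/* MMCR0..SIAR set for an ISA v3.0 (POWER9) machine. */
	attr->sample_regs_intr = PERF_REG_PMU_MASK_300;
}
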
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 7be36c1e1db6..0e3640e14eb1 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -106,7 +106,7 @@ extra-y += vmlinux.lds
obj-$(CONFIG_RELOCATABLE) += reloc_$(BITS).o
-obj-$(CONFIG_PPC32) += entry_32.o setup_32.o early_32.o
+obj-$(CONFIG_PPC32) += entry_32.o setup_32.o early_32.o static_call.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index bbb4181621dd..bf96b954a4eb 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -349,6 +349,7 @@ int fix_alignment(struct pt_regs *regs)
if (op.type != CACHEOP + DCBZ)
return -EINVAL;
PPC_WARN_ALIGNMENT(dcbz, regs);
+ WARN_ON_ONCE(!user_mode(regs));
r = emulate_dcbz(op.ea, regs);
} else {
if (type == LARX || type == STCX)
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index e9b597ed423c..91e0f4cf1db3 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -589,6 +589,7 @@ EXPORT_SYMBOL(eeh_check_failure);
/**
* eeh_pci_enable - Enable MMIO or DMA transfers for this slot
* @pe: EEH PE
+ * @function: EEH option
*
* This routine should be called to reenable frozen MMIO or DMA
* so that it would work correctly again. It's useful while doing
@@ -761,8 +762,8 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
}
/**
- * eeh_set_pe_freset - Check the required reset for the indicated device
- * @data: EEH device
+ * eeh_set_dev_freset - Check the required reset for the indicated device
+ * @edev: EEH device
* @flag: return value
*
* Each device might have its preferred reset type: fundamental or
@@ -801,6 +802,7 @@ static void eeh_pe_refreeze_passed(struct eeh_pe *root)
/**
* eeh_pe_reset_full - Complete a full reset process on the indicated PE
* @pe: EEH PE
+ * @include_passed: include passed-through devices?
*
* This function executes a full reset procedure on a PE, including setting
* the appropriate flags, performing a fundamental or hot reset, and then
@@ -937,6 +939,7 @@ static struct notifier_block eeh_device_nb = {
/**
* eeh_init - System wide EEH initialization
+ * @ops: struct to trace EEH operation callback functions
*
* It's the platform's job to call this from an arch_initcall().
*/
@@ -1442,6 +1445,7 @@ static int eeh_pe_reenable_devices(struct eeh_pe *pe, bool include_passed)
* eeh_pe_reset - Issue PE reset according to specified type
* @pe: EEH PE
* @option: reset type
+ * @include_passed: include passed-through devices?
*
* The routine is called to reset the specified PE with the
* indicated type, either fundamental reset or hot reset.
@@ -1513,12 +1517,12 @@ EXPORT_SYMBOL_GPL(eeh_pe_configure);
* eeh_pe_inject_err - Injecting the specified PCI error to the indicated PE
* @pe: the indicated PE
* @type: error type
- * @function: error function
+ * @func: error function
* @addr: address
* @mask: address mask
*
* The routine is called to inject the specified PCI error, which
- * is determined by @type and @function, to the indicated PE for
+ * is determined by @type and @func, to the indicated PE for
* testing purpose.
*/
int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
index c7022c41cc31..20328f72f9f2 100644
--- a/arch/powerpc/kernel/firmware.c
+++ b/arch/powerpc/kernel/firmware.c
@@ -31,11 +31,10 @@ int __init check_kvm_guest(void)
if (!hyper_node)
return 0;
- if (!of_device_is_compatible(hyper_node, "linux,kvm"))
- return 0;
-
- static_branch_enable(&kvm_guest);
+ if (of_device_is_compatible(hyper_node, "linux,kvm"))
+ static_branch_enable(&kvm_guest);
+ of_node_put(hyper_node);
return 0;
}
core_initcall(check_kvm_guest); // before kvm_guest_init()
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 9bdb95f5694f..2d596881b70e 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -755,7 +755,7 @@ _GLOBAL(mmu_pin_tlb)
cmplw r6, r9
bdnzt lt, 2b
-4: LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
+4: LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
2: ori r0, r6, MD_EVALID
mtspr SPRN_MD_CTR, r5
mtspr SPRN_MD_EPN, r0
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index e5503420b6c6..ef8d1b1c234e 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -465,12 +465,21 @@ label:
bl do_page_fault; \
b interrupt_return
+/*
+ * Instruction TLB Error interrupt handlers may call InstructionStorage
+ * directly without clearing ESR, so the ESR at this point may be left over
+ * from a prior interrupt.
+ *
+ * In any case, do_page_fault for BOOK3E does not use ESR and always expects
+ * dsisr to be 0. ESR_DST from a prior store in particular would confuse fault
+ * handling.
+ */
#define INSTRUCTION_STORAGE_EXCEPTION \
START_EXCEPTION(InstructionStorage) \
- NORMAL_EXCEPTION_PROLOG(0x400, INST_STORAGE); \
- mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
+ NORMAL_EXCEPTION_PROLOG(0x400, INST_STORAGE); \
+ li r5,0; /* Store 0 in regs->esr (dsisr) */ \
stw r5,_ESR(r11); \
- stw r12, _DEAR(r11); /* Pass SRR0 as arg2 */ \
+ stw r12, _DEAR(r11); /* Set regs->dear (dar) to SRR0 */ \
prepare_transfer_to_handler; \
bl do_page_fault; \
b interrupt_return
diff --git a/arch/powerpc/kernel/hw_breakpoint_constraints.c b/arch/powerpc/kernel/hw_breakpoint_constraints.c
index 675d1f66ab72..42b967e3d85c 100644
--- a/arch/powerpc/kernel/hw_breakpoint_constraints.c
+++ b/arch/powerpc/kernel/hw_breakpoint_constraints.c
@@ -127,15 +127,6 @@ bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr,
return false;
}
-static int cache_op_size(void)
-{
-#ifdef __powerpc64__
- return ppc64_caches.l1d.block_size;
-#else
- return L1_CACHE_BYTES;
-#endif
-}
-
void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
int *type, int *size, unsigned long *ea)
{
@@ -147,14 +138,14 @@ void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
analyse_instr(&op, regs, *instr);
*type = GETTYPE(op.type);
*ea = op.ea;
-#ifdef __powerpc64__
+
if (!(regs->msr & MSR_64BIT))
*ea &= 0xffffffffUL;
-#endif
+
*size = GETSIZE(op.type);
if (*type == CACHEOP) {
- *size = cache_op_size();
+ *size = l1_dcache_bytes();
*ea &= ~(*size - 1);
} else if (*type == LOAD_VMX || *type == STORE_VMX) {
*ea &= ~(*size - 1);
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index de10a2697258..835b626cd476 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -266,7 +266,7 @@ static void check_return_regs_valid(struct pt_regs *regs)
if (trap_is_scv(regs))
return;
- trap = regs->trap;
+ trap = TRAP(regs);
// EE in HV mode sets HSRRs like 0xea0
if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
trap = 0xea0;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index b1e43b69a559..0b7894eed58d 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -278,9 +278,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
proc_freq / 1000000, proc_freq % 1000000);
- if (ppc_md.show_percpuinfo != NULL)
- ppc_md.show_percpuinfo(m, cpu_id);
-
/* If we are a Freescale core do a simple check so
* we dont have to keep adding cases in the future */
if (PVR_VER(pvr) & 0x8000) {
diff --git a/arch/powerpc/kernel/static_call.c b/arch/powerpc/kernel/static_call.c
new file mode 100644
index 000000000000..863a7aa24650
--- /dev/null
+++ b/arch/powerpc/kernel/static_call.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/memory.h>
+#include <linux/static_call.h>
+
+#include <asm/code-patching.h>
+
+void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
+{
+ int err;
+ bool is_ret0 = (func == __static_call_return0);
+ unsigned long target = (unsigned long)(is_ret0 ? tramp + PPC_SCT_RET0 : func);
+ bool is_short = is_offset_in_branch_range((long)target - (long)tramp);
+
+ if (!tramp)
+ return;
+
+ mutex_lock(&text_mutex);
+
+ if (func && !is_short) {
+ err = patch_instruction(tramp + PPC_SCT_DATA, ppc_inst(target));
+ if (err)
+ goto out;
+ }
+
+ if (!func)
+ err = patch_instruction(tramp, ppc_inst(PPC_RAW_BLR()));
+ else if (is_short)
+ err = patch_branch(tramp, target, 0);
+ else
+ err = patch_instruction(tramp, ppc_inst(PPC_RAW_NOP()));
+out:
+ mutex_unlock(&text_mutex);
+
+ if (err)
+ panic("%s: patching failed %pS at %pS\n", __func__, func, tramp);
+}
+EXPORT_SYMBOL_GPL(arch_static_call_transform);
diff --git a/arch/powerpc/kernel/swsusp_64.c b/arch/powerpc/kernel/swsusp_64.c
index aeea97ad85cf..16ee3baaf09a 100644
--- a/arch/powerpc/kernel/swsusp_64.c
+++ b/arch/powerpc/kernel/swsusp_64.c
@@ -17,8 +17,3 @@ void do_after_copyback(void)
touch_softlockup_watchdog();
mb();
}
-
-void _iommu_save(void)
-{
- iommu_save();
-}
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index 6d3189830dd3..96bb20715aa9 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -128,7 +128,6 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
* stack pointer on the stack like a real stackframe */
addi r1,r1,-128
- bl _iommu_save
bl swsusp_save
/* restore LR */
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index defecb3b1b15..08d8072d6e7a 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -928,7 +928,8 @@ static int unregister_cpu_online(unsigned int cpu)
struct device_attribute *attrs, *pmc_attrs;
int i, nattrs;
- BUG_ON(!c->hotpluggable);
+ if (WARN_RATELIMIT(!c->hotpluggable, "cpu %d can't be offlined\n", cpu))
+ return -EBUSY;
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_SMT))
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 934d8ae66cc6..cae8f03a44fe 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -631,8 +631,12 @@ void timer_broadcast_interrupt(void)
#endif
#ifdef CONFIG_SUSPEND
-static void generic_suspend_disable_irqs(void)
+/* Overrides the weak version in kernel/power/main.c */
+void arch_suspend_disable_irqs(void)
{
+ if (ppc_md.suspend_disable_irqs)
+ ppc_md.suspend_disable_irqs();
+
/* Disable the decrementer, so that it doesn't interfere
* with suspending.
*/
@@ -642,23 +646,11 @@ static void generic_suspend_disable_irqs(void)
set_dec(decrementer_max);
}
-static void generic_suspend_enable_irqs(void)
-{
- local_irq_enable();
-}
-
-/* Overrides the weak version in kernel/power/main.c */
-void arch_suspend_disable_irqs(void)
-{
- if (ppc_md.suspend_disable_irqs)
- ppc_md.suspend_disable_irqs();
- generic_suspend_disable_irqs();
-}
-
/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
- generic_suspend_enable_irqs();
+ local_irq_enable();
+
if (ppc_md.suspend_enable_irqs)
ppc_md.suspend_enable_irqs();
}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 40bdefe9caa7..18e42c74abdd 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -143,6 +143,12 @@ SECTIONS
SOFT_MASK_TABLE(8)
RESTART_TABLE(8)
+ .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
+ __start_opd = .;
+ KEEP(*(.opd))
+ __end_opd = .;
+ }
+
. = ALIGN(8);
__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
__start___stf_entry_barrier_fixup = .;
@@ -339,12 +345,6 @@ SECTIONS
*(.branch_lt)
}
- .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
- __start_opd = .;
- KEEP(*(.opd))
- __end_opd = .;
- }
-
. = ALIGN(256);
.got : AT(ADDR(.got) - LOAD_OFFSET) {
__toc_start = .;
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 48525e8b5730..a2242017e55f 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -48,19 +48,6 @@ void machine_crash_shutdown(struct pt_regs *regs)
default_machine_crash_shutdown(regs);
}
-/*
- * Do what every setup is needed on image and the
- * reboot code buffer to allow us to avoid allocations
- * later.
- */
-int machine_kexec_prepare(struct kimage *image)
-{
- if (ppc_md.machine_kexec_prepare)
- return ppc_md.machine_kexec_prepare(image);
- else
- return default_machine_kexec_prepare(image);
-}
-
void machine_kexec_cleanup(struct kimage *image)
{
}
diff --git a/arch/powerpc/kexec/core_32.c b/arch/powerpc/kexec/core_32.c
index bf9f1f906d64..b50aed48d09d 100644
--- a/arch/powerpc/kexec/core_32.c
+++ b/arch/powerpc/kexec/core_32.c
@@ -63,7 +63,7 @@ void default_machine_kexec(struct kimage *image)
(*rnk)(page_list, reboot_code_buffer_phys, image->start);
}
-int default_machine_kexec_prepare(struct kimage *image)
+int machine_kexec_prepare(struct kimage *image)
{
return 0;
}
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index 89c069d664a5..66678518b938 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -32,7 +32,7 @@
#include <asm/svm.h>
#include <asm/ultravisor.h>
-int default_machine_kexec_prepare(struct kimage *image)
+int machine_kexec_prepare(struct kimage *image)
{
int i;
unsigned long begin, end; /* limits of segment */
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index 5056e175ca2c..b4981b651d9a 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -700,6 +700,7 @@ static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
if (ret) {
pr_err("Failed to set linux,usable-memory property for %s node",
dn->full_name);
+ of_node_put(dn);
goto out;
}
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2acb1c96cfaf..7b74fc0a986b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3726,7 +3726,20 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
kvmppc_set_host_core(pcpu);
- guest_exit_irqoff();
+ context_tracking_guest_exit();
+ if (!vtime_accounting_enabled_this_cpu()) {
+ local_irq_enable();
+ /*
+ * Service IRQs here before vtime_account_guest_exit() so any
+ * ticks that occurred while running the guest are accounted to
+ * the guest. If vtime accounting is enabled, accounting uses
+ * TB rather than ticks, so it can be done without enabling
+ * interrupts here, which has the problem that it accounts
+ * interrupt processing overhead to the host.
+ */
+ local_irq_disable();
+ }
+ vtime_account_guest_exit();
local_irq_enable();
@@ -4510,7 +4523,20 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
kvmppc_set_host_core(pcpu);
- guest_exit_irqoff();
+ context_tracking_guest_exit();
+ if (!vtime_accounting_enabled_this_cpu()) {
+ local_irq_enable();
+ /*
+ * Service IRQs here before vtime_account_guest_exit() so any
+ * ticks that occurred while running the guest are accounted to
+ * the guest. If vtime accounting is enabled, accounting uses
+ * TB rather than ticks, so it can be done without enabling
+ * interrupts here, which has the problem that it accounts
+ * interrupt processing overhead to the host.
+ */
+ local_irq_disable();
+ }
+ vtime_account_guest_exit();
local_irq_enable();
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 632b2545072b..2c1f3c6e72d1 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -207,6 +207,15 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
if (kvm_is_radix(kvm))
return H_FUNCTION;
+ /*
+ * The HPTE gets used by compute_tlbie_rb() to set TLBIE bits, so
+ * these functions should work together -- must ensure a guest can not
+ * cause problems with the TLBIE that KVM executes.
+ */
+ if ((pteh >> HPTE_V_SSIZE_SHIFT) & 0x2) {
+ /* B=0b1x is a reserved value, disallow it. */
+ return H_PARAMETER;
+ }
psize = kvmppc_actual_pgsz(pteh, ptel);
if (!psize)
return H_PARAMETER;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 977801c83aff..8c15c90dd3a9 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1042,7 +1042,21 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
}
trace_kvm_exit(exit_nr, vcpu);
- guest_exit_irqoff();
+
+ context_tracking_guest_exit();
+ if (!vtime_accounting_enabled_this_cpu()) {
+ local_irq_enable();
+ /*
+ * Service IRQs here before vtime_account_guest_exit() so any
+ * ticks that occurred while running the guest are accounted to
+ * the guest. If vtime accounting is enabled, accounting uses
+ * TB rather than ticks, so it can be done without enabling
+ * interrupts here, which has the problem that it accounts
+ * interrupt processing overhead to the host.
+ */
+ local_irq_disable();
+ }
+ vtime_account_guest_exit();
local_irq_enable();
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 8ab90ce8738f..35e9cccdeef9 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1094,7 +1094,7 @@ static inline u64 sp_to_dp(u32 fprs)
preempt_disable();
enable_kernel_fp();
- asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m"UPD_CONSTR (fprd) : "m"UPD_CONSTR (fprs)
+ asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
: "fr0");
preempt_enable();
return fprd;
@@ -1106,7 +1106,7 @@ static inline u32 dp_to_sp(u64 fprd)
preempt_disable();
enable_kernel_fp();
- asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m"UPD_CONSTR (fprs) : "m"UPD_CONSTR (fprd)
+ asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
: "fr0");
preempt_enable();
return fprs;
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index cda17bee5afe..c3e06922468b 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -228,6 +228,7 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
static bool stf_exit_reentrant = false;
static bool rfi_exit_reentrant = false;
+static DEFINE_MUTEX(exit_flush_lock);
static int __do_stf_barrier_fixups(void *data)
{
@@ -253,6 +254,9 @@ void do_stf_barrier_fixups(enum stf_barrier_type types)
* low level interrupt exit code before patching. After the patching,
* if allowed, then flip the branch to allow fast exits.
*/
+
+ // Prevent static key update races with do_rfi_flush_fixups()
+ mutex_lock(&exit_flush_lock);
static_branch_enable(&interrupt_exit_not_reentrant);
stop_machine(__do_stf_barrier_fixups, &types, NULL);
@@ -264,6 +268,8 @@ void do_stf_barrier_fixups(enum stf_barrier_type types)
if (stf_exit_reentrant && rfi_exit_reentrant)
static_branch_disable(&interrupt_exit_not_reentrant);
+
+ mutex_unlock(&exit_flush_lock);
}
void do_uaccess_flush_fixups(enum l1d_flush_type types)
@@ -486,6 +492,9 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
* without stop_machine, so this could be achieved with a broadcast
* IPI instead, but this matches the stf sequence.
*/
+
+ // Prevent static key update races with do_stf_barrier_fixups()
+ mutex_lock(&exit_flush_lock);
static_branch_enable(&interrupt_exit_not_reentrant);
stop_machine(__do_rfi_flush_fixups, &types, NULL);
@@ -497,6 +506,8 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
if (stf_exit_reentrant && rfi_exit_reentrant)
static_branch_disable(&interrupt_exit_not_reentrant);
+
+ mutex_unlock(&exit_flush_lock);
}
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index d8d5f901cee1..86f49e3e7cf5 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -302,33 +302,51 @@ static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
}
}
-static nokprobe_inline int read_mem_aligned(unsigned long *dest,
- unsigned long ea, int nb,
- struct pt_regs *regs)
+static __always_inline int
+__read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
- int err = 0;
unsigned long x = 0;
switch (nb) {
case 1:
- err = __get_user(x, (unsigned char __user *) ea);
+ unsafe_get_user(x, (unsigned char __user *)ea, Efault);
break;
case 2:
- err = __get_user(x, (unsigned short __user *) ea);
+ unsafe_get_user(x, (unsigned short __user *)ea, Efault);
break;
case 4:
- err = __get_user(x, (unsigned int __user *) ea);
+ unsafe_get_user(x, (unsigned int __user *)ea, Efault);
break;
#ifdef __powerpc64__
case 8:
- err = __get_user(x, (unsigned long __user *) ea);
+ unsafe_get_user(x, (unsigned long __user *)ea, Efault);
break;
#endif
}
- if (!err)
- *dest = x;
- else
+ *dest = x;
+ return 0;
+
+Efault:
+ regs->dar = ea;
+ return -EFAULT;
+}
+
+static nokprobe_inline int
+read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
+{
+ int err;
+
+ if (is_kernel_addr(ea))
+ return __read_mem_aligned(dest, ea, nb, regs);
+
+ if (user_read_access_begin((void __user *)ea, nb)) {
+ err = __read_mem_aligned(dest, ea, nb, regs);
+ user_read_access_end();
+ } else {
+ err = -EFAULT;
regs->dar = ea;
+ }
+
return err;
}
@@ -336,10 +354,8 @@ static nokprobe_inline int read_mem_aligned(unsigned long *dest,
* Copy from userspace to a buffer, using the largest possible
* aligned accesses, up to sizeof(long).
*/
-static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
- struct pt_regs *regs)
+static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
- int err = 0;
int c;
for (; nb > 0; nb -= c) {
@@ -348,31 +364,46 @@ static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
c = max_align(nb);
switch (c) {
case 1:
- err = __get_user(*dest, (unsigned char __user *) ea);
+ unsafe_get_user(*dest, (u8 __user *)ea, Efault);
break;
case 2:
- err = __get_user(*(u16 *)dest,
- (unsigned short __user *) ea);
+ unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault);
break;
case 4:
- err = __get_user(*(u32 *)dest,
- (unsigned int __user *) ea);
+ unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault);
break;
#ifdef __powerpc64__
case 8:
- err = __get_user(*(unsigned long *)dest,
- (unsigned long __user *) ea);
+ unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault);
break;
#endif
}
- if (err) {
- regs->dar = ea;
- return err;
- }
dest += c;
ea += c;
}
return 0;
+
+Efault:
+ regs->dar = ea;
+ return -EFAULT;
+}
+
+static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
+{
+ int err;
+
+ if (is_kernel_addr(ea))
+ return __copy_mem_in(dest, ea, nb, regs);
+
+ if (user_read_access_begin((void __user *)ea, nb)) {
+ err = __copy_mem_in(dest, ea, nb, regs);
+ user_read_access_end();
+ } else {
+ err = -EFAULT;
+ regs->dar = ea;
+ }
+
+ return err;
}
static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
@@ -410,30 +441,48 @@ static int read_mem(unsigned long *dest, unsigned long ea, int nb,
}
NOKPROBE_SYMBOL(read_mem);
-static nokprobe_inline int write_mem_aligned(unsigned long val,
- unsigned long ea, int nb,
- struct pt_regs *regs)
+static __always_inline int
+__write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
{
- int err = 0;
-
switch (nb) {
case 1:
- err = __put_user(val, (unsigned char __user *) ea);
+ unsafe_put_user(val, (unsigned char __user *)ea, Efault);
break;
case 2:
- err = __put_user(val, (unsigned short __user *) ea);
+ unsafe_put_user(val, (unsigned short __user *)ea, Efault);
break;
case 4:
- err = __put_user(val, (unsigned int __user *) ea);
+ unsafe_put_user(val, (unsigned int __user *)ea, Efault);
break;
#ifdef __powerpc64__
case 8:
- err = __put_user(val, (unsigned long __user *) ea);
+ unsafe_put_user(val, (unsigned long __user *)ea, Efault);
break;
#endif
}
- if (err)
+ return 0;
+
+Efault:
+ regs->dar = ea;
+ return -EFAULT;
+}
+
+static nokprobe_inline int
+write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
+{
+ int err;
+
+ if (is_kernel_addr(ea))
+ return __write_mem_aligned(val, ea, nb, regs);
+
+ if (user_write_access_begin((void __user *)ea, nb)) {
+ err = __write_mem_aligned(val, ea, nb, regs);
+ user_write_access_end();
+ } else {
+ err = -EFAULT;
regs->dar = ea;
+ }
+
return err;
}
@@ -441,10 +490,8 @@ static nokprobe_inline int write_mem_aligned(unsigned long val,
* Copy from a buffer to userspace, using the largest possible
* aligned accesses, up to sizeof(long).
*/
-static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
- struct pt_regs *regs)
+static nokprobe_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
- int err = 0;
int c;
for (; nb > 0; nb -= c) {
@@ -453,31 +500,46 @@ static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
c = max_align(nb);
switch (c) {
case 1:
- err = __put_user(*dest, (unsigned char __user *) ea);
+ unsafe_put_user(*dest, (u8 __user *)ea, Efault);
break;
case 2:
- err = __put_user(*(u16 *)dest,
- (unsigned short __user *) ea);
+ unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault);
break;
case 4:
- err = __put_user(*(u32 *)dest,
- (unsigned int __user *) ea);
+ unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault);
break;
#ifdef __powerpc64__
case 8:
- err = __put_user(*(unsigned long *)dest,
- (unsigned long __user *) ea);
+ unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault);
break;
#endif
}
- if (err) {
- regs->dar = ea;
- return err;
- }
dest += c;
ea += c;
}
return 0;
+
+Efault:
+ regs->dar = ea;
+ return -EFAULT;
+}
+
+static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
+{
+ int err;
+
+ if (is_kernel_addr(ea))
+ return __copy_mem_out(dest, ea, nb, regs);
+
+ if (user_write_access_begin((void __user *)ea, nb)) {
+ err = __copy_mem_out(dest, ea, nb, regs);
+ user_write_access_end();
+ } else {
+ err = -EFAULT;
+ regs->dar = ea;
+ }
+
+ return err;
}
static nokprobe_inline int write_mem_unaligned(unsigned long val,
@@ -986,10 +1048,24 @@ static nokprobe_inline int do_vsx_store(struct instruction_op *op,
}
#endif /* CONFIG_VSX */
+static int __emulate_dcbz(unsigned long ea)
+{
+ unsigned long i;
+ unsigned long size = l1_dcache_bytes();
+
+ for (i = 0; i < size; i += sizeof(long))
+ unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault);
+
+ return 0;
+
+Efault:
+ return -EFAULT;
+}
+
int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
{
int err;
- unsigned long i, size;
+ unsigned long size;
#ifdef __powerpc64__
size = ppc64_caches.l1d.block_size;
@@ -1001,14 +1077,21 @@ int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
ea &= ~(size - 1);
if (!address_ok(regs, ea, size))
return -EFAULT;
- for (i = 0; i < size; i += sizeof(long)) {
- err = __put_user(0, (unsigned long __user *) (ea + i));
- if (err) {
- regs->dar = ea;
- return err;
- }
+
+ if (is_kernel_addr(ea)) {
+ err = __emulate_dcbz(ea);
+ } else if (user_write_access_begin((void __user *)ea, size)) {
+ err = __emulate_dcbz(ea);
+ user_write_access_end();
+ } else {
+ err = -EFAULT;
}
- return 0;
+
+ if (err)
+ regs->dar = ea;
+
+
+ return err;
}
NOKPROBE_SYMBOL(emulate_dcbz);
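For reference, a minimal sketch (not from this patch) of the user access block pattern the hunks above convert to: open the access window, perform unsafe accesses that branch to a label on fault, and close the window on both paths. The helper name is hypothetical:

	static int copy_u32_from_user(u32 *dst, const u32 __user *src)
	{
		u32 val;

		if (!user_read_access_begin(src, sizeof(*src)))
			return -EFAULT;
		/* Branches to Efault on fault instead of returning an error code */
		unsafe_get_user(val, src, Efault);
		user_read_access_end();
		*dst = val;
		return 0;

	Efault:
		user_read_access_end();
		return -EFAULT;
	}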
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index c145776d3ae5..cfd45245d009 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1988,7 +1988,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
mmu_kernel_ssize, 0);
}
-void __kernel_map_pages(struct page *page, int numpages, int enable)
+void hash__kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long flags, vaddr, lmi;
int i;
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index ae20add7954a..3a600bd7fbc6 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -920,6 +920,13 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
#endif
#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void radix__kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
+}
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index c3c4e31462ec..bd5d91a31183 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -20,8 +20,8 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
-#include <asm/sparsemem.h>
#include <asm/svm.h>
+#include <asm/mmzone.h>
#include <mm/mmu_decl.h>
@@ -256,7 +256,7 @@ void __init mem_init(void)
#endif
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
- set_max_mapnr(max_pfn);
+ set_max_mapnr(max_low_pfn);
kasan_late_init();
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index dd1cabc2ea0f..0dd4c18f8363 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -126,7 +126,7 @@ unsigned long mmu_mapin_ram(unsigned long base, unsigned long top);
#ifdef CONFIG_PPC_FSL_BOOK3E
extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx,
- bool dryrun);
+ bool dryrun, bool init);
extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
phys_addr_t phys);
#ifdef CONFIG_PPC32
@@ -168,7 +168,7 @@ static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; }
static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
#endif
-#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_FSL_BOOK3E)
void mmu_mark_initmem_nx(void);
void mmu_mark_rodata_ro(void);
#else
diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile
index 0424f6ce5bd8..b1f630d423d8 100644
--- a/arch/powerpc/mm/nohash/Makefile
+++ b/arch/powerpc/mm/nohash/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o
obj-$(CONFIG_40x) += 40x.o
obj-$(CONFIG_44x) += 44x.o
obj-$(CONFIG_PPC_8xx) += 8xx.o
-obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke.o
+obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_book3e.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr_booke.o
ifdef CONFIG_HUGETLB_PAGE
obj-$(CONFIG_PPC_FSL_BOOK3E) += book3e_hugetlbpage.o
@@ -16,4 +16,4 @@ endif
# Disable kcov instrumentation on sensitive code
# This is necessary for booting with kcov enabled on book3e machines
KCOV_INSTRUMENT_tlb.o := n
-KCOV_INSTRUMENT_fsl_booke.o := n
+KCOV_INSTRUMENT_fsl_book3e.o := n
diff --git a/arch/powerpc/mm/nohash/fsl_booke.c b/arch/powerpc/mm/nohash/fsl_book3e.c
index 03dacbe940e5..b231a54f540c 100644
--- a/arch/powerpc/mm/nohash/fsl_booke.c
+++ b/arch/powerpc/mm/nohash/fsl_book3e.c
@@ -122,15 +122,18 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
- TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SX | MAS3_SR;
- TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
+ TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR;
+ TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0;
if (mmu_has_feature(MMU_FTR_BIG_PHYS))
TLBCAM[index].MAS7 = (u64)phys >> 32;
/* Below is unlikely -- only for large user pages or similar */
if (pte_user(__pte(flags))) {
- TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
- TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
+ TLBCAM[index].MAS3 |= MAS3_UR;
+ TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
+ TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0;
+ } else {
+ TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0;
}
tlbcam_addrs[index].start = virt;
@@ -165,19 +168,38 @@ unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
unsigned long ram, int max_cam_idx,
- bool dryrun)
+ bool dryrun, bool init)
{
int i;
unsigned long amount_mapped = 0;
+ unsigned long boundary;
+
+ if (strict_kernel_rwx_enabled())
+ boundary = (unsigned long)(_sinittext - _stext);
+ else
+ boundary = ram;
/* Calculate CAM values */
- for (i = 0; ram && i < max_cam_idx; i++) {
+ for (i = 0; boundary && i < max_cam_idx; i++) {
+ unsigned long cam_sz;
+ pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX;
+
+ cam_sz = calc_cam_sz(boundary, virt, phys);
+ if (!dryrun)
+ settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
+
+ boundary -= cam_sz;
+ amount_mapped += cam_sz;
+ virt += cam_sz;
+ phys += cam_sz;
+ }
+ for (ram -= amount_mapped; ram && i < max_cam_idx; i++) {
unsigned long cam_sz;
+ pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL;
cam_sz = calc_cam_sz(ram, virt, phys);
if (!dryrun)
- settlbcam(i, virt, phys, cam_sz,
- pgprot_val(PAGE_KERNEL_X), 0);
+ settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
ram -= cam_sz;
amount_mapped += cam_sz;
@@ -188,8 +210,13 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
if (dryrun)
return amount_mapped;
- loadcam_multi(0, i, max_cam_idx);
- tlbcam_index = i;
+ if (init) {
+ loadcam_multi(0, i, max_cam_idx);
+ tlbcam_index = i;
+ } else {
+ loadcam_multi(0, i, 0);
+ WARN_ON(i > tlbcam_index);
+ }
#ifdef CONFIG_PPC64
get_paca()->tcd.esel_next = i;
@@ -200,12 +227,12 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
return amount_mapped;
}
-unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun)
+unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init)
{
unsigned long virt = PAGE_OFFSET;
phys_addr_t phys = memstart_addr;
- return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun);
+ return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init);
}
#ifdef CONFIG_PPC32
@@ -246,7 +273,7 @@ void __init adjust_total_lowmem(void)
ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
i = switch_to_as1();
- __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false);
+ __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true);
restore_to_as0(i, 0, 0, 1);
pr_info("Memory CAM mapping: ");
@@ -258,6 +285,25 @@ void __init adjust_total_lowmem(void)
memblock_set_current_limit(memstart_addr + __max_low_memory);
}
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mmu_mark_rodata_ro(void)
+{
+ /* Everything is done in mmu_mark_initmem_nx() */
+}
+#endif
+
+void mmu_mark_initmem_nx(void)
+{
+ unsigned long remapped;
+
+ if (!strict_kernel_rwx_enabled())
+ return;
+
+ remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
+
+ WARN_ON(__max_low_memory != remapped);
+}
+
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
@@ -317,11 +363,11 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
/* map a 64M area for the second relocation */
if (memstart_addr > start)
map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM,
- false);
+ false, true);
else
map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
0x4000000, CONFIG_LOWMEM_CAM_NUM,
- false);
+ false, true);
restore_to_as0(n, offset, __va(dt_ptr), 1);
/* We should never reach here */
panic("Relocation error");
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 4c74e8a5482b..8fc49b1b4a91 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -314,7 +314,7 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");
ram = min_t(phys_addr_t, __max_low_memory, size);
- ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true);
+ ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, false);
linear_sz = min_t(unsigned long, ram, SZ_512M);
/* If the linear size is smaller than 64M, do not randomize */
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index 5872f69141d5..89353d4f5604 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -185,6 +185,7 @@ EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
* processor
*/
+#ifndef CONFIG_PPC_8xx
/*
* These are the base non-SMP variants of page and mm flushing
*/
@@ -218,6 +219,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);
+#endif
/*
* And here are the SMP non-local implementations
@@ -643,7 +645,7 @@ static void early_init_this_mmu(void)
if (map)
linear_map_top = map_mem_in_cams(linear_map_top,
- num_cams, false);
+ num_cams, true, true);
}
#endif
@@ -764,7 +766,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
- true);
+ false, true);
ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
} else
diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S
index 5add4a51e51f..dd39074de9af 100644
--- a/arch/powerpc/mm/nohash/tlb_low.S
+++ b/arch/powerpc/mm/nohash/tlb_low.S
@@ -369,7 +369,7 @@ _GLOBAL(_tlbivax_bcast)
* extern void loadcam_entry(unsigned int index)
*
* Load TLBCAM[index] entry in to the L2 CAM MMU
- * Must preserve r7, r8, r9, r10 and r11
+ * Must preserve r7, r8, r9, r10, r11, r12
*/
_GLOBAL(loadcam_entry)
mflr r5
@@ -401,7 +401,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
*
* r3 = first entry to write
* r4 = number of entries to write
- * r5 = temporary tlb entry
+ * r5 = temporary tlb entry (0 means no switch to AS1)
*/
_GLOBAL(loadcam_multi)
mflr r8
@@ -409,6 +409,8 @@ _GLOBAL(loadcam_multi)
mfmsr r11
andi. r11,r11,MSR_IS
bne 10f
+ mr. r12, r5
+ beq 10f
/*
* Set up temporary TLB entry that is the same as what we're
@@ -446,6 +448,8 @@ _GLOBAL(loadcam_multi)
/* Don't return to AS=0 if we were in AS=1 at function start */
andi. r11,r11,MSR_IS
bne 3f
+ cmpwi r12, 0
+ beq 3f
/* Return to AS=0 and clear the temporary entry */
mfmsr r6
diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
index bf24451f3e71..9235e720e357 100644
--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
@@ -222,7 +222,7 @@ tlb_miss_kernel_bolted:
tlb_miss_fault_bolted:
/* We need to check if it was an instruction miss */
- andi. r10,r11,_PAGE_EXEC|_PAGE_BAP_SX
+ andi. r10,r11,_PAGE_BAP_UX|_PAGE_BAP_SX
bne itlb_miss_fault_bolted
dtlb_miss_fault_bolted:
tlb_epilog_bolted
@@ -239,7 +239,7 @@ itlb_miss_fault_bolted:
srdi r15,r16,60 /* get region */
bne- itlb_miss_fault_bolted
- li r11,_PAGE_PRESENT|_PAGE_EXEC /* Base perm */
+ li r11,_PAGE_PRESENT|_PAGE_BAP_UX /* Base perm */
/* We do the user/kernel test for the PID here along with the RW test
*/
@@ -614,7 +614,7 @@ itlb_miss_fault_e6500:
/* We do the user/kernel test for the PID here along with the RW test
*/
- li r11,_PAGE_PRESENT|_PAGE_EXEC /* Base perm */
+ li r11,_PAGE_PRESENT|_PAGE_BAP_UX /* Base perm */
oris r11,r11,_PAGE_ACCESSED@h
cmpldi cr0,r15,0 /* Check for user region */
@@ -734,7 +734,7 @@ normal_tlb_miss_done:
normal_tlb_miss_access_fault:
/* We need to check if it was an instruction miss */
- andi. r10,r11,_PAGE_EXEC
+ andi. r10,r11,_PAGE_BAP_UX
bne 1f
ld r14,EX_TLB_DEAR(r12)
ld r15,EX_TLB_ESR(r12)
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index cd16b407f47e..ce9482383144 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -271,7 +271,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_
{
pmd_t *pmd = pmd_off(mm, addr);
pte_basic_t val;
- pte_basic_t *entry = &ptep->pte;
+ pte_basic_t *entry = (pte_basic_t *)ptep;
int num, i;
/*
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index dcf5ecca19d9..fde1ed445ca4 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -173,7 +173,7 @@ void mark_rodata_ro(void)
}
#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long addr = (unsigned long)page_address(page);
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index fcbf7a917c56..90ce75f0f1e2 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -241,8 +241,8 @@ skip_codegen_passes:
fp->jited_len = alloclen;
bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
- bpf_jit_binary_lock_ro(bpf_hdr);
if (!fp->is_func || extra_pass) {
+ bpf_jit_binary_lock_ro(bpf_hdr);
bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
kfree(addrs);
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index b931eed482c9..51d31b65e423 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -90,7 +90,11 @@ static u64 get_ext_regs_value(int idx)
return mfspr(SPRN_SIER2);
case PERF_REG_POWERPC_SIER3:
return mfspr(SPRN_SIER3);
+ case PERF_REG_POWERPC_SDAR:
+ return mfspr(SPRN_SDAR);
#endif
+ case PERF_REG_POWERPC_SIAR:
+ return mfspr(SPRN_SIAR);
default: return 0;
}
}
diff --git a/arch/powerpc/perf/power10-events-list.h b/arch/powerpc/perf/power10-events-list.h
index 93be7197d250..564f14097f07 100644
--- a/arch/powerpc/perf/power10-events-list.h
+++ b/arch/powerpc/perf/power10-events-list.h
@@ -9,10 +9,10 @@
/*
* Power10 event codes.
*/
-EVENT(PM_RUN_CYC, 0x600f4);
+EVENT(PM_CYC, 0x600f4);
EVENT(PM_DISP_STALL_CYC, 0x100f8);
EVENT(PM_EXEC_STALL, 0x30008);
-EVENT(PM_RUN_INST_CMPL, 0x500fa);
+EVENT(PM_INST_CMPL, 0x500fa);
EVENT(PM_BR_CMPL, 0x4d05e);
EVENT(PM_BR_MPRED_CMPL, 0x400f6);
EVENT(PM_BR_FIN, 0x2f04a);
@@ -50,8 +50,8 @@ EVENT(PM_DTLB_MISS, 0x300fc);
/* ITLB Reloaded */
EVENT(PM_ITLB_MISS, 0x400fc);
-EVENT(PM_RUN_CYC_ALT, 0x0001e);
-EVENT(PM_RUN_INST_CMPL_ALT, 0x00002);
+EVENT(PM_CYC_ALT, 0x0001e);
+EVENT(PM_INST_CMPL_ALT, 0x00002);
/*
* Memory Access Events
diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
index f9d64c63bb4a..9dd75f385837 100644
--- a/arch/powerpc/perf/power10-pmu.c
+++ b/arch/powerpc/perf/power10-pmu.c
@@ -91,8 +91,8 @@ extern u64 PERF_REG_EXTENDED_MASK;
/* Table of alternatives, sorted by column 0 */
static const unsigned int power10_event_alternatives[][MAX_ALT] = {
- { PM_RUN_CYC_ALT, PM_RUN_CYC },
- { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
+ { PM_CYC_ALT, PM_CYC },
+ { PM_INST_CMPL_ALT, PM_INST_CMPL },
};
static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
@@ -118,8 +118,8 @@ static int power10_check_attr_config(struct perf_event *ev)
return 0;
}
-GENERIC_EVENT_ATTR(cpu-cycles, PM_RUN_CYC);
-GENERIC_EVENT_ATTR(instructions, PM_RUN_INST_CMPL);
+GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
+GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL);
GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
@@ -148,8 +148,8 @@ CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
static struct attribute *power10_events_attr_dd1[] = {
- GENERIC_EVENT_PTR(PM_RUN_CYC),
- GENERIC_EVENT_PTR(PM_RUN_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_CYC),
+ GENERIC_EVENT_PTR(PM_INST_CMPL),
GENERIC_EVENT_PTR(PM_BR_CMPL),
GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
GENERIC_EVENT_PTR(PM_LD_REF_L1),
@@ -173,8 +173,8 @@ static struct attribute *power10_events_attr_dd1[] = {
};
static struct attribute *power10_events_attr[] = {
- GENERIC_EVENT_PTR(PM_RUN_CYC),
- GENERIC_EVENT_PTR(PM_RUN_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_CYC),
+ GENERIC_EVENT_PTR(PM_INST_CMPL),
GENERIC_EVENT_PTR(PM_BR_FIN),
GENERIC_EVENT_PTR(PM_MPRED_BR_FIN),
GENERIC_EVENT_PTR(PM_LD_REF_L1),
@@ -271,8 +271,8 @@ static const struct attribute_group *power10_pmu_attr_groups[] = {
};
static int power10_generic_events_dd1[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = PM_RUN_CYC,
- [PERF_COUNT_HW_INSTRUCTIONS] = PM_RUN_INST_CMPL,
+ [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
+ [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
[PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
@@ -280,8 +280,8 @@ static int power10_generic_events_dd1[] = {
};
static int power10_generic_events[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = PM_RUN_CYC,
- [PERF_COUNT_HW_INSTRUCTIONS] = PM_RUN_INST_CMPL,
+ [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
+ [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_FIN,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_MPRED_BR_FIN,
[PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
@@ -548,6 +548,24 @@ static u64 power10_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
#undef C
+/*
+ * Set the MMCR0[CC56RUN] bit to enable counting for
+ * PMC5 and PMC6 regardless of the state of CTRL[RUN],
+ * so that we can use counters 5 and 6 as PM_INST_CMPL and
+ * PM_CYC.
+ */
+static int power10_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[], u32 flags)
+{
+ int ret;
+
+ ret = isa207_compute_mmcr(event, n_ev, hwc, mmcr, pevents, flags);
+ if (!ret)
+ mmcr->mmcr0 |= MMCR0_C56RUN;
+ return ret;
+}
+
static struct power_pmu power10_pmu = {
.name = "POWER10",
.n_counter = MAX_PMU_COUNTERS,
@@ -555,7 +573,7 @@ static struct power_pmu power10_pmu = {
.test_adder = ISA207_TEST_ADDER,
.group_constraint_mask = CNST_CACHE_PMC4_MASK,
.group_constraint_val = CNST_CACHE_PMC4_VAL,
- .compute_mmcr = isa207_compute_mmcr,
+ .compute_mmcr = power10_compute_mmcr,
.config_bhrb = power10_config_bhrb,
.bhrb_filter_map = power10_bhrb_filter_map,
.get_constraint = isa207_get_constraint,
diff --git a/arch/powerpc/platforms/44x/fsp2.c b/arch/powerpc/platforms/44x/fsp2.c
index b299e43f5ef9..823397c802de 100644
--- a/arch/powerpc/platforms/44x/fsp2.c
+++ b/arch/powerpc/platforms/44x/fsp2.c
@@ -208,6 +208,7 @@ static void node_irq_request(const char *compat, irq_handler_t errirq_handler)
if (irq == NO_IRQ) {
pr_err("device tree node %pOFn is missing a interrupt",
np);
+ of_node_put(np);
return;
}
@@ -215,6 +216,7 @@ static void node_irq_request(const char *compat, irq_handler_t errirq_handler)
if (rc) {
pr_err("fsp_of_probe: request_irq failed: np=%pOF rc=%d",
np, rc);
+ of_node_put(np);
return;
}
}
diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c
index 07f7e3ce67b5..fb7db5cedd4e 100644
--- a/arch/powerpc/platforms/44x/ppc476.c
+++ b/arch/powerpc/platforms/44x/ppc476.c
@@ -219,7 +219,7 @@ static int board_rev = -1;
static int __init ppc47x_get_board_rev(void)
{
int reg;
- u8 *fpga;
+ u8 __iomem *fpga;
struct device_node *np = NULL;
if (of_machine_is_compatible("ibm,currituck")) {
@@ -233,7 +233,7 @@ static int __init ppc47x_get_board_rev(void)
if (!np)
goto fail;
- fpga = (u8 *) of_iomap(np, 0);
+ fpga = of_iomap(np, 0);
of_node_put(np);
if (!fpga)
goto fail;
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index 409481016928..bb789f33c70e 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -135,11 +135,10 @@ static int mcu_gpiochip_add(struct mcu *mcu)
return gpiochip_add_data(gc, mcu);
}
-static int mcu_gpiochip_remove(struct mcu *mcu)
+static void mcu_gpiochip_remove(struct mcu *mcu)
{
kfree(mcu->gc.label);
gpiochip_remove(&mcu->gc);
- return 0;
}
static int mcu_probe(struct i2c_client *client)
@@ -198,9 +197,7 @@ static int mcu_remove(struct i2c_client *client)
glob_mcu = NULL;
}
- ret = mcu_gpiochip_remove(mcu);
- if (ret)
- return ret;
+ mcu_gpiochip_remove(mcu);
kfree(mcu);
return 0;
}
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 60e4e97a929d..260fbad7967b 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -3,7 +3,9 @@
# Makefile for the PowerPC 85xx linux kernel.
#
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_FSL_PMC) += mpc85xx_pm_ops.o
+ifneq ($(CONFIG_FSL_CORENET_RCPM),y)
+obj-$(CONFIG_SMP) += mpc85xx_pm_ops.o
+endif
obj-y += common.o
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c b/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
index 7c0133f558d0..4a8af80011a6 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
@@ -17,6 +17,7 @@
static struct ccsr_guts __iomem *guts;
+#ifdef CONFIG_FSL_PMC
static void mpc85xx_irq_mask(int cpu)
{
@@ -49,6 +50,7 @@ static void mpc85xx_cpu_up_prepare(int cpu)
{
}
+#endif
static void mpc85xx_freeze_time_base(bool freeze)
{
@@ -76,10 +78,12 @@ static const struct of_device_id mpc85xx_smp_guts_ids[] = {
static const struct fsl_pm_ops mpc85xx_pm_ops = {
.freeze_time_base = mpc85xx_freeze_time_base,
+#ifdef CONFIG_FSL_PMC
.irq_mask = mpc85xx_irq_mask,
.irq_unmask = mpc85xx_irq_unmask,
.cpu_die = mpc85xx_cpu_die,
.cpu_up_prepare = mpc85xx_cpu_up_prepare,
+#endif
};
int __init mpc85xx_setup_pmc(void)
@@ -94,9 +98,8 @@ int __init mpc85xx_setup_pmc(void)
pr_err("Could not map guts node address\n");
return -ENOMEM;
}
+ qoriq_pm_ops = &mpc85xx_pm_ops;
}
- qoriq_pm_ops = &mpc85xx_pm_ops;
-
return 0;
}
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index c6df294054fe..83f4a6389a28 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -40,7 +40,6 @@ struct epapr_spin_table {
u32 pir;
};
-#ifdef CONFIG_HOTPLUG_CPU
static u64 timebase;
static int tb_req;
static int tb_valid;
@@ -112,6 +111,7 @@ static void mpc85xx_take_timebase(void)
local_irq_restore(flags);
}
+#ifdef CONFIG_HOTPLUG_CPU
static void smp_85xx_cpu_offline_self(void)
{
unsigned int cpu = smp_processor_id();
@@ -495,21 +495,21 @@ void __init mpc85xx_smp_init(void)
smp_85xx_ops.probe = NULL;
}
-#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_FSL_CORENET_RCPM
+ /* Assign a value to qoriq_pm_ops on PPC_E500MC */
fsl_rcpm_init();
-#endif
-
-#ifdef CONFIG_FSL_PMC
+#else
+ /* Assign a value to qoriq_pm_ops on !PPC_E500MC */
mpc85xx_setup_pmc();
#endif
if (qoriq_pm_ops) {
smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
+#ifdef CONFIG_HOTPLUG_CPU
smp_85xx_ops.cpu_offline_self = smp_85xx_cpu_offline_self;
smp_85xx_ops.cpu_die = qoriq_cpu_kill;
- }
#endif
+ }
smp_ops = &smp_85xx_ops;
#ifdef CONFIG_KEXEC_CORE
diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
index 30172e52e16b..4d82c92ddd52 100644
--- a/arch/powerpc/platforms/book3s/vas-api.c
+++ b/arch/powerpc/platforms/book3s/vas-api.c
@@ -303,7 +303,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
return -EINVAL;
}
- if (!cp_inst->coproc->vops && !cp_inst->coproc->vops->open_win) {
+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->open_win) {
pr_err("VAS API is not registered\n");
return -EACCES;
}
@@ -373,7 +373,7 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
return -EINVAL;
}
- if (!cp_inst->coproc->vops && !cp_inst->coproc->vops->paste_addr) {
+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
pr_err("%s(): VAS API is not registered\n", __func__);
return -EACCES;
}
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 0d715db434dc..29d2036dcc9d 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -27,7 +27,6 @@ extern void pmac_nvram_update(void);
extern unsigned char pmac_nvram_read_byte(int addr);
extern void pmac_nvram_write_byte(int addr, unsigned char val);
extern void pmac_pcibios_after_init(void);
-extern int of_show_percpuinfo(struct seq_file *m, int i);
extern void pmac_setup_pci_dma(void);
extern void pmac_check_ht_link(void);
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 86aee3f2483f..13e8a8a9841c 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -79,8 +79,6 @@ int pmac_newworld;
static int current_root_goodness = -1;
-extern struct machdep_calls pmac_md;
-
#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 00c5a59d82d9..717d1d30ade5 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -419,7 +419,7 @@ void __init opal_platform_dump_init(void)
int rc;
int dump_irq;
- /* ELOG not supported by firmware */
+ /* Dump not supported by firmware */
if (!opal_check_token(OPAL_DUMP_READ))
return;
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
index a191f4c60ce7..113bdb151f68 100644
--- a/arch/powerpc/platforms/powernv/opal-prd.c
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -369,6 +369,12 @@ static struct notifier_block opal_prd_event_nb = {
.priority = 0,
};
+static struct notifier_block opal_prd_event_nb2 = {
+ .notifier_call = opal_prd_msg_notifier,
+ .next = NULL,
+ .priority = 0,
+};
+
static int opal_prd_probe(struct platform_device *pdev)
{
int rc;
@@ -390,9 +396,10 @@ static int opal_prd_probe(struct platform_device *pdev)
return rc;
}
- rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb);
+ rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb2);
if (rc) {
pr_err("Couldn't register PRD2 event notifier\n");
+ opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
return rc;
}
@@ -401,6 +408,8 @@ static int opal_prd_probe(struct platform_device *pdev)
pr_err("failed to register miscdev\n");
opal_message_notifier_unregister(OPAL_MSG_PRD,
&opal_prd_event_nb);
+ opal_message_notifier_unregister(OPAL_MSG_PRD2,
+ &opal_prd_event_nb2);
return rc;
}
@@ -411,6 +420,7 @@ static int opal_prd_remove(struct platform_device *pdev)
{
misc_deregister(&opal_prd_dev);
opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
+ opal_message_notifier_unregister(OPAL_MSG_PRD2, &opal_prd_event_nb2);
return 0;
}
diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c
index 28aac933a439..deddbb233fde 100644
--- a/arch/powerpc/platforms/powernv/pci-sriov.c
+++ b/arch/powerpc/platforms/powernv/pci-sriov.c
@@ -9,9 +9,6 @@
#include "pci.h"
-/* for pci_dev_is_added() */
-#include "../../../../drivers/pci/pci.h"
-
/*
* The majority of the complexity in supporting SR-IOV on PowerNV comes from
* the need to put the MMIO space for each VF into a separate PE. Internally
@@ -228,9 +225,6 @@ disable_iov:
void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
{
- if (WARN_ON(pci_dev_is_added(pdev)))
- return;
-
if (pdev->is_virtfn) {
struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index d646c22e94ab..5ab44600c8d3 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -137,11 +137,6 @@ static void pseries_cpu_die(unsigned int cpu)
cpu, pcpu);
}
- /* Isolation and deallocation are definitely done by
- * drslot_chrp_cpu. If they were not they would be
- * done here. Change isolate state to Isolate and
- * change allocation-state to Unusable.
- */
paca_ptrs[cpu]->cpu_start = 0;
}
@@ -521,6 +516,27 @@ static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
return found;
}
+static int pseries_cpuhp_attach_nodes(struct device_node *dn)
+{
+ struct of_changeset cs;
+ int ret;
+
+ /*
+ * This device node is unattached but may have siblings; open-code the
+ * traversal.
+ */
+ for (of_changeset_init(&cs); dn != NULL; dn = dn->sibling) {
+ ret = of_changeset_attach_node(&cs, dn);
+ if (ret)
+ goto out;
+ }
+
+ ret = of_changeset_apply(&cs);
+out:
+ of_changeset_destroy(&cs);
+ return ret;
+}
+
static ssize_t dlpar_cpu_add(u32 drc_index)
{
struct device_node *dn, *parent;
@@ -563,7 +579,7 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
return -EINVAL;
}
- rc = dlpar_attach_node(dn, parent);
+ rc = pseries_cpuhp_attach_nodes(dn);
/* Regardless we are done with parent now */
of_node_put(parent);
@@ -600,6 +616,53 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
return rc;
}
+static unsigned int pseries_cpuhp_cache_use_count(const struct device_node *cachedn)
+{
+ unsigned int use_count = 0;
+ struct device_node *dn;
+
+ WARN_ON(!of_node_is_type(cachedn, "cache"));
+
+ for_each_of_cpu_node(dn) {
+ if (of_find_next_cache_node(dn) == cachedn)
+ use_count++;
+ }
+
+ for_each_node_by_type(dn, "cache") {
+ if (of_find_next_cache_node(dn) == cachedn)
+ use_count++;
+ }
+
+ return use_count;
+}
+
+static int pseries_cpuhp_detach_nodes(struct device_node *cpudn)
+{
+ struct device_node *dn;
+ struct of_changeset cs;
+ int ret = 0;
+
+ of_changeset_init(&cs);
+ ret = of_changeset_detach_node(&cs, cpudn);
+ if (ret)
+ goto out;
+
+ dn = cpudn;
+ while ((dn = of_find_next_cache_node(dn))) {
+ if (pseries_cpuhp_cache_use_count(dn) > 1)
+ break;
+
+ ret = of_changeset_detach_node(&cs, dn);
+ if (ret)
+ goto out;
+ }
+
+ ret = of_changeset_apply(&cs);
+out:
+ of_changeset_destroy(&cs);
+ return ret;
+}
+
static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
int rc;
@@ -621,7 +684,7 @@ static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
return rc;
}
- rc = dlpar_detach_node(dn);
+ rc = pseries_cpuhp_detach_nodes(dn);
if (rc) {
int saved_rc = rc;
@@ -673,231 +736,18 @@ static int dlpar_cpu_remove_by_index(u32 drc_index)
return rc;
}
-static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
-{
- struct device_node *dn;
- int cpus_found = 0;
- int rc;
-
- /* We want to find cpus_to_remove + 1 CPUs to ensure we do not
- * remove the last CPU.
- */
- for_each_node_by_type(dn, "cpu") {
- cpus_found++;
-
- if (cpus_found > cpus_to_remove) {
- of_node_put(dn);
- break;
- }
-
- /* Note that cpus_found is always 1 ahead of the index
- * into the cpu_drcs array, so we use cpus_found - 1
- */
- rc = of_property_read_u32(dn, "ibm,my-drc-index",
- &cpu_drcs[cpus_found - 1]);
- if (rc) {
- pr_warn("Error occurred getting drc-index for %pOFn\n",
- dn);
- of_node_put(dn);
- return -1;
- }
- }
-
- if (cpus_found < cpus_to_remove) {
- pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
- cpus_found, cpus_to_remove);
- } else if (cpus_found == cpus_to_remove) {
- pr_warn("Cannot remove all CPUs\n");
- }
-
- return cpus_found;
-}
-
-static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
-{
- u32 *cpu_drcs;
- int cpus_found;
- int cpus_removed = 0;
- int i, rc;
-
- pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);
-
- cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);
- if (!cpu_drcs)
- return -EINVAL;
-
- cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
- if (cpus_found <= cpus_to_remove) {
- kfree(cpu_drcs);
- return -EINVAL;
- }
-
- for (i = 0; i < cpus_to_remove; i++) {
- rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
- if (rc)
- break;
-
- cpus_removed++;
- }
-
- if (cpus_removed != cpus_to_remove) {
- pr_warn("CPU hot-remove failed, adding back removed CPUs\n");
-
- for (i = 0; i < cpus_removed; i++)
- dlpar_cpu_add(cpu_drcs[i]);
-
- rc = -EINVAL;
- } else {
- rc = 0;
- }
-
- kfree(cpu_drcs);
- return rc;
-}
-
-static int find_drc_info_cpus_to_add(struct device_node *cpus,
- struct property *info,
- u32 *cpu_drcs, u32 cpus_to_add)
-{
- struct of_drc_info drc;
- const __be32 *value;
- u32 count, drc_index;
- int cpus_found = 0;
- int i, j;
-
- if (!info)
- return -1;
-
- value = of_prop_next_u32(info, NULL, &count);
- if (value)
- value++;
-
- for (i = 0; i < count; i++) {
- of_read_drc_info_cell(&info, &value, &drc);
- if (strncmp(drc.drc_type, "CPU", 3))
- break;
-
- drc_index = drc.drc_index_start;
- for (j = 0; j < drc.num_sequential_elems; j++) {
- if (dlpar_cpu_exists(cpus, drc_index))
- continue;
-
- cpu_drcs[cpus_found++] = drc_index;
-
- if (cpus_found == cpus_to_add)
- return cpus_found;
-
- drc_index += drc.sequential_inc;
- }
- }
-
- return cpus_found;
-}
-
-static int find_drc_index_cpus_to_add(struct device_node *cpus,
- u32 *cpu_drcs, u32 cpus_to_add)
-{
- int cpus_found = 0;
- int index, rc;
- u32 drc_index;
-
- /* Search the ibm,drc-indexes array for possible CPU drcs to
- * add. Note that the format of the ibm,drc-indexes array is
- * the number of entries in the array followed by the array
- * of drc values so we start looking at index = 1.
- */
- index = 1;
- while (cpus_found < cpus_to_add) {
- rc = of_property_read_u32_index(cpus, "ibm,drc-indexes",
- index++, &drc_index);
-
- if (rc)
- break;
-
- if (dlpar_cpu_exists(cpus, drc_index))
- continue;
-
- cpu_drcs[cpus_found++] = drc_index;
- }
-
- return cpus_found;
-}
-
-static int dlpar_cpu_add_by_count(u32 cpus_to_add)
-{
- struct device_node *parent;
- struct property *info;
- u32 *cpu_drcs;
- int cpus_added = 0;
- int cpus_found;
- int i, rc;
-
- pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);
-
- cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);
- if (!cpu_drcs)
- return -EINVAL;
-
- parent = of_find_node_by_path("/cpus");
- if (!parent) {
- pr_warn("Could not find CPU root node in device tree\n");
- kfree(cpu_drcs);
- return -1;
- }
-
- info = of_find_property(parent, "ibm,drc-info", NULL);
- if (info)
- cpus_found = find_drc_info_cpus_to_add(parent, info, cpu_drcs, cpus_to_add);
- else
- cpus_found = find_drc_index_cpus_to_add(parent, cpu_drcs, cpus_to_add);
-
- of_node_put(parent);
-
- if (cpus_found < cpus_to_add) {
- pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
- cpus_found, cpus_to_add);
- kfree(cpu_drcs);
- return -EINVAL;
- }
-
- for (i = 0; i < cpus_to_add; i++) {
- rc = dlpar_cpu_add(cpu_drcs[i]);
- if (rc)
- break;
-
- cpus_added++;
- }
-
- if (cpus_added < cpus_to_add) {
- pr_warn("CPU hot-add failed, removing any added CPUs\n");
-
- for (i = 0; i < cpus_added; i++)
- dlpar_cpu_remove_by_index(cpu_drcs[i]);
-
- rc = -EINVAL;
- } else {
- rc = 0;
- }
-
- kfree(cpu_drcs);
- return rc;
-}
-
int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
- u32 count, drc_index;
+ u32 drc_index;
int rc;
- count = hp_elog->_drc_u.drc_count;
drc_index = hp_elog->_drc_u.drc_index;
lock_device_hotplug();
switch (hp_elog->action) {
case PSERIES_HP_ELOG_ACTION_REMOVE:
- if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
- rc = dlpar_cpu_remove_by_count(count);
- else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
+ if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
rc = dlpar_cpu_remove_by_index(drc_index);
/*
* Setting the isolation state of an UNISOLATED/CONFIGURED
@@ -911,9 +761,7 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
rc = -EINVAL;
break;
case PSERIES_HP_ELOG_ACTION_ADD:
- if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
- rc = dlpar_cpu_add_by_count(count);
- else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+ if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
rc = dlpar_cpu_add(drc_index);
else
rc = -EINVAL;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index a52af8fbf571..49b401536d29 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -929,8 +929,10 @@ static void find_existing_ddw_windows_named(const char *name)
}
window = ddw_list_new_entry(pdn, dma64);
- if (!window)
+ if (!window) {
+ of_node_put(pdn);
break;
+ }
spin_lock(&dma_win_list_lock);
list_add(&window->list, &dma_win_list);
@@ -1159,14 +1161,15 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
/* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */
static int iommu_get_page_shift(u32 query_page_size)
{
- /* Supported IO page-sizes according to LoPAR */
+ /* Supported IO page-sizes according to LoPAR, note that 2M is out of order */
const int shift[] = {
__builtin_ctzll(SZ_4K), __builtin_ctzll(SZ_64K), __builtin_ctzll(SZ_16M),
__builtin_ctzll(SZ_32M), __builtin_ctzll(SZ_64M), __builtin_ctzll(SZ_128M),
- __builtin_ctzll(SZ_256M), __builtin_ctzll(SZ_16G)
+ __builtin_ctzll(SZ_256M), __builtin_ctzll(SZ_16G), __builtin_ctzll(SZ_2M)
};
int i = ARRAY_SIZE(shift) - 1;
+ int ret = 0;
/*
* On LoPAR, ibm,query-pe-dma-window outputs "IO Page Sizes" using a bit field:
@@ -1176,11 +1179,10 @@ static int iommu_get_page_shift(u32 query_page_size)
*/
for (; i >= 0 ; i--) {
if (query_page_size & (1 << i))
- return shift[i];
+ ret = max(ret, shift[i]);
}
- /* No valid page size found. */
- return 0;
+ return ret;
}
static struct property *ddw_property_create(const char *propname, u32 liobn, u64 dma_addr,
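A worked example of why iommu_get_page_shift() now takes the maximum over all supported shifts instead of returning on the first set bit: with the 2M entry appended out of order at the end of shift[], the first bit scanned from the top can name a smaller page size than the largest one firmware offers. The standalone sketch below is illustrative only; the bit positions simply mirror the shift[] table above:

	#include <stdio.h>

	/* 4K 64K 16M 32M 64M 128M 256M 16G, then the out-of-order 2M entry */
	static const int shift[] = { 12, 16, 24, 25, 26, 27, 28, 34, 21 };

	static int largest_shift(unsigned int query_page_size)
	{
		int i, ret = 0;

		for (i = 8; i >= 0; i--)
			if (query_page_size & (1u << i))
				ret = shift[i] > ret ? shift[i] : ret;
		return ret;
	}

	int main(void)
	{
		unsigned int query = (1u << 6) | (1u << 8); /* 256M and 2M supported */

		/* Prints 28 (256M); returning shift[8] on the first hit would give 21 (2M) */
		printf("%d\n", largest_shift(query));
		return 0;
	}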
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index e83e0891272d..210a37a065fb 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -63,6 +63,27 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
static int delete_dt_node(struct device_node *dn)
{
+ struct device_node *pdn;
+ bool is_platfac;
+
+ pdn = of_get_parent(dn);
+ is_platfac = of_node_is_type(dn, "ibm,platform-facilities") ||
+ of_node_is_type(pdn, "ibm,platform-facilities");
+ of_node_put(pdn);
+
+ /*
+ * The drivers that bind to nodes in the platform-facilities
+ * hierarchy don't support node removal, and the removal directive
+ * from firmware is always followed by an add of an equivalent
+ * node. The capability (e.g. RNG, encryption, compression)
+ * represented by the node is never interrupted by the migration.
+ * So ignore changes to this part of the tree.
+ */
+ if (is_platfac) {
+ pr_notice("ignoring remove operation for %pOFfp\n", dn);
+ return 0;
+ }
+
pr_debug("removing node %pOFfp\n", dn);
dlpar_detach_node(dn);
return 0;
@@ -222,6 +243,19 @@ static int add_dt_node(struct device_node *parent_dn, __be32 drc_index)
if (!dn)
return -ENOENT;
+ /*
+ * Since delete_dt_node() ignores this node type, this is the
+ * necessary counterpart. We also know that a platform-facilities
+ * node returned from dlpar_configure_connector() has children
+ * attached, and dlpar_attach_node() only adds the parent, leaking
+ * the children. So ignore these on the add side for now.
+ */
+ if (of_node_is_type(dn, "ibm,platform-facilities")) {
+ pr_notice("ignoring add operation for %pOF\n", dn);
+ dlpar_free_cc_nodes(dn);
+ return 0;
+ }
+
rc = dlpar_attach_node(dn, parent_dn);
if (rc)
dlpar_free_cc_nodes(dn);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index f79126f16258..2188054470c1 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -74,7 +74,6 @@
#include <asm/hvconsole.h>
#include "pseries.h"
-#include "../../../../drivers/pci/pci.h"
DEFINE_STATIC_KEY_FALSE(shared_processor);
EXPORT_SYMBOL(shared_processor);
@@ -750,7 +749,7 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
const int *indexes;
struct device_node *dn = pci_device_to_OF_node(pdev);
- if (!pdev->is_physfn || pci_dev_is_added(pdev))
+ if (!pdev->is_physfn)
return;
/*Firmware must support open sriov otherwise dont configure*/
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S
index efeeb1b885a1..329b9c4ae542 100644
--- a/arch/powerpc/sysdev/dcr-low.S
+++ b/arch/powerpc/sysdev/dcr-low.S
@@ -11,7 +11,7 @@
#include <asm/export.h>
#define DCR_ACCESS_PROLOG(table) \
- cmpli cr0,r3,1024; \
+ cmplwi cr0,r3,1024; \
rlwinm r3,r3,4,18,27; \
lis r5,table@h; \
ori r5,r5,table@l; \
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index dd8241c009e5..8b28ff9d98d1 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -3264,8 +3264,7 @@ static void show_task(struct task_struct *volatile tsk)
* appropriate for calling from xmon. This could be moved
* to a common, generic, routine used by both.
*/
- state = (p_state == 0) ? 'R' :
- (p_state < 0) ? 'U' :
+ state = (p_state == TASK_RUNNING) ? 'R' :
(p_state & TASK_UNINTERRUPTIBLE) ? 'D' :
(p_state & TASK_STOPPED) ? 'T' :
(p_state & TASK_TRACED) ? 'C' :