Diffstat (limited to 'arch')
67 files changed, 406 insertions, 128 deletions
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 4067f5169144..955b0362cdfb 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -132,7 +132,7 @@ AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W) ifeq ($(CONFIG_THUMB2_KERNEL),y) CFLAGS_ISA :=-Wa,-mimplicit-it=always $(AFLAGS_NOWARN) -AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb -D__thumb2__=2 +AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb CFLAGS_ISA +=-mthumb else CFLAGS_ISA :=$(call cc-option,-marm,) $(AFLAGS_NOWARN) diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts index d1971ddf06a5..7f755e5a4624 100644 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts @@ -751,7 +751,7 @@ }; pca9849@75 { - compatible = "nxp,pca849"; + compatible = "nxp,pca9849"; reg = <0x75>; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/imx7d-smegw01.dts b/arch/arm/boot/dts/imx7d-smegw01.dts index 546268b8d0b1..c0f00f5db11e 100644 --- a/arch/arm/boot/dts/imx7d-smegw01.dts +++ b/arch/arm/boot/dts/imx7d-smegw01.dts @@ -198,6 +198,7 @@ &usbotg2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usbotg2>; + over-current-active-low; dr_mode = "host"; status = "okay"; }; @@ -374,7 +375,7 @@ pinctrl_usbotg2: usbotg2grp { fsl,pins = < - MX7D_PAD_UART3_RTS_B__USB_OTG2_OC 0x04 + MX7D_PAD_UART3_RTS_B__USB_OTG2_OC 0x5c >; }; diff --git a/arch/arm/boot/dts/nuvoton-wpcm450.dtsi b/arch/arm/boot/dts/nuvoton-wpcm450.dtsi index b637241316bb..fd671c7a1e5d 100644 --- a/arch/arm/boot/dts/nuvoton-wpcm450.dtsi +++ b/arch/arm/boot/dts/nuvoton-wpcm450.dtsi @@ -480,6 +480,7 @@ reg = <0xc8000000 0x1000>, <0xc0000000 0x4000000>; reg-names = "control", "memory"; clocks = <&clk 0>; + nuvoton,shm = <&shm>; status = "disabled"; }; diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 971e74546fb1..13e62c7c25dc 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -53,7 +53,12 @@ $(obj)/%-core.S: $(src)/%-armv4.pl clean-files += poly1305-core.S sha256-core.S sha512-core.S +aflags-thumb2-$(CONFIG_THUMB2_KERNEL) := -U__thumb2__ -D__thumb2__=1 + +AFLAGS_sha256-core.o += $(aflags-thumb2-y) +AFLAGS_sha512-core.o += $(aflags-thumb2-y) + # massage the perlasm code a bit so we only get the NEON routine if we need it poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5 poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7 -AFLAGS_poly1305-core.o += $(poly1305-aflags-y) +AFLAGS_poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y) diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index c1494a4dee25..53f2d8774fdb 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -161,7 +161,7 @@ void __init paging_init(const struct machine_desc *mdesc) mpu_setup(); /* allocate the zero page. 
*/ - zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!zero_page) panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, PAGE_SIZE, PAGE_SIZE); diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index fa6999e24b07..e43f6d716b4b 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -6,6 +6,7 @@ * VM_EXEC */ #include <asm/asm-offsets.h> +#include <asm/pgtable.h> #include <asm/thread_info.h> #ifdef CONFIG_CPU_V7M diff --git a/arch/arm64/boot/dts/freescale/imx8dxl.dtsi b/arch/arm64/boot/dts/freescale/imx8dxl.dtsi index 0c64b9194621..214f21bd0cb4 100644 --- a/arch/arm64/boot/dts/freescale/imx8dxl.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8dxl.dtsi @@ -164,7 +164,7 @@ sc_pwrkey: keys { compatible = "fsl,imx8qxp-sc-key", "fsl,imx-sc-key"; - linux,keycode = <KEY_POWER>; + linux,keycodes = <KEY_POWER>; wakeup-source; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts b/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts index 752f409a30b1..9889319d4f04 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts @@ -88,6 +88,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_watchdog_gpio>; compatible = "linux,wdt-gpio"; + always-running; gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>; hw_algo = "level"; /* Reset triggers in 2..3 seconds */ diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h index 83c8f715cd90..b1f11098d248 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h +++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h @@ -602,7 +602,7 @@ #define MX8MM_IOMUXC_UART1_RXD_GPIO5_IO22 0x234 0x49C 0x000 0x5 0x0 #define MX8MM_IOMUXC_UART1_RXD_TPSMP_HDATA24 0x234 0x49C 0x000 0x7 0x0 #define MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x238 0x4A0 0x000 0x0 0x0 -#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x0 +#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x1 #define MX8MM_IOMUXC_UART1_TXD_ECSPI3_MOSI 0x238 0x4A0 0x000 0x1 0x0 #define MX8MM_IOMUXC_UART1_TXD_GPIO5_IO23 0x238 0x4A0 0x000 0x5 0x0 #define MX8MM_IOMUXC_UART1_TXD_TPSMP_HDATA25 0x238 0x4A0 0x000 0x7 0x0 diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso index 3ea73a6886ff..f6ad1a4b8b66 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso @@ -33,7 +33,6 @@ pinctrl-0 = <&pinctrl_uart2>; rts-gpios = <&gpio5 29 GPIO_ACTIVE_LOW>; cts-gpios = <&gpio5 28 GPIO_ACTIVE_LOW>; - uart-has-rtscts; status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso index 2fa635e1c1a8..1f8ea20dfafc 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso @@ -33,7 +33,6 @@ pinctrl-0 = <&pinctrl_uart2>; rts-gpios = <&gpio5 29 GPIO_ACTIVE_LOW>; cts-gpios = <&gpio5 28 GPIO_ACTIVE_LOW>; - uart-has-rtscts; status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi index 244ef8d6cc68..7761d5671cb1 100644 --- 
a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi @@ -222,7 +222,6 @@ pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_bten>; cts-gpios = <&gpio5 8 GPIO_ACTIVE_LOW>; rts-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>; - uart-has-rtscts; status = "okay"; bluetooth { diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts index 6433c205f8dd..64b366e83fa1 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts @@ -733,7 +733,6 @@ dtr-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>; dsr-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; dcd-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>; - uart-has-rtscts; status = "okay"; }; @@ -749,7 +748,6 @@ pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>; cts-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>; rts-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>; - uart-has-rtscts; status = "okay"; }; @@ -758,7 +756,6 @@ pinctrl-0 = <&pinctrl_uart4>, <&pinctrl_uart4_gpio>; cts-gpios = <&gpio5 11 GPIO_ACTIVE_LOW>; rts-gpios = <&gpio5 12 GPIO_ACTIVE_LOW>; - uart-has-rtscts; status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts index 32872b0b1aaf..e8bc1fccc47b 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts @@ -664,7 +664,6 @@ pinctrl-0 = <&pinctrl_uart1>, <&pinctrl_uart1_gpio>; rts-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>; cts-gpios = <&gpio4 24 GPIO_ACTIVE_LOW>; - uart-has-rtscts; status = "okay"; }; @@ -681,7 +680,6 @@ pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>; rts-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; cts-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; - uart-has-rtscts; status = "okay"; bluetooth { @@ -699,7 +697,6 @@ dtr-gpios = <&gpio4 3 GPIO_ACTIVE_LOW>; dsr-gpios = <&gpio4 4 GPIO_ACTIVE_LOW>; dcd-gpios = <&gpio4 6 GPIO_ACTIVE_LOW>; - uart-has-rtscts; status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts index 8ce562246a08..acc2ba8e00a8 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts @@ -581,7 +581,6 @@ dtr-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>; dsr-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; dcd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>; - uart-has-rtscts; status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi index 0d454e0e2f7c..702d87621bb4 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi @@ -98,6 +98,7 @@ off-on-delay = <500000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_eth>; + regulator-always-on; regulator-boot-on; regulator-max-microvolt = <3300000>; regulator-min-microvolt = <3300000>; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts index b9444e4a3d2d..7c12518dbc96 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts +++ b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts @@ -643,7 +643,6 @@ pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>; rts-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; cts-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; - uart-has-rtscts; status = "okay"; bluetooth { diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts 
b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts index ceeca4966fc5..8eb7d5ee38da 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts +++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts @@ -623,7 +623,6 @@ pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>; cts-gpios = <&gpio3 21 GPIO_ACTIVE_LOW>; rts-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>; - uart-has-rtscts; status = "okay"; bluetooth { diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 31d13a6001df..de4ff90785b2 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -48,8 +48,17 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); }) extern spinlock_t efi_rt_lock; +extern u64 *efi_rt_stack_top; efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...); +/* + * efi_rt_stack_top[-1] contains the value the stack pointer had before + * switching to the EFI runtime stack. + */ +#define current_in_efi() \ + (!preemptible() && efi_rt_stack_top != NULL && \ + on_task_stack(current, READ_ONCE(efi_rt_stack_top[-1]), 1)) + #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) /* diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 4e5354beafb0..66ec8caa6ac0 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -106,4 +106,19 @@ static inline struct stack_info stackinfo_get_sdei_critical(void) #define stackinfo_get_sdei_critical() stackinfo_get_unknown() #endif +#ifdef CONFIG_EFI +extern u64 *efi_rt_stack_top; + +static inline struct stack_info stackinfo_get_efi(void) +{ + unsigned long high = (u64)efi_rt_stack_top; + unsigned long low = high - THREAD_SIZE; + + return (struct stack_info) { + .low = low, + .high = high, + }; +} +#endif + #endif /* __ASM_STACKTRACE_H */ diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S index d872d18101d8..e8ae803662cf 100644 --- a/arch/arm64/kernel/efi-rt-wrapper.S +++ b/arch/arm64/kernel/efi-rt-wrapper.S @@ -46,7 +46,10 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) mov x4, x6 blr x8 + mov x16, sp mov sp, x29 + str xzr, [x16, #8] // clear recorded task SP value + ldp x1, x2, [sp, #16] cmp x2, x18 ldp x29, x30, [sp], #112 @@ -71,6 +74,9 @@ SYM_FUNC_END(__efi_rt_asm_wrapper) SYM_CODE_START(__efi_rt_asm_recover) mov sp, x30 + ldr_l x16, efi_rt_stack_top // clear recorded task SP value + str xzr, [x16, #-8] + ldp x19, x20, [sp, #32] ldp x21, x22, [sp, #48] ldp x23, x24, [sp, #64] diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index fab05de2e12d..b273900f4566 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -11,6 +11,7 @@ #include <linux/init.h> #include <asm/efi.h> +#include <asm/stacktrace.h> static bool region_is_misaligned(const efi_memory_desc_t *md) { @@ -154,7 +155,7 @@ asmlinkage efi_status_t __efi_rt_asm_recover(void); bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg) { /* Check whether the exception occurred while running the firmware */ - if (current_work() != &efi_rts_work.work || regs->pc >= TASK_SIZE_64) + if (!current_in_efi() || regs->pc >= TASK_SIZE_64) return false; pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg); diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 117e2c180f3c..83154303e682 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -5,6 +5,7 @@ * Copyright (C) 2012 ARM Ltd. 
*/ #include <linux/kernel.h> +#include <linux/efi.h> #include <linux/export.h> #include <linux/ftrace.h> #include <linux/sched.h> @@ -12,6 +13,7 @@ #include <linux/sched/task_stack.h> #include <linux/stacktrace.h> +#include <asm/efi.h> #include <asm/irq.h> #include <asm/stack_pointer.h> #include <asm/stacktrace.h> @@ -186,6 +188,13 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) : stackinfo_get_unknown(); \ }) +#define STACKINFO_EFI \ + ({ \ + ((task == current) && current_in_efi()) \ + ? stackinfo_get_efi() \ + : stackinfo_get_unknown(); \ + }) + noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs) @@ -200,6 +209,9 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, STACKINFO_SDEI(normal), STACKINFO_SDEI(critical), #endif +#ifdef CONFIG_EFI + STACKINFO_EFI, +#endif }; struct unwind_state state = { .stacks = stacks, diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 5626ddb540ce..cf4c495a4321 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -1079,7 +1079,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, /* uaccess failed, don't leave stale tags */ if (num_tags != MTE_GRANULES_PER_PAGE) - mte_clear_page_tags(page); + mte_clear_page_tags(maddr); set_page_mte_tagged(page); kvm_release_pfn_dirty(pfn); diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 94a666dd1443..2642e9ce2819 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -2187,7 +2187,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev, ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) | ite->collection->collection_id; val = cpu_to_le64(val); - return kvm_write_guest_lock(kvm, gpa, &val, ite_esz); + return vgic_write_guest_lock(kvm, gpa, &val, ite_esz); } /** @@ -2339,7 +2339,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev, (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) | (dev->num_eventid_bits - 1)); val = cpu_to_le64(val); - return kvm_write_guest_lock(kvm, ptr, &val, dte_esz); + return vgic_write_guest_lock(kvm, ptr, &val, dte_esz); } /** @@ -2526,7 +2526,7 @@ static int vgic_its_save_cte(struct vgic_its *its, ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) | collection->collection_id); val = cpu_to_le64(val); - return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz); + return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz); } /* @@ -2607,7 +2607,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its) */ val = 0; BUG_ON(cte_esz > sizeof(val)); - ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz); + ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz); return ret; } @@ -2743,7 +2743,6 @@ static int vgic_its_has_attr(struct kvm_device *dev, static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr) { const struct vgic_its_abi *abi = vgic_its_get_abi(its); - struct vgic_dist *dist = &kvm->arch.vgic; int ret = 0; if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */ @@ -2763,9 +2762,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr) vgic_its_reset(kvm, its); break; case KVM_DEV_ARM_ITS_SAVE_TABLES: - dist->save_its_tables_in_progress = true; ret = abi->save_tables(its); - dist->save_its_tables_in_progress = false; break; case KVM_DEV_ARM_ITS_RESTORE_TABLES: ret = abi->restore_tables(its); @@ -2792,7 +2789,7 @@ bool 
kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; - return dist->save_its_tables_in_progress; + return dist->table_write_in_progress; } static int vgic_its_set_attr(struct kvm_device *dev, diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 2074521d4a8c..684bdfaad4a9 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -339,7 +339,7 @@ retry: if (status) { /* clear consumed data */ val &= ~(1 << bit_nr); - ret = kvm_write_guest_lock(kvm, ptr, &val, 1); + ret = vgic_write_guest_lock(kvm, ptr, &val, 1); if (ret) return ret; } @@ -350,26 +350,23 @@ retry: * The deactivation of the doorbell interrupt will trigger the * unmapping of the associated vPE. */ -static void unmap_all_vpes(struct vgic_dist *dist) +static void unmap_all_vpes(struct kvm *kvm) { - struct irq_desc *desc; + struct vgic_dist *dist = &kvm->arch.vgic; int i; - for (i = 0; i < dist->its_vm.nr_vpes; i++) { - desc = irq_to_desc(dist->its_vm.vpes[i]->irq); - irq_domain_deactivate_irq(irq_desc_get_irq_data(desc)); - } + for (i = 0; i < dist->its_vm.nr_vpes; i++) + free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i)); } -static void map_all_vpes(struct vgic_dist *dist) +static void map_all_vpes(struct kvm *kvm) { - struct irq_desc *desc; + struct vgic_dist *dist = &kvm->arch.vgic; int i; - for (i = 0; i < dist->its_vm.nr_vpes; i++) { - desc = irq_to_desc(dist->its_vm.vpes[i]->irq); - irq_domain_activate_irq(irq_desc_get_irq_data(desc), false); - } + for (i = 0; i < dist->its_vm.nr_vpes; i++) + WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i), + dist->its_vm.vpes[i]->irq)); } /** @@ -394,7 +391,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) * and enabling of the doorbells have already been done. 
*/ if (kvm_vgic_global_state.has_gicv4_1) { - unmap_all_vpes(dist); + unmap_all_vpes(kvm); vlpi_avail = true; } @@ -437,14 +434,14 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) else val &= ~(1 << bit_nr); - ret = kvm_write_guest_lock(kvm, ptr, &val, 1); + ret = vgic_write_guest_lock(kvm, ptr, &val, 1); if (ret) goto out; } out: if (vlpi_avail) - map_all_vpes(dist); + map_all_vpes(kvm); return ret; } diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index ad06ba6c9b00..a413718be92b 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -222,6 +222,11 @@ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val) *val = !!(*ptr & mask); } +int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq) +{ + return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu); +} + /** * vgic_v4_init - Initialize the GICv4 data structures * @kvm: Pointer to the VM being initialized @@ -283,8 +288,7 @@ int vgic_v4_init(struct kvm *kvm) irq_flags &= ~IRQ_NOAUTOEN; irq_set_status_flags(irq, irq_flags); - ret = request_irq(irq, vgic_v4_doorbell_handler, - 0, "vcpu", vcpu); + ret = vgic_v4_request_vpe_irq(vcpu, irq); if (ret) { kvm_err("failed to allocate vcpu IRQ%d\n", irq); /* diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index 0c8da72953f0..7f7f3c5ed85a 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -6,6 +6,7 @@ #define __KVM_ARM_VGIC_NEW_H__ #include <linux/irqchip/arm-gic-common.h> +#include <asm/kvm_mmu.h> #define PRODUCT_ID_KVM 0x4b /* ASCII code K */ #define IMPLEMENTER_ARM 0x43b @@ -131,6 +132,19 @@ static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq) return vgic_irq_get_lr_count(irq) > 1; } +static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa, + const void *data, unsigned long len) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + int ret; + + dist->table_write_in_progress = true; + ret = kvm_write_guest_lock(kvm, gpa, data, len); + dist->table_write_in_progress = false; + + return ret; +} + /* * This struct provides an intermediate representation of the fields contained * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC @@ -331,5 +345,6 @@ int vgic_v4_init(struct kvm *kvm); void vgic_v4_teardown(struct kvm *kvm); void vgic_v4_configure_vsgis(struct kvm *kvm); void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val); +int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq); #endif diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index f6a502e8f02c..6e948d015332 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -170,6 +170,9 @@ ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, u asmlinkage long ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *tp) { + struct timespec64 rtn_tp; + s64 tick_ns; + /* * ia64's clock_gettime() syscall is implemented as a vdso call * fsys_clock_gettime(). 
Currently it handles only @@ -185,8 +188,8 @@ ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user * switch (which_clock) { case CLOCK_REALTIME: case CLOCK_MONOTONIC: - s64 tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq); - struct timespec64 rtn_tp = ns_to_timespec64(tick_ns); + tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq); + rtn_tp = ns_to_timespec64(tick_ns); return put_timespec64(&rtn_tp, tp); } diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 4dfe1f49c5c8..6817892a2c58 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -1303,7 +1303,7 @@ static char iodc_dbuf[4096] __page_aligned_bss; */ int pdc_iodc_print(const unsigned char *str, unsigned count) { - unsigned int i; + unsigned int i, found = 0; unsigned long flags; count = min_t(unsigned int, count, sizeof(iodc_dbuf)); @@ -1315,6 +1315,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) iodc_dbuf[i+0] = '\r'; iodc_dbuf[i+1] = '\n'; i += 2; + found = 1; goto print; default: iodc_dbuf[i] = str[i]; @@ -1330,7 +1331,7 @@ print: __pa(pdc_result), 0, __pa(iodc_dbuf), i, 0); spin_unlock_irqrestore(&pdc_lock, flags); - return i; + return i - found; } #if !defined(BOOTLOADER) diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 69c62933e952..ceb45f51d52e 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -126,6 +126,12 @@ long arch_ptrace(struct task_struct *child, long request, unsigned long tmp; long ret = -EIO; + unsigned long user_regs_struct_size = sizeof(struct user_regs_struct); +#ifdef CONFIG_64BIT + if (is_compat_task()) + user_regs_struct_size /= 2; +#endif + switch (request) { /* Read the word at location addr in the USER area. For ptraced @@ -166,7 +172,7 @@ long arch_ptrace(struct task_struct *child, long request, addr >= sizeof(struct pt_regs)) break; if (addr == PT_IAOQ0 || addr == PT_IAOQ1) { - data |= 3; /* ensure userspace privilege */ + data |= PRIV_USER; /* ensure userspace privilege */ } if ((addr >= PT_GR1 && addr <= PT_GR31) || addr == PT_IAOQ0 || addr == PT_IAOQ1 || @@ -181,14 +187,14 @@ long arch_ptrace(struct task_struct *child, long request, return copy_regset_to_user(child, task_user_regset_view(current), REGSET_GENERAL, - 0, sizeof(struct user_regs_struct), + 0, user_regs_struct_size, datap); case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child, task_user_regset_view(current), REGSET_GENERAL, - 0, sizeof(struct user_regs_struct), + 0, user_regs_struct_size, datap); case PTRACE_GETFPREGS: /* Get the child FPU state. 
*/ @@ -285,7 +291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, if (addr >= sizeof(struct pt_regs)) break; if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) { - data |= 3; /* ensure userspace privilege */ + data |= PRIV_USER; /* ensure userspace privilege */ } if (addr >= PT_FR0 && addr <= PT_FR31 + 4) { /* Special case, fp regs are 64 bits anyway */ @@ -302,6 +308,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, } } break; + case PTRACE_GETREGS: + case PTRACE_SETREGS: + case PTRACE_GETFPREGS: + case PTRACE_SETFPREGS: + return arch_ptrace(child, request, addr, data); default: ret = compat_ptrace_request(child, request, addr, data); @@ -484,7 +495,7 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val) case RI(iaoq[0]): case RI(iaoq[1]): /* set 2 lowest bits to ensure userspace privilege: */ - regs->iaoq[num - RI(iaoq[0])] = val | 3; + regs->iaoq[num - RI(iaoq[0])] = val | PRIV_USER; return; case RI(sar): regs->sar = val; return; diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h index dd39313242b4..d5cd16270c5d 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h @@ -97,6 +97,8 @@ static inline void tlb_flush(struct mmu_gather *tlb) { if (radix_enabled()) radix__tlb_flush(tlb); + + return hash__tlb_flush(tlb); } #ifdef CONFIG_SMP diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 77fa88c2aed0..eb6d094083fd 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -173,6 +173,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask) return flags; } +static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask) +{ + unsigned long flags = irq_soft_mask_return(); + + irq_soft_mask_set(flags & ~mask); + + return flags; +} + static inline unsigned long arch_local_save_flags(void) { return irq_soft_mask_return(); @@ -192,7 +201,7 @@ static inline void arch_local_irq_enable(void) static inline unsigned long arch_local_irq_save(void) { - return irq_soft_mask_set_return(IRQS_DISABLED); + return irq_soft_mask_or_return(IRQS_DISABLED); } static inline bool arch_irqs_disabled_flags(unsigned long flags) @@ -331,10 +340,11 @@ bool power_pmu_wants_prompt_pmi(void); * is a different soft-masked interrupt pending that requires hard * masking. */ -static inline bool should_hard_irq_enable(void) +static inline bool should_hard_irq_enable(struct pt_regs *regs) { if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) { - WARN_ON(irq_soft_mask_return() == IRQS_ENABLED); + WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED); + WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS)); WARN_ON(mfmsr() & MSR_EE); } @@ -347,8 +357,17 @@ static inline bool should_hard_irq_enable(void) * * TODO: Add test for 64e */ - if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi()) - return false; + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) { + if (!power_pmu_wants_prompt_pmi()) + return false; + /* + * If PMIs are disabled then IRQs should be disabled as well, + * so we shouldn't see this condition, check for it just in + * case because we are about to enable PMIs. 
+ */ + if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED)) + return false; + } if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK) return false; @@ -358,18 +377,16 @@ static inline bool should_hard_irq_enable(void) /* * Do the hard enabling, only call this if should_hard_irq_enable is true. + * This allows PMI interrupts to profile irq handlers. */ static inline void do_hard_irq_enable(void) { - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) { - WARN_ON(irq_soft_mask_return() == IRQS_ENABLED); - WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK); - WARN_ON(mfmsr() & MSR_EE); - } /* - * This allows PMI interrupts (and watchdog soft-NMIs) through. - * There is no other reason to enable this way. + * Asynch interrupts come in with IRQS_ALL_DISABLED, + * PACA_IRQ_HARD_DIS, and MSR[EE]=0. */ + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) + irq_soft_mask_andc_return(IRQS_PMI_DISABLED); get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS; __hard_irq_enable(); } @@ -452,7 +469,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs) return !(regs->msr & MSR_EE); } -static __always_inline bool should_hard_irq_enable(void) +static __always_inline bool should_hard_irq_enable(struct pt_regs *regs) { return false; } diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index f55c6fb34a3a..5712dd846263 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c @@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception) ppc_msgsync(); - if (should_hard_irq_enable()) + if (should_hard_irq_enable(regs)) do_hard_irq_enable(); kvmppc_clear_host_ipi(smp_processor_id()); diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S index d438ca74e96c..fdbee1093e2b 100644 --- a/arch/powerpc/kernel/head_85xx.S +++ b/arch/powerpc/kernel/head_85xx.S @@ -864,7 +864,7 @@ _GLOBAL(load_up_spe) * SPE unavailable trap from kernel - print a message, but let * the task use SPE in the kernel until it returns to user mode. */ -KernelSPE: +SYM_FUNC_START_LOCAL(KernelSPE) lwz r3,_MSR(r1) oris r3,r3,MSR_SPE@h stw r3,_MSR(r1) /* enable use of SPE after return */ @@ -881,6 +881,7 @@ KernelSPE: #endif .align 4,0 +SYM_FUNC_END(KernelSPE) #endif /* CONFIG_SPE */ /* diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index c5b9ce887483..c9535f2760b5 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp) irq = static_call(ppc_get_irq)(); /* We can hard enable interrupts now to allow perf interrupts */ - if (should_hard_irq_enable()) + if (should_hard_irq_enable(regs)) do_hard_irq_enable(); /* And finally process it */ diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index d68de3618741..e26eb6618ae5 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -515,7 +515,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt) } /* Conditionally hard-enable interrupts. */ - if (should_hard_irq_enable()) { + if (should_hard_irq_enable(regs)) { /* * Ensure a positive value is written to the decrementer, or * else some CPUs will continue to take decrementer exceptions. diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index af8854f9eae3..52085751f5f4 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -989,10 +989,13 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image) * linux,drconf-usable-memory properties. 
Get an approximate on the * number of usable memory entries and use for FDT size estimation. */ - usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) + - (2 * (resource_size(&crashk_res) / drmem_lmb_size()))); - - extra_size = (unsigned int)(usm_entries * sizeof(u64)); + if (drmem_lmb_size()) { + usm_entries = ((memory_hotplug_max() / drmem_lmb_size()) + + (2 * (resource_size(&crashk_res) / drmem_lmb_size()))); + extra_size = (unsigned int)(usm_entries * sizeof(u64)); + } else { + extra_size = 0; + } /* * Get the number of CPU nodes in the current DT. This allows to diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 0dce93ccaadf..e89281d3ba28 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -912,16 +912,15 @@ static int kvmppc_handle_debug(struct kvm_vcpu *vcpu) static void kvmppc_fill_pt_regs(struct pt_regs *regs) { - ulong r1, ip, msr, lr; + ulong r1, msr, lr; asm("mr %0, 1" : "=r"(r1)); asm("mflr %0" : "=r"(lr)); asm("mfmsr %0" : "=r"(msr)); - asm("bl 1f; 1: mflr %0" : "=r"(ip)); memset(regs, 0, sizeof(*regs)); regs->gpr[1] = r1; - regs->nip = ip; + regs->nip = _THIS_IP_; regs->msr = msr; regs->link = lr; } diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index cac727b01799..26245aaf12b8 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -234,6 +234,14 @@ void radix__mark_rodata_ro(void) end = (unsigned long)__end_rodata; radix__change_memory_range(start, end, _PAGE_WRITE); + + for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) { + end = start + PAGE_SIZE; + if (overlaps_interrupt_vector_text(start, end)) + radix__change_memory_range(start, end, _PAGE_WRITE); + else + break; + } } void radix__mark_initmem_nx(void) @@ -262,6 +270,22 @@ print_mapping(unsigned long start, unsigned long end, unsigned long size, bool e static unsigned long next_boundary(unsigned long addr, unsigned long end) { #ifdef CONFIG_STRICT_KERNEL_RWX + unsigned long stext_phys; + + stext_phys = __pa_symbol(_stext); + + // Relocatable kernel running at non-zero real address + if (stext_phys != 0) { + // The end of interrupts code at zero is a rodata boundary + unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys; + if (addr < end_intr) + return end_intr; + + // Start of relocated kernel text is a rodata boundary + if (addr < stext_phys) + return stext_phys; + } + if (addr < __pa_symbol(__srwx_boundary)) return __pa_symbol(__srwx_boundary); #endif diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 100e97daf76b..9d229ef7f86e 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -22,7 +22,7 @@ * Used to avoid races in counting the nest-pmu units during hotplug * register and unregister */ -static DEFINE_SPINLOCK(nest_init_lock); +static DEFINE_MUTEX(nest_init_lock); static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc); static struct imc_pmu **per_nest_pmu_arr; static cpumask_t nest_imc_cpumask; @@ -1629,7 +1629,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr) static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr) { if (pmu_ptr->domain == IMC_DOMAIN_NEST) { - spin_lock(&nest_init_lock); + mutex_lock(&nest_init_lock); if (nest_pmus == 1) { cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE); kfree(nest_imc_refc); @@ -1639,7 +1639,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr) if (nest_pmus > 0) nest_pmus--; - 
spin_unlock(&nest_init_lock); + mutex_unlock(&nest_init_lock); } /* Free core_imc memory */ @@ -1796,11 +1796,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id * rest. To handle the cpuhotplug callback unregister, we track * the number of nest pmus in "nest_pmus". */ - spin_lock(&nest_init_lock); + mutex_lock(&nest_init_lock); if (nest_pmus == 0) { ret = init_nest_pmu_ref(); if (ret) { - spin_unlock(&nest_init_lock); + mutex_unlock(&nest_init_lock); kfree(per_nest_pmu_arr); per_nest_pmu_arr = NULL; goto err_free_mem; @@ -1808,7 +1808,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id /* Register for cpu hotplug notification. */ ret = nest_pmu_cpumask_init(); if (ret) { - spin_unlock(&nest_init_lock); + mutex_unlock(&nest_init_lock); kfree(nest_imc_refc); kfree(per_nest_pmu_arr); per_nest_pmu_arr = NULL; @@ -1816,7 +1816,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id } } nest_pmus++; - spin_unlock(&nest_init_lock); + mutex_unlock(&nest_init_lock); break; case IMC_DOMAIN_CORE: ret = core_imc_pmu_cpumask_init(); diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index faf2c2177094..82153960ac00 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -80,6 +80,9 @@ ifeq ($(CONFIG_PERF_EVENTS),y) KBUILD_CFLAGS += -fno-omit-frame-pointer endif +# Avoid generating .eh_frame sections. +KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables + KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax) diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h index 7226e2462584..2c0f4c887289 100644 --- a/arch/riscv/include/asm/alternative-macros.h +++ b/arch/riscv/include/asm/alternative-macros.h @@ -46,7 +46,7 @@ .macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \ new_c_2, vendor_id_2, errata_id_2, enable_2 - ALTERNATIVE_CFG \old_c, \new_c_1, \vendor_id_1, \errata_id_1, \enable_1 + ALTERNATIVE_CFG "\old_c", "\new_c_1", \vendor_id_1, \errata_id_1, \enable_1 ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2 .endm diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 86328e3acb02..64ad1937e714 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -70,7 +70,6 @@ static_assert(RISCV_ISA_EXT_ID_MAX <= RISCV_ISA_EXT_MAX); */ enum riscv_isa_ext_key { RISCV_ISA_EXT_KEY_FPU, /* For 'F' and 'D' */ - RISCV_ISA_EXT_KEY_ZIHINTPAUSE, RISCV_ISA_EXT_KEY_SVINVAL, RISCV_ISA_EXT_KEY_MAX, }; @@ -91,8 +90,6 @@ static __always_inline int riscv_isa_ext2key(int num) return RISCV_ISA_EXT_KEY_FPU; case RISCV_ISA_EXT_d: return RISCV_ISA_EXT_KEY_FPU; - case RISCV_ISA_EXT_ZIHINTPAUSE: - return RISCV_ISA_EXT_KEY_ZIHINTPAUSE; case RISCV_ISA_EXT_SVINVAL: return RISCV_ISA_EXT_KEY_SVINVAL; default: diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h index fa70cfe507aa..14f5d27783b8 100644 --- a/arch/riscv/include/asm/vdso/processor.h +++ b/arch/riscv/include/asm/vdso/processor.h @@ -4,30 +4,26 @@ #ifndef __ASSEMBLY__ -#include <linux/jump_label.h> #include <asm/barrier.h> -#include <asm/hwcap.h> static inline void cpu_relax(void) { - if (!static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_ZIHINTPAUSE])) { #ifdef __riscv_muldiv - int dummy; - /* In lieu of a halt instruction, induce a long-latency stall. 
*/ - __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); + int dummy; + /* In lieu of a halt instruction, induce a long-latency stall. */ + __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); #endif - } else { - /* - * Reduce instruction retirement. - * This assumes the PC changes. - */ -#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE - __asm__ __volatile__ ("pause"); + +#ifdef __riscv_zihintpause + /* + * Reduce instruction retirement. + * This assumes the PC changes. + */ + __asm__ __volatile__ ("pause"); #else - /* Encoding of the pause instruction */ - __asm__ __volatile__ (".4byte 0x100000F"); + /* Encoding of the pause instruction */ + __asm__ __volatile__ (".4byte 0x100000F"); #endif - } barrier(); } diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index b865046e4dbb..4bf6c449d78b 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -326,7 +326,7 @@ clear_bss_done: call soc_early_init tail start_kernel -#if CONFIG_RISCV_BOOT_SPINWAIT +#ifdef CONFIG_RISCV_BOOT_SPINWAIT .Lsecondary_start: /* Set trap vector to spin forever to help debug */ la a3, .Lsecondary_park diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index f21592d20306..41c7481afde3 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -48,6 +48,21 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) post_kprobe_handler(p, kcb, regs); } +static bool __kprobes arch_check_kprobe(struct kprobe *p) +{ + unsigned long tmp = (unsigned long)p->addr - p->offset; + unsigned long addr = (unsigned long)p->addr; + + while (tmp <= addr) { + if (tmp == addr) + return true; + + tmp += GET_INSN_LENGTH(*(u16 *)tmp); + } + + return false; +} + int __kprobes arch_prepare_kprobe(struct kprobe *p) { unsigned long probe_addr = (unsigned long)p->addr; @@ -55,6 +70,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) if (probe_addr & 0x1) return -EILSEQ; + if (!arch_check_kprobe(p)) + return -EILSEQ; + /* copy instruction */ p->opcode = *p->addr; diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c index d73e96f6ed7c..a20568bd1f1a 100644 --- a/arch/riscv/kernel/probes/simulate-insn.c +++ b/arch/riscv/kernel/probes/simulate-insn.c @@ -71,11 +71,11 @@ bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *reg u32 rd_index = (opcode >> 7) & 0x1f; u32 rs1_index = (opcode >> 15) & 0x1f; - ret = rv_insn_reg_set_val(regs, rd_index, addr + 4); + ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr); if (!ret) return ret; - ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr); + ret = rv_insn_reg_set_val(regs, rd_index, addr + 4); if (!ret) return ret; diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 3373df413c88..ddb2afba6d25 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -39,7 +39,6 @@ static DECLARE_COMPLETION(cpu_running); void __init smp_prepare_boot_cpu(void) { - init_cpu_topology(); } void __init smp_prepare_cpus(unsigned int max_cpus) @@ -48,6 +47,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) int ret; unsigned int curr_cpuid; + init_cpu_topology(); + curr_cpuid = smp_processor_id(); store_cpu_topology(curr_cpuid); numa_store_cpu_info(curr_cpuid); diff --git a/arch/s390/boot/decompressor.c b/arch/s390/boot/decompressor.c index 8dcd7af2911a..b519a1f045d8 100644 --- a/arch/s390/boot/decompressor.c +++ b/arch/s390/boot/decompressor.c @@ -80,6 +80,6 @@ void 
*decompress_kernel(void) void *output = (void *)decompress_offset; __decompress(_compressed_start, _compressed_end - _compressed_start, - NULL, NULL, output, 0, NULL, error); + NULL, NULL, output, vmlinux.image_size, NULL, error); return output; } diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index 3161b9ccd2a5..b6276a3521d7 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -4,6 +4,7 @@ * Written by Niibe Yutaka and Paul Mundt */ OUTPUT_ARCH(sh) +#define RUNTIME_DISCARD_EXIT #include <asm/thread_info.h> #include <asm/cache.h> #include <asm/vmlinux.lds.h> diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 9cf07322875a..73ed982d4100 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -14,13 +14,13 @@ endif ifdef CONFIG_CC_IS_GCC RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) -RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix) RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register) endif ifdef CONFIG_CC_IS_CLANG RETPOLINE_CFLAGS := -mretpoline-external-thunk RETPOLINE_VDSO_CFLAGS := -mretpoline endif +RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix) ifdef CONFIG_RETHUNK RETHUNK_CFLAGS := -mfunction-return=thunk-extern diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c index d4a314cc50d6..321a5011042d 100644 --- a/arch/x86/boot/compressed/ident_map_64.c +++ b/arch/x86/boot/compressed/ident_map_64.c @@ -180,6 +180,12 @@ void initialize_identity_maps(void *rmode) /* Load the new page-table. */ write_cr3(top_level_pgt); + + /* + * Now that the required page table mappings are established and a + * GHCB can be used, check for SNP guest/HV feature compatibility. 
+ */ + snp_check_features(); } static pte_t *split_large_pmd(struct x86_mapping_info *info, diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 62208ec04ca4..20118fb7c53b 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -126,6 +126,7 @@ static inline void console_init(void) #ifdef CONFIG_AMD_MEM_ENCRYPT void sev_enable(struct boot_params *bp); +void snp_check_features(void); void sev_es_shutdown_ghcb(void); extern bool sev_es_check_ghcb_fault(unsigned long address); void snp_set_page_private(unsigned long paddr); @@ -143,6 +144,7 @@ static inline void sev_enable(struct boot_params *bp) if (bp) bp->cc_blob_address = 0; } +static inline void snp_check_features(void) { } static inline void sev_es_shutdown_ghcb(void) { } static inline bool sev_es_check_ghcb_fault(unsigned long address) { diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c index c93930d5ccbd..d63ad8f99f83 100644 --- a/arch/x86/boot/compressed/sev.c +++ b/arch/x86/boot/compressed/sev.c @@ -208,6 +208,23 @@ void sev_es_shutdown_ghcb(void) error("Can't unmap GHCB page"); } +static void __noreturn sev_es_ghcb_terminate(struct ghcb *ghcb, unsigned int set, + unsigned int reason, u64 exit_info_2) +{ + u64 exit_info_1 = SVM_VMGEXIT_TERM_REASON(set, reason); + + vc_ghcb_invalidate(ghcb); + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_TERM_REQUEST); + ghcb_set_sw_exit_info_1(ghcb, exit_info_1); + ghcb_set_sw_exit_info_2(ghcb, exit_info_2); + + sev_es_wr_ghcb_msr(__pa(ghcb)); + VMGEXIT(); + + while (true) + asm volatile("hlt\n" : : : "memory"); +} + bool sev_es_check_ghcb_fault(unsigned long address) { /* Check whether the fault was on the GHCB page */ @@ -270,6 +287,59 @@ static void enforce_vmpl0(void) sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0); } +/* + * SNP_FEATURES_IMPL_REQ is the mask of SNP features that will need + * guest side implementation for proper functioning of the guest. If any + * of these features are enabled in the hypervisor but are lacking guest + * side implementation, the behavior of the guest will be undefined. The + * guest could fail in non-obvious way making it difficult to debug. + * + * As the behavior of reserved feature bits is unknown to be on the + * safe side add them to the required features mask. + */ +#define SNP_FEATURES_IMPL_REQ (MSR_AMD64_SNP_VTOM | \ + MSR_AMD64_SNP_REFLECT_VC | \ + MSR_AMD64_SNP_RESTRICTED_INJ | \ + MSR_AMD64_SNP_ALT_INJ | \ + MSR_AMD64_SNP_DEBUG_SWAP | \ + MSR_AMD64_SNP_VMPL_SSS | \ + MSR_AMD64_SNP_SECURE_TSC | \ + MSR_AMD64_SNP_VMGEXIT_PARAM | \ + MSR_AMD64_SNP_VMSA_REG_PROTECTION | \ + MSR_AMD64_SNP_RESERVED_BIT13 | \ + MSR_AMD64_SNP_RESERVED_BIT15 | \ + MSR_AMD64_SNP_RESERVED_MASK) + +/* + * SNP_FEATURES_PRESENT is the mask of SNP features that are implemented + * by the guest kernel. As and when a new feature is implemented in the + * guest kernel, a corresponding bit should be added to the mask. + */ +#define SNP_FEATURES_PRESENT (0) + +void snp_check_features(void) +{ + u64 unsupported; + + if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED)) + return; + + /* + * Terminate the boot if hypervisor has enabled any feature lacking + * guest side implementation. Pass on the unsupported features mask through + * EXIT_INFO_2 of the GHCB protocol so that those features can be reported + * as part of the guest boot failure. 
+ */ + unsupported = sev_status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT; + if (unsupported) { + if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb())) + sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED); + + sev_es_ghcb_terminate(boot_ghcb, SEV_TERM_SET_GEN, + GHCB_SNP_UNSUPPORTED, unsupported); + } +} + void sev_enable(struct boot_params *bp) { unsigned int eax, ebx, ecx, edx; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index dfd2c124cdf8..bafdc2be479a 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -6339,6 +6339,7 @@ __init int intel_pmu_init(void) break; case INTEL_FAM6_SAPPHIRERAPIDS_X: + case INTEL_FAM6_EMERALDRAPIDS_X: pmem = true; x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids)); diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 3019fb1926e3..551741e79e03 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -677,6 +677,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_cstates), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_cstates), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &icx_cstates), + X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &icx_cstates), X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates), X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates), diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 65064d9f7fa6..8eb74cf386db 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -14,6 +14,7 @@ #include <asm/mmu.h> #include <asm/mpspec.h> #include <asm/x86_init.h> +#include <asm/cpufeature.h> #ifdef CONFIG_ACPI_APEI # include <asm/pgtable_types.h> @@ -63,6 +64,13 @@ extern int (*acpi_suspend_lowlevel)(void); /* Physical address to resume after wakeup */ unsigned long acpi_get_wakeup_address(void); +static inline bool acpi_skip_set_wakeup_address(void) +{ + return cpu_feature_enabled(X86_FEATURE_XENPV); +} + +#define acpi_skip_set_wakeup_address acpi_skip_set_wakeup_address + /* * Check if the CPU can handle C2 and deeper */ diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index b049d950612f..ca97442e8d49 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -39,7 +39,20 @@ static __always_inline unsigned long native_get_debugreg(int regno) asm("mov %%db6, %0" :"=r" (val)); break; case 7: - asm("mov %%db7, %0" :"=r" (val)); + /* + * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them + * with other code. + * + * This is needed because a DR7 access can cause a #VC exception + * when running under SEV-ES. Taking a #VC exception is not a + * safe thing to do just anywhere in the entry code and + * re-ordering might place the access into an unsafe location. + * + * This happened in the NMI handler, where the DR7 read was + * re-ordered to happen before the call to sev_es_ist_enter(), + * causing stack recursion. + */ + asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER); break; default: BUG(); @@ -66,7 +79,16 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value) asm("mov %0, %%db6" ::"r" (value)); break; case 7: - asm("mov %0, %%db7" ::"r" (value)); + /* + * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them + * with other code. 
+ * + * While is didn't happen with a DR7 write (see the DR7 read + * comment above which explains where it happened), add the + * __FORCE_ORDER here too to avoid similar problems in the + * future. + */ + asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER); break; default: BUG(); diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 37ff47552bcb..d3fe82c5d6b6 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -566,6 +566,26 @@ #define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT) #define MSR_AMD64_SEV_SNP_ENABLED BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT) +/* SNP feature bits enabled by the hypervisor */ +#define MSR_AMD64_SNP_VTOM BIT_ULL(3) +#define MSR_AMD64_SNP_REFLECT_VC BIT_ULL(4) +#define MSR_AMD64_SNP_RESTRICTED_INJ BIT_ULL(5) +#define MSR_AMD64_SNP_ALT_INJ BIT_ULL(6) +#define MSR_AMD64_SNP_DEBUG_SWAP BIT_ULL(7) +#define MSR_AMD64_SNP_PREVENT_HOST_IBS BIT_ULL(8) +#define MSR_AMD64_SNP_BTB_ISOLATION BIT_ULL(9) +#define MSR_AMD64_SNP_VMPL_SSS BIT_ULL(10) +#define MSR_AMD64_SNP_SECURE_TSC BIT_ULL(11) +#define MSR_AMD64_SNP_VMGEXIT_PARAM BIT_ULL(12) +#define MSR_AMD64_SNP_IBS_VIRT BIT_ULL(14) +#define MSR_AMD64_SNP_VMSA_REG_PROTECTION BIT_ULL(16) +#define MSR_AMD64_SNP_SMT_PROTECTION BIT_ULL(17) + +/* SNP feature bits reserved for future use. */ +#define MSR_AMD64_SNP_RESERVED_BIT13 BIT_ULL(13) +#define MSR_AMD64_SNP_RESERVED_BIT15 BIT_ULL(15) +#define MSR_AMD64_SNP_RESERVED_MASK GENMASK_ULL(63, 18) + #define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f /* AMD Collaborative Processor Performance Control MSRs */ diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h index f69c168391aa..80e1df482337 100644 --- a/arch/x86/include/uapi/asm/svm.h +++ b/arch/x86/include/uapi/asm/svm.h @@ -116,6 +116,12 @@ #define SVM_VMGEXIT_AP_CREATE 1 #define SVM_VMGEXIT_AP_DESTROY 2 #define SVM_VMGEXIT_HV_FEATURES 0x8000fffd +#define SVM_VMGEXIT_TERM_REQUEST 0x8000fffe +#define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code) \ + /* SW_EXITINFO1[3:0] */ \ + (((((u64)reason_set) & 0xf)) | \ + /* SW_EXITINFO1[11:4] */ \ + ((((u64)reason_code) & 0xff) << 4)) #define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff /* Exit code reserved for hypervisor/software use */ diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c index 1f60a2b27936..fdbb5f07448f 100644 --- a/arch/x86/kernel/cpu/aperfmperf.c +++ b/arch/x86/kernel/cpu/aperfmperf.c @@ -330,7 +330,16 @@ static void __init bp_init_freq_invariance(void) static void disable_freq_invariance_workfn(struct work_struct *work) { + int cpu; + static_branch_disable(&arch_scale_freq_key); + + /* + * Set arch_freq_scale to a default value on all cpus + * This negates the effect of scaling + */ + for_each_possible_cpu(cpu) + per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE; } static DECLARE_WORK(disable_freq_invariance_work, diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 3aa5304200c5..4d8aff05a509 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq) disable_irq_nosync(irq); io_apic_irqs &= ~(1<<irq); irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq); + irq_set_status_flags(irq, IRQ_LEVEL); enable_irq(irq); lapic_assign_legacy_vector(irq, true); } diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index beb1bada1b0a..c683666876f1 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -65,8 +65,10 
@@ void __init init_ISA_irqs(void) legacy_pic->init(0); - for (i = 0; i < nr_legacy_irqs(); i++) + for (i = 0; i < nr_legacy_irqs(); i++) { irq_set_chip_and_handler(i, chip, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); + } } void __init init_IRQ(void) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index fc9008dbed33..7eec0226d56a 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3440,18 +3440,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var) { u32 ar; - if (var->unusable || !var->present) - ar = 1 << 16; - else { - ar = var->type & 15; - ar |= (var->s & 1) << 4; - ar |= (var->dpl & 3) << 5; - ar |= (var->present & 1) << 7; - ar |= (var->avl & 1) << 12; - ar |= (var->l & 1) << 13; - ar |= (var->db & 1) << 14; - ar |= (var->g & 1) << 15; - } + ar = var->type & 15; + ar |= (var->s & 1) << 4; + ar |= (var->dpl & 3) << 5; + ar |= (var->present & 1) << 7; + ar |= (var->avl & 1) << 12; + ar |= (var->l & 1) << 13; + ar |= (var->db & 1) << 14; + ar |= (var->g & 1) << 15; + ar |= (var->unusable || !var->present) << 16; return ar; } diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index b94f727251b6..8babce71915f 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -392,6 +392,7 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev) msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_ASSOCIATED) { for (i = 0; i < msidesc->nvec_used; i++) xen_destroy_irq(msidesc->irq + i); + msidesc->irq = 0; } } @@ -433,6 +434,7 @@ static struct msi_domain_ops xen_pci_msi_domain_ops = { }; static struct msi_domain_info xen_pci_msi_domain_info = { + .flags = MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS | MSI_FLAG_DEV_SYSFS, .ops = &xen_pci_msi_domain_ops, }; |
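
Note on the SEV-SNP guest change above: snp_check_features() boils down to a single mask computation — any feature bit the hypervisor has enabled in sev_status that the guest is required to implement (SNP_FEATURES_IMPL_REQ) but does not yet implement (SNP_FEATURES_PRESENT) causes boot termination. The following is a minimal, stand-alone sketch of that arithmetic and is not part of the patch; the bit positions mirror a subset of the MSR_AMD64_SNP_* definitions added to msr-index.h, and the sev_status value used here is hypothetical.

/*
 * Illustrative only: the unsupported-feature mask computation used by
 * snp_check_features() in the patch above, with a hypothetical sev_status.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)			(1ULL << (n))

/* Subset of the MSR_AMD64_SNP_* feature bits from msr-index.h */
#define MSR_AMD64_SNP_VTOM		BIT_ULL(3)
#define MSR_AMD64_SNP_REFLECT_VC	BIT_ULL(4)
#define MSR_AMD64_SNP_SECURE_TSC	BIT_ULL(11)

/* Features that need guest-side support (subset, for illustration) */
#define SNP_FEATURES_IMPL_REQ		(MSR_AMD64_SNP_VTOM |		\
					 MSR_AMD64_SNP_REFLECT_VC |	\
					 MSR_AMD64_SNP_SECURE_TSC)

/* Features the guest kernel implements (none, as in the patch) */
#define SNP_FEATURES_PRESENT		(0)

int main(void)
{
	/* Hypothetical: hypervisor enabled SNP with vTOM and Secure TSC */
	uint64_t sev_status = MSR_AMD64_SNP_VTOM | MSR_AMD64_SNP_SECURE_TSC;

	/* Enabled by the hypervisor, required, but not implemented */
	uint64_t unsupported = sev_status & SNP_FEATURES_IMPL_REQ &
			       ~SNP_FEATURES_PRESENT;

	if (unsupported)
		printf("would terminate boot, unsupported mask: %#llx\n",
		       (unsigned long long)unsupported);
	else
		printf("all hypervisor-enabled SNP features are supported\n");

	return 0;
}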