204 files changed, 1107 insertions, 1027 deletions
diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml index 1d38ff76d18f..2b1f91603897 100644 --- a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml +++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml @@ -24,10 +24,10 @@ allOf: select: properties: compatible: - items: - - enum: - - sifive,fu540-c000-ccache - - sifive,fu740-c000-ccache + contains: + enum: + - sifive,fu540-c000-ccache + - sifive,fu740-c000-ccache required: - compatible diff --git a/MAINTAINERS b/MAINTAINERS index 06e39d3eba93..6abfd3e36c31 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3866,6 +3866,16 @@ L: bcm-kernel-feedback-list@broadcom.com S: Maintained F: drivers/mtd/nand/raw/brcmnand/ +BROADCOM STB PCIE DRIVER +M: Jim Quinlan <jim2101024@gmail.com> +M: Nicolas Saenz Julienne <nsaenz@kernel.org> +M: Florian Fainelli <f.fainelli@gmail.com> +M: bcm-kernel-feedback-list@broadcom.com +L: linux-pci@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml +F: drivers/pci/controller/pcie-brcmstb.c + BROADCOM SYSTEMPORT ETHERNET DRIVER M: Florian Fainelli <f.fainelli@gmail.com> L: bcm-kernel-feedback-list@broadcom.com @@ -4498,7 +4508,7 @@ L: clang-built-linux@googlegroups.com S: Supported W: https://clangbuiltlinux.github.io/ B: https://github.com/ClangBuiltLinux/linux/issues -C: irc://chat.freenode.net/clangbuiltlinux +C: irc://irc.libera.chat/clangbuiltlinux F: Documentation/kbuild/llvm.rst F: include/linux/compiler-clang.h F: scripts/clang-tools/ @@ -6952,7 +6962,7 @@ F: include/uapi/linux/mdio.h F: include/uapi/linux/mii.h EXFAT FILE SYSTEM -M: Namjae Jeon <namjae.jeon@samsung.com> +M: Namjae Jeon <linkinjeon@kernel.org> M: Sungjong Seo <sj1557.seo@samsung.com> L: linux-fsdevel@vger.kernel.org S: Maintained @@ -14457,6 +14467,13 @@ S: Maintained F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt F: drivers/pci/controller/dwc/pcie-histb.c +PCIE DRIVER FOR INTEL LGM GW SOC +M: Rahul Tanwar <rtanwar@maxlinear.com> +L: linux-pci@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml +F: drivers/pci/controller/dwc/pcie-intel-gw.c + PCIE DRIVER FOR MEDIATEK M: Ryder Lee <ryder.lee@mediatek.com> M: Jianjun Wang <jianjun.wang@mediatek.com> @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 14 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Opossums on Parade # *DOCUMENTATION* diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig index 3f35761dc9ff..23595fc5a29a 100644 --- a/arch/arm/configs/nhk8815_defconfig +++ b/arch/arm/configs/nhk8815_defconfig @@ -15,8 +15,6 @@ CONFIG_SLAB=y CONFIG_ARCH_NOMADIK=y CONFIG_MACH_NOMADIK_8815NHK=y CONFIG_AEABI=y -CONFIG_ZBOOT_ROM_TEXT=0x0 -CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set @@ -52,9 +50,9 @@ CONFIG_MTD_BLOCK=y CONFIG_MTD_ONENAND=y CONFIG_MTD_ONENAND_VERIFY_WRITE=y CONFIG_MTD_ONENAND_GENERIC=y -CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y CONFIG_MTD_RAW_NAND=y CONFIG_MTD_NAND_FSMC=y +CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=y CONFIG_BLK_DEV_RAM=y @@ -97,6 +95,7 @@ CONFIG_REGULATOR=y CONFIG_DRM=y CONFIG_DRM_PANEL_TPO_TPG110=y CONFIG_DRM_PL111=y +CONFIG_FB=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_BACKLIGHT_PWM=y CONFIG_FRAMEBUFFER_CONSOLE=y @@ -136,9 +135,8 @@ CONFIG_NLS_ISO8859_15=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_SHA1=y CONFIG_CRYPTO_DES=y +# CONFIG_DEBUG_BUGVERBOSE is 
not set CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set -# CONFIG_DEBUG_BUGVERBOSE is not set diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h index abb07f105515..74e63d4531aa 100644 --- a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h +++ b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h @@ -218,30 +218,30 @@ /* * PCI Control/Status Registers */ -#define IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x))) - -#define PCI_NP_AD IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET) -#define PCI_NP_CBE IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET) -#define PCI_NP_WDATA IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET) -#define PCI_NP_RDATA IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET) -#define PCI_CRP_AD_CBE IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET) -#define PCI_CRP_WDATA IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET) -#define PCI_CRP_RDATA IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET) -#define PCI_CSR IXP4XX_PCI_CSR(PCI_CSR_OFFSET) -#define PCI_ISR IXP4XX_PCI_CSR(PCI_ISR_OFFSET) -#define PCI_INTEN IXP4XX_PCI_CSR(PCI_INTEN_OFFSET) -#define PCI_DMACTRL IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET) -#define PCI_AHBMEMBASE IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET) -#define PCI_AHBIOBASE IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET) -#define PCI_PCIMEMBASE IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET) -#define PCI_AHBDOORBELL IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET) -#define PCI_PCIDOORBELL IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET) -#define PCI_ATPDMA0_AHBADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET) -#define PCI_ATPDMA0_PCIADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET) -#define PCI_ATPDMA0_LENADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET) -#define PCI_ATPDMA1_AHBADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET) -#define PCI_ATPDMA1_PCIADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET) -#define PCI_ATPDMA1_LENADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET) +#define _IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x))) + +#define PCI_NP_AD _IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET) +#define PCI_NP_CBE _IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET) +#define PCI_NP_WDATA _IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET) +#define PCI_NP_RDATA _IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET) +#define PCI_CRP_AD_CBE _IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET) +#define PCI_CRP_WDATA _IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET) +#define PCI_CRP_RDATA _IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET) +#define PCI_CSR _IXP4XX_PCI_CSR(PCI_CSR_OFFSET) +#define PCI_ISR _IXP4XX_PCI_CSR(PCI_ISR_OFFSET) +#define PCI_INTEN _IXP4XX_PCI_CSR(PCI_INTEN_OFFSET) +#define PCI_DMACTRL _IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET) +#define PCI_AHBMEMBASE _IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET) +#define PCI_AHBIOBASE _IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET) +#define PCI_PCIMEMBASE _IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET) +#define PCI_AHBDOORBELL _IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET) +#define PCI_PCIDOORBELL _IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET) +#define PCI_ATPDMA0_AHBADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET) +#define PCI_ATPDMA0_PCIADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET) +#define PCI_ATPDMA0_LENADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET) +#define PCI_ATPDMA1_AHBADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET) +#define PCI_ATPDMA1_PCIADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET) +#define PCI_ATPDMA1_LENADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET) /* * PCI register values and bit definitions diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index fdcd54d39c1e..62c3c1d2190f 100644 --- a/arch/arm64/Kconfig 
+++ b/arch/arm64/Kconfig @@ -156,6 +156,7 @@ config ARM64 select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT + select HAVE_ARCH_PFN_VALID select HAVE_ARCH_PREL32_RELOCATIONS select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET select HAVE_ARCH_SECCOMP_FILTER diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 7b668db43261..1110d386f3b4 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -183,6 +183,8 @@ endif # We use MRPROPER_FILES and CLEAN_FILES now archclean: $(Q)$(MAKE) $(clean)=$(boot) + $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso + $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso32 ifeq ($(KBUILD_EXTMOD),) # We need to generate vdso-offsets.h before compiling certain files in kernel/. diff --git a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts index 23cdcc9f7c72..1ccca83292ac 100644 --- a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts +++ b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2015, LGE Inc. All rights reserved. * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com> */ /dts-v1/; @@ -9,6 +10,9 @@ #include "pm8994.dtsi" #include "pmi8994.dtsi" +/* cont_splash_mem has different memory mapping */ +/delete-node/ &cont_splash_mem; + / { model = "LG Nexus 5X"; compatible = "lg,bullhead", "qcom,msm8992"; @@ -17,6 +21,9 @@ qcom,board-id = <0xb64 0>; qcom,pmic-id = <0x10009 0x1000A 0x0 0x0>; + /* Bullhead firmware doesn't support PSCI */ + /delete-node/ psci; + aliases { serial0 = &blsp1_uart2; }; @@ -38,6 +45,11 @@ ftrace-size = <0x10000>; pmsg-size = <0x20000>; }; + + cont_splash_mem: memory@3400000 { + reg = <0 0x03400000 0 0x1200000>; + no-map; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts index ffe1a9bd8f70..c096b7758aa0 100644 --- a/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts +++ b/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts @@ -1,12 +1,16 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2015, Huawei Inc. All rights reserved. * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com> */ /dts-v1/; #include "msm8994.dtsi" +/* Angler's firmware does not report where the memory is allocated */ +/delete-node/ &cont_splash_mem; + / { model = "Huawei Nexus 6P"; compatible = "huawei,angler", "qcom,msm8994"; diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi index a8c274ad74c4..188c5768a55a 100644 --- a/arch/arm64/boot/dts/qcom/sc7280.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi @@ -200,7 +200,7 @@ &BIG_CPU_SLEEP_1 &CLUSTER_SLEEP_0>; next-level-cache = <&L2_700>; - qcom,freq-domain = <&cpufreq_hw 1>; + qcom,freq-domain = <&cpufreq_hw 2>; #cooling-cells = <2>; L2_700: l2-cache { compatible = "cache"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi index 4d052e39b348..eb6b1d15293d 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi @@ -69,7 +69,7 @@ }; rmtfs_upper_guard: memory@f5d01000 { no-map; - reg = <0 0xf5d01000 0 0x2000>; + reg = <0 0xf5d01000 0 0x1000>; }; /* @@ -78,7 +78,7 @@ */ removed_region: memory@88f00000 { no-map; - reg = <0 0x88f00000 0 0x200000>; + reg = <0 0x88f00000 0 0x1c00000>; }; ramoops: ramoops@ac300000 { diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts index c2a709a384e9..d7591a4621a2 100644 --- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts @@ -700,7 +700,7 @@ left_spkr: wsa8810-left{ compatible = "sdw10217211000"; reg = <0 3>; - powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>; + powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>; #thermal-sensor-cells = <0>; sound-name-prefix = "SpkrLeft"; #sound-dai-cells = <0>; @@ -708,7 +708,7 @@ right_spkr: wsa8810-right{ compatible = "sdw10217211000"; - powerdown-gpios = <&wcdgpio 3 GPIO_ACTIVE_HIGH>; + powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>; reg = <0 4>; #thermal-sensor-cells = <0>; sound-name-prefix = "SpkrRight"; diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 21fa330f498d..b83fb24954b7 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -33,8 +33,7 @@ * EL2. */ .macro __init_el2_timers - mrs x0, cnthctl_el2 - orr x0, x0, #3 // Enable EL1 physical timers + mov x0, #3 // Enable EL1 physical timers msr cnthctl_el2, x0 msr cntvoff_el2, xzr // Clear virtual offset .endm diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 993a27ea6f54..f98c91bbd7c1 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -41,6 +41,7 @@ void tag_clear_highpage(struct page *to); typedef struct page *pgtable_t; +int pfn_valid(unsigned long pfn); int pfn_is_map_memory(unsigned long pfn); #include <asm/memory.h> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 8490ed2917ff..1fdb7bb7c198 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -219,6 +219,43 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) free_area_init(max_zone_pfns); } +int pfn_valid(unsigned long pfn) +{ + phys_addr_t addr = PFN_PHYS(pfn); + struct mem_section *ms; + + /* + * Ensure the upper PAGE_SHIFT bits are clear in the + * pfn. Else it might lead to false positives when + * some of the upper bits are set, but the lower bits + * match a valid pfn. 
+ */ + if (PHYS_PFN(addr) != pfn) + return 0; + + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) + return 0; + + ms = __pfn_to_section(pfn); + if (!valid_section(ms)) + return 0; + + /* + * ZONE_DEVICE memory does not have the memblock entries. + * memblock_is_map_memory() check for ZONE_DEVICE based + * addresses will always fail. Even the normal hotplugged + * memory will never have MEMBLOCK_NOMAP flag set in their + * memblock entries. Skip memblock search for all non early + * memory sections covering all of hotplug memory including + * both normal and ZONE_DEVICE based. + */ + if (!early_section(ms)) + return pfn_section_valid(ms, pfn); + + return memblock_is_memory(addr); +} +EXPORT_SYMBOL(pfn_valid); + int pfn_is_map_memory(unsigned long pfn) { phys_addr_t addr = PFN_PHYS(pfn); diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h index 64201125a287..d4b145b279f6 100644 --- a/arch/powerpc/include/asm/book3s/32/kup.h +++ b/arch/powerpc/include/asm/book3s/32/kup.h @@ -4,6 +4,8 @@ #include <asm/bug.h> #include <asm/book3s/32/mmu-hash.h> +#include <asm/mmu.h> +#include <asm/synch.h> #ifndef __ASSEMBLY__ @@ -28,6 +30,15 @@ static inline void kuep_lock(void) return; update_user_segments(mfsr(0) | SR_NX); + /* + * This isync() shouldn't be necessary as the kernel is not excepted to + * run any instruction in userspace soon after the update of segments, + * but hash based cores (at least G3) seem to exhibit a random + * behaviour when the 'isync' is not there. 603 cores don't have this + * behaviour so don't do the 'isync' as it saves several CPU cycles. + */ + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) + isync(); /* Context sync required after mtsr() */ } static inline void kuep_unlock(void) @@ -36,6 +47,15 @@ static inline void kuep_unlock(void) return; update_user_segments(mfsr(0) & ~SR_NX); + /* + * This isync() shouldn't be necessary as a 'rfi' will soon be executed + * to return to userspace, but hash based cores (at least G3) seem to + * exhibit a random behaviour when the 'isync' is not there. 603 cores + * don't have this behaviour so don't do the 'isync' as it saves several + * CPU cycles. + */ + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) + isync(); /* Context sync required after mtsr() */ } #ifdef CONFIG_PPC_KUAP diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c index 0876216ceee6..edea388e9d3f 100644 --- a/arch/powerpc/mm/pageattr.c +++ b/arch/powerpc/mm/pageattr.c @@ -18,16 +18,12 @@ /* * Updates the attributes of a page in three steps: * - * 1. invalidate the page table entry - * 2. flush the TLB - * 3. install the new entry with the updated attributes - * - * Invalidating the pte means there are situations where this will not work - * when in theory it should. - * For example: - * - removing write from page whilst it is being executed - * - setting a page read-only whilst it is being read by another CPU + * 1. take the page_table_lock + * 2. install the new entry with the updated attributes + * 3. flush the TLB * + * This sequence is safe against concurrent updates, and also allows updating the + * attributes of a page currently being executed or accessed. 
*/ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data) { @@ -36,9 +32,7 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data) spin_lock(&init_mm.page_table_lock); - /* invalidate the PTE so it's safe to modify */ - pte = ptep_get_and_clear(&init_mm, addr, ptep); - flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + pte = ptep_get(ptep); /* modify the PTE bits as desired, then apply */ switch (action) { @@ -59,11 +53,14 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data) break; } - set_pte_at(&init_mm, addr, ptep, pte); + pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0); /* See ptesync comment in radix__set_pte_at() */ if (radix_enabled()) asm volatile("ptesync": : :"memory"); + + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + spin_unlock(&init_mm.page_table_lock); return 0; diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 943fd30095af..8183ca343675 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -1170,7 +1170,7 @@ out: return ret; } -static int __init xive_request_ipi(unsigned int cpu) +static int xive_request_ipi(unsigned int cpu) { struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)]; int ret; diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 18bd0e4bc36c..120b2f6f71bc 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -229,8 +229,8 @@ static void __init init_resources(void) } /* Clean-up any unused pre-allocated resources */ - mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res); - memblock_free(__pa(mem_res), mem_res_sz); + if (res_idx >= 0) + memblock_free(__pa(mem_res), (res_idx + 1) * sizeof(*mem_res)); return; error: diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index b0993e05affe..8fcb7ecb7225 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -560,9 +560,12 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev) int pcibios_add_device(struct pci_dev *pdev) { + struct zpci_dev *zdev = to_zpci(pdev); struct resource *res; int i; + /* The pdev has a reference to the zdev via its bus */ + zpci_zdev_get(zdev); if (pdev->is_physfn) pdev->no_vf_scan = 1; @@ -582,7 +585,10 @@ int pcibios_add_device(struct pci_dev *pdev) void pcibios_release_device(struct pci_dev *pdev) { + struct zpci_dev *zdev = to_zpci(pdev); + zpci_unmap_resources(pdev); + zpci_zdev_put(zdev); } int pcibios_enable_device(struct pci_dev *pdev, int mask) diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h index b877a97e6745..e359d2686178 100644 --- a/arch/s390/pci/pci_bus.h +++ b/arch/s390/pci/pci_bus.h @@ -22,6 +22,11 @@ static inline void zpci_zdev_put(struct zpci_dev *zdev) kref_put(&zdev->kref, zpci_release_device); } +static inline void zpci_zdev_get(struct zpci_dev *zdev) +{ + kref_get(&zdev->kref); +} + int zpci_alloc_domain(int domain); void zpci_free_domain(int domain); int zpci_setup_bus_resources(struct zpci_dev *zdev, diff --git a/block/blk-core.c b/block/blk-core.c index 04477697ee4b..4f8449b29b21 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -122,7 +122,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq) rq->internal_tag = BLK_MQ_NO_TAG; rq->start_time_ns = ktime_get_ns(); rq->part = NULL; - refcount_set(&rq->ref, 1); blk_crypto_rq_set_defaults(rq); } EXPORT_SYMBOL(blk_rq_init); diff --git a/block/blk-flush.c b/block/blk-flush.c index 1002f6c58181..4201728bf3a5 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -262,6 
+262,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error) spin_unlock_irqrestore(&fq->mq_flush_lock, flags); } +bool is_flush_rq(struct request *rq) +{ + return rq->end_io == flush_end_io; +} + /** * blk_kick_flush - consider issuing flush request * @q: request_queue being kicked @@ -329,6 +334,14 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq, flush_rq->rq_flags |= RQF_FLUSH_SEQ; flush_rq->rq_disk = first_rq->rq_disk; flush_rq->end_io = flush_end_io; + /* + * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one + * implied in refcount_inc_not_zero() called from + * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref + * and READ flush_rq->end_io + */ + smp_wmb(); + refcount_set(&flush_rq->ref, 1); blk_flush_queue_rq(flush_rq, false); } diff --git a/block/blk-mq.c b/block/blk-mq.c index 2fe396385a4a..9d4fdc2be88a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -911,7 +911,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next) void blk_mq_put_rq_ref(struct request *rq) { - if (is_flush_rq(rq, rq->mq_hctx)) + if (is_flush_rq(rq)) rq->end_io(rq, 0); else if (refcount_dec_and_test(&rq->ref)) __blk_mq_free_request(rq); @@ -923,34 +923,14 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, unsigned long *next = priv; /* - * Just do a quick check if it is expired before locking the request in - * so we're not unnecessarilly synchronizing across CPUs. - */ - if (!blk_mq_req_expired(rq, next)) - return true; - - /* - * We have reason to believe the request may be expired. Take a - * reference on the request to lock this request lifetime into its - * currently allocated context to prevent it from being reallocated in - * the event the completion by-passes this timeout handler. - * - * If the reference was already released, then the driver beat the - * timeout handler to posting a natural completion. - */ - if (!refcount_inc_not_zero(&rq->ref)) - return true; - - /* - * The request is now locked and cannot be reallocated underneath the - * timeout handler's processing. Re-verify this exact request is truly - * expired; if it is not expired, then the request was completed and - * reallocated as a new request. + * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot + * be reallocated underneath the timeout handler's processing, then + * the expire check is reliable. If the request is not expired, then + * it was completed and reallocated as a new request after returning + * from blk_mq_check_expired(). 
*/ if (blk_mq_req_expired(rq, next)) blk_mq_rq_timed_out(rq, reserved); - - blk_mq_put_rq_ref(rq); return true; } diff --git a/block/blk.h b/block/blk.h index 4b885c0f6708..cb01429c162c 100644 --- a/block/blk.h +++ b/block/blk.h @@ -44,11 +44,7 @@ static inline void __blk_get_queue(struct request_queue *q) kobject_get(&q->kobj); } -static inline bool -is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx) -{ - return hctx->fq->flush_rq == req; -} +bool is_flush_rq(struct request *req); struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size, gfp_t flags); diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c index 31cf9aee5edd..1f6007abcf18 100644 --- a/drivers/acpi/prmt.c +++ b/drivers/acpi/prmt.c @@ -292,6 +292,12 @@ void __init init_prmt(void) int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) + sizeof (struct acpi_table_prmt_header), 0, acpi_parse_prmt, 0); + /* + * Return immediately if PRMT table is not present or no PRM module found. + */ + if (mc <= 0) + return; + pr_info("PRM: found %u modules\n", mc); status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT, diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index fbdbef0ab552..3a308461246a 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -452,7 +452,7 @@ int acpi_s2idle_prepare_late(void) if (lps0_dsm_func_mask_microsoft > 0) { acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); - acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, + acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); @@ -479,7 +479,7 @@ void acpi_s2idle_restore_early(void) if (lps0_dsm_func_mask_microsoft > 0) { acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); - acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, + acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index bc239a11aa69..5b9ea66b92dc 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -682,7 +682,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, struct image_info *img_info); void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan, unsigned int flags); + struct mhi_chan *mhi_chan); int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan); void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 84448233f64c..fc9196f11cb7 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -1430,7 +1430,7 @@ exit_unprepare_channel: } int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan, unsigned int flags) + struct mhi_chan *mhi_chan) { int ret = 0; struct device *dev = &mhi_chan->mhi_dev->dev; @@ -1455,9 +1455,6 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, if (ret) goto error_pm_state; - if (mhi_chan->dir == DMA_FROM_DEVICE) - mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS); - /* Pre-allocate buffer for xfer ring */ if (mhi_chan->pre_alloc) { int nr_el = 
get_nr_avail_ring_elements(mhi_cntrl, @@ -1613,7 +1610,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) } /* Move channel to start state */ -int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags) +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) { int ret, dir; struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; @@ -1624,7 +1621,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags) if (!mhi_chan) continue; - ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags); + ret = mhi_prepare_channel(mhi_cntrl, mhi_chan); if (ret) goto error_open_chan; } diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 0ef98e3ba341..148a4dd8cb9a 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -3097,8 +3097,10 @@ static int sysc_probe(struct platform_device *pdev) return error; error = sysc_check_active_timer(ddata); - if (error == -EBUSY) + if (error == -ENXIO) ddata->reserved = true; + else if (error) + return error; error = sysc_get_clocks(ddata); if (error) diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index 496900de0b0b..de36f58d551c 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -974,6 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk); } - imx_register_uart_clocks(1); + imx_register_uart_clocks(2); } CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init); diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index 51ed640e527b..4ece326ea233 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -357,27 +357,43 @@ static int gdsc_init(struct gdsc *sc) if (on < 0) return on; - /* - * Votable GDSCs can be ON due to Vote from other masters. - * If a Votable GDSC is ON, make sure we have a Vote. - */ - if ((sc->flags & VOTABLE) && on) - gdsc_enable(&sc->pd); + if (on) { + /* The regulator must be on, sync the kernel state */ + if (sc->rsupply) { + ret = regulator_enable(sc->rsupply); + if (ret < 0) + return ret; + } - /* - * Make sure the retain bit is set if the GDSC is already on, otherwise - * we end up turning off the GDSC and destroying all the register - * contents that we thought we were saving. - */ - if ((sc->flags & RETAIN_FF_ENABLE) && on) - gdsc_retain_ff_on(sc); + /* + * Votable GDSCs can be ON due to Vote from other masters. + * If a Votable GDSC is ON, make sure we have a Vote. + */ + if (sc->flags & VOTABLE) { + ret = regmap_update_bits(sc->regmap, sc->gdscr, + SW_COLLAPSE_MASK, val); + if (ret) + return ret; + } + + /* Turn on HW trigger mode if supported */ + if (sc->flags & HW_CTRL) { + ret = gdsc_hwctrl(sc, true); + if (ret < 0) + return ret; + } - /* If ALWAYS_ON GDSCs are not ON, turn them ON */ - if (sc->flags & ALWAYS_ON) { - if (!on) - gdsc_enable(&sc->pd); + /* + * Make sure the retain bit is set if the GDSC is already on, + * otherwise we end up turning off the GDSC and destroying all + * the register contents that we thought we were saving. 
+ */ + if (sc->flags & RETAIN_FF_ENABLE) + gdsc_retain_ff_on(sc); + } else if (sc->flags & ALWAYS_ON) { + /* If ALWAYS_ON GDSCs are not ON, turn them ON */ + gdsc_enable(&sc->pd); on = true; - sc->pd.flags |= GENPD_FLAG_ALWAYS_ON; } if (on || (sc->pwrsts & PWRSTS_RET)) @@ -385,6 +401,8 @@ static int gdsc_init(struct gdsc *sc) else gdsc_clear_mem_on(sc); + if (sc->flags & ALWAYS_ON) + sc->pd.flags |= GENPD_FLAG_ALWAYS_ON; if (!sc->pd.power_off) sc->pd.power_off = gdsc_disable; if (!sc->pd.power_on) diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index 3fc98a3ffd91..c10fc33b29b1 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -104,7 +104,11 @@ struct armada_37xx_dvfs { }; static struct armada_37xx_dvfs armada_37xx_dvfs[] = { - {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, + /* + * The cpufreq scaling for 1.2 GHz variant of the SOC is currently + * unstable because we do not know how to configure it properly. + */ + /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */ {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} }, {.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} }, {.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} }, diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index bef7528aecd3..231e585f6ba2 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -139,7 +139,9 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "qcom,qcs404", }, { .compatible = "qcom,sc7180", }, { .compatible = "qcom,sc7280", }, + { .compatible = "qcom,sc8180x", }, { .compatible = "qcom,sdm845", }, + { .compatible = "qcom,sm8150", }, { .compatible = "st,stih407", }, { .compatible = "st,stih410", }, diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index ec9a87ca2dbb..75f818d04b48 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -134,7 +134,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) } if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL)) - ret = -ENOMEM; + return -ENOMEM; /* Obtain CPUs that share SCMI performance controls */ ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index c7b364e4a287..e883731c3f8f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -3026,6 +3026,14 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size, pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start, start + size - 1, nattr); + /* Flush pending deferred work to avoid racing with deferred actions from + * previous memory map changes (e.g. munmap). Concurrent memory map changes + * can still race with get_attr because we don't hold the mmap lock. But that + * would be a race condition in the application anyway, and undefined + * behaviour is acceptable in that case. 
+ */ + flush_work(&p->svms.deferred_list_work); + mmap_read_lock(mm); if (!svm_range_is_valid(mm, start, size)) { pr_debug("invalid range\n"); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 605e297b7a59..a30283fa5173 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1530,6 +1530,12 @@ void dc_z10_restore(struct dc *dc) if (dc->hwss.z10_restore) dc->hwss.z10_restore(dc); } + +void dc_z10_save_init(struct dc *dc) +{ + if (dc->hwss.z10_save_init) + dc->hwss.z10_save_init(dc); +} #endif /* * Applies given context to HW and copy it into current context. diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c index f2b39ec35c89..cde8ed2560b3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c @@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c */ memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config)); dc->vm_pa_config.valid = true; +#if defined(CONFIG_DRM_AMD_DC_DCN) + dc_z10_save_init(dc); +#endif } return num_vmids; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index af7b60108e9d..21d78289b048 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1338,6 +1338,7 @@ void dc_hardware_release(struct dc *dc); bool dc_set_psr_allow_active(struct dc *dc, bool enable); #if defined(CONFIG_DRM_AMD_DC_DCN) void dc_z10_restore(struct dc *dc); +void dc_z10_save_init(struct dc *dc); #endif bool dc_enable_dmub_notifications(struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 9776d1737818..912285fdce18 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); } -static void calculate_wm_set_for_vlevel( - int vlevel, - struct wm_range_table_entry *table_entry, - struct dcn_watermarks *wm_set, - struct display_mode_lib *dml, - display_e2e_pipe_params_st *pipes, - int pipe_cnt) -{ - double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; - - ASSERT(vlevel < dml->soc.num_states); - /* only pipe 0 is read for voltage and dcf/soc clocks */ - pipes[0].clks_cfg.voltage = vlevel; - pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; - - dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; - dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; - dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; - - wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; - wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; - wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; 
- wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; - wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; - dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; - -} - -static void dcn301_calculate_wm_and_dlg( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt, - int vlevel_req) -{ - int i, pipe_idx; - int vlevel, vlevel_max; - struct wm_range_table_entry *table_entry; - struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; - - ASSERT(bw_params); - - vlevel_max = bw_params->clk_table.num_entries - 1; - - /* WM Set D */ - table_entry = &bw_params->wm_table.entries[WM_D]; - if (table_entry->wm_type == WM_TYPE_RETRAINING) - vlevel = 0; - else - vlevel = vlevel_max; - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, - &context->bw_ctx.dml, pipes, pipe_cnt); - /* WM Set C */ - table_entry = &bw_params->wm_table.entries[WM_C]; - vlevel = min(max(vlevel_req, 2), vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, - &context->bw_ctx.dml, pipes, pipe_cnt); - /* WM Set B */ - table_entry = &bw_params->wm_table.entries[WM_B]; - vlevel = min(max(vlevel_req, 1), vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, - &context->bw_ctx.dml, pipes, pipe_cnt); - - /* WM Set A */ - table_entry = &bw_params->wm_table.entries[WM_A]; - vlevel = min(vlevel_req, vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, - &context->bw_ctx.dml, pipes, pipe_cnt); - - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - - pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); - pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - - if (dc->config.forced_clocks) { - pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; - pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; - } - if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) - pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; - if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) - pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; - - pipe_idx++; - } - - dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); -} - static struct resource_funcs dcn301_res_pool_funcs = { .destroy = dcn301_destroy_resource_pool, .link_enc_create = dcn301_link_encoder_create, .panel_cntl_create = dcn301_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, - .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, + .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 6ac6faf0c533..8a2119d8ca0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -404,6 +404,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx) 
&pipe_ctx->stream_res.encoder_info_frame); } } +void dcn31_z10_save_init(struct dc *dc) +{ + union dmub_rb_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; + cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT; + + dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); +} void dcn31_z10_restore(struct dc *dc) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h index 40dfebe78fdd..140435e4f7ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h @@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane( void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx); void dcn31_z10_restore(struct dc *dc); +void dcn31_z10_save_init(struct dc *dc); void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index aaf2dbd095fe..b30d923471cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -97,6 +97,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .z10_restore = dcn31_z10_restore, + .z10_save_init = dcn31_z10_save_init, .is_abm_supported = dcn31_is_abm_supported, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .update_visual_confirm_color = dcn20_update_visual_confirm_color, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 5ab008e62b82..ad5f2adcc40d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -237,6 +237,7 @@ struct hw_sequencer_funcs { int width, int height, int offset); void (*z10_restore)(struct dc *dc); + void (*z10_save_init)(struct dc *dc); void (*update_visual_confirm_color)(struct dc *dc, struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 7c4734f905d9..7fafb8d6c1da 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -856,6 +856,11 @@ enum dmub_cmd_idle_opt_type { * DCN hardware restore. */ DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0, + + /** + * DCN hardware save. 
+ */ + DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1 }; /** diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 25979106fd25..02e8c6e5448d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -5127,6 +5127,13 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) return size; } +static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + return (adev->pdev->device == 0x6860); +} + static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size) { struct vega10_hwmgr *data = hwmgr->backend; @@ -5163,9 +5170,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui } out: - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, + if (vega10_get_power_profile_mode_quirks(hwmgr)) + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, + 1 << power_profile_mode, + NULL); + else + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, (!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1), NULL); + hwmgr->power_profile_mode = power_profile_mode; return 0; diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index d29907955ff7..5d82891c3222 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -855,8 +855,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, req.request.sequence = req32.request.sequence; req.request.signal = req32.request.signal; err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED); - if (err) - return err; req32.reply.type = req.reply.type; req32.reply.sequence = req.reply.sequence; @@ -865,7 +863,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, if (copy_to_user(argp, &req32, sizeof(req32))) return -EFAULT; - return 0; + return err; } #if defined(CONFIG_X86) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index be716b56e8e0..00dade49665b 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2463,6 +2463,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder, } } +/* Splitter enable for eDP MSO is limited to certain pipes. */ +static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915) +{ + if (IS_ALDERLAKE_P(i915)) + return BIT(PIPE_A) | BIT(PIPE_B); + else + return BIT(PIPE_A); +} + static void intel_ddi_mso_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ -2480,8 +2489,7 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder, if (!pipe_config->splitter.enable) return; - /* Splitter enable is supported for pipe A only. */ - if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) { + if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) { pipe_config->splitter.enable = false; return; } @@ -2513,10 +2521,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state) return; if (crtc_state->splitter.enable) { - /* Splitter enable is supported for pipe A only. 
*/ - if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) - return; - dss1 |= SPLITTER_ENABLE; dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap); if (crtc_state->splitter.link_count == 2) @@ -4743,12 +4747,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) dig_port->hpd_pulse = intel_dp_hpd_pulse; - /* Splitter enable for eDP MSO is limited to certain pipes. */ - if (dig_port->dp.mso_link_count) { - encoder->pipe_mask = BIT(PIPE_A); - if (IS_ALDERLAKE_P(dev_priv)) - encoder->pipe_mask |= BIT(PIPE_B); - } + if (dig_port->dp.mso_link_count) + encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv); } /* In theory we don't need the encoder->type check, but leave it just in diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 4298ae684d7d..86b7ac7b65ec 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -6387,13 +6387,13 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915) if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { bxt_enable_dc9(i915); - /* Tweaked Wa_14010685332:icp,jsp,mcc */ - if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC) - intel_de_rmw(i915, SOUTH_CHICKEN1, - SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_enable_pc8(i915); } + + /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ + if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) + intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); } void intel_display_power_resume_early(struct drm_i915_private *i915) @@ -6402,13 +6402,13 @@ void intel_display_power_resume_early(struct drm_i915_private *i915) IS_BROXTON(i915)) { gen9_sanitize_dc_state(i915); bxt_disable_dc9(i915); - /* Tweaked Wa_14010685332:icp,jsp,mcc */ - if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC) - intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); - } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); } + + /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ + if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) + intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); } void intel_display_power_suspend(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 08bceae40aa8..053a3c2f7267 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -206,7 +206,6 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) return lttpr_count; } -EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps); static u8 dp_voltage_max(u8 preemph) { diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c03943198089..c3816f5c6900 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3064,24 +3064,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } -static void cnp_display_clock_wa(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - - /* - * Wa_14010685332:cnp/cmp,tgp,adp - * TODO: Clarify which platforms this applies to - * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as - * on earlier platforms 
and whether the workaround is also needed for runtime suspend/resume - */ - if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP || - (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) { - intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, - SBCLK_RUN_REFCLK_DIS); - intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); - } -} - static void gen8_display_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -3115,7 +3097,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_reset(dev_priv); - cnp_display_clock_wa(dev_priv); } static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) @@ -3159,8 +3140,6 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) GEN3_IRQ_RESET(uncore, SDE); - - cnp_display_clock_wa(dev_priv); } static void gen11_irq_reset(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c index 6f4c80bbc0eb..473f5bb5cbad 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_color.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c @@ -133,6 +133,8 @@ static int mtk_disp_color_probe(struct platform_device *pdev) static int mtk_disp_color_remove(struct platform_device *pdev) { + component_del(&pdev->dev, &mtk_disp_color_component_ops); + return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index fa9d79963cd3..5326989d5206 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -423,6 +423,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev) static int mtk_disp_ovl_remove(struct platform_device *pdev) { + component_del(&pdev->dev, &mtk_disp_ovl_component_ops); + return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 75bc00e17fc4..50d20562e612 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -34,6 +34,7 @@ #define DISP_AAL_EN 0x0000 #define DISP_AAL_SIZE 0x0030 +#define DISP_AAL_OUTPUT_SIZE 0x04d8 #define DISP_DITHER_EN 0x0000 #define DITHER_EN BIT(0) @@ -197,6 +198,7 @@ static void mtk_aal_config(struct device *dev, unsigned int w, struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE); + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_OUTPUT_SIZE); } static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index f949767698fc..bcb0310a41b6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2237,6 +2237,33 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) interlock[NV50_DISP_INTERLOCK_CORE] = 0; } + /* Finish updating head(s)... + * + * NVD is rather picky about both where window assignments can change, + * *and* about certain core and window channel states matching. + * + * The EFI GOP driver on newer GPUs configures window channels with a + * different output format to what we do, and the core channel update + * in the assign_windows case above would result in a state mismatch. + * + * Delay some of the head update until after that point to workaround + * the issue. 
This only affects the initial modeset. + * + * TODO: handle this better when adding flexible window mapping + */ + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state); + struct nv50_head *head = nv50_head(crtc); + + NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name, + asyh->set.mask, asyh->clr.mask); + + if (asyh->set.mask) { + nv50_head_flush_set_wndw(head, asyh); + interlock[NV50_DISP_INTERLOCK_CORE] = 1; + } + } + /* Update plane(s). */ for_each_new_plane_in_state(state, plane, new_plane_state, i) { struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index ec361d17e900..d66f97280282 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -50,11 +50,8 @@ nv50_head_flush_clr(struct nv50_head *head, } void -nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) +nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh) { - if (asyh->set.view ) head->func->view (head, asyh); - if (asyh->set.mode ) head->func->mode (head, asyh); - if (asyh->set.core ) head->func->core_set(head, asyh); if (asyh->set.olut ) { asyh->olut.offset = nv50_lut_load(&head->olut, asyh->olut.buffer, @@ -62,6 +59,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) asyh->olut.load); head->func->olut_set(head, asyh); } +} + +void +nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + if (asyh->set.view ) head->func->view (head, asyh); + if (asyh->set.mode ) head->func->mode (head, asyh); + if (asyh->set.core ) head->func->core_set(head, asyh); if (asyh->set.curs ) head->func->curs_set(head, asyh); if (asyh->set.base ) head->func->base (head, asyh); if (asyh->set.ovly ) head->func->ovly (head, asyh); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h index dae841dc05fd..0bac6be9ba34 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.h +++ b/drivers/gpu/drm/nouveau/dispnv50/head.h @@ -21,6 +21,7 @@ struct nv50_head { struct nv50_head *nv50_head_create(struct drm_device *, int index); void nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh); +void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh); void nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool flush); diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h index 0b86c44878e0..59759c4fb62e 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h @@ -4,7 +4,8 @@ struct nv_device_v0 { __u8 version; - __u8 pad01[7]; + __u8 priv; + __u8 pad02[6]; __u64 device; /* device identifier, ~0 for client default */ }; diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index ba2c28ea43d2..c68cc957248e 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -61,8 +61,6 @@ #define NV10_CHANNEL_DMA /* cl506b.h */ 0x0000006e #define NV17_CHANNEL_DMA /* cl506b.h */ 0x0000176e #define NV40_CHANNEL_DMA /* cl506b.h */ 0x0000406e -#define NV50_CHANNEL_DMA /* cl506e.h */ 0x0000506e -#define G82_CHANNEL_DMA /* cl826e.h */ 0x0000826e #define NV50_CHANNEL_GPFIFO /* cl506f.h */ 0x0000506f #define G82_CHANNEL_GPFIFO /* cl826f.h */ 
0x0000826f diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h index 347d2c020bd1..5d9395e651b6 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/client.h +++ b/drivers/gpu/drm/nouveau/include/nvif/client.h @@ -9,7 +9,6 @@ struct nvif_client { const struct nvif_driver *driver; u64 version; u8 route; - bool super; }; int nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device, diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h index 8e85b936eaa0..7a3af05f7f98 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/driver.h +++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h @@ -11,7 +11,7 @@ struct nvif_driver { void (*fini)(void *priv); int (*suspend)(void *priv); int (*resume)(void *priv); - int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack); + int (*ioctl)(void *priv, void *data, u32 size, void **hack); void __iomem *(*map)(void *priv, u64 handle, u32 size); void (*unmap)(void *priv, void __iomem *ptr, u32 size); bool keep; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h index 5d7017fe5039..2f86606e708c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h @@ -13,7 +13,6 @@ struct nvkm_client { struct nvkm_client_notify *notify[32]; struct rb_root objroot; - bool super; void *data; int (*ntfy)(const void *, u32, const void *, u32); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h index 71ed147ad077..f52918a43246 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h @@ -4,5 +4,5 @@ #include <core/os.h> struct nvkm_client; -int nvkm_ioctl(struct nvkm_client *, bool, void *, u32, void **); +int nvkm_ioctl(struct nvkm_client *, void *, u32, void **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h index 0911e73f7424..70e7887ef4b4 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h @@ -15,7 +15,6 @@ struct nvkm_vma { u8 refd:3; /* Current page type (index, or NONE for unreferenced). */ bool used:1; /* Region allocated. */ bool part:1; /* Region was split from an allocated region by map(). */ - bool user:1; /* Region user-allocated. */ bool busy:1; /* Region busy (for temporarily preventing user access). */ bool mapped:1; /* Region contains valid pages. */ struct nvkm_memory *memory; /* Memory currently mapped into VMA. 
*/ diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index b45ec3086285..4107b7006539 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -570,11 +570,9 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) } client->route = NVDRM_OBJECT_ABI16; - client->super = true; ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle, NV_DMA_IN_MEMORY, &args, sizeof(args), &ntfy->object); - client->super = false; client->route = NVDRM_OBJECT_NVIF; if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 40362600eed2..80099ef75702 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -86,12 +86,6 @@ nouveau_channel_del(struct nouveau_channel **pchan) struct nouveau_channel *chan = *pchan; if (chan) { struct nouveau_cli *cli = (void *)chan->user.client; - bool super; - - if (cli) { - super = cli->base.super; - cli->base.super = true; - } if (chan->fence) nouveau_fence(chan->drm)->context_del(chan); @@ -111,9 +105,6 @@ nouveau_channel_del(struct nouveau_channel **pchan) nouveau_bo_unpin(chan->push.buffer); nouveau_bo_ref(NULL, &chan->push.buffer); kfree(chan); - - if (cli) - cli->base.super = super; } *pchan = NULL; } @@ -512,20 +503,16 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device, struct nouveau_channel **pchan) { struct nouveau_cli *cli = (void *)device->object.client; - bool super; int ret; /* hack until fencenv50 is fixed, and agp access relaxed */ - super = cli->base.super; - cli->base.super = true; - ret = nouveau_channel_ind(drm, device, arg0, priv, pchan); if (ret) { NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret); ret = nouveau_channel_dma(drm, device, pchan); if (ret) { NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret); - goto done; + return ret; } } @@ -533,15 +520,13 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device, if (ret) { NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret); nouveau_channel_del(pchan); - goto done; + return ret; } ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst); if (ret) nouveau_channel_del(pchan); -done: - cli->base.super = super; return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index a616cf4573b8..ba4cd5f83725 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -244,6 +244,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE, &(struct nv_device_v0) { .device = ~0, + .priv = true, }, sizeof(struct nv_device_v0), &cli->device); if (ret) { @@ -1086,8 +1087,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) if (ret) goto done; - cli->base.super = false; - fpriv->driver_priv = cli; mutex_lock(&drm->client.mutex); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 0de6549fb875..2ca3207c13fc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -41,8 +41,6 @@ nouveau_mem_map(struct nouveau_mem *mem, struct gf100_vmm_map_v0 gf100; } args; u32 argc = 0; - bool super; - int ret; switch (vmm->object.oclass) { case NVIF_CLASS_VMM_NV04: @@ -73,12 +71,7 @@ nouveau_mem_map(struct nouveau_mem *mem, return -ENOSYS; } - super = vmm->object.client->super; - vmm->object.client->super = true; - ret = 
nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, - &mem->mem, 0); - vmm->object.client->super = super; - return ret; + return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0); } void @@ -99,7 +92,6 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt) struct nouveau_drm *drm = cli->drm; struct nvif_mmu *mmu = &cli->mmu; struct nvif_mem_ram_v0 args = {}; - bool super = cli->base.super; u8 type; int ret; @@ -122,11 +114,9 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt) args.dma = tt->dma_address; mutex_lock(&drm->master.lock); - cli->base.super = true; ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT, reg->num_pages << PAGE_SHIFT, &args, sizeof(args), &mem->mem); - cli->base.super = super; mutex_unlock(&drm->master.lock); return ret; } @@ -138,12 +128,10 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page) struct nouveau_cli *cli = mem->cli; struct nouveau_drm *drm = cli->drm; struct nvif_mmu *mmu = &cli->mmu; - bool super = cli->base.super; u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page); int ret; mutex_lock(&drm->master.lock); - cli->base.super = true; switch (cli->mem->oclass) { case NVIF_CLASS_MEM_GF100: ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass, @@ -167,7 +155,6 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page) WARN_ON(1); break; } - cli->base.super = super; mutex_unlock(&drm->master.lock); reg->start = mem->mem.addr >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c index b3f29b1ce9ea..52f5793b7274 100644 --- a/drivers/gpu/drm/nouveau/nouveau_nvif.c +++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c @@ -52,9 +52,9 @@ nvkm_client_map(void *priv, u64 handle, u32 size) } static int -nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack) +nvkm_client_ioctl(void *priv, void *data, u32 size, void **hack) { - return nvkm_ioctl(priv, super, data, size, hack); + return nvkm_ioctl(priv, data, size, hack); } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 82b583f5fca8..b0c3422cb01f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -237,14 +237,11 @@ void nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit) { if (limit > start) { - bool super = svmm->vmm->vmm.object.client->super; - svmm->vmm->vmm.object.client->super = true; nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR, &(struct nvif_vmm_pfnclr_v0) { .addr = start, .size = limit - start, }, sizeof(struct nvif_vmm_pfnclr_v0)); - svmm->vmm->vmm.object.client->super = super; } } @@ -634,9 +631,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, NVIF_VMM_PFNMAP_V0_A | NVIF_VMM_PFNMAP_V0_HOST; - svmm->vmm->vmm.object.client->super = true; ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); - svmm->vmm->vmm.object.client->super = false; mutex_unlock(&svmm->mutex); unlock_page(page); @@ -702,9 +697,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm, nouveau_hmm_convert_pfn(drm, &range, args); - svmm->vmm->vmm.object.client->super = true; ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); - svmm->vmm->vmm.object.client->super = false; mutex_unlock(&svmm->mutex); out: @@ -928,10 +921,8 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm, mutex_lock(&svmm->mutex); - svmm->vmm->vmm.object.client->super = true; ret = 
nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) + npages * sizeof(args->p.phys[0]), NULL); - svmm->vmm->vmm.object.client->super = false; mutex_unlock(&svmm->mutex); } diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 9dc10b17ad34..5da1f4d223d7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -32,6 +32,9 @@ #include <nvif/event.h> #include <nvif/ioctl.h> +#include <nvif/class.h> +#include <nvif/cl0080.h> + struct usif_notify_p { struct drm_pending_event base; struct { @@ -261,7 +264,7 @@ usif_object_dtor(struct usif_object *object) } static int -usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) +usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16) { struct nouveau_cli *cli = nouveau_cli(f); struct nvif_client *client = &cli->base; @@ -271,23 +274,48 @@ usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) struct usif_object *object; int ret = -ENOSYS; + if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) + return ret; + + switch (args->v0.oclass) { + case NV_DMA_FROM_MEMORY: + case NV_DMA_TO_MEMORY: + case NV_DMA_IN_MEMORY: + return -EINVAL; + case NV_DEVICE: { + union { + struct nv_device_v0 v0; + } *args = data; + + if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) + return ret; + + args->v0.priv = false; + break; + } + default: + if (!parent_abi16) + return -EINVAL; + break; + } + if (!(object = kmalloc(sizeof(*object), GFP_KERNEL))) return -ENOMEM; list_add(&object->head, &cli->objects); - if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { - object->route = args->v0.route; - object->token = args->v0.token; - args->v0.route = NVDRM_OBJECT_USIF; - args->v0.token = (unsigned long)(void *)object; - ret = nvif_client_ioctl(client, argv, argc); - args->v0.token = object->token; - args->v0.route = object->route; + object->route = args->v0.route; + object->token = args->v0.token; + args->v0.route = NVDRM_OBJECT_USIF; + args->v0.token = (unsigned long)(void *)object; + ret = nvif_client_ioctl(client, argv, argc); + if (ret) { + usif_object_dtor(object); + return ret; } - if (ret) - usif_object_dtor(object); - return ret; + args->v0.token = object->token; + args->v0.route = object->route; + return 0; } int @@ -301,6 +329,7 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) struct nvif_ioctl_v0 v0; } *argv = data; struct usif_object *object; + bool abi16 = false; u8 owner; int ret; @@ -331,11 +360,13 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) mutex_unlock(&cli->mutex); goto done; } + + abi16 = true; } switch (argv->v0.type) { case NVIF_IOCTL_V0_NEW: - ret = usif_object_new(filp, data, size, argv, argc); + ret = usif_object_new(filp, data, size, argv, argc, abi16); break; case NVIF_IOCTL_V0_NTFY_NEW: ret = usif_notify_new(filp, data, size, argv, argc); diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c index 12644f811b3e..a3264a0e933a 100644 --- a/drivers/gpu/drm/nouveau/nvif/client.c +++ b/drivers/gpu/drm/nouveau/nvif/client.c @@ -32,7 +32,7 @@ int nvif_client_ioctl(struct nvif_client *client, void *data, u32 size) { - return client->driver->ioctl(client->object.priv, client->super, data, size, NULL); + return client->driver->ioctl(client->object.priv, data, size, NULL); } int @@ -80,7 +80,6 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 
device, client->object.client = client; client->object.handle = ~0; client->route = NVIF_IOCTL_V0_ROUTE_NVIF; - client->super = true; client->driver = parent->driver; if (ret == 0) { diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c index 671a5c0199e0..dce1ecee2af5 100644 --- a/drivers/gpu/drm/nouveau/nvif/object.c +++ b/drivers/gpu/drm/nouveau/nvif/object.c @@ -44,8 +44,7 @@ nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack) } else return -ENOSYS; - return client->driver->ioctl(client->object.priv, client->super, - data, size, hack); + return client->driver->ioctl(client->object.priv, data, size, hack); } void diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c index d777df5a64e6..735cb6816f10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c @@ -426,8 +426,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type, } int -nvkm_ioctl(struct nvkm_client *client, bool supervisor, - void *data, u32 size, void **hack) +nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack) { struct nvkm_object *object = &client->object; union { @@ -435,7 +434,6 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor, } *args = data; int ret = -ENOSYS; - client->super = supervisor; nvif_ioctl(object, "size %d\n", size); if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index b930f539feec..93ddf63d1114 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2624,6 +2624,26 @@ nv174_chipset = { .dma = { 0x00000001, gv100_dma_new }, }; +static const struct nvkm_device_chip +nv177_chipset = { + .name = "GA107", + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .devinit = { 0x00000001, ga100_devinit_new }, + .fb = { 0x00000001, ga102_fb_new }, + .gpio = { 0x00000001, ga102_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, ga100_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, ga100_top_new }, + .disp = { 0x00000001, ga102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, +}; + static int nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size, struct nvkm_notify *notify) @@ -3049,6 +3069,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x168: device->chip = &nv168_chipset; break; case 0x172: device->chip = &nv172_chipset; break; case 0x174: device->chip = &nv174_chipset; break; + case 0x177: device->chip = &nv177_chipset; break; default: if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) { switch (device->chipset) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c index fea9d8f2b10c..f28894fdede9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c @@ -397,7 +397,7 @@ nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size, return ret; /* give priviledged clients register access */ - if (client->super) + if (args->v0.priv) func = &nvkm_udevice_super; else func = 
&nvkm_udevice; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c index 55fbfe28c6dc..9669472a2749 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c @@ -440,7 +440,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps) return ret; } -static void +void nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior) { struct nvkm_dp *dp = nvkm_dp(outp); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h index 428b3f488f03..e484d0c3b0d4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h @@ -32,6 +32,7 @@ struct nvkm_dp { int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *, struct nvkm_outp **); +void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *); /* DPCD Receiver Capabilities */ #define DPCD_RC00_DPCD_REV 0x00000 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index dffcac249211..129982fef7ef 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c @@ -22,6 +22,7 @@ * Authors: Ben Skeggs */ #include "outp.h" +#include "dp.h" #include "ior.h" #include <subdev/bios.h> @@ -257,6 +258,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp) if (!ior->arm.head || ior->arm.proto != proto) { OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head, ior->arm.proto, proto); + + /* The EFI GOP driver on Ampere can leave unused DP links routed, + * which we don't expect. The DisableLT IED script *should* get + * us back to where we need to be. + */ + if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP) + nvkm_dp_disable(outp, ior); + return; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c index d20cc0681a88..797131ed7d67 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c @@ -26,7 +26,6 @@ #include <core/client.h> #include <core/gpuobj.h> #include <subdev/fb.h> -#include <subdev/instmem.h> #include <nvif/cl0002.h> #include <nvif/unpack.h> @@ -72,11 +71,7 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma, union { struct nv_dma_v0 v0; } *args = *pdata; - struct nvkm_device *device = dma->engine.subdev.device; - struct nvkm_client *client = oclass->client; struct nvkm_object *parent = oclass->parent; - struct nvkm_instmem *instmem = device->imem; - struct nvkm_fb *fb = device->fb; void *data = *pdata; u32 size = *psize; int ret = -ENOSYS; @@ -109,23 +104,13 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma, dmaobj->target = NV_MEM_TARGET_VM; break; case NV_DMA_V0_TARGET_VRAM: - if (!client->super) { - if (dmaobj->limit >= fb->ram->size - instmem->reserved) - return -EACCES; - if (device->card_type >= NV_50) - return -EACCES; - } dmaobj->target = NV_MEM_TARGET_VRAM; break; case NV_DMA_V0_TARGET_PCI: - if (!client->super) - return -EACCES; dmaobj->target = NV_MEM_TARGET_PCI; break; case NV_DMA_V0_TARGET_PCI_US: case NV_DMA_V0_TARGET_AGP: - if (!client->super) - return -EACCES; dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP; break; default: diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild index 90e9a0972a44..3209eb7af65f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild +++ 
b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild @@ -27,8 +27,6 @@ nvkm-y += nvkm/engine/fifo/dmanv04.o nvkm-y += nvkm/engine/fifo/dmanv10.o nvkm-y += nvkm/engine/fifo/dmanv17.o nvkm-y += nvkm/engine/fifo/dmanv40.o -nvkm-y += nvkm/engine/fifo/dmanv50.o -nvkm-y += nvkm/engine/fifo/dmag84.o nvkm-y += nvkm/engine/fifo/gpfifonv50.o nvkm-y += nvkm/engine/fifo/gpfifog84.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h index af8bdf275552..3a95730d7ff5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h @@ -48,8 +48,6 @@ void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int); int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push, const struct nvkm_oclass *, struct nv50_fifo_chan *); -extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass; extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass; -extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass; extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c deleted file mode 100644 index fc34cddcd2f5..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Ben Skeggs - */ -#include "channv50.h" - -#include <core/client.h> -#include <core/ramht.h> - -#include <nvif/class.h> -#include <nvif/cl826e.h> -#include <nvif/unpack.h> - -static int -g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, - void *data, u32 size, struct nvkm_object **pobject) -{ - struct nvkm_object *parent = oclass->parent; - union { - struct g82_channel_dma_v0 v0; - } *args = data; - struct nv50_fifo *fifo = nv50_fifo(base); - struct nv50_fifo_chan *chan; - int ret = -ENOSYS; - - nvif_ioctl(parent, "create channel dma size %d\n", size); - if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { - nvif_ioctl(parent, "create channel dma vers %d vmm %llx " - "pushbuf %llx offset %016llx\n", - args->v0.version, args->v0.vmm, args->v0.pushbuf, - args->v0.offset); - if (!args->v0.pushbuf) - return -EINVAL; - } else - return ret; - - if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) - return -ENOMEM; - *pobject = &chan->base.object; - - ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf, - oclass, chan); - if (ret) - return ret; - - args->v0.chid = chan->base.chid; - - nvkm_kmap(chan->ramfc); - nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078); - nvkm_wo32(chan->ramfc, 0x44, 0x01003fff); - nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4); - nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff); - nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff); - nvkm_wo32(chan->ramfc, 0x78, 0x00000000); - nvkm_wo32(chan->ramfc, 0x7c, 0x30000001); - nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | - (4 << 24) /* SEARCH_FULL */ | - (chan->ramht->gpuobj->node->offset >> 4)); - nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10); - nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12); - nvkm_done(chan->ramfc); - return 0; -} - -const struct nvkm_fifo_chan_oclass -g84_fifo_dma_oclass = { - .base.oclass = G82_CHANNEL_DMA, - .base.minver = 0, - .base.maxver = 0, - .ctor = g84_fifo_dma_new, -}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c deleted file mode 100644 index 8043718ad150..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ -#include "channv50.h" - -#include <core/client.h> -#include <core/ramht.h> - -#include <nvif/class.h> -#include <nvif/cl506e.h> -#include <nvif/unpack.h> - -static int -nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, - void *data, u32 size, struct nvkm_object **pobject) -{ - struct nvkm_object *parent = oclass->parent; - union { - struct nv50_channel_dma_v0 v0; - } *args = data; - struct nv50_fifo *fifo = nv50_fifo(base); - struct nv50_fifo_chan *chan; - int ret = -ENOSYS; - - nvif_ioctl(parent, "create channel dma size %d\n", size); - if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { - nvif_ioctl(parent, "create channel dma vers %d vmm %llx " - "pushbuf %llx offset %016llx\n", - args->v0.version, args->v0.vmm, args->v0.pushbuf, - args->v0.offset); - if (!args->v0.pushbuf) - return -EINVAL; - } else - return ret; - - if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) - return -ENOMEM; - *pobject = &chan->base.object; - - ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf, - oclass, chan); - if (ret) - return ret; - - args->v0.chid = chan->base.chid; - - nvkm_kmap(chan->ramfc); - nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078); - nvkm_wo32(chan->ramfc, 0x44, 0x01003fff); - nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4); - nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff); - nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff); - nvkm_wo32(chan->ramfc, 0x78, 0x00000000); - nvkm_wo32(chan->ramfc, 0x7c, 0x30000001); - nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | - (4 << 24) /* SEARCH_FULL */ | - (chan->ramht->gpuobj->node->offset >> 4)); - nvkm_done(chan->ramfc); - return 0; -} - -const struct nvkm_fifo_chan_oclass -nv50_fifo_dma_oclass = { - .base.oclass = NV50_CHANNEL_DMA, - .base.minver = 0, - .base.maxver = 0, - .ctor = nv50_fifo_dma_new, -}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c index c0a7d0f21dac..3885c3830b94 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c @@ -119,7 +119,6 @@ g84_fifo = { .uevent_init = g84_fifo_uevent_init, .uevent_fini = g84_fifo_uevent_fini, .chan = { - &g84_fifo_dma_oclass, &g84_fifo_gpfifo_oclass, NULL }, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c index b6900a52bcce..ae6c4d846eb5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c @@ -341,8 +341,6 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass, "runlist %016llx priv %d\n", args->v0.version, args->v0.vmm, args->v0.ioffset, args->v0.ilength, args->v0.runlist, args->v0.priv); - if (args->v0.priv && !oclass->client->super) - return -EINVAL; return gk104_fifo_gpfifo_new_(fifo, &args->v0.runlist, &args->v0.chid, diff --git 
a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c index ee4967b706a7..743791c514fe 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c @@ -226,8 +226,6 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass, "runlist %016llx priv %d\n", args->v0.version, args->v0.vmm, args->v0.ioffset, args->v0.ilength, args->v0.runlist, args->v0.priv); - if (args->v0.priv && !oclass->client->super) - return -EINVAL; return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo, &args->v0.runlist, &args->v0.chid, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c index abef7fb6e2d3..99aafa103a31 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c @@ -65,8 +65,6 @@ tu102_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass, "runlist %016llx priv %d\n", args->v0.version, args->v0.vmm, args->v0.ioffset, args->v0.ilength, args->v0.runlist, args->v0.priv); - if (args->v0.priv && !oclass->client->super) - return -EINVAL; return gv100_fifo_gpfifo_new_(&tu102_fifo_gpfifo, fifo, &args->v0.runlist, &args->v0.chid, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c index be94156ea248..a08742cf425a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c @@ -136,7 +136,6 @@ nv50_fifo = { .pause = nv04_fifo_pause, .start = nv04_fifo_start, .chan = { - &nv50_fifo_dma_oclass, &nv50_fifo_gpfifo_oclass, NULL }, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c index fac2f9a45ea6..e530bb8b3b17 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c @@ -41,7 +41,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle) object = nvkm_object_search(client, handle, &nvkm_umem); if (IS_ERR(object)) { - if (client->super && client != master) { + if (client != master) { spin_lock(&master->lock); list_for_each_entry(umem, &master->umem, head) { if (umem->object.object == handle) { @@ -53,8 +53,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle) } } else { umem = nvkm_umem(object); - if (!umem->priv || client->super) - memory = nvkm_memory_ref(umem->memory); + memory = nvkm_memory_ref(umem->memory); } return memory ? 
memory : ERR_PTR(-ENOENT); @@ -167,7 +166,6 @@ nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, nvkm_object_ctor(&nvkm_umem, oclass, &umem->object); umem->mmu = mmu; umem->type = mmu->type[type].type; - umem->priv = oclass->client->super; INIT_LIST_HEAD(&umem->head); *pobject = &umem->object; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h index 85cf692d620a..d56a594016cc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h @@ -8,7 +8,6 @@ struct nvkm_umem { struct nvkm_object object; struct nvkm_mmu *mmu; u8 type:8; - bool priv:1; bool mappable:1; bool io:1; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c index 0e4b8941da37..6870fda4b188 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c @@ -34,7 +34,7 @@ nvkm_ummu_sclass(struct nvkm_object *object, int index, { struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu; - if (mmu->func->mem.user.oclass && oclass->client->super) { + if (mmu->func->mem.user.oclass) { if (index-- == 0) { oclass->base = mmu->func->mem.user; oclass->ctor = nvkm_umem_new; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c index c43b8248c682..d6a1f8d04c09 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c @@ -45,7 +45,6 @@ nvkm_uvmm_search(struct nvkm_client *client, u64 handle) static int nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_pfnclr_v0 v0; } *args = argv; @@ -59,9 +58,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc) } else return ret; - if (!client->super) - return -ENOENT; - if (size) { mutex_lock(&vmm->mutex); ret = nvkm_vmm_pfn_unmap(vmm, addr, size); @@ -74,7 +70,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc) static int nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_pfnmap_v0 v0; } *args = argv; @@ -93,9 +88,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) } else return ret; - if (!client->super) - return -ENOENT; - if (size) { mutex_lock(&vmm->mutex); ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys); @@ -108,7 +100,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) static int nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_unmap_v0 v0; } *args = argv; @@ -130,9 +121,8 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) goto done; } - if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) { - VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr, - vma->user, !client->super, vma->busy); + if (ret = -ENOENT, vma->busy) { + VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy); goto done; } @@ -181,9 +171,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc) goto fail; } - if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) { - VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr, - vma->user, !client->super, vma->busy); + if (ret = -ENOENT, vma->busy) { + VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy); goto fail; } @@ -230,7 +219,6 @@ fail: static 
int nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_put_v0 v0; } *args = argv; @@ -252,9 +240,8 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc) goto done; } - if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) { - VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr, - vma->user, !client->super, vma->busy); + if (ret = -ENOENT, vma->busy) { + VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy); goto done; } @@ -268,7 +255,6 @@ done: static int nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_get_v0 v0; } *args = argv; @@ -297,7 +283,6 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc) return ret; args->v0.addr = vma->addr; - vma->user = !client->super; return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c index 710f3f8dc7c9..8bf00b396ec1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c @@ -774,7 +774,6 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail) new->refd = vma->refd; new->used = vma->used; new->part = vma->part; - new->user = vma->user; new->busy = vma->busy; new->mapped = vma->mapped; list_add(&new->head, &vma->head); @@ -951,7 +950,7 @@ nvkm_vmm_node_split(struct nvkm_vmm *vmm, static void nvkm_vma_dump(struct nvkm_vma *vma) { - printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n", + printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n", vma->addr, (u64)vma->size, vma->used ? '-' : 'F', vma->mapref ? 'R' : '-', @@ -959,7 +958,6 @@ nvkm_vma_dump(struct nvkm_vma *vma) vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-', vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-', vma->part ? 'P' : '-', - vma->user ? 'U' : '-', vma->busy ? 'B' : '-', vma->mapped ? 
'M' : '-', vma->memory); @@ -1024,7 +1022,6 @@ nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size) vma->mapref = true; vma->sparse = false; vma->used = true; - vma->user = true; nvkm_vmm_node_insert(vmm, vma); list_add_tail(&vma->head, &vmm->list); return 0; @@ -1615,7 +1612,6 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma) vma->page = NVKM_VMA_PAGE_NONE; vma->refd = NVKM_VMA_PAGE_NONE; vma->used = false; - vma->user = false; nvkm_vmm_put_region(vmm, vma); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c index f02abd9cb4dd..b5e733783b5b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c @@ -534,15 +534,13 @@ int gp100_vmm_mthd(struct nvkm_vmm *vmm, struct nvkm_client *client, u32 mthd, void *argv, u32 argc) { - if (client->super) { - switch (mthd) { - case GP100_VMM_VN_FAULT_REPLAY: - return gp100_vmm_fault_replay(vmm, argv, argc); - case GP100_VMM_VN_FAULT_CANCEL: - return gp100_vmm_fault_cancel(vmm, argv, argc); - default: - break; - } + switch (mthd) { + case GP100_VMM_VN_FAULT_REPLAY: + return gp100_vmm_fault_replay(vmm, argv, argc); + case GP100_VMM_VN_FAULT_CANCEL: + return gp100_vmm_fault_cancel(vmm, argv, argc); + default: + break; } return -EINVAL; } diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index 74e3b460132b..2df59b3c2ea1 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -78,9 +78,7 @@ static int ttm_global_init(void) ttm_debugfs_root = debugfs_create_dir("ttm", NULL); if (IS_ERR(ttm_debugfs_root)) { - ret = PTR_ERR(ttm_debugfs_root); ttm_debugfs_root = NULL; - goto out; } /* Limit the number of pages in the pool to about 50% of the total diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c index f782d5e1aa25..03e1db5d1e8c 100644 --- a/drivers/infiniband/core/uverbs_std_types_mr.c +++ b/drivers/infiniband/core/uverbs_std_types_mr.c @@ -249,6 +249,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)( mr->uobject = uobj; atomic_inc(&pd->usecnt); + rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); + rdma_restrack_set_name(&mr->res, NULL); + rdma_restrack_add(&mr->res); uobj->object = mr; uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 283b6b81563c..ea0054c60fbc 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1681,6 +1681,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, if (nq) nq->budget++; atomic_inc(&rdev->srq_count); + spin_lock_init(&srq->lock); return 0; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index a8688a92c760..4678bd6ec7d6 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1397,7 +1397,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode) memset(&rattr, 0, sizeof(rattr)); rc = bnxt_re_register_netdev(rdev); if (rc) { - rtnl_unlock(); ibdev_err(&rdev->ibdev, "Failed to register with netedev: %#x\n", rc); return -EINVAL; diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c index 203e6ddcacbc..be4a07bd268a 100644 --- a/drivers/infiniband/hw/efa/efa_main.c +++ b/drivers/infiniband/hw/efa/efa_main.c @@ -357,6 +357,7 @@ static int efa_enable_msix(struct 
efa_dev *dev) } if (irq_num != msix_vecs) { + efa_disable_msix(dev); dev_err(&dev->pdev->dev, "Allocated %d MSI-X (out of %d requested)\n", irq_num, msix_vecs); diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index eb15c310d63d..e83dc562629e 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -3055,6 +3055,7 @@ static void __sdma_process_event(struct sdma_engine *sde, static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) { int i; + struct sdma_desc *descp; /* Handle last descriptor */ if (unlikely((tx->num_desc == (MAX_DESC - 1)))) { @@ -3075,12 +3076,10 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) if (unlikely(tx->num_desc == MAX_DESC)) goto enomem; - tx->descp = kmalloc_array( - MAX_DESC, - sizeof(struct sdma_desc), - GFP_ATOMIC); - if (!tx->descp) + descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC); + if (!descp) goto enomem; + tx->descp = descp; /* reserve last descriptor for coalescing */ tx->desc_limit = MAX_DESC - 1; diff --git a/drivers/infiniband/hw/irdma/Kconfig b/drivers/infiniband/hw/irdma/Kconfig index dab88286d549..b6f9c41bca51 100644 --- a/drivers/infiniband/hw/irdma/Kconfig +++ b/drivers/infiniband/hw/irdma/Kconfig @@ -6,7 +6,7 @@ config INFINIBAND_IRDMA depends on PCI depends on ICE && I40E select GENERIC_ALLOCATOR - select CONFIG_AUXILIARY_BUS + select AUXILIARY_BUS help This is an Intel(R) Ethernet Protocol Driver for RDMA driver that support E810 (iWARP/RoCE) and X722 (iWARP) network devices. diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index ae05e143401c..466f0a521940 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4462,7 +4462,8 @@ static void mlx5r_mp_remove(struct auxiliary_device *adev) mutex_lock(&mlx5_ib_multiport_mutex); if (mpi->ibdev) mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); - list_del(&mpi->list); + else + list_del(&mpi->list); mutex_unlock(&mlx5_ib_multiport_mutex); kfree(mpi); } diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c index 0ea9a5aa4ec0..1c1d1b53312d 100644 --- a/drivers/infiniband/sw/rxe/rxe_mcast.c +++ b/drivers/infiniband/sw/rxe/rxe_mcast.c @@ -85,7 +85,7 @@ int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp, goto out; } - elem = rxe_alloc(&rxe->mc_elem_pool); + elem = rxe_alloc_locked(&rxe->mc_elem_pool); if (!elem) { err = -ENOMEM; goto out; diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c index 85b812586ed4..72d95398e604 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.c +++ b/drivers/infiniband/sw/rxe/rxe_queue.c @@ -63,7 +63,7 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem, if (*num_elem < 0) goto err1; - q = kmalloc(sizeof(*q), GFP_KERNEL); + q = kzalloc(sizeof(*q), GFP_KERNEL); if (!q) goto err1; diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c index 27cc5f03611c..f6fae64861ce 100644 --- a/drivers/interconnect/qcom/icc-rpmh.c +++ b/drivers/interconnect/qcom/icc-rpmh.c @@ -20,18 +20,13 @@ void qcom_icc_pre_aggregate(struct icc_node *node) { size_t i; struct qcom_icc_node *qn; - struct qcom_icc_provider *qp; qn = node->data; - qp = to_qcom_provider(node->provider); for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) { qn->sum_avg[i] = 0; qn->max_peak[i] = 0; } - - for (i = 0; i < qn->num_bcms; i++) - qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); } 
EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate); @@ -49,8 +44,10 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, { size_t i; struct qcom_icc_node *qn; + struct qcom_icc_provider *qp; qn = node->data; + qp = to_qcom_provider(node->provider); if (!tag) tag = QCOM_ICC_TAG_ALWAYS; @@ -70,6 +67,9 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, *agg_avg += avg_bw; *agg_peak = max_t(u32, *agg_peak, peak_bw); + for (i = 0; i < qn->num_bcms; i++) + qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); + return 0; } EXPORT_SYMBOL_GPL(qcom_icc_aggregate); diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 98ba927aee1a..6f0df629353f 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -768,6 +768,7 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size, __iommu_dma_unmap(dev, sgt->sgl->dma_address, size); __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT); sg_free_table(&sh->sgt); + kfree(sh); } #endif /* CONFIG_DMA_REMAP */ diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index c6cf44a6c923..9ec374e17469 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -511,7 +511,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, u32 pasid, bool fault_ignore) { struct pasid_entry *pte; - u16 did; + u16 did, pgtt; pte = intel_pasid_get_entry(dev, pasid); if (WARN_ON(!pte)) @@ -521,13 +521,19 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, return; did = pasid_get_domain_id(pte); + pgtt = pasid_pte_get_pgtt(pte); + intel_pasid_clear_entry(dev, pasid, fault_ignore); if (!ecap_coherent(iommu->ecap)) clflush_cache_range(pte, sizeof(*pte)); pasid_cache_invalidation_with_pasid(iommu, did, pasid); - qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); + + if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY) + qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); + else + iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); /* Device IOTLB doesn't need to be flushed in caching mode. */ if (!cap_caching_mode(iommu->cap)) diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index 5ff61c3d401f..c11bc8b833b8 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -99,6 +99,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte) return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT; } +/* Get PGTT field of a PASID table entry */ +static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte) +{ + return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7); +} + extern unsigned int intel_pasid_max_id; int intel_pasid_alloc_table(struct device *dev); void intel_pasid_free_table(struct device *dev); diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 9b0f22bc0514..4b9b3f35ba0e 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -675,7 +675,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid) kfree_rcu(sdev, rcu); if (list_empty(&svm->devs)) { - intel_svm_free_pasid(mm); if (svm->notifier.ops) { mmu_notifier_unregister(&svm->notifier, mm); /* Clear mm's pasid. */ @@ -690,6 +689,8 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid) kfree(svm); } } + /* Drop a PASID reference and free it if no reference. 
*/ + intel_svm_free_pasid(mm); } out: return ret; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 5419c4b9f27a..63f0af10c403 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -924,6 +924,9 @@ void iommu_group_remove_device(struct device *dev) struct iommu_group *group = dev->iommu_group; struct group_device *tmp_device, *device = NULL; + if (!group) + return; + dev_info(dev, "Removing from iommu group %d\n", group->id); /* Pre-notify listeners that a device is being removed. */ diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c index 3461b0a7dc62..cbfdadecb23b 100644 --- a/drivers/ipack/carriers/tpci200.c +++ b/drivers/ipack/carriers/tpci200.c @@ -89,16 +89,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200) free_irq(tpci200->info->pdev->irq, (void *) tpci200); pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs); - pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR); pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR); pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR); pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR); - pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); pci_disable_device(tpci200->info->pdev); - pci_dev_put(tpci200->info->pdev); } static void tpci200_enable_irq(struct tpci200_board *tpci200, @@ -257,7 +254,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_disable_pci; + goto err_disable_device; } /* Request IO ID INT space (Bar 3) */ @@ -269,7 +266,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_release_ip_space; + goto err_ip_interface_bar; } /* Request MEM8 space (Bar 5) */ @@ -280,7 +277,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_release_ioid_int_space; + goto err_io_id_int_spaces_bar; } /* Request MEM16 space (Bar 4) */ @@ -291,7 +288,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_release_mem8_space; + goto err_mem8_space_bar; } /* Map internal tpci200 driver user space */ @@ -305,7 +302,7 @@ static int tpci200_register(struct tpci200_board *tpci200) tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); res = -ENOMEM; - goto out_release_mem8_space; + goto err_mem16_space_bar; } /* Initialize lock that protects interface_regs */ @@ -344,18 +341,22 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) unable to register IRQ !", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_release_ioid_int_space; + goto err_interface_regs; } return 0; -out_release_mem8_space: +err_interface_regs: + pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs); +err_mem16_space_bar: + pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR); +err_mem8_space_bar: pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR); -out_release_ioid_int_space: +err_io_id_int_spaces_bar: 
pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR); -out_release_ip_space: +err_ip_interface_bar: pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR); -out_disable_pci: +err_disable_device: pci_disable_device(tpci200->info->pdev); return res; } @@ -527,7 +528,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL); if (!tpci200->info) { ret = -ENOMEM; - goto out_err_info; + goto err_tpci200; } pci_dev_get(pdev); @@ -538,7 +539,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory"); ret = -EBUSY; - goto out_err_pci_request; + goto err_tpci200_info; } tpci200->info->cfg_regs = ioremap( pci_resource_start(pdev, TPCI200_CFG_MEM_BAR), @@ -546,7 +547,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, if (!tpci200->info->cfg_regs) { dev_err(&pdev->dev, "Failed to map PCI Configuration Memory"); ret = -EFAULT; - goto out_err_ioremap; + goto err_request_region; } /* Disable byte swapping for 16 bit IP module access. This will ensure @@ -569,7 +570,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "error during tpci200 install\n"); ret = -ENODEV; - goto out_err_install; + goto err_cfg_regs; } /* Register the carrier in the industry pack bus driver */ @@ -581,7 +582,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "error registering the carrier on ipack driver\n"); ret = -EFAULT; - goto out_err_bus_register; + goto err_tpci200_install; } /* save the bus number given by ipack to logging purpose */ @@ -592,19 +593,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev, tpci200_create_device(tpci200, i); return 0; -out_err_bus_register: +err_tpci200_install: tpci200_uninstall(tpci200); - /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */ - tpci200->info->cfg_regs = NULL; -out_err_install: - if (tpci200->info->cfg_regs) - iounmap(tpci200->info->cfg_regs); -out_err_ioremap: +err_cfg_regs: + pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); +err_request_region: pci_release_region(pdev, TPCI200_CFG_MEM_BAR); -out_err_pci_request: - pci_dev_put(pdev); +err_tpci200_info: kfree(tpci200->info); -out_err_info: + pci_dev_put(pdev); +err_tpci200: kfree(tpci200); return ret; } @@ -614,6 +612,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200) ipack_bus_unregister(tpci200->info->ipack_bus); tpci200_uninstall(tpci200); + pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); + + pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); + + pci_dev_put(tpci200->info->pdev); + kfree(tpci200->info); kfree(tpci200); } diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index d333130d1531..c3229d8c7041 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2018,8 +2018,8 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t) continue; } - dw_mci_stop_dma(host); send_stop_abort(host, data); + dw_mci_stop_dma(host); state = STATE_SENDING_STOP; break; } @@ -2043,10 +2043,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t) */ if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) { - dw_mci_stop_dma(host); if (!(host->data_status & (SDMMC_INT_DRTO | SDMMC_INT_EBE))) send_stop_abort(host, data); + dw_mci_stop_dma(host); state = STATE_DATA_ERROR; break; } @@ -2079,10 +2079,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t) */ if 
(test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) { - dw_mci_stop_dma(host); if (!(host->data_status & (SDMMC_INT_DRTO | SDMMC_INT_EBE))) send_stop_abort(host, data); + dw_mci_stop_dma(host); state = STATE_DATA_ERROR; break; } diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c index 51db30acf4dc..fdaa11f92fe6 100644 --- a/drivers/mmc/host/mmci_stm32_sdmmc.c +++ b/drivers/mmc/host/mmci_stm32_sdmmc.c @@ -479,8 +479,9 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host, u32 status; int ret = 0; - if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { - spin_lock_irqsave(&host->lock, flags); + spin_lock_irqsave(&host->lock, flags); + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 && + host->pwr_reg & MCI_STM32_VSWITCHEN) { mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH); spin_unlock_irqrestore(&host->lock, flags); @@ -492,9 +493,11 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host, writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC, host->base + MMCICLEAR); + spin_lock_irqsave(&host->lock, flags); mmci_write_pwrreg(host, host->pwr_reg & ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH)); } + spin_unlock_irqrestore(&host->lock, flags); return ret; } diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index cce390fe9cf3..e7565c671998 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c @@ -173,6 +173,23 @@ static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host) return pltfm_host->clock; } +/* + * There is a known bug on BCM2711's SDHCI core integration where the + * controller will hang when the difference between the core clock and the bus + * clock is too great. Specifically this can be reproduced under the following + * conditions: + * + * - No SD card plugged in, polling thread is running, probing cards at + * 100 kHz. + * - BCM2711's core clock configured at 500MHz or more + * + * So we set 200kHz as the minimum clock frequency available for that SoC. + */ +static unsigned int sdhci_iproc_bcm2711_get_min_clock(struct sdhci_host *host) +{ + return 200000; +} + static const struct sdhci_ops sdhci_iproc_ops = { .set_clock = sdhci_set_clock, .get_max_clock = sdhci_iproc_get_max_clock, @@ -271,13 +288,15 @@ static const struct sdhci_ops sdhci_iproc_bcm2711_ops = { .set_clock = sdhci_set_clock, .set_power = sdhci_set_power_and_bus_voltage, .get_max_clock = sdhci_iproc_get_max_clock, + .get_min_clock = sdhci_iproc_bcm2711_get_min_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = { - .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, .ops = &sdhci_iproc_bcm2711_ops, }; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index e44b7a66b73c..290a14cdc1cf 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2089,6 +2089,23 @@ static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery) sdhci_cqe_disable(mmc, recovery); } +static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) +{ + u32 count, start = 15; + + __sdhci_set_timeout(host, cmd); + count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL); + /* + * Update software timeout value if its value is less than hardware data + * timeout value. 
Qcom SoC hardware data timeout value was calculated + * using 4 * MCLK * 2^(count + 13). where MCLK = 1 / host->clock. + */ + if (cmd && cmd->data && host->clock > 400000 && + host->clock <= 50000000 && + ((1 << (count + start)) > (10 * host->clock))) + host->data_timeout = 22LL * NSEC_PER_SEC; +} + static const struct cqhci_host_ops sdhci_msm_cqhci_ops = { .enable = sdhci_msm_cqe_enable, .disable = sdhci_msm_cqe_disable, @@ -2438,6 +2455,7 @@ static const struct sdhci_ops sdhci_msm_ops = { .irq = sdhci_msm_cqe_irq, .dump_vendor_regs = sdhci_msm_dump_vendor_regs, .set_power = sdhci_set_power_noreg, + .set_timeout = sdhci_msm_set_timeout, }; static const struct sdhci_pltfm_data sdhci_msm_pdata = { diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 7370981e9b34..c6068a251fbe 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -224,8 +224,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, if (id == ESD_EV_CAN_ERROR_EXT) { u8 state = msg->msg.rx.data[0]; u8 ecc = msg->msg.rx.data[1]; - u8 txerr = msg->msg.rx.data[2]; - u8 rxerr = msg->msg.rx.data[3]; + u8 rxerr = msg->msg.rx.data[2]; + u8 txerr = msg->msg.rx.data[3]; skb = alloc_can_err_skb(priv->netdev, &cf); if (skb == NULL) { diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 3faff95fd49f..542cfc4ccb08 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -1473,9 +1473,6 @@ static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port, u16 data; u8 gates; - cur++; - next++; - if (i == schedule->num_entries) gates = initial->gate_mask ^ cur->gate_mask; @@ -1504,6 +1501,9 @@ static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port, (initial->gate_mask << TR_GCLCMD_INIT_GATE_STATES_SHIFT); hellcreek_write(hellcreek, data, TR_GCLCMD); + + cur++; + next++; } } @@ -1551,7 +1551,7 @@ static bool hellcreek_schedule_startable(struct hellcreek *hellcreek, int port) /* Calculate difference to admin base time */ base_time_ns = ktime_to_ns(hellcreek_port->current_schedule->base_time); - return base_time_ns - current_ns < (s64)8 * NSEC_PER_SEC; + return base_time_ns - current_ns < (s64)4 * NSEC_PER_SEC; } static void hellcreek_start_schedule(struct hellcreek *hellcreek, int port) diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index b1d46dd8eaab..6ea003678798 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -1277,15 +1277,16 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) int err; /* mv88e6393x family errata 4.6: - * Cannot clear PwrDn bit on SERDES on port 0 if device is configured - * CPU_MGD mode or P0_mode is configured for [x]MII. - * Workaround: Set Port0 SERDES register 4.F002 bit 5=0 and bit 15=1. + * Cannot clear PwrDn bit on SERDES if device is configured CPU_MGD + * mode or P0_mode is configured for [x]MII. + * Workaround: Set SERDES register 4.F002 bit 5=0 and bit 15=1. * * It seems that after this workaround the SERDES is automatically * powered up (the bit is cleared), so power it down. 
*/ - if (lane == MV88E6393X_PORT0_LANE) { - err = mv88e6390_serdes_read(chip, MV88E6393X_PORT0_LANE, + if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE || + lane == MV88E6393X_PORT10_LANE) { + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC, &reg); if (err) diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 860c18fb7aae..80399c8980bd 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -677,11 +677,13 @@ static int xge_probe(struct platform_device *pdev) ret = register_netdev(ndev); if (ret) { netdev_err(ndev, "Failed to register netdev\n"); - goto err; + goto err_mdio_remove; } return 0; +err_mdio_remove: + xge_mdio_remove(ndev); err: free_netdev(ndev); diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 5c368a9cbbbc..c2e1f163bb14 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -275,6 +275,12 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, if (GEM_BFEXT(DMA_RXVALID, desc->addr)) { desc_ptp = macb_ptp_desc(bp, desc); + /* Unlikely but check */ + if (!desc_ptp) { + dev_warn_ratelimited(&bp->pdev->dev, + "Timestamp not supported in BD\n"); + return; + } gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts); memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); @@ -307,8 +313,11 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0) return -ENOMEM; - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; desc_ptp = macb_ptp_desc(queue->bp, desc); + /* Unlikely but check */ + if (!desc_ptp) + return -EINVAL; + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_timestamp = &queue->tx_timestamps[head]; tx_timestamp->skb = skb; /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index efa6c98d7459..0d9cda4ab303 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5068,6 +5068,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip) ret = -ENOMEM; goto bye; } + bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz); #endif params[0] = FW_PARAM_PFVF(CLIP_START); @@ -6781,13 +6782,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) setup_memwin(adapter); err = adap_init0(adapter, 0); -#ifdef CONFIG_DEBUG_FS - bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz); -#endif - setup_memwin_rdma(adapter); if (err) goto out_unmap_bar; + setup_memwin_rdma(adapter); + /* configure SGE_STAT_CFG_A to read WC stats */ if (!is_t4(adapter->params.chip)) t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) | diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 532523069d74..80461ab0ce9e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -938,20 +938,19 @@ static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len) return 0; } -static int hns3_dbg_get_cmd_index(struct hnae3_handle *handle, - const unsigned char *name, u32 *index) +static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index) { u32 i; for (i = 0; i < 
ARRAY_SIZE(hns3_dbg_cmd); i++) { - if (!strncmp(name, hns3_dbg_cmd[i].name, - strlen(hns3_dbg_cmd[i].name))) { + if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) { *index = i; return 0; } } - dev_err(&handle->pdev->dev, "unknown command(%s)\n", name); + dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n", + dbg_data->cmd); return -EINVAL; } @@ -1019,8 +1018,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, u32 index; int ret; - ret = hns3_dbg_get_cmd_index(handle, filp->f_path.dentry->d_iname, - &index); + ret = hns3_dbg_get_cmd_index(dbg_data, &index); if (ret) return ret; @@ -1090,6 +1088,7 @@ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd) char name[HNS3_DBG_FILE_NAME_LEN]; data[i].handle = handle; + data[i].cmd = hns3_dbg_cmd[cmd].cmd; data[i].qid = i; sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i); debugfs_create_file(name, 0400, entry_dir, &data[i], @@ -1110,6 +1109,7 @@ hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd) return -ENOMEM; data->handle = handle; + data->cmd = hns3_dbg_cmd[cmd].cmd; entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry; debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir, data, &hns3_dbg_fops); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h index f3766ff38bb7..bd8801065e02 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h @@ -22,6 +22,7 @@ struct hns3_dbg_item { struct hns3_dbg_data { struct hnae3_handle *handle; + enum hnae3_dbg_cmd cmd; u16 qid; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 13042f1cac6f..444c46241afc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -575,9 +575,13 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw) void hclge_cmd_uninit(struct hclge_dev *hdev) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + /* wait to ensure that the firmware completes the possible left + * over commands. 
+ */ + msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME); spin_lock_bh(&hdev->hw.cmq.csq.lock); spin_lock(&hdev->hw.cmq.crq.lock); - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); hclge_cmd_uninit_regs(&hdev->hw); spin_unlock(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 8e5be127909b..53872c7b2940 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -9,6 +9,7 @@ #include "hnae3.h" #define HCLGE_CMDQ_TX_TIMEOUT 30000 +#define HCLGE_CMDQ_CLEAR_WAIT_TIME 200 #define HCLGE_DESC_DATA_LEN 6 struct hclge_dev; @@ -270,6 +271,9 @@ enum hclge_opcode_type { /* Led command */ HCLGE_OPC_LED_STATUS_CFG = 0xB000, + /* clear hardware resource command */ + HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B, + /* NCL config command */ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 5bf5db91d16c..39f56f245d84 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -255,21 +255,12 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC]; struct hclge_vport *vport = hclge_get_vport(h); struct hclge_dev *hdev = vport->back; - u8 i, j, pfc_map, *prio_tc; int ret; + u8 i; memset(pfc, 0, sizeof(*pfc)); pfc->pfc_cap = hdev->pfc_max; - prio_tc = hdev->tm_info.prio_tc; - pfc_map = hdev->tm_info.hw_pfc_map; - - /* Pfc setting is based on TC */ - for (i = 0; i < hdev->tm_info.num_tc; i++) { - for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) { - if ((prio_tc[j] == i) && (pfc_map & BIT(i))) - pfc->pfc_en |= BIT(j); - } - } + pfc->pfc_en = hdev->tm_info.pfc_en; ret = hclge_pfc_tx_stats_get(hdev, requests); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index f6882090d38e..1b6bb0d71fcb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1551,6 +1551,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tm_info.hw_pfc_map = 0; hdev->wanted_umv_size = cfg.umv_space; hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; + hdev->gro_en = true; if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); @@ -1619,7 +1620,7 @@ static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_config_gro(struct hclge_dev *hdev, bool en) +static int hclge_config_gro(struct hclge_dev *hdev) { struct hclge_cfg_gro_status_cmd *req; struct hclge_desc desc; @@ -1631,7 +1632,7 @@ static int hclge_config_gro(struct hclge_dev *hdev, bool en) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); req = (struct hclge_cfg_gro_status_cmd *)desc.data; - req->gro_en = en ? 1 : 0; + req->gro_en = hdev->gro_en ? 
1 : 0; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -2954,12 +2955,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev) } if (state != hdev->hw.mac.link) { + hdev->hw.mac.link = state; client->ops->link_status_change(handle, state); hclge_config_mac_tnl_int(hdev, state); if (rclient && rclient->ops->link_status_change) rclient->ops->link_status_change(rhandle, state); - hdev->hw.mac.link = state; hclge_push_link_status(hdev); } @@ -10081,7 +10082,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, bool writen_to_tbl) { - struct hclge_vport_vlan_cfg *vlan; + struct hclge_vport_vlan_cfg *vlan, *tmp; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) + if (vlan->vlan_id == vlan_id) + return; vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); if (!vlan) @@ -11451,6 +11456,28 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev) } } +static int hclge_clear_hw_resource(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* This new command is only supported by new firmware, it will + * fail with older firmware. Error value -EOPNOSUPP can only be + * returned by older firmware running this command, to keep code + * backward compatible we will override this value and return + * success. + */ + if (ret && ret != -EOPNOTSUPP) { + dev_err(&hdev->pdev->dev, + "failed to clear hw resource, ret = %d\n", ret); + return ret; + } + return 0; +} + static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev) { if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) @@ -11504,6 +11531,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) if (ret) goto err_cmd_uninit; + ret = hclge_clear_hw_resource(hdev); + if (ret) + goto err_cmd_uninit; + ret = hclge_get_cap(hdev); if (ret) goto err_cmd_uninit; @@ -11568,7 +11599,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } - ret = hclge_config_gro(hdev, true); + ret = hclge_config_gro(hdev); if (ret) goto err_mdiobus_unreg; @@ -11951,7 +11982,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - ret = hclge_config_gro(hdev, true); + ret = hclge_config_gro(hdev); if (ret) return ret; @@ -12686,8 +12717,15 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + bool gro_en_old = hdev->gro_en; + int ret; - return hclge_config_gro(hdev, enable); + hdev->gro_en = enable; + ret = hclge_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; + + return ret; } static void hclge_sync_promisc_mode(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index ada5c68f2851..b6c1153945e5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -929,6 +929,7 @@ struct hclge_dev { unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)]; enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type; u8 fd_en; + bool gro_en; u16 wanted_umv_size; /* max available unicast mac vlan space */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index bd19a2d89f6c..d9ddb0a243d4 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -507,12 +507,17 @@ static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw) void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) { + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + /* wait to ensure that the firmware completes the possible left + * over commands. + */ + msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME); spin_lock_bh(&hdev->hw.cmq.csq.lock); spin_lock(&hdev->hw.cmq.crq.lock); - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); hclgevf_cmd_uninit_regs(&hdev->hw); spin_unlock(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); + hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 202feb70dba5..5b82177f98b4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -8,6 +8,7 @@ #include "hnae3.h" #define HCLGEVF_CMDQ_TX_TIMEOUT 30000 +#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME 200 #define HCLGEVF_CMDQ_RX_INVLD_B 0 #define HCLGEVF_CMDQ_RX_OUTVLD_B 1 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index ff651739f16b..60588b194fe7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -507,10 +507,10 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) link_state = test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; if (link_state != hdev->hw.mac.link) { + hdev->hw.mac.link = link_state; client->ops->link_status_change(handle, !!link_state); if (rclient && rclient->ops->link_status_change) rclient->ops->link_status_change(rhandle, !!link_state); - hdev->hw.mac.link = link_state; } clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); @@ -2489,6 +2489,8 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) { int ret; + hdev->gro_en = true; + ret = hclgevf_get_basic_info(hdev); if (ret) return ret; @@ -2551,7 +2553,7 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) return 0; } -static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) +static int hclgevf_config_gro(struct hclgevf_dev *hdev) { struct hclgevf_cfg_gro_status_cmd *req; struct hclgevf_desc desc; @@ -2564,7 +2566,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) false); req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; - req->gro_en = en ? 1 : 0; + req->gro_en = hdev->gro_en ? 
1 : 0; ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -3310,7 +3312,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) return ret; } - ret = hclgevf_config_gro(hdev, true); + ret = hclgevf_config_gro(hdev); if (ret) return ret; @@ -3395,7 +3397,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) goto err_config; - ret = hclgevf_config_gro(hdev, true); + ret = hclgevf_config_gro(hdev); if (ret) goto err_config; @@ -3647,8 +3649,15 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + bool gro_en_old = hdev->gro_en; + int ret; - return hclgevf_config_gro(hdev, enable); + hdev->gro_en = enable; + ret = hclgevf_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; + + return ret; } static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 6f222a3a0bf2..73e8bb5efc30 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -311,6 +311,8 @@ struct hclgevf_dev { u16 *vector_status; int *vector_irq; + bool gro_en; + unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; struct hclgevf_mac_table_cfg mac_table; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 772b2f8acd2e..b339b9bc0625 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -323,8 +323,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) flag = (u8)msg_q[5]; /* update upper layer with new link link status */ - hclgevf_update_link_status(hdev, link_status); hclgevf_update_speed_duplex(hdev, speed, duplex); + hclgevf_update_link_status(hdev, link_status); if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN) set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 2f97c9f5611d..60c582a16821 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1009,6 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) { u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; + u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ + u16 lat_enc_d = 0; /* latency decoded */ u16 lat_enc = 0; /* latency encoded */ if (link) { @@ -1062,7 +1064,17 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); max_ltr_enc = max_t(u16, max_snoop, max_nosnoop); - if (lat_enc > max_ltr_enc) + lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) * + (1U << (E1000_LTRV_SCALE_FACTOR * + ((lat_enc & E1000_LTRV_SCALE_MASK) + >> E1000_LTRV_SCALE_SHIFT))); + + max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) * + (1U << (E1000_LTRV_SCALE_FACTOR * + ((max_ltr_enc & E1000_LTRV_SCALE_MASK) + >> E1000_LTRV_SCALE_SHIFT))); + + if (lat_enc_d > max_ltr_enc_d) lat_enc = max_ltr_enc; } @@ -4124,13 +4136,17 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) return ret_val; if (!(data & valid_csum_mask)) { - data |= valid_csum_mask; - ret_val = e1000_write_nvm(hw, word, 1, &data); - 
if (ret_val) - return ret_val; - ret_val = e1000e_update_nvm_checksum(hw); - if (ret_val) - return ret_val; + e_dbg("NVM Checksum Invalid\n"); + + if (hw->mac.type < e1000_pch_cnp) { + data |= valid_csum_mask; + ret_val = e1000_write_nvm(hw, word, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; + } } return e1000e_validate_nvm_checksum_generic(hw); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 9b145f6248a8..d6a092e5ee74 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -277,8 +277,11 @@ /* Latency Tolerance Reporting */ #define E1000_LTRV 0x000F8 +#define E1000_LTRV_VALUE_MASK 0x000003FF #define E1000_LTRV_SCALE_MAX 5 #define E1000_LTRV_SCALE_FACTOR 5 +#define E1000_LTRV_SCALE_SHIFT 10 +#define E1000_LTRV_SCALE_MASK 0x00001C00 #define E1000_LTRV_REQ_SHIFT 15 #define E1000_LTRV_NOSNOOP_SHIFT 16 #define E1000_LTRV_SEND (1 << 30) diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 8c863d64930b..14afce82ef63 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -42,7 +42,9 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf)); if (status) - return -EIO; + /* We failed to locate the PBA, so just skip this entry */ + dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n", + ice_stat_str(status)); return 0; } diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index db1c63e8802a..c6c075a637ea 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -151,6 +151,9 @@ static void igc_release_hw_control(struct igc_adapter *adapter) struct igc_hw *hw = &adapter->hw; u32 ctrl_ext; + if (!pci_device_is_present(adapter->pdev)) + return; + /* Let firmware take over control of h/w */ ctrl_ext = rd32(IGC_CTRL_EXT); wr32(IGC_CTRL_EXT, @@ -4765,26 +4768,29 @@ void igc_down(struct igc_adapter *adapter) igc_ptp_suspend(adapter); - /* disable receives in the hardware */ - rctl = rd32(IGC_RCTL); - wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); - /* flush and sleep below */ - + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } /* set trans_start so we don't get spurious watchdogs during reset */ netif_trans_update(netdev); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); - /* disable transmits in the hardware */ - tctl = rd32(IGC_TCTL); - tctl &= ~IGC_TCTL_EN; - wr32(IGC_TCTL, tctl); - /* flush both disables and wait for them to finish */ - wrfl(); - usleep_range(10000, 20000); + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000); - igc_irq_disable(adapter); + igc_irq_disable(adapter); + } adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; @@ -5806,7 +5812,7 @@ static bool validate_schedule(struct igc_adapter *adapter, if (e->command != TC_TAPRIO_CMD_SET_GATES) return false; - for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { + for (i = 0; i < adapter->num_tx_queues; i++) { if 
(e->gate_mask & BIT(i)) queue_uses[i]++; @@ -5863,7 +5869,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, end_time += e->interval; - for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { + for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; if (!(e->gate_mask & BIT(i))) diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index f6848181cdbd..0f021909b430 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -1000,7 +1000,8 @@ void igc_ptp_suspend(struct igc_adapter *adapter) adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); - igc_ptp_time_save(adapter); + if (pci_device_is_present(adapter->pdev)) + igc_ptp_time_save(adapter); } /** diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 0e6d40701862..9d460a270601 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -105,7 +105,7 @@ #define MVNETA_VLAN_PRIO_TO_RXQ 0x2440 #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3)) #define MVNETA_PORT_STATUS 0x2444 -#define MVNETA_TX_IN_PRGRS BIT(1) +#define MVNETA_TX_IN_PRGRS BIT(0) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c /* Only exists on Armada XP and Armada 370 */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 6871d892eabf..15ef59aa34ff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -615,7 +615,12 @@ static int qed_enable_msix(struct qed_dev *cdev, rc = cnt; } - if (rc > 0) { + /* For VFs, we should return with an error in case we didn't get the + * exact number of msix vectors as we requested. + * Not doing that will lead to a crash when starting queues for + * this VF. 
+ */ + if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) { /* MSI-x configuration was achieved */ int_params->out.int_mode = QED_INT_MODE_MSIX; int_params->out.num_vectors = rc; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 4877cb88c31a..9837bdb89cd4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1866,6 +1866,7 @@ static void qede_sync_free_irqs(struct qede_dev *edev) } edev->int_info.used_cnt = 0; + edev->int_info.msix_cnt = 0; } static int qede_req_msix_irqs(struct qede_dev *edev) @@ -2434,7 +2435,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, goto out; err4: qede_sync_free_irqs(edev); - memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)); err3: qede_napi_disable_remove(edev); err2: diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 280ac0129572..ed817011a94a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -21,7 +21,6 @@ #include <linux/delay.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> -#include <linux/pm_runtime.h> #include "stmmac_platform.h" @@ -1529,9 +1528,6 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) return ret; } - pm_runtime_enable(dev); - pm_runtime_get_sync(dev); - if (bsp_priv->integrated_phy) rk_gmac_integrated_phy_powerup(bsp_priv); @@ -1540,14 +1536,9 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) static void rk_gmac_powerdown(struct rk_priv_data *gmac) { - struct device *dev = &gmac->pdev->dev; - if (gmac->integrated_phy) rk_gmac_integrated_phy_powerdown(gmac); - pm_runtime_put_sync(dev); - pm_runtime_disable(dev); - phy_power_on(gmac, false); gmac_clk_enable(gmac, false); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index fcdb1d20389b..43eead726886 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -339,9 +339,9 @@ static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv) static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv) { if (stmmac_xdp_is_enabled(priv)) - return XDP_PACKET_HEADROOM + NET_IP_ALIGN; + return XDP_PACKET_HEADROOM; - return NET_SKB_PAD + NET_IP_ALIGN; + return 0; } void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7b3fcf558603..ed0cd3920171 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -4915,6 +4915,10 @@ read_again: prefetch(np); + /* Ensure a valid XSK buffer before proceed */ + if (!buf->xdp) + break; + if (priv->extend_desc) stmmac_rx_extended_status(priv, &priv->dev->stats, &priv->xstats, @@ -4935,10 +4939,6 @@ read_again: continue; } - /* Ensure a valid XSK buffer before proceed */ - if (!buf->xdp) - break; - /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ if (likely(status & rx_not_ls)) { xsk_buff_free(buf->xdp); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 4f3b6437b114..8160087ee92f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -884,11 +884,13 @@ static int tc_setup_taprio(struct 
stmmac_priv *priv, return 0; disable: - mutex_lock(&priv->plat->est->lock); - priv->plat->est->enable = false; - stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, - priv->plat->clk_ptp_rate); - mutex_unlock(&priv->plat->est->lock); + if (priv->plat->est) { + mutex_lock(&priv->plat->est->lock); + priv->plat->est->enable = false; + stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + } priv->plat->fpe_cfg->enable = false; stmmac_fpe_configure(priv, priv->ioaddr, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c index 105821b53020..2a616c6f7cd0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c @@ -34,18 +34,18 @@ static int stmmac_xdp_enable_pool(struct stmmac_priv *priv, need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); if (need_update) { - stmmac_disable_rx_queue(priv, queue); - stmmac_disable_tx_queue(priv, queue); napi_disable(&ch->rx_napi); napi_disable(&ch->tx_napi); + stmmac_disable_rx_queue(priv, queue); + stmmac_disable_tx_queue(priv, queue); } set_bit(queue, priv->af_xdp_zc_qps); if (need_update) { - napi_enable(&ch->rxtx_napi); stmmac_enable_rx_queue(priv, queue); stmmac_enable_tx_queue(priv, queue); + napi_enable(&ch->rxtx_napi); err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX); if (err) @@ -72,10 +72,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue) need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); if (need_update) { + napi_disable(&ch->rxtx_napi); stmmac_disable_rx_queue(priv, queue); stmmac_disable_tx_queue(priv, queue); synchronize_rcu(); - napi_disable(&ch->rxtx_napi); } xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR); @@ -83,10 +83,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue) clear_bit(queue, priv->af_xdp_zc_qps); if (need_update) { - napi_enable(&ch->rx_napi); - napi_enable(&ch->tx_napi); stmmac_enable_rx_queue(priv, queue); stmmac_enable_tx_queue(priv, queue); + napi_enable(&ch->rx_napi); + napi_enable(&ch->tx_napi); } return 0; diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c index 975f7f9bdf4c..d127eb6e9257 100644 --- a/drivers/net/mhi_net.c +++ b/drivers/net/mhi_net.c @@ -319,7 +319,7 @@ static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev) u64_stats_init(&mhi_netdev->stats.tx_syncp); /* Start MHI channels */ - err = mhi_prepare_for_transfer(mhi_dev, 0); + err = mhi_prepare_for_transfer(mhi_dev); if (err) goto out_err; diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c index 11ff335d6228..b7a5ae20edd5 100644 --- a/drivers/net/phy/mediatek-ge.c +++ b/drivers/net/phy/mediatek-ge.c @@ -81,6 +81,8 @@ static struct phy_driver mtk_gephy_driver[] = { */ .config_intr = genphy_no_config_intr, .handle_interrupt = genphy_handle_interrupt_no_ack, + .suspend = genphy_suspend, + .resume = genphy_resume, .read_page = mtk_gephy_read_page, .write_page = mtk_gephy_write_page, }, @@ -93,6 +95,8 @@ static struct phy_driver mtk_gephy_driver[] = { */ .config_intr = genphy_no_config_intr, .handle_interrupt = genphy_handle_interrupt_no_ack, + .suspend = genphy_suspend, + .resume = genphy_resume, .read_page = mtk_gephy_read_page, .write_page = mtk_gephy_write_page, }, diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h index e1994a246122..2a1e31defe71 100644 --- a/drivers/net/usb/asix.h +++ 
b/drivers/net/usb/asix.h @@ -184,6 +184,7 @@ struct asix_common_private { struct phy_device *phydev; u16 phy_addr; char phy_name[20]; + bool embd_phy; }; extern const struct driver_info ax88172a_info; diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index cb01897c7a5d..30821f6a6d7a 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -354,24 +354,23 @@ out: static int ax88772_hw_reset(struct usbnet *dev, int in_pm) { struct asix_data *data = (struct asix_data *)&dev->data; - int ret, embd_phy; + struct asix_common_private *priv = dev->driver_priv; u16 rx_ctl; + int ret; ret = asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_2 | AX_GPIO_GPO2EN, 5, in_pm); if (ret < 0) goto out; - embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0); - - ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, + ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy, 0, 0, NULL, in_pm); if (ret < 0) { netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); goto out; } - if (embd_phy) { + if (priv->embd_phy) { ret = asix_sw_reset(dev, AX_SWRESET_IPPD, in_pm); if (ret < 0) goto out; @@ -449,17 +448,16 @@ out: static int ax88772a_hw_reset(struct usbnet *dev, int in_pm) { struct asix_data *data = (struct asix_data *)&dev->data; - int ret, embd_phy; + struct asix_common_private *priv = dev->driver_priv; u16 rx_ctl, phy14h, phy15h, phy16h; u8 chipcode = 0; + int ret; ret = asix_write_gpio(dev, AX_GPIO_RSE, 5, in_pm); if (ret < 0) goto out; - embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0); - - ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy | + ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy | AX_PHYSEL_SSEN, 0, 0, NULL, in_pm); if (ret < 0) { netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); @@ -683,12 +681,6 @@ static int ax88772_init_phy(struct usbnet *dev) struct asix_common_private *priv = dev->driver_priv; int ret; - ret = asix_read_phy_addr(dev, true); - if (ret < 0) - return ret; - - priv->phy_addr = ret; - snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT, priv->mdio->id, priv->phy_addr); @@ -715,6 +707,12 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) struct asix_common_private *priv; int ret, i; + priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev->driver_priv = priv; + usbnet_get_endpoints(dev, intf); /* Maybe the boot loader passed the MAC address via device tree */ @@ -750,6 +748,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */ dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */ + ret = asix_read_phy_addr(dev, true); + if (ret < 0) + return ret; + + priv->phy_addr = ret; + priv->embd_phy = ((priv->phy_addr & 0x1f) == 0x10); + asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); chipcode &= AX_CHIPCODE_MASK; @@ -768,12 +773,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) dev->rx_urb_size = 2048; } - priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - dev->driver_priv = priv; - priv->presvd_phy_bmcr = 0; priv->presvd_phy_advertise = 0; if (chipcode == AX_AX88772_CHIPCODE) { @@ -812,6 +811,12 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) asix_rx_fixup_common_free(dev->driver_priv); } +static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf) +{ + 
asix_rx_fixup_common_free(dev->driver_priv); + kfree(dev->driver_priv); +} + static const struct ethtool_ops ax88178_ethtool_ops = { .get_drvinfo = asix_get_drvinfo, .get_link = asix_get_link, @@ -1221,7 +1226,7 @@ static const struct driver_info ax88772b_info = { static const struct driver_info ax88178_info = { .description = "ASIX AX88178 USB 2.0 Ethernet", .bind = ax88178_bind, - .unbind = ax88772_unbind, + .unbind = ax88178_unbind, .status = asix_status, .link_reset = ax88178_link_reset, .reset = ax88178_reset, diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 36dafcb3d04a..6a92a3fef75e 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -446,7 +446,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb) write_mii_word(pegasus, 0, 0x1b, &auxmode); } - return 0; + return ret; fail: netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); return ret; @@ -835,7 +835,7 @@ static int pegasus_open(struct net_device *net) if (!pegasus->rx_skb) goto exit; - res = set_registers(pegasus, EthID, 6, net->dev_addr); + set_registers(pegasus, EthID, 6, net->dev_addr); usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, usb_rcvbulkpipe(pegasus->usb, 1), diff --git a/drivers/net/wwan/mhi_wwan_ctrl.c b/drivers/net/wwan/mhi_wwan_ctrl.c index d0a98f34c54d..e4d0f696687f 100644 --- a/drivers/net/wwan/mhi_wwan_ctrl.c +++ b/drivers/net/wwan/mhi_wwan_ctrl.c @@ -110,7 +110,7 @@ static int mhi_wwan_ctrl_start(struct wwan_port *port) int ret; /* Start mhi device's channel(s) */ - ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev, 0); + ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev); if (ret) return ret; diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c index 377529bbf124..71bf9b4f769f 100644 --- a/drivers/net/wwan/mhi_wwan_mbim.c +++ b/drivers/net/wwan/mhi_wwan_mbim.c @@ -609,7 +609,7 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work); /* Start MHI channels */ - err = mhi_prepare_for_transfer(mhi_dev, 0); + err = mhi_prepare_for_transfer(mhi_dev); if (err) return err; diff --git a/drivers/opp/core.c b/drivers/opp/core.c index b335c077f215..5543c54dacc5 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1856,9 +1856,6 @@ void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) if (unlikely(!opp_table)) return; - /* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - kfree(opp_table->supported_hw); opp_table->supported_hw = NULL; opp_table->supported_hw_count = 0; @@ -1944,9 +1941,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table) if (unlikely(!opp_table)) return; - /* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - kfree(opp_table->prop_name); opp_table->prop_name = NULL; @@ -2056,9 +2050,6 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table) if (!opp_table->regulators) goto put_opp_table; - /* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - if (opp_table->enabled) { for (i = opp_table->regulator_count - 1; i >= 0; i--) regulator_disable(opp_table->regulators[i]); @@ -2178,9 +2169,6 @@ void dev_pm_opp_put_clkname(struct opp_table *opp_table) if (unlikely(!opp_table)) return; - /* Make sure there are no concurrent readers while updating opp_table */ - 
WARN_ON(!list_empty(&opp_table->opp_list)); - clk_put(opp_table->clk); opp_table->clk = ERR_PTR(-EINVAL); @@ -2279,9 +2267,6 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) if (unlikely(!opp_table)) return; - /* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - opp_table->set_opp = NULL; mutex_lock(&opp_table->lock); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index d298e38aaf7e..67f2e0710e79 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -964,8 +964,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) } } - /* There should be one of more OPP defined */ - if (WARN_ON(!count)) { + /* There should be one or more OPPs defined */ + if (!count) { + dev_err(dev, "%s: no supported OPPs", __func__); ret = -ENOENT; goto remove_static_opp; } diff --git a/drivers/pci/controller/pci-ixp4xx.c b/drivers/pci/controller/pci-ixp4xx.c index 896a45b24236..654ac4a82beb 100644 --- a/drivers/pci/controller/pci-ixp4xx.c +++ b/drivers/pci/controller/pci-ixp4xx.c @@ -145,7 +145,7 @@ static int ixp4xx_pci_check_master_abort(struct ixp4xx_pci *p) return 0; } -static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data) +static int ixp4xx_pci_read_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data) { ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr); @@ -170,7 +170,7 @@ static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data) return ixp4xx_pci_check_master_abort(p); } -static int ixp4xx_pci_write(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data) +static int ixp4xx_pci_write_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data) { ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr); @@ -308,7 +308,7 @@ static int ixp4xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, dev_dbg(p->dev, "read_config from %d size %d dev %d:%d:%d address: %08x cmd: %08x\n", where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd); - ret = ixp4xx_pci_read(p, addr, cmd, &val); + ret = ixp4xx_pci_read_indirect(p, addr, cmd, &val); if (ret) return PCIBIOS_DEVICE_NOT_FOUND; @@ -356,7 +356,7 @@ static int ixp4xx_pci_write_config(struct pci_bus *bus, unsigned int devfn, dev_dbg(p->dev, "write_config_byte %#x to %d size %d dev %d:%d:%d addr: %08x cmd %08x\n", value, where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd); - ret = ixp4xx_pci_write(p, addr, cmd, val); + ret = ixp4xx_pci_write_indirect(p, addr, cmd, val); if (ret) return PCIBIOS_DEVICE_NOT_FOUND; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 5d63df7c1820..7bbf2673c7f2 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -978,7 +978,7 @@ void pci_create_legacy_files(struct pci_bus *b) b->legacy_mem->size = 1024*1024; b->legacy_mem->attr.mode = 0600; b->legacy_mem->mmap = pci_mmap_legacy_mem; - b->legacy_io->mapping = iomem_get_mapping(); + b->legacy_mem->mapping = iomem_get_mapping(); pci_adjust_legacy_attr(b, pci_mmap_mem); error = device_create_bin_file(&b->dev, b->legacy_mem); if (error) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6d74386eadc2..ab3de1551b50 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1900,6 +1900,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot); 
#ifdef CONFIG_X86_IO_APIC static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c index f2b5d347d227..e5ae26227bdb 100644 --- a/drivers/slimbus/messaging.c +++ b/drivers/slimbus/messaging.c @@ -66,7 +66,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn) int ret = 0; spin_lock_irqsave(&ctrl->txn_lock, flags); - ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0, + ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1, SLIM_MAX_TIDS, GFP_ATOMIC); if (ret < 0) { spin_unlock_irqrestore(&ctrl->txn_lock, flags); @@ -131,7 +131,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn) goto slim_xfer_err; } } - + /* Initialize tid to invalid value */ + txn->tid = 0; need_tid = slim_tid_txn(txn->mt, txn->mc); if (need_tid) { @@ -163,7 +164,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn) txn->mt, txn->mc, txn->la, ret); slim_xfer_err: - if (!clk_pause_msg && (!need_tid || ret == -ETIMEDOUT)) { + if (!clk_pause_msg && (txn->tid == 0 || ret == -ETIMEDOUT)) { /* * remove runtime-pm vote if this was TX only, or * if there was error during this transaction diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index c054e83ab636..7040293c2ee8 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -618,7 +618,7 @@ static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf) (mc == SLIM_USR_MC_GENERIC_ACK && mt == SLIM_MSG_MT_SRC_REFERRED_USER)) { slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4); - pm_runtime_mark_last_busy(ctrl->dev); + pm_runtime_mark_last_busy(ctrl->ctrl.dev); } } @@ -1080,7 +1080,8 @@ static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl) { u32 cfg = readl_relaxed(ctrl->ngd->base); - if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) + if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN || + ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP) qcom_slim_ngd_init_dma(ctrl); /* By default enable message queues */ @@ -1131,6 +1132,7 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl) dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n"); return 0; } + qcom_slim_ngd_setup(ctrl); return 0; } @@ -1257,13 +1259,14 @@ static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable) } /* controller state should be in sync with framework state */ complete(&ctrl->qmi.qmi_comp); - if (!pm_runtime_enabled(ctrl->dev) || - !pm_runtime_suspended(ctrl->dev)) - qcom_slim_ngd_runtime_resume(ctrl->dev); + if (!pm_runtime_enabled(ctrl->ctrl.dev) || + !pm_runtime_suspended(ctrl->ctrl.dev)) + qcom_slim_ngd_runtime_resume(ctrl->ctrl.dev); else - pm_runtime_resume(ctrl->dev); - pm_runtime_mark_last_busy(ctrl->dev); - pm_runtime_put(ctrl->dev); + pm_runtime_resume(ctrl->ctrl.dev); + + pm_runtime_mark_last_busy(ctrl->ctrl.dev); + pm_runtime_put(ctrl->ctrl.dev); ret = slim_register_controller(&ctrl->ctrl); if (ret) { @@ -1389,7 +1392,7 @@ static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl, /* Make sure the last dma xfer is finished */ mutex_lock(&ctrl->tx_lock); if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) { - pm_runtime_get_noresume(ctrl->dev); + pm_runtime_get_noresume(ctrl->ctrl.dev); ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN; qcom_slim_ngd_down(ctrl); qcom_slim_ngd_exit_dma(ctrl); @@ -1617,6 +1620,7 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev) struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); int ret = 0; 
+ qcom_slim_ngd_exit_dma(ctrl); if (!ctrl->qmi.handle) return 0; diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c index 3f711c1a0996..bbae3d39c7be 100644 --- a/drivers/soc/fsl/qe/qe_ic.c +++ b/drivers/soc/fsl/qe/qe_ic.c @@ -23,6 +23,7 @@ #include <linux/signal.h> #include <linux/device.h> #include <linux/spinlock.h> +#include <linux/platform_device.h> #include <asm/irq.h> #include <asm/io.h> #include <soc/fsl/qe/qe.h> @@ -53,8 +54,8 @@ struct qe_ic { struct irq_chip hc_irq; /* VIRQ numbers of QE high/low irqs */ - unsigned int virq_high; - unsigned int virq_low; + int virq_high; + int virq_low; }; /* @@ -404,42 +405,40 @@ static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) chip->irq_eoi(&desc->irq_data); } -static void __init qe_ic_init(struct device_node *node) +static int qe_ic_init(struct platform_device *pdev) { + struct device *dev = &pdev->dev; void (*low_handler)(struct irq_desc *desc); void (*high_handler)(struct irq_desc *desc); struct qe_ic *qe_ic; - struct resource res; - u32 ret; + struct resource *res; + struct device_node *node = pdev->dev.of_node; - ret = of_address_to_resource(node, 0, &res); - if (ret) - return; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(dev, "no memory resource defined\n"); + return -ENODEV; + } - qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL); + qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL); if (qe_ic == NULL) - return; + return -ENOMEM; - qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS, - &qe_ic_host_ops, qe_ic); - if (qe_ic->irqhost == NULL) { - kfree(qe_ic); - return; + qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res)); + if (qe_ic->regs == NULL) { + dev_err(dev, "failed to ioremap() registers\n"); + return -ENODEV; } - qe_ic->regs = ioremap(res.start, resource_size(&res)); - qe_ic->hc_irq = qe_ic_irq_chip; - qe_ic->virq_high = irq_of_parse_and_map(node, 0); - qe_ic->virq_low = irq_of_parse_and_map(node, 1); + qe_ic->virq_high = platform_get_irq(pdev, 0); + qe_ic->virq_low = platform_get_irq(pdev, 1); - if (!qe_ic->virq_low) { - printk(KERN_ERR "Failed to map QE_IC low IRQ\n"); - kfree(qe_ic); - return; - } - if (qe_ic->virq_high != qe_ic->virq_low) { + if (qe_ic->virq_low <= 0) + return -ENODEV; + + if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) { low_handler = qe_ic_cascade_low; high_handler = qe_ic_cascade_high; } else { @@ -447,29 +446,42 @@ static void __init qe_ic_init(struct device_node *node) high_handler = NULL; } + qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS, + &qe_ic_host_ops, qe_ic); + if (qe_ic->irqhost == NULL) { + dev_err(dev, "failed to add irq domain\n"); + return -ENODEV; + } + qe_ic_write(qe_ic->regs, QEIC_CICR, 0); irq_set_handler_data(qe_ic->virq_low, qe_ic); irq_set_chained_handler(qe_ic->virq_low, low_handler); - if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) { + if (high_handler) { irq_set_handler_data(qe_ic->virq_high, qe_ic); irq_set_chained_handler(qe_ic->virq_high, high_handler); } + return 0; } +static const struct of_device_id qe_ic_ids[] = { + { .compatible = "fsl,qe-ic"}, + { .type = "qeic"}, + {}, +}; -static int __init qe_ic_of_init(void) +static struct platform_driver qe_ic_driver = { - struct device_node *np; + .driver = { + .name = "qe-ic", + .of_match_table = qe_ic_ids, + }, + .probe = qe_ic_init, +}; - np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); - if (!np) { - np = of_find_node_by_type(NULL, "qeic"); - if (!np) - return -ENODEV; - } - qe_ic_init(np); - 
of_node_put(np); +static int __init qe_ic_of_init(void) +{ + platform_driver_register(&qe_ic_driver); return 0; } subsys_initcall(qe_ic_of_init); diff --git a/drivers/staging/media/av7110/av7110.h b/drivers/staging/media/av7110/av7110.h index b8e8fc8ddbe9..809d938ae166 100644 --- a/drivers/staging/media/av7110/av7110.h +++ b/drivers/staging/media/av7110/av7110.h @@ -9,12 +9,11 @@ #include <linux/input.h> #include <linux/time.h> -#include "video.h" -#include "audio.h" -#include "osd.h" - +#include <linux/dvb/video.h> +#include <linux/dvb/audio.h> #include <linux/dvb/dmx.h> #include <linux/dvb/ca.h> +#include <linux/dvb/osd.h> #include <linux/dvb/net.h> #include <linux/mutex.h> diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index b9bb63d749ec..f4079b5cb26d 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -1737,6 +1737,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev, return rlen; } +static void tcpm_pd_handle_msg(struct tcpm_port *port, + enum pd_msg_request message, + enum tcpm_ams ams); + static void tcpm_handle_vdm_request(struct tcpm_port *port, const __le32 *payload, int cnt) { @@ -1764,11 +1768,11 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port, port->vdm_state = VDM_STATE_DONE; } - if (PD_VDO_SVDM(p[0])) { + if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) { rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action); } else { if (port->negotiated_rev >= PD_REV30) - tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP); + tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); } /* @@ -2471,10 +2475,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port, NONE_AMS); break; case PD_DATA_VENDOR_DEF: - if (tcpm_vdm_ams(port) || port->nr_snk_vdo) - tcpm_handle_vdm_request(port, msg->payload, cnt); - else if (port->negotiated_rev > PD_REV20) - tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); + tcpm_handle_vdm_request(port, msg->payload, cnt); break; case PD_DATA_BIST: port->bist_request = le32_to_cpu(msg->payload[0]); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 06f9f167222b..bd5689fa290e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -629,7 +629,7 @@ again: * inode has not been flagged as nocompress. This flag can * change at any time if we discover bad compression ratios. 
*/ - if (nr_pages > 1 && inode_need_compress(BTRFS_I(inode), start, end)) { + if (inode_need_compress(BTRFS_I(inode), start, end)) { WARN_ON(pages); pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); if (!pages) { diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 2a2900903f8c..39db97f149b9 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1743,7 +1743,11 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, struct ceph_cap_flush *ceph_alloc_cap_flush(void) { - return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL); + struct ceph_cap_flush *cf; + + cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL); + cf->is_capsnap = false; + return cf; } void ceph_free_cap_flush(struct ceph_cap_flush *cf) @@ -1778,7 +1782,7 @@ static bool __detach_cap_flush_from_mdsc(struct ceph_mds_client *mdsc, prev->wake = true; wake = false; } - list_del(&cf->g_list); + list_del_init(&cf->g_list); return wake; } @@ -1793,7 +1797,7 @@ static bool __detach_cap_flush_from_ci(struct ceph_inode_info *ci, prev->wake = true; wake = false; } - list_del(&cf->i_list); + list_del_init(&cf->i_list); return wake; } @@ -2352,7 +2356,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc, ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH; list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) { - if (!cf->caps) { + if (cf->is_capsnap) { last_snap_flush = cf->tid; break; } @@ -2371,7 +2375,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc, first_tid = cf->tid + 1; - if (cf->caps) { + if (!cf->is_capsnap) { struct cap_msg_args arg; dout("kick_flushing_caps %p cap %p tid %llu %s\n", @@ -3516,7 +3520,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, cleaned = cf->caps; /* Is this a capsnap? */ - if (cf->caps == 0) + if (cf->is_capsnap) continue; if (cf->tid <= flush_tid) { @@ -3589,8 +3593,9 @@ out: while (!list_empty(&to_remove)) { cf = list_first_entry(&to_remove, struct ceph_cap_flush, i_list); - list_del(&cf->i_list); - ceph_free_cap_flush(cf); + list_del_init(&cf->i_list); + if (!cf->is_capsnap) + ceph_free_cap_flush(cf); } if (wake_ci) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index afdc20213876..0b69aec23e5c 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1616,7 +1616,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, spin_lock(&mdsc->cap_dirty_lock); list_for_each_entry(cf, &to_remove, i_list) - list_del(&cf->g_list); + list_del_init(&cf->g_list); if (!list_empty(&ci->i_dirty_item)) { pr_warn_ratelimited( @@ -1668,8 +1668,9 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, struct ceph_cap_flush *cf; cf = list_first_entry(&to_remove, struct ceph_cap_flush, i_list); - list_del(&cf->i_list); - ceph_free_cap_flush(cf); + list_del_init(&cf->i_list); + if (!cf->is_capsnap) + ceph_free_cap_flush(cf); } wake_up_all(&ci->i_cap_wq); diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c index abd9af7727ad..3c444b9cb17b 100644 --- a/fs/ceph/mdsmap.c +++ b/fs/ceph/mdsmap.c @@ -394,9 +394,11 @@ void ceph_mdsmap_destroy(struct ceph_mdsmap *m) { int i; - for (i = 0; i < m->possible_max_rank; i++) - kfree(m->m_info[i].export_targets); - kfree(m->m_info); + if (m->m_info) { + for (i = 0; i < m->possible_max_rank; i++) + kfree(m->m_info[i].export_targets); + kfree(m->m_info); + } kfree(m->m_data_pg_pools); kfree(m); } diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 4c6bd1042c94..15105f9da3fd 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -487,6 +487,9 @@ static void 
ceph_queue_cap_snap(struct ceph_inode_info *ci) pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode); return; } + capsnap->cap_flush.is_capsnap = true; + INIT_LIST_HEAD(&capsnap->cap_flush.i_list); + INIT_LIST_HEAD(&capsnap->cap_flush.g_list); spin_lock(&ci->i_ceph_lock); used = __ceph_caps_used(ci); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 9215a2f4535c..b1a363641beb 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -182,8 +182,9 @@ struct ceph_cap { struct ceph_cap_flush { u64 tid; - int caps; /* 0 means capsnap */ + int caps; bool wake; /* wake up flush waiters when finish ? */ + bool is_capsnap; /* true means capsnap */ struct list_head g_list; // global struct list_head i_list; // per inode }; diff --git a/fs/io_uring.c b/fs/io_uring.c index 04c6d059ea94..a2e20a6fbfed 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2477,8 +2477,10 @@ static void io_fallback_req_func(struct work_struct *work) struct llist_node *node = llist_del_all(&ctx->fallback_llist); struct io_kiocb *req, *tmp; + percpu_ref_get(&ctx->refs); llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node) req->io_task_work.func(req); + percpu_ref_put(&ctx->refs); } static void __io_complete_rw(struct io_kiocb *req, long res, long res2, @@ -9370,9 +9372,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, if (ctx->flags & IORING_SETUP_SQPOLL) { io_cqring_overflow_flush(ctx, false); - ret = -EOWNERDEAD; - if (unlikely(ctx->sq_data->thread == NULL)) + if (unlikely(ctx->sq_data->thread == NULL)) { + ret = -EOWNERDEAD; goto out; + } if (flags & IORING_ENTER_SQ_WAKEUP) wake_up(&ctx->sq_data->wait); if (flags & IORING_ENTER_SQ_WAIT) { @@ -9840,10 +9843,11 @@ static int io_register_personality(struct io_ring_ctx *ctx) ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds, XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL); - if (!ret) - return id; - put_cred(creds); - return ret; + if (ret < 0) { + put_cred(creds); + return ret; + } + return id; } static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg, diff --git a/fs/namespace.c b/fs/namespace.c index f79d9471cb76..97adcb5ab5d5 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1716,8 +1716,12 @@ static inline bool may_mount(void) } #ifdef CONFIG_MANDATORY_FILE_LOCKING -static inline bool may_mandlock(void) +static bool may_mandlock(void) { + pr_warn_once("======================================================\n" + "WARNING: the mand mount option is being deprecated and\n" + " will be removed in v5.15!\n" + "======================================================\n"); return capable(CAP_SYS_ADMIN); } #else diff --git a/fs/pipe.c b/fs/pipe.c index 678dee2a8228..6d4342bad9f1 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -363,10 +363,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) * _very_ unlikely case that the pipe was full, but we got * no data. 
*/ - if (unlikely(was_full)) { + if (unlikely(was_full)) wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM); - kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); - } + kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); /* * But because we didn't read anything, at this point we can @@ -385,12 +384,11 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) wake_next_reader = false; __pipe_unlock(pipe); - if (was_full) { + if (was_full) wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM); - kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); - } if (wake_next_reader) wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); + kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); if (ret > 0) file_accessed(filp); return ret; @@ -565,10 +563,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from) * become empty while we dropped the lock. */ __pipe_unlock(pipe); - if (was_empty) { + if (was_empty) wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); - kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); - } + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe)); __pipe_lock(pipe); was_empty = pipe_empty(pipe->head, pipe->tail); @@ -591,10 +588,9 @@ out: * Epoll nonsensically wants a wakeup whether the pipe * was already empty or not. */ - if (was_empty || pipe->poll_usage) { + if (was_empty || pipe->poll_usage) wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); - kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); - } + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); if (wake_next_writer) wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM); if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) { diff --git a/include/linux/kfence.h b/include/linux/kfence.h index a70d1ea03532..3fe6dd8a18c1 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -51,10 +51,11 @@ extern atomic_t kfence_allocation_gate; static __always_inline bool is_kfence_address(const void *addr) { /* - * The non-NULL check is required in case the __kfence_pool pointer was - * never initialized; keep it in the slow-path after the range-check. + * The __kfence_pool != NULL check is required to deal with the case + * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in + * the slow-path after the range-check! */ - return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr); + return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool); } /** diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index f0ee30881ca9..20151c4f1e0e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -612,12 +612,15 @@ static inline bool mem_cgroup_disabled(void) return !cgroup_subsys_enabled(memory_cgrp_subsys); } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { + *min = *low = 0; + if (mem_cgroup_disabled()) - return 0; + return; /* * There is no reclaim protection applied to a targeted reclaim. 
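The is_kfence_address() hunk above is worth unpacking: (unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE performs both bounds checks with a single unsigned comparison, but while __kfence_pool is still NULL the subtraction degenerates to the raw address value, so any pointer below KFENCE_POOL_SIZE would pass unless the pool pointer itself is tested. Below is a small userspace sketch of that idiom; the names are hypothetical and it leans on the same flat-address-space assumption the kernel makes.

#include <stdio.h>

#define POOL_SIZE 4096UL

static char *pool;	/* NULL until set up, like __kfence_pool */

/* one unsigned compare covers "addr >= pool && addr < pool + POOL_SIZE" */
static int in_pool(const void *addr)
{
	return (unsigned long)((const char *)addr - pool) < POOL_SIZE && pool;
}

int main(void)
{
	static char backing[POOL_SIZE];
	int stack_var;

	/*
	 * With pool == NULL the subtraction is just the address itself, so a
	 * tiny pointer such as (void *)16 would wrongly match without the
	 * trailing "&& pool" check, which is exactly the case the kfence
	 * patch closes.
	 */
	printf("before init: %d\n", in_pool((void *)16));	/* 0 */

	pool = backing;
	printf("inside:  %d\n", in_pool(backing + 100));	/* 1 */
	printf("outside: %d\n", in_pool(&stack_var));		/* almost surely 0 */
	return 0;
}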
@@ -653,13 +656,10 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, * */ if (root == memcg) - return 0; - - if (in_low_reclaim) - return READ_ONCE(memcg->memory.emin); + return; - return max(READ_ONCE(memcg->memory.emin), - READ_ONCE(memcg->memory.elow)); + *min = READ_ONCE(memcg->memory.emin); + *low = READ_ONCE(memcg->memory.elow); } void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@ -1147,11 +1147,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, { } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { - return 0; + *min = *low = 0; } static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, diff --git a/include/linux/mhi.h b/include/linux/mhi.h index c493a80cb453..beb918328eef 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -721,13 +721,8 @@ void mhi_device_put(struct mhi_device *mhi_dev); * host and device execution environments match and * channels are in a DISABLED state. * @mhi_dev: Device associated with the channels - * @flags: MHI channel flags */ -int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, - unsigned int flags); - -/* Automatically allocate and queue inbound buffers */ -#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); /** * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer. diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 15b7fbe6b15c..c412dde4d67d 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -267,7 +267,7 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i) return false; } -/* Function to safely get fn->sernum for passed in rt +/* Function to safely get fn->fn_sernum for passed in rt * and store result in passed in cookie. * Return true if we can get cookie safely * Return false if not @@ -282,7 +282,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i, if (fn) { *cookie = fn->fn_sernum; - /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */ + /* pairs with smp_wmb() in __fib6_update_sernum_upto_root() */ smp_rmb(); status = true; } diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 390270e00a1d..f160484afc5c 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -48,7 +48,9 @@ {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \ {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \ {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\ - {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}\ + {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\ + {(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"}, \ + {(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\ #define show_gfp_flags(flags) \ (flags) ? 
__print_flags(flags, "|", \ diff --git a/drivers/staging/media/av7110/audio.h b/include/uapi/linux/dvb/audio.h index 2f869da69171..2f869da69171 100644 --- a/drivers/staging/media/av7110/audio.h +++ b/include/uapi/linux/dvb/audio.h diff --git a/drivers/staging/media/av7110/osd.h b/include/uapi/linux/dvb/osd.h index 858997c74043..858997c74043 100644 --- a/drivers/staging/media/av7110/osd.h +++ b/include/uapi/linux/dvb/osd.h diff --git a/drivers/staging/media/av7110/video.h b/include/uapi/linux/dvb/video.h index 179f1ec60af6..179f1ec60af6 100644 --- a/drivers/staging/media/av7110/video.h +++ b/include/uapi/linux/dvb/video.h diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e5f2b23bb7c9..9134aedfdb7d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5302,8 +5302,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, case BPF_MAP_TYPE_RINGBUF: if (func_id != BPF_FUNC_ringbuf_output && func_id != BPF_FUNC_ringbuf_reserve && - func_id != BPF_FUNC_ringbuf_submit && - func_id != BPF_FUNC_ringbuf_discard && func_id != BPF_FUNC_ringbuf_query) goto error; break; @@ -5412,6 +5410,12 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; + case BPF_FUNC_ringbuf_output: + case BPF_FUNC_ringbuf_reserve: + case BPF_FUNC_ringbuf_query: + if (map->map_type != BPF_MAP_TYPE_RINGBUF) + goto error; + break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; diff --git a/kernel/cred.c b/kernel/cred.c index e6fd2b3fc31f..f784e08c2fbd 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -286,13 +286,13 @@ struct cred *prepare_creds(void) new->security = NULL; #endif - if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) - goto error; - new->ucounts = get_ucounts(new->ucounts); if (!new->ucounts) goto error; + if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) + goto error; + validate_creds(new); return new; @@ -753,13 +753,13 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) #ifdef CONFIG_SECURITY new->security = NULL; #endif - if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) - goto error; - new->ucounts = get_ucounts(new->ucounts); if (!new->ucounts) goto error; + if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) + goto error; + put_cred(old); validate_creds(new); return new; diff --git a/kernel/fork.c b/kernel/fork.c index e8b41e212110..c97e85245dfc 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -828,10 +828,10 @@ void __init fork_init(void) for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++) init_user_ns.ucount_max[i] = max_threads/2; - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, task_rlimit(&init_task, RLIMIT_NPROC)); - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, task_rlimit(&init_task, RLIMIT_MSGQUEUE)); - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, task_rlimit(&init_task, RLIMIT_SIGPENDING)); - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, task_rlimit(&init_task, RLIMIT_MEMLOCK)); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, RLIM_INFINITY); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY); #ifdef CONFIG_VMAP_STACK cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache", diff --git a/mm/hugetlb.c b/mm/hugetlb.c 
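The bpf verifier hunk above is easier to follow with the ringbuf calling convention in mind: bpf_ringbuf_output(), bpf_ringbuf_reserve() and bpf_ringbuf_query() take the map, while bpf_ringbuf_submit() and bpf_ringbuf_discard() act on the record pointer returned by reserve. Only the first group therefore belongs in the map/helper compatibility check, and the patch adds the reverse check that those three helpers are only used with a BPF_MAP_TYPE_RINGBUF map. A minimal BPF-C sketch of the reserve/submit flow follows; the tracepoint section and event layout are made up for illustration.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);
} rb SEC(".maps");

struct event {
	__u32 pid;
};

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_execve(void *ctx)
{
	/* reserve takes the ringbuf map... */
	struct event *e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);

	if (!e)
		return 0;

	e->pid = bpf_get_current_pid_tgid() >> 32;

	/*
	 * ...but submit/discard take the reserved record, not the map,
	 * which is why the verifier no longer ties them to a map type.
	 */
	bpf_ringbuf_submit(e, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";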
index dfc940d5221d..8ea35ba6699f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2476,7 +2476,7 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, if (!rc) { /* * This indicates there is an entry in the reserve map - * added by alloc_huge_page. We know it was added + * not added by alloc_huge_page. We know it was added * before the alloc_huge_page call, otherwise * HPageRestoreReserve would be set on the page. * Remove the entry so that a subsequent allocation @@ -4660,7 +4660,9 @@ retry_avoidcopy: spin_unlock(ptl); mmu_notifier_invalidate_range_end(&range); out_release_all: - restore_reserve_on_error(h, vma, haddr, new_page); + /* No restore in case of successful pagetable update (Break COW) */ + if (new_page != old_page) + restore_reserve_on_error(h, vma, haddr, new_page); put_page(new_page); out_release_old: put_page(old_page); @@ -4776,7 +4778,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, pte_t new_pte; spinlock_t *ptl; unsigned long haddr = address & huge_page_mask(h); - bool new_page = false; + bool new_page, new_pagecache_page = false; /* * Currently, we are forced to kill the process in the event the @@ -4799,6 +4801,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, goto out; retry: + new_page = false; page = find_lock_page(mapping, idx); if (!page) { /* Check for page in userfault range */ @@ -4842,6 +4845,7 @@ retry: goto retry; goto out; } + new_pagecache_page = true; } else { lock_page(page); if (unlikely(anon_vma_prepare(vma))) { @@ -4926,7 +4930,9 @@ backout: spin_unlock(ptl); backout_unlocked: unlock_page(page); - restore_reserve_on_error(h, vma, haddr, page); + /* restore reserve for newly allocated pages not in page cache */ + if (new_page && !new_pagecache_page) + restore_reserve_on_error(h, vma, haddr, page); put_page(page); goto out; } @@ -5135,6 +5141,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, int ret = -ENOMEM; struct page *page; int writable; + bool new_pagecache_page = false; if (is_continue) { ret = -EFAULT; @@ -5228,6 +5235,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, ret = huge_add_to_page_cache(page, mapping, idx); if (ret) goto out_release_nounlock; + new_pagecache_page = true; } ptl = huge_pte_lockptr(h, dst_mm, dst_pte); @@ -5291,7 +5299,8 @@ out_release_unlock: if (vm_shared || is_continue) unlock_page(page); out_release_nounlock: - restore_reserve_on_error(h, dst_vma, dst_addr, page); + if (!new_pagecache_page) + restore_reserve_on_error(h, dst_vma, dst_addr, page); put_page(page); goto out; } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index eefd823deb67..470400cc7513 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1146,7 +1146,7 @@ static int __get_hwpoison_page(struct page *page) * unexpected races caused by taking a page refcount. */ if (!HWPoisonHandlable(head)) - return 0; + return -EBUSY; if (PageTransHuge(head)) { /* @@ -1199,9 +1199,15 @@ try_again: } goto out; } else if (ret == -EBUSY) { - /* We raced with freeing huge page to buddy, retry. */ - if (pass++ < 3) + /* + * We raced with (possibly temporary) unhandlable + * page, retry. 
+ */ + if (pass++ < 3) { + shake_page(p, 1); goto try_again; + } + ret = -EIO; goto out; } } diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 8cb75b26ea4f..86c3af79e874 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1731,6 +1731,7 @@ failed_removal_isolated: undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); memory_notify(MEM_CANCEL_OFFLINE, &arg); failed_removal_pcplists_disabled: + lru_cache_enable(); zone_pcp_enable(zone); failed_removal: pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n", diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 856b175c15a4..eeb3a9cb36bb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3453,19 +3453,10 @@ void free_unref_page_list(struct list_head *list) * comment in free_unref_page. */ migratetype = get_pcppage_migratetype(page); - if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { - if (unlikely(is_migrate_isolate(migratetype))) { - list_del(&page->lru); - free_one_page(page_zone(page), page, pfn, 0, - migratetype, FPI_NONE); - continue; - } - - /* - * Non-isolated types over MIGRATE_PCPTYPES get added - * to the MIGRATE_MOVABLE pcp list. - */ - set_pcppage_migratetype(page, MIGRATE_MOVABLE); + if (unlikely(is_migrate_isolate(migratetype))) { + list_del(&page->lru); + free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); + continue; } set_page_private(page, pfn); @@ -3475,7 +3466,15 @@ void free_unref_page_list(struct list_head *list) list_for_each_entry_safe(page, next, list, lru) { pfn = page_private(page); set_page_private(page, 0); + + /* + * Non-isolated types over MIGRATE_PCPTYPES get added + * to the MIGRATE_MOVABLE pcp list. + */ migratetype = get_pcppage_migratetype(page); + if (unlikely(migratetype >= MIGRATE_PCPTYPES)) + migratetype = MIGRATE_MOVABLE; + trace_mm_page_free_batched(page); free_unref_page_commit(page, pfn, migratetype, 0); diff --git a/mm/shmem.c b/mm/shmem.c index 70d9ce294bb4..dacda7463d54 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1696,8 +1696,7 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index, struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL; - struct swap_info_struct *si; - struct page *page = NULL; + struct page *page; swp_entry_t swap; int error; @@ -1705,12 +1704,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index, swap = radix_to_swp_entry(*pagep); *pagep = NULL; - /* Prevent swapoff from happening to us. */ - si = get_swap_device(swap); - if (!si) { - error = EINVAL; - goto failed; - } /* Look it up and read it in.. 
*/ page = lookup_swap_cache(swap, NULL, 0); if (!page) { @@ -1772,8 +1765,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index, swap_free(swap); *pagep = page; - if (si) - put_swap_device(si); return 0; failed: if (!shmem_confirm_swap(mapping, index, swap)) @@ -1784,9 +1775,6 @@ unlock: put_page(page); } - if (si) - put_swap_device(si); - return error; } diff --git a/mm/swap_state.c b/mm/swap_state.c index c56aa9ac050d..bc7cee6b2ec5 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -628,13 +628,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, if (!mask) goto skip; - /* Test swap type to make sure the dereference is safe */ - if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) { - struct inode *inode = si->swap_file->f_mapping->host; - if (inode_read_congested(inode)) - goto skip; - } - do_poll = false; /* Read a page_cluster sized and aligned cluster around offset. */ start_offset = offset & ~mask; diff --git a/mm/vmscan.c b/mm/vmscan.c index 4620df62f0ff..eeae2f6bc532 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -100,9 +100,12 @@ struct scan_control { unsigned int may_swap:1; /* - * Cgroups are not reclaimed below their configured memory.low, - * unless we threaten to OOM. If any cgroups are skipped due to - * memory.low and nothing was reclaimed, go back for memory.low. + * Cgroup memory below memory.low is protected as long as we + * don't threaten to OOM. If any cgroup is reclaimed at + * reduced force or passed over entirely due to its memory.low + * setting (memcg_low_skipped), and nothing is reclaimed as a + * result, then go back for one more cycle that reclaims the protected + * memory (memcg_low_reclaim) to avert OOM. */ unsigned int memcg_low_reclaim:1; unsigned int memcg_low_skipped:1; @@ -2537,15 +2540,14 @@ out: for_each_evictable_lru(lru) { int file = is_file_lru(lru); unsigned long lruvec_size; + unsigned long low, min; unsigned long scan; - unsigned long protection; lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); - protection = mem_cgroup_protection(sc->target_mem_cgroup, - memcg, - sc->memcg_low_reclaim); + mem_cgroup_protection(sc->target_mem_cgroup, memcg, + &min, &low); - if (protection) { + if (min || low) { /* * Scale a cgroup's reclaim pressure by proportioning * its current usage to its memory.low or memory.min @@ -2576,6 +2578,15 @@ out: * hard protection. 
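The mem_cgroup_protection() rework above returns both memory.min and memory.low through out-parameters instead of collapsing them into one value, and the vmscan.c hunk that continues just below picks between them per reclaim pass: the first pass honours memory.low and records memcg_low_skipped, and only the follow-up pass (memcg_low_reclaim) drops back to protecting just memory.min before OOM is considered. Here is a small self-contained sketch of that selection, with simplified names that are not the kernel function itself.

#include <stdbool.h>
#include <stdio.h>

/*
 * Pick the effective protection for one cgroup.  On the normal pass,
 * respect memory.low and note that protected memory was skipped; the
 * caller may then run one more pass with low_reclaim = true that only
 * honours memory.min.
 */
static unsigned long effective_protection(unsigned long min, unsigned long low,
					  bool low_reclaim, bool *low_skipped)
{
	if (!low_reclaim && low > min) {
		*low_skipped = true;
		return low;
	}
	return min;
}

int main(void)
{
	bool skipped = false;
	unsigned long p;

	p = effective_protection(64, 512, false, &skipped);
	printf("first pass: protect %lu, skipped=%d\n", p, skipped);	/* 512, 1 */

	p = effective_protection(64, 512, true, &skipped);
	printf("retry pass: protect %lu\n", p);				/* 64 */
	return 0;
}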
*/ unsigned long cgroup_size = mem_cgroup_size(memcg); + unsigned long protection; + + /* memory.low scaling, make sure we retry before OOM */ + if (!sc->memcg_low_reclaim && low > min) { + protection = low; + sc->memcg_low_skipped = 1; + } else { + protection = min; + } /* Avoid TOCTOU with earlier protection check */ cgroup_size = max(cgroup_size, protection); @@ -4413,11 +4424,13 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in .may_swap = 1, .reclaim_idx = gfp_zone(gfp_mask), }; + unsigned long pflags; trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, sc.gfp_mask); cond_resched(); + psi_memstall_enter(&pflags); fs_reclaim_acquire(sc.gfp_mask); /* * We need to be able to allocate from the reserves for RECLAIM_UNMAP @@ -4442,6 +4455,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in current->flags &= ~PF_SWAPWRITE; memalloc_noreclaim_restore(noreclaim_flag); fs_reclaim_release(sc.gfp_mask); + psi_memstall_leave(&pflags); trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 2dcf1c084b20..972c8cb303a5 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2605,6 +2605,7 @@ static int do_setlink(const struct sk_buff *skb, return err; if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { + const char *pat = ifname && ifname[0] ? ifname : NULL; struct net *net; int new_ifindex; @@ -2620,7 +2621,7 @@ static int do_setlink(const struct sk_buff *skb, else new_ifindex = 0; - err = __dev_change_net_namespace(dev, net, ifname, new_ifindex); + err = __dev_change_net_namespace(dev, net, pat, new_ifindex); put_net(net); if (err) goto errout; diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 099259fc826a..7fbd0b532f52 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -465,14 +465,16 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) if (!doi_def) return; - switch (doi_def->type) { - case CIPSO_V4_MAP_TRANS: - kfree(doi_def->map.std->lvl.cipso); - kfree(doi_def->map.std->lvl.local); - kfree(doi_def->map.std->cat.cipso); - kfree(doi_def->map.std->cat.local); - kfree(doi_def->map.std); - break; + if (doi_def->map.std) { + switch (doi_def->type) { + case CIPSO_V4_MAP_TRANS: + kfree(doi_def->map.std->lvl.cipso); + kfree(doi_def->map.std->lvl.local); + kfree(doi_def->map.std->cat.cipso); + kfree(doi_def->map.std->cat.local); + kfree(doi_def->map.std); + break; + } } kfree(doi_def); } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 6ebf05859acb..177d26d8fb9c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -473,6 +473,8 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, static int gre_handle_offloads(struct sk_buff *skb, bool csum) { + if (csum && skb_checksum_start(skb) < skb->data) + return -EINVAL; return iptunnel_handle_offloads(skb, csum ? 
SKB_GSO_GRE_CSUM : SKB_GSO_GRE); } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index b181773d7ad3..1e3b18797070 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -601,14 +601,14 @@ static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash) return oldest; } -static inline u32 fnhe_hashfun(__be32 daddr) +static u32 fnhe_hashfun(__be32 daddr) { - static u32 fnhe_hashrnd __read_mostly; - u32 hval; + static siphash_key_t fnhe_hash_key __read_mostly; + u64 hval; - net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd)); - hval = jhash_1word((__force u32)daddr, fnhe_hashrnd); - return hash_32(hval, FNHE_HASH_SHIFT); + net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key)); + hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key); + return hash_64(hval, FNHE_HASH_SHIFT); } static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index a8f118e469b7..1bec5b22f80d 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1341,7 +1341,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt, struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node, lockdep_is_held(&rt->fib6_table->tb6_lock)); - /* paired with smp_rmb() in rt6_get_cookie_safe() */ + /* paired with smp_rmb() in fib6_get_cookie_safe() */ smp_wmb(); while (fn) { fn->fn_sernum = sernum; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 3ad201d372d8..7baf41d160f5 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -629,6 +629,8 @@ drop: static int gre_handle_offloads(struct sk_buff *skb, bool csum) { + if (csum && skb_checksum_start(skb) < skb->data) + return -EINVAL; return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 6cf4bb89ca69..f34137d5bf85 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -41,6 +41,7 @@ #include <linux/nsproxy.h> #include <linux/slab.h> #include <linux/jhash.h> +#include <linux/siphash.h> #include <net/net_namespace.h> #include <net/snmp.h> #include <net/ipv6.h> @@ -1484,17 +1485,24 @@ static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket) static u32 rt6_exception_hash(const struct in6_addr *dst, const struct in6_addr *src) { - static u32 seed __read_mostly; - u32 val; + static siphash_key_t rt6_exception_key __read_mostly; + struct { + struct in6_addr dst; + struct in6_addr src; + } __aligned(SIPHASH_ALIGNMENT) combined = { + .dst = *dst, + }; + u64 val; - net_get_random_once(&seed, sizeof(seed)); - val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed); + net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key)); #ifdef CONFIG_IPV6_SUBTREES if (src) - val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val); + combined.src = *src; #endif - return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT); + val = siphash(&combined, sizeof(combined), &rt6_exception_key); + + return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT); } /* Helper function to find the cached rt in the hash table diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c index 1dc955ca57d3..fa611678af05 100644 --- a/net/qrtr/mhi.c +++ b/net/qrtr/mhi.c @@ -15,7 +15,6 @@ struct qrtr_mhi_dev { struct qrtr_endpoint ep; struct mhi_device *mhi_dev; struct device *dev; - struct completion ready; }; /* From MHI to QRTR */ @@ -51,10 +50,6 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb) struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep); int rc; - rc 
= wait_for_completion_interruptible(&qdev->ready); - if (rc) - goto free_skb; - if (skb->sk) sock_hold(skb->sk); @@ -84,7 +79,7 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev, int rc; /* start channels */ - rc = mhi_prepare_for_transfer(mhi_dev, 0); + rc = mhi_prepare_for_transfer(mhi_dev); if (rc) return rc; @@ -101,15 +96,6 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev, if (rc) return rc; - /* start channels */ - rc = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS); - if (rc) { - qrtr_endpoint_unregister(&qdev->ep); - dev_set_drvdata(&mhi_dev->dev, NULL); - return rc; - } - - complete_all(&qdev->ready); dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n"); return 0; diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 6c61b7b1838f..b8508e35d20e 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -493,7 +493,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) goto err; } - if (len != ALIGN(size, 4) + hdrlen) + if (!size || len != ALIGN(size, 4) + hdrlen) goto err; if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA && diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 925924fab1ab..1f857ffd1ac2 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -660,6 +660,13 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, sch_tree_lock(sch); q->nbands = nbands; + for (i = nstrict; i < q->nstrict; i++) { + INIT_LIST_HEAD(&q->classes[i].alist); + if (q->classes[i].qdisc->q.qlen) { + list_add_tail(&q->classes[i].alist, &q->active); + q->classes[i].deficit = quanta[i]; + } + } q->nstrict = nstrict; memcpy(q->prio2band, priomap, sizeof(priomap)); diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index d66a8e44a1ae..dbb41821b1b8 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -835,7 +835,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt) rqstp->rq_stime = ktime_get(); rqstp->rq_reserved = serv->sv_max_mesg; atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); - } + } else + svc_xprt_received(xprt); out: trace_svc_handle_xprt(xprt, len); return len; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 96f32eaa24df..7ad689f991e7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6658,6 +6658,7 @@ enum { ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, ALC623_FIXUP_LENOVO_THINKSTATION_P340, ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, + ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST, }; static const struct hda_fixup alc269_fixups[] = { @@ -8242,6 +8243,12 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC }, + [ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269_fixup_limit_int_mic_boost, + .chained = true, + .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -8438,8 +8445,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), - SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + 
SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 5db2f4865bbb..905c7965f653 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -127,7 +127,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream, snd_pcm_uframes_t period_size; ssize_t periodbytes; ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream); - u32 buffer_addr = substream->runtime->dma_addr; + u32 buffer_addr = virt_to_phys(substream->runtime->dma_area); channels = substream->runtime->channels; period_size = substream->runtime->period_size;
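One last note on the routing changes earlier in this set: both fnhe_hashfun() and rt6_exception_hash() move from jhash seeded with a single random word to SipHash keyed with per-boot random material, folding the 64-bit result with hash_64(). Because these exception tables are filled from addresses chosen by remote peers, a keyed PRF keeps bucket placement unpredictable and makes deliberate collision flooding much harder. A kernel-style sketch of the same idiom for a hypothetical single-key table follows (kernel context only, not a standalone program).

#include <linux/cache.h>
#include <linux/hash.h>
#include <linux/net.h>		/* net_get_random_once() */
#include <linux/siphash.h>

#define DEMO_HASH_SHIFT 10	/* hypothetical table: 1024 buckets */

static u32 demo_hash_daddr(__be32 daddr)
{
	static siphash_key_t demo_key __read_mostly;
	u64 hval;

	/* key is generated once per boot, so bucket indices cannot be precomputed */
	net_get_random_once(&demo_key, sizeof(demo_key));
	hval = siphash_1u32((__force u32)daddr, &demo_key);
	return hash_64(hval, DEMO_HASH_SHIFT);
}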