author    | Jiri Kosina <jkosina@suse.cz> | 2021-02-23 11:33:13 +0100
committer | Jiri Kosina <jkosina@suse.cz> | 2021-02-23 11:33:13 +0100
commit    | d6310078d9f8c416e85f641a631aecf58f9c97ff (patch)
tree      | 58ed5d9818ada3e970d93438083731abd6293ba9 /include
parent    | f8dd50e097b221e35c34b844826db92158ec18c2 (diff)
parent    | df7b622906f24be954beca94e60c195fde65c6d5 (diff)
Merge branch 'for-5.12/google' into for-linus
- User experience improvements for hid-google from Nicolas Boichat
Diffstat (limited to 'include')
205 files changed, 4653 insertions, 1273 deletions
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index a6a9373ab863..232838d28f50 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -124,11 +124,10 @@ struct cppc_perf_fb_ctrs { /* Per CPU container for runtime CPPC management. */ struct cppc_cpudata { - int cpu; + struct list_head node; struct cppc_perf_caps perf_caps; struct cppc_perf_ctrls perf_ctrls; struct cppc_perf_fb_ctrs perf_fb_ctrs; - struct cpufreq_policy *cur_policy; unsigned int shared_type; cpumask_var_t shared_cpu_map; }; @@ -137,7 +136,8 @@ extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf); extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs); extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls); extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps); -extern int acpi_get_psd_map(struct cppc_cpudata **); +extern bool acpi_cpc_valid(void); +extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data); extern unsigned int cppc_get_transition_latency(int cpu); extern bool cpc_ffh_supported(void); extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val); diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 4365b9aa3e3f..267f6dfb8960 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -34,6 +34,7 @@ mandatory-y += kmap_size.h mandatory-y += kprobes.h mandatory-y += linkage.h mandatory-y += local.h +mandatory-y += local64.h mandatory-y += mm-arch-hooks.h mandatory-y += mmiowb.h mandatory-y += mmu.h diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index dd90c9792909..0e7316a86240 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -11,19 +11,19 @@ * See Documentation/atomic_bitops.txt for details. 
*/ -static inline void set_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); } -static inline void clear_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); } -static inline void change_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 9ea83d80eb6f..c6af40ce03be 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -1137,6 +1137,10 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, } #endif +#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED +extern int devmem_is_allowed(unsigned long pfn); +#endif + #endif /* __KERNEL__ */ #endif /* __ASM_GENERIC_IO_H */ diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 306aa3a60be9..3b273f9ca39a 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -100,10 +100,10 @@ struct drm_fb_helper_funcs { * @funcs: driver callbacks for fb helper * @fbdev: emulated fbdev device info struct * @pseudo_palette: fake palette of 16 colors - * @dirty_clip: clip rectangle used with deferred_io to accumulate damage to - * the screen buffer - * @dirty_lock: spinlock protecting @dirty_clip - * @dirty_work: worker used to flush the framebuffer + * @damage_clip: clip rectangle used with deferred_io to accumulate damage to + * the screen buffer + * @damage_lock: spinlock protecting @damage_clip + * @damage_work: worker used to flush the framebuffer * @resume_work: worker used during resume if the console lock is already taken * * This is the main structure used by the fbdev helpers. Drivers supporting @@ -131,9 +131,9 @@ struct drm_fb_helper { const struct drm_fb_helper_funcs *funcs; struct fb_info *fbdev; u32 pseudo_palette[17]; - struct drm_clip_rect dirty_clip; - spinlock_t dirty_lock; - struct work_struct dirty_work; + struct drm_clip_rect damage_clip; + spinlock_t damage_lock; + struct work_struct damage_work; struct work_struct resume_work; /** diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 3449a0353fe0..434328d8a0d9 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -98,9 +98,9 @@ struct drm_gem_shmem_object { unsigned int vmap_use_count; /** - * @map_cached: map object cached (instead of using writecombine). + * @map_wc: map object write-combined (instead of using shmem defaults). 
*/ - bool map_cached; + bool map_wc; }; #define to_drm_gem_shmem_obj(obj) \ @@ -133,9 +133,6 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv, struct drm_device *dev, size_t size, uint32_t *handle); -struct drm_gem_object * -drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size); - int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index cdf2a299ccd4..a0d79d1c51e2 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -195,6 +195,9 @@ enum drm_mode_status { * @crtc_vsync_end: hardware mode vertical sync end * @crtc_vtotal: hardware mode vertical total size * + * This is the kernel API display mode information structure. For the + * user-space version see struct drm_mode_modeinfo. + * * The horizontal and vertical timings are defined per the following diagram. * * :: diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index f2de050085be..16ff3fa148f5 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -1044,9 +1044,8 @@ struct drm_connector_helper_funcs { * NOTE: * * This function is called in the check phase of an atomic update. The - * driver is not allowed to change anything outside of the free-standing - * state objects passed-in or assembled in the overall &drm_atomic_state - * update tracking structure. + * driver is not allowed to change anything outside of the + * &drm_atomic_state update tracking structure passed in. * * RETURNS: * @@ -1056,7 +1055,7 @@ struct drm_connector_helper_funcs { * for this. */ struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, - struct drm_connector_state *connector_state); + struct drm_atomic_state *state); /** * @atomic_check: @@ -1097,15 +1096,15 @@ struct drm_connector_helper_funcs { * * This hook is to be used by drivers implementing writeback connectors * that need a point when to commit the writeback job to the hardware. - * The writeback_job to commit is available in - * &drm_connector_state.writeback_job. + * The writeback_job to commit is available in the new connector state, + * in &drm_connector_state.writeback_job. * * This hook is optional. * * This callback is used by the atomic modeset helpers. 
*/ void (*atomic_commit)(struct drm_connector *connector, - struct drm_connector_state *state); + struct drm_atomic_state *state); /** * @prepare_writeback_job: diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h index eba17106608b..98e1b2ab6403 100644 --- a/include/dt-bindings/clock/at91.h +++ b/include/dt-bindings/clock/at91.h @@ -25,6 +25,17 @@ #define PMC_PLLBCK 8 #define PMC_AUDIOPLLCK 9 +/* SAMA7G5 */ +#define PMC_CPUPLL (PMC_MAIN + 1) +#define PMC_SYSPLL (PMC_MAIN + 2) +#define PMC_DDRPLL (PMC_MAIN + 3) +#define PMC_IMGPLL (PMC_MAIN + 4) +#define PMC_BAUDPLL (PMC_MAIN + 5) +#define PMC_AUDIOPMCPLL (PMC_MAIN + 6) +#define PMC_AUDIOIOPLL (PMC_MAIN + 7) +#define PMC_ETHPLL (PMC_MAIN + 8) +#define PMC_CPU (PMC_MAIN + 9) + #ifndef AT91_PMC_MOSCS #define AT91_PMC_MOSCS 0 /* MOSCS Flag */ #define AT91_PMC_LOCKA 1 /* PLLA Lock */ diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h index fd1f938c38d1..e2749dbc74b8 100644 --- a/include/dt-bindings/clock/axg-clkc.h +++ b/include/dt-bindings/clock/axg-clkc.h @@ -72,5 +72,30 @@ #define CLKID_PCIE_CML_EN1 80 #define CLKID_MIPI_ENABLE 81 #define CLKID_GEN_CLK 84 +#define CLKID_VPU_0_SEL 92 +#define CLKID_VPU_0 93 +#define CLKID_VPU_1_SEL 95 +#define CLKID_VPU_1 96 +#define CLKID_VPU 97 +#define CLKID_VAPB_0_SEL 99 +#define CLKID_VAPB_0 100 +#define CLKID_VAPB_1_SEL 102 +#define CLKID_VAPB_1 103 +#define CLKID_VAPB_SEL 104 +#define CLKID_VAPB 105 +#define CLKID_VCLK 106 +#define CLKID_VCLK2 107 +#define CLKID_VCLK_DIV1 122 +#define CLKID_VCLK_DIV2 123 +#define CLKID_VCLK_DIV4 124 +#define CLKID_VCLK_DIV6 125 +#define CLKID_VCLK_DIV12 126 +#define CLKID_VCLK2_DIV1 127 +#define CLKID_VCLK2_DIV2 128 +#define CLKID_VCLK2_DIV4 129 +#define CLKID_VCLK2_DIV6 130 +#define CLKID_VCLK2_DIV12 131 +#define CLKID_CTS_ENCL 133 +#define CLKID_VDIN_MEAS 136 #endif /* __AXG_CLKC_H */ diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h index 5ec4137231e3..7d57063b8a65 100644 --- a/include/dt-bindings/clock/dra7.h +++ b/include/dt-bindings/clock/dra7.h @@ -84,6 +84,10 @@ #define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) #define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +/* iva clocks */ +#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) + /* dss clocks */ #define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) #define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) diff --git a/include/dt-bindings/clock/fsl,qoriq-clockgen.h b/include/dt-bindings/clock/fsl,qoriq-clockgen.h new file mode 100644 index 000000000000..ddec7d0bdc7f --- /dev/null +++ b/include/dt-bindings/clock/fsl,qoriq-clockgen.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef DT_CLOCK_FSL_QORIQ_CLOCKGEN_H +#define DT_CLOCK_FSL_QORIQ_CLOCKGEN_H + +#define QORIQ_CLK_SYSCLK 0 +#define QORIQ_CLK_CMUX 1 +#define QORIQ_CLK_HWACCEL 2 +#define QORIQ_CLK_FMAN 3 +#define QORIQ_CLK_PLATFORM_PLL 4 +#define QORIQ_CLK_CORECLK 5 + +#define QORIQ_CLK_PLL_DIV(x) ((x) - 1) + +#endif /* DT_CLOCK_FSL_QORIQ_CLOCKGEN_H */ diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h index 40d49940d8a8..a93b58c5e18e 100644 --- a/include/dt-bindings/clock/g12a-clkc.h +++ b/include/dt-bindings/clock/g12a-clkc.h @@ -147,5 +147,7 @@ #define CLKID_SPICC1_SCLK 261 #define CLKID_NNA_AXI_CLK 264 #define CLKID_NNA_CORE_CLK 267 +#define CLKID_MIPI_DSI_PXCLK_SEL 269 +#define CLKID_MIPI_DSI_PXCLK 270 #endif /* __G12A_CLKC_H */ diff 
--git a/include/dt-bindings/clock/imx8-lpcg.h b/include/dt-bindings/clock/imx8-lpcg.h new file mode 100644 index 000000000000..d202715652c3 --- /dev/null +++ b/include/dt-bindings/clock/imx8-lpcg.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2019-2020 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +#define IMX_LPCG_CLK_0 0 +#define IMX_LPCG_CLK_1 4 +#define IMX_LPCG_CLK_2 8 +#define IMX_LPCG_CLK_3 12 +#define IMX_LPCG_CLK_4 16 +#define IMX_LPCG_CLK_5 20 +#define IMX_LPCG_CLK_6 24 +#define IMX_LPCG_CLK_7 28 diff --git a/include/dt-bindings/clock/k210-clk.h b/include/dt-bindings/clock/k210-clk.h index 5a2fd64d1a49..a48176ad3c23 100644 --- a/include/dt-bindings/clock/k210-clk.h +++ b/include/dt-bindings/clock/k210-clk.h @@ -3,18 +3,52 @@ * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com> * Copyright (c) 2020 Western Digital Corporation or its affiliates. */ -#ifndef K210_CLK_H -#define K210_CLK_H +#ifndef CLOCK_K210_CLK_H +#define CLOCK_K210_CLK_H /* - * Arbitrary identifiers for clocks. - * The structure is: in0 -> pll0 -> aclk -> cpu - * - * Since we use the hardware defaults for now, set all these to the same clock. + * Kendryte K210 SoC clock identifiers (arbitrary values). */ -#define K210_CLK_PLL0 0 -#define K210_CLK_PLL1 0 -#define K210_CLK_ACLK 0 -#define K210_CLK_CPU 0 +#define K210_CLK_ACLK 0 +#define K210_CLK_CPU 0 +#define K210_CLK_SRAM0 1 +#define K210_CLK_SRAM1 2 +#define K210_CLK_AI 3 +#define K210_CLK_DMA 4 +#define K210_CLK_FFT 5 +#define K210_CLK_ROM 6 +#define K210_CLK_DVP 7 +#define K210_CLK_APB0 8 +#define K210_CLK_APB1 9 +#define K210_CLK_APB2 10 +#define K210_CLK_I2S0 11 +#define K210_CLK_I2S1 12 +#define K210_CLK_I2S2 13 +#define K210_CLK_I2S0_M 14 +#define K210_CLK_I2S1_M 15 +#define K210_CLK_I2S2_M 16 +#define K210_CLK_WDT0 17 +#define K210_CLK_WDT1 18 +#define K210_CLK_SPI0 19 +#define K210_CLK_SPI1 20 +#define K210_CLK_SPI2 21 +#define K210_CLK_I2C0 22 +#define K210_CLK_I2C1 23 +#define K210_CLK_I2C2 24 +#define K210_CLK_SPI3 25 +#define K210_CLK_TIMER0 26 +#define K210_CLK_TIMER1 27 +#define K210_CLK_TIMER2 28 +#define K210_CLK_GPIO 29 +#define K210_CLK_UART1 30 +#define K210_CLK_UART2 31 +#define K210_CLK_UART3 32 +#define K210_CLK_FPIOA 33 +#define K210_CLK_SHA 34 +#define K210_CLK_AES 35 +#define K210_CLK_OTP 36 +#define K210_CLK_RTC 37 -#endif /* K210_CLK_H */ +#define K210_NUM_CLKS 38 + +#endif /* CLOCK_K210_CLK_H */ diff --git a/include/dt-bindings/clock/qcom,camcc-sc7180.h b/include/dt-bindings/clock/qcom,camcc-sc7180.h new file mode 100644 index 000000000000..ef7d3a041b88 --- /dev/null +++ b/include/dt-bindings/clock/qcom,camcc-sc7180.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7180_H +#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7180_H + +/* CAM_CC clocks */ +#define CAM_CC_PLL2_OUT_EARLY 0 +#define CAM_CC_PLL0 1 +#define CAM_CC_PLL1 2 +#define CAM_CC_PLL2 3 +#define CAM_CC_PLL2_OUT_AUX 4 +#define CAM_CC_PLL3 5 +#define CAM_CC_CAMNOC_AXI_CLK 6 +#define CAM_CC_CCI_0_CLK 7 +#define CAM_CC_CCI_0_CLK_SRC 8 +#define CAM_CC_CCI_1_CLK 9 +#define CAM_CC_CCI_1_CLK_SRC 10 +#define CAM_CC_CORE_AHB_CLK 11 +#define CAM_CC_CPAS_AHB_CLK 12 +#define CAM_CC_CPHY_RX_CLK_SRC 13 +#define CAM_CC_CSI0PHYTIMER_CLK 14 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 15 +#define CAM_CC_CSI1PHYTIMER_CLK 16 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 17 +#define CAM_CC_CSI2PHYTIMER_CLK 18 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 19 +#define CAM_CC_CSI3PHYTIMER_CLK 20 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 21 +#define CAM_CC_CSIPHY0_CLK 22 +#define CAM_CC_CSIPHY1_CLK 23 +#define CAM_CC_CSIPHY2_CLK 24 +#define CAM_CC_CSIPHY3_CLK 25 +#define CAM_CC_FAST_AHB_CLK_SRC 26 +#define CAM_CC_ICP_APB_CLK 27 +#define CAM_CC_ICP_ATB_CLK 28 +#define CAM_CC_ICP_CLK 29 +#define CAM_CC_ICP_CLK_SRC 30 +#define CAM_CC_ICP_CTI_CLK 31 +#define CAM_CC_ICP_TS_CLK 32 +#define CAM_CC_IFE_0_AXI_CLK 33 +#define CAM_CC_IFE_0_CLK 34 +#define CAM_CC_IFE_0_CLK_SRC 35 +#define CAM_CC_IFE_0_CPHY_RX_CLK 36 +#define CAM_CC_IFE_0_CSID_CLK 37 +#define CAM_CC_IFE_0_CSID_CLK_SRC 38 +#define CAM_CC_IFE_0_DSP_CLK 39 +#define CAM_CC_IFE_1_AXI_CLK 40 +#define CAM_CC_IFE_1_CLK 41 +#define CAM_CC_IFE_1_CLK_SRC 42 +#define CAM_CC_IFE_1_CPHY_RX_CLK 43 +#define CAM_CC_IFE_1_CSID_CLK 44 +#define CAM_CC_IFE_1_CSID_CLK_SRC 45 +#define CAM_CC_IFE_1_DSP_CLK 46 +#define CAM_CC_IFE_LITE_CLK 47 +#define CAM_CC_IFE_LITE_CLK_SRC 48 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 49 +#define CAM_CC_IFE_LITE_CSID_CLK 50 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 51 +#define CAM_CC_IPE_0_AHB_CLK 52 +#define CAM_CC_IPE_0_AREG_CLK 53 +#define CAM_CC_IPE_0_AXI_CLK 54 +#define CAM_CC_IPE_0_CLK 55 +#define CAM_CC_IPE_0_CLK_SRC 56 +#define CAM_CC_JPEG_CLK 57 +#define CAM_CC_JPEG_CLK_SRC 58 +#define CAM_CC_LRME_CLK 59 +#define CAM_CC_LRME_CLK_SRC 60 +#define CAM_CC_MCLK0_CLK 61 +#define CAM_CC_MCLK0_CLK_SRC 62 +#define CAM_CC_MCLK1_CLK 63 +#define CAM_CC_MCLK1_CLK_SRC 64 +#define CAM_CC_MCLK2_CLK 65 +#define CAM_CC_MCLK2_CLK_SRC 66 +#define CAM_CC_MCLK3_CLK 67 +#define CAM_CC_MCLK3_CLK_SRC 68 +#define CAM_CC_MCLK4_CLK 69 +#define CAM_CC_MCLK4_CLK_SRC 70 +#define CAM_CC_BPS_AHB_CLK 71 +#define CAM_CC_BPS_AREG_CLK 72 +#define CAM_CC_BPS_AXI_CLK 73 +#define CAM_CC_BPS_CLK 74 +#define CAM_CC_BPS_CLK_SRC 75 +#define CAM_CC_SLOW_AHB_CLK_SRC 76 +#define CAM_CC_SOC_AHB_CLK 77 +#define CAM_CC_SYS_TMR_CLK 78 + +/* CAM_CC power domains */ +#define BPS_GDSC 0 +#define IFE_0_GDSC 1 +#define IFE_1_GDSC 2 +#define IPE_0_GDSC 3 +#define TITAN_TOP_GDSC 4 + +/* CAM_CC resets */ +#define CAM_CC_BPS_BCR 0 +#define CAM_CC_CAMNOC_BCR 1 +#define CAM_CC_CCI_0_BCR 2 +#define CAM_CC_CCI_1_BCR 3 +#define CAM_CC_CPAS_BCR 4 +#define CAM_CC_CSI0PHY_BCR 5 +#define CAM_CC_CSI1PHY_BCR 6 +#define CAM_CC_CSI2PHY_BCR 7 +#define CAM_CC_CSI3PHY_BCR 8 +#define CAM_CC_ICP_BCR 9 +#define CAM_CC_IFE_0_BCR 10 +#define CAM_CC_IFE_1_BCR 11 +#define CAM_CC_IFE_LITE_BCR 12 +#define CAM_CC_IPE_0_BCR 13 +#define CAM_CC_JPEG_BCR 14 +#define CAM_CC_LRME_BCR 15 +#define CAM_CC_MCLK0_BCR 16 +#define CAM_CC_MCLK1_BCR 17 +#define CAM_CC_MCLK2_BCR 18 +#define CAM_CC_MCLK3_BCR 19 +#define CAM_CC_MCLK4_BCR 20 +#define CAM_CC_TITAN_TOP_BCR 21 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-sdx55.h 
b/include/dt-bindings/clock/qcom,gcc-sdx55.h new file mode 100644 index 000000000000..fb9a5942f793 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sdx55.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2020, Linaro Ltd. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SDX55_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SDX55_H + +#define GPLL0 3 +#define GPLL0_OUT_EVEN 4 +#define GPLL4 5 +#define GPLL4_OUT_EVEN 6 +#define GPLL5 7 +#define GCC_AHB_PCIE_LINK_CLK 8 +#define GCC_BLSP1_AHB_CLK 9 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 10 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 11 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 12 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 13 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 14 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 15 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 16 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 17 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 18 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 19 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 20 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 21 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 22 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 23 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 24 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 25 +#define GCC_BLSP1_UART1_APPS_CLK 26 +#define GCC_BLSP1_UART1_APPS_CLK_SRC 27 +#define GCC_BLSP1_UART2_APPS_CLK 28 +#define GCC_BLSP1_UART2_APPS_CLK_SRC 29 +#define GCC_BLSP1_UART3_APPS_CLK 30 +#define GCC_BLSP1_UART3_APPS_CLK_SRC 31 +#define GCC_BLSP1_UART4_APPS_CLK 32 +#define GCC_BLSP1_UART4_APPS_CLK_SRC 33 +#define GCC_BOOT_ROM_AHB_CLK 34 +#define GCC_CE1_AHB_CLK 35 +#define GCC_CE1_AXI_CLK 36 +#define GCC_CE1_CLK 37 +#define GCC_CPUSS_AHB_CLK 38 +#define GCC_CPUSS_AHB_CLK_SRC 39 +#define GCC_CPUSS_GNOC_CLK 40 +#define GCC_CPUSS_RBCPR_CLK 41 +#define GCC_CPUSS_RBCPR_CLK_SRC 42 +#define GCC_EMAC_CLK_SRC 43 +#define GCC_EMAC_PTP_CLK_SRC 44 +#define GCC_ETH_AXI_CLK 45 +#define GCC_ETH_PTP_CLK 46 +#define GCC_ETH_RGMII_CLK 47 +#define GCC_ETH_SLAVE_AHB_CLK 48 +#define GCC_GP1_CLK 49 +#define GCC_GP1_CLK_SRC 50 +#define GCC_GP2_CLK 51 +#define GCC_GP2_CLK_SRC 52 +#define GCC_GP3_CLK 53 +#define GCC_GP3_CLK_SRC 54 +#define GCC_PCIE_0_CLKREF_CLK 55 +#define GCC_PCIE_AUX_CLK 56 +#define GCC_PCIE_AUX_PHY_CLK_SRC 57 +#define GCC_PCIE_CFG_AHB_CLK 58 +#define GCC_PCIE_MSTR_AXI_CLK 59 +#define GCC_PCIE_PIPE_CLK 60 +#define GCC_PCIE_RCHNG_PHY_CLK 61 +#define GCC_PCIE_RCHNG_PHY_CLK_SRC 62 +#define GCC_PCIE_SLEEP_CLK 63 +#define GCC_PCIE_SLV_AXI_CLK 64 +#define GCC_PCIE_SLV_Q2A_AXI_CLK 65 +#define GCC_PDM2_CLK 66 +#define GCC_PDM2_CLK_SRC 67 +#define GCC_PDM_AHB_CLK 68 +#define GCC_PDM_XO4_CLK 69 +#define GCC_SDCC1_AHB_CLK 70 +#define GCC_SDCC1_APPS_CLK 71 +#define GCC_SDCC1_APPS_CLK_SRC 72 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 73 +#define GCC_USB30_MASTER_CLK 74 +#define GCC_USB30_MASTER_CLK_SRC 75 +#define GCC_USB30_MOCK_UTMI_CLK 76 +#define GCC_USB30_MOCK_UTMI_CLK_SRC 77 +#define GCC_USB30_MSTR_AXI_CLK 78 +#define GCC_USB30_SLEEP_CLK 79 +#define GCC_USB30_SLV_AHB_CLK 80 +#define GCC_USB3_PHY_AUX_CLK 81 +#define GCC_USB3_PHY_AUX_CLK_SRC 82 +#define GCC_USB3_PHY_PIPE_CLK 83 +#define GCC_USB3_PRIM_CLKREF_CLK 84 +#define GCC_USB_PHY_CFG_AHB2PHY_CLK 85 +#define GCC_XO_DIV4_CLK 86 +#define GCC_XO_PCIE_LINK_CLK 87 + +#define GCC_EMAC_BCR 0 +#define GCC_PCIE_BCR 1 +#define GCC_PCIE_LINK_DOWN_BCR 2 +#define GCC_PCIE_NOCSR_COM_PHY_BCR 3 +#define GCC_PCIE_PHY_BCR 4 +#define GCC_PCIE_PHY_CFG_AHB_BCR 5 +#define GCC_PCIE_PHY_COM_BCR 6 +#define 
GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 7 +#define GCC_PDM_BCR 8 +#define GCC_QUSB2PHY_BCR 9 +#define GCC_TCSR_PCIE_BCR 10 +#define GCC_USB30_BCR 11 +#define GCC_USB3_PHY_BCR 12 +#define GCC_USB3PHY_PHY_BCR 13 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 14 + +/* GCC power domains */ +#define USB30_GDSC 0 +#define PCIE_GDSC 1 +#define EMAC_GDSC 2 + +#endif diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h index 2e6c54e65455..583a99161aaa 100644 --- a/include/dt-bindings/clock/qcom,rpmh.h +++ b/include/dt-bindings/clock/qcom,rpmh.h @@ -21,5 +21,15 @@ #define RPMH_IPA_CLK 12 #define RPMH_LN_BB_CLK1 13 #define RPMH_LN_BB_CLK1_A 14 +#define RPMH_CE_CLK 15 +#define RPMH_QPIC_CLK 16 +#define RPMH_DIV_CLK1 17 +#define RPMH_DIV_CLK1_A 18 +#define RPMH_RF_CLK4 19 +#define RPMH_RF_CLK4_A 20 +#define RPMH_RF_CLK5 21 +#define RPMH_RF_CLK5_A 22 +#define RPMH_PKA_CLK 23 +#define RPMH_HWKM_CLK 24 #endif diff --git a/include/dt-bindings/clock/qcom,sm8250-lpass-aoncc.h b/include/dt-bindings/clock/qcom,sm8250-lpass-aoncc.h new file mode 100644 index 000000000000..f5a1cfac8612 --- /dev/null +++ b/include/dt-bindings/clock/qcom,sm8250-lpass-aoncc.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DT_BINDINGS_CLK_LPASS_AONCC_SM8250_H +#define _DT_BINDINGS_CLK_LPASS_AONCC_SM8250_H + +/* from AOCC */ +#define LPASS_CDC_VA_MCLK 0 +#define LPASS_CDC_TX_NPL 1 +#define LPASS_CDC_TX_MCLK 2 + +#endif /* _DT_BINDINGS_CLK_LPASS_AONCC_SM8250_H */ diff --git a/include/dt-bindings/clock/qcom,sm8250-lpass-audiocc.h b/include/dt-bindings/clock/qcom,sm8250-lpass-audiocc.h new file mode 100644 index 000000000000..a1aa6cb5d840 --- /dev/null +++ b/include/dt-bindings/clock/qcom,sm8250-lpass-audiocc.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DT_BINDINGS_CLK_LPASS_AUDIOCC_SM8250_H +#define _DT_BINDINGS_CLK_LPASS_AUDIOCC_SM8250_H + +/* From AudioCC */ +#define LPASS_CDC_WSA_NPL 0 +#define LPASS_CDC_WSA_MCLK 1 +#define LPASS_CDC_RX_MCLK 2 +#define LPASS_CDC_RX_NPL 3 +#define LPASS_CDC_RX_MCLK_MCLK2 4 + +#endif /* _DT_BINDINGS_CLK_LPASS_AUDIOCC_SM8250_H */ diff --git a/include/dt-bindings/clock/sifive-fu740-prci.h b/include/dt-bindings/clock/sifive-fu740-prci.h new file mode 100644 index 000000000000..cd7706ea5677 --- /dev/null +++ b/include/dt-bindings/clock/sifive-fu740-prci.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (C) 2019 SiFive, Inc. + * Wesley Terpstra + * Paul Walmsley + * Zong Li + */ + +#ifndef __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H +#define __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H + +/* Clock indexes for use by Device Tree data and the PRCI driver */ + +#define PRCI_CLK_COREPLL 0 +#define PRCI_CLK_DDRPLL 1 +#define PRCI_CLK_GEMGXLPLL 2 +#define PRCI_CLK_DVFSCOREPLL 3 +#define PRCI_CLK_HFPCLKPLL 4 +#define PRCI_CLK_CLTXPLL 5 +#define PRCI_CLK_TLCLK 6 +#define PRCI_CLK_PCLK 7 + +#endif /* __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H */ diff --git a/include/dt-bindings/dma/jz4775-dma.h b/include/dt-bindings/dma/jz4775-dma.h new file mode 100644 index 000000000000..8d27e2c69dca --- /dev/null +++ b/include/dt-bindings/dma/jz4775-dma.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * This header provides macros for JZ4775 DMA bindings. 
+ * + * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com> + */ + +#ifndef __DT_BINDINGS_DMA_JZ4775_DMA_H__ +#define __DT_BINDINGS_DMA_JZ4775_DMA_H__ + +/* + * Request type numbers for the JZ4775 DMA controller (written to the DRTn + * register for the channel). + */ +#define JZ4775_DMA_I2S0_TX 0x6 +#define JZ4775_DMA_I2S0_RX 0x7 +#define JZ4775_DMA_AUTO 0x8 +#define JZ4775_DMA_SADC_RX 0x9 +#define JZ4775_DMA_UART3_TX 0x0e +#define JZ4775_DMA_UART3_RX 0x0f +#define JZ4775_DMA_UART2_TX 0x10 +#define JZ4775_DMA_UART2_RX 0x11 +#define JZ4775_DMA_UART1_TX 0x12 +#define JZ4775_DMA_UART1_RX 0x13 +#define JZ4775_DMA_UART0_TX 0x14 +#define JZ4775_DMA_UART0_RX 0x15 +#define JZ4775_DMA_SSI0_TX 0x16 +#define JZ4775_DMA_SSI0_RX 0x17 +#define JZ4775_DMA_MSC0_TX 0x1a +#define JZ4775_DMA_MSC0_RX 0x1b +#define JZ4775_DMA_MSC1_TX 0x1c +#define JZ4775_DMA_MSC1_RX 0x1d +#define JZ4775_DMA_MSC2_TX 0x1e +#define JZ4775_DMA_MSC2_RX 0x1f +#define JZ4775_DMA_PCM0_TX 0x20 +#define JZ4775_DMA_PCM0_RX 0x21 +#define JZ4775_DMA_SMB0_TX 0x24 +#define JZ4775_DMA_SMB0_RX 0x25 +#define JZ4775_DMA_SMB1_TX 0x26 +#define JZ4775_DMA_SMB1_RX 0x27 +#define JZ4775_DMA_SMB2_TX 0x28 +#define JZ4775_DMA_SMB2_RX 0x29 + +#endif /* __DT_BINDINGS_DMA_JZ4775_DMA_H__ */ diff --git a/include/dt-bindings/dma/qcom-gpi.h b/include/dt-bindings/dma/qcom-gpi.h new file mode 100644 index 000000000000..ebda2a37f52a --- /dev/null +++ b/include/dt-bindings/dma/qcom-gpi.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* Copyright (c) 2020, Linaro Ltd. */ + +#ifndef __DT_BINDINGS_DMA_QCOM_GPI_H__ +#define __DT_BINDINGS_DMA_QCOM_GPI_H__ + +#define QCOM_GPI_SPI 1 +#define QCOM_GPI_UART 2 +#define QCOM_GPI_I2C 3 + +#endif /* __DT_BINDINGS_DMA_QCOM_GPI_H__ */ diff --git a/include/dt-bindings/dma/x2000-dma.h b/include/dt-bindings/dma/x2000-dma.h new file mode 100644 index 000000000000..db2cd4830b00 --- /dev/null +++ b/include/dt-bindings/dma/x2000-dma.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * This header provides macros for X2000 DMA bindings. + * + * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com> + */ + +#ifndef __DT_BINDINGS_DMA_X2000_DMA_H__ +#define __DT_BINDINGS_DMA_X2000_DMA_H__ + +/* + * Request type numbers for the X2000 DMA controller (written to the DRTn + * register for the channel). 
+ */ +#define X2000_DMA_AUTO 0x8 +#define X2000_DMA_UART5_TX 0xa +#define X2000_DMA_UART5_RX 0xb +#define X2000_DMA_UART4_TX 0xc +#define X2000_DMA_UART4_RX 0xd +#define X2000_DMA_UART3_TX 0xe +#define X2000_DMA_UART3_RX 0xf +#define X2000_DMA_UART2_TX 0x10 +#define X2000_DMA_UART2_RX 0x11 +#define X2000_DMA_UART1_TX 0x12 +#define X2000_DMA_UART1_RX 0x13 +#define X2000_DMA_UART0_TX 0x14 +#define X2000_DMA_UART0_RX 0x15 +#define X2000_DMA_SSI0_TX 0x16 +#define X2000_DMA_SSI0_RX 0x17 +#define X2000_DMA_SSI1_TX 0x18 +#define X2000_DMA_SSI1_RX 0x19 +#define X2000_DMA_I2C0_TX 0x24 +#define X2000_DMA_I2C0_RX 0x25 +#define X2000_DMA_I2C1_TX 0x26 +#define X2000_DMA_I2C1_RX 0x27 +#define X2000_DMA_I2C2_TX 0x28 +#define X2000_DMA_I2C2_RX 0x29 +#define X2000_DMA_I2C3_TX 0x2a +#define X2000_DMA_I2C3_RX 0x2b +#define X2000_DMA_I2C4_TX 0x2c +#define X2000_DMA_I2C4_RX 0x2d +#define X2000_DMA_I2C5_TX 0x2e +#define X2000_DMA_I2C5_RX 0x2f +#define X2000_DMA_UART6_TX 0x30 +#define X2000_DMA_UART6_RX 0x31 +#define X2000_DMA_UART7_TX 0x32 +#define X2000_DMA_UART7_RX 0x33 +#define X2000_DMA_UART8_TX 0x34 +#define X2000_DMA_UART8_RX 0x35 +#define X2000_DMA_UART9_TX 0x36 +#define X2000_DMA_UART9_RX 0x37 +#define X2000_DMA_SADC_RX 0x38 + +#endif /* __DT_BINDINGS_DMA_X2000_DMA_H__ */ diff --git a/include/dt-bindings/gpio/msc313-gpio.h b/include/dt-bindings/gpio/msc313-gpio.h new file mode 100644 index 000000000000..2dd56683d3c1 --- /dev/null +++ b/include/dt-bindings/gpio/msc313-gpio.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * GPIO definitions for MStar/SigmaStar MSC313 and later SoCs + * + * Copyright (C) 2020 Daniel Palmer <daniel@thingy.jp> + */ + +#ifndef _DT_BINDINGS_MSC313_GPIO_H +#define _DT_BINDINGS_MSC313_GPIO_H + +#define MSC313_GPIO_FUART 0 +#define MSC313_GPIO_FUART_RX (MSC313_GPIO_FUART + 0) +#define MSC313_GPIO_FUART_TX (MSC313_GPIO_FUART + 1) +#define MSC313_GPIO_FUART_CTS (MSC313_GPIO_FUART + 2) +#define MSC313_GPIO_FUART_RTS (MSC313_GPIO_FUART + 3) + +#define MSC313_GPIO_SR (MSC313_GPIO_FUART_RTS + 1) +#define MSC313_GPIO_SR_IO2 (MSC313_GPIO_SR + 0) +#define MSC313_GPIO_SR_IO3 (MSC313_GPIO_SR + 1) +#define MSC313_GPIO_SR_IO4 (MSC313_GPIO_SR + 2) +#define MSC313_GPIO_SR_IO5 (MSC313_GPIO_SR + 3) +#define MSC313_GPIO_SR_IO6 (MSC313_GPIO_SR + 4) +#define MSC313_GPIO_SR_IO7 (MSC313_GPIO_SR + 5) +#define MSC313_GPIO_SR_IO8 (MSC313_GPIO_SR + 6) +#define MSC313_GPIO_SR_IO9 (MSC313_GPIO_SR + 7) +#define MSC313_GPIO_SR_IO10 (MSC313_GPIO_SR + 8) +#define MSC313_GPIO_SR_IO11 (MSC313_GPIO_SR + 9) +#define MSC313_GPIO_SR_IO12 (MSC313_GPIO_SR + 10) +#define MSC313_GPIO_SR_IO13 (MSC313_GPIO_SR + 11) +#define MSC313_GPIO_SR_IO14 (MSC313_GPIO_SR + 12) +#define MSC313_GPIO_SR_IO15 (MSC313_GPIO_SR + 13) +#define MSC313_GPIO_SR_IO16 (MSC313_GPIO_SR + 14) +#define MSC313_GPIO_SR_IO17 (MSC313_GPIO_SR + 15) + +#define MSC313_GPIO_SD (MSC313_GPIO_SR_IO17 + 1) +#define MSC313_GPIO_SD_CLK (MSC313_GPIO_SD + 0) +#define MSC313_GPIO_SD_CMD (MSC313_GPIO_SD + 1) +#define MSC313_GPIO_SD_D0 (MSC313_GPIO_SD + 2) +#define MSC313_GPIO_SD_D1 (MSC313_GPIO_SD + 3) +#define MSC313_GPIO_SD_D2 (MSC313_GPIO_SD + 4) +#define MSC313_GPIO_SD_D3 (MSC313_GPIO_SD + 5) + +#define MSC313_GPIO_I2C1 (MSC313_GPIO_SD_D3 + 1) +#define MSC313_GPIO_I2C1_SCL (MSC313_GPIO_I2C1 + 0) +#define MSC313_GPIO_I2C1_SDA (MSC313_GPIO_I2C1 + 1) + +#define MSC313_GPIO_SPI0 (MSC313_GPIO_I2C1_SDA + 1) +#define MSC313_GPIO_SPI0_CZ (MSC313_GPIO_SPI0 + 0) +#define MSC313_GPIO_SPI0_CK (MSC313_GPIO_SPI0 + 1) +#define 
MSC313_GPIO_SPI0_DI (MSC313_GPIO_SPI0 + 2) +#define MSC313_GPIO_SPI0_DO (MSC313_GPIO_SPI0 + 3) + +#endif /* _DT_BINDINGS_MSC313_GPIO_H */ diff --git a/include/dt-bindings/gpio/tegra186-gpio.h b/include/dt-bindings/gpio/tegra186-gpio.h index 0782b05e2775..af0d9583be70 100644 --- a/include/dt-bindings/gpio/tegra186-gpio.h +++ b/include/dt-bindings/gpio/tegra186-gpio.h @@ -8,8 +8,8 @@ * The second cell contains standard flag values specified in gpio.h. */ -#ifndef _DT_BINDINGS_GPIO_TEGRA_GPIO_H -#define _DT_BINDINGS_GPIO_TEGRA_GPIO_H +#ifndef _DT_BINDINGS_GPIO_TEGRA186_GPIO_H +#define _DT_BINDINGS_GPIO_TEGRA186_GPIO_H #include <dt-bindings/gpio/gpio.h> diff --git a/include/dt-bindings/interconnect/qcom,sdm845.h b/include/dt-bindings/interconnect/qcom,sdm845.h index 290be38f40e6..67b500e24915 100644 --- a/include/dt-bindings/interconnect/qcom,sdm845.h +++ b/include/dt-bindings/interconnect/qcom,sdm845.h @@ -19,6 +19,7 @@ #define SLAVE_A1NOC_SNOC 7 #define SLAVE_SERVICE_A1NOC 8 #define SLAVE_ANOC_PCIE_A1NOC_SNOC 9 +#define MASTER_QUP_1 10 #define MASTER_A2NOC_CFG 0 #define MASTER_QDSS_BAM 1 @@ -32,6 +33,7 @@ #define SLAVE_A2NOC_SNOC 9 #define SLAVE_ANOC_PCIE_SNOC 10 #define SLAVE_SERVICE_A2NOC 11 +#define MASTER_QUP_2 12 #define MASTER_SPDM 0 #define MASTER_TIC 1 diff --git a/include/dt-bindings/memory/tegra124-mc.h b/include/dt-bindings/memory/tegra124-mc.h index 186e6b7e9b35..7e73bb400eca 100644 --- a/include/dt-bindings/memory/tegra124-mc.h +++ b/include/dt-bindings/memory/tegra124-mc.h @@ -54,4 +54,72 @@ #define TEGRA124_MC_RESET_ISP2B 22 #define TEGRA124_MC_RESET_GPU 23 +#define TEGRA124_MC_PTCR 0 +#define TEGRA124_MC_DISPLAY0A 1 +#define TEGRA124_MC_DISPLAY0AB 2 +#define TEGRA124_MC_DISPLAY0B 3 +#define TEGRA124_MC_DISPLAY0BB 4 +#define TEGRA124_MC_DISPLAY0C 5 +#define TEGRA124_MC_DISPLAY0CB 6 +#define TEGRA124_MC_AFIR 14 +#define TEGRA124_MC_AVPCARM7R 15 +#define TEGRA124_MC_DISPLAYHC 16 +#define TEGRA124_MC_DISPLAYHCB 17 +#define TEGRA124_MC_HDAR 21 +#define TEGRA124_MC_HOST1XDMAR 22 +#define TEGRA124_MC_HOST1XR 23 +#define TEGRA124_MC_MSENCSRD 28 +#define TEGRA124_MC_PPCSAHBDMAR 29 +#define TEGRA124_MC_PPCSAHBSLVR 30 +#define TEGRA124_MC_SATAR 31 +#define TEGRA124_MC_VDEBSEVR 34 +#define TEGRA124_MC_VDEMBER 35 +#define TEGRA124_MC_VDEMCER 36 +#define TEGRA124_MC_VDETPER 37 +#define TEGRA124_MC_MPCORELPR 38 +#define TEGRA124_MC_MPCORER 39 +#define TEGRA124_MC_MSENCSWR 43 +#define TEGRA124_MC_AFIW 49 +#define TEGRA124_MC_AVPCARM7W 50 +#define TEGRA124_MC_HDAW 53 +#define TEGRA124_MC_HOST1XW 54 +#define TEGRA124_MC_MPCORELPW 56 +#define TEGRA124_MC_MPCOREW 57 +#define TEGRA124_MC_PPCSAHBDMAW 59 +#define TEGRA124_MC_PPCSAHBSLVW 60 +#define TEGRA124_MC_SATAW 61 +#define TEGRA124_MC_VDEBSEVW 62 +#define TEGRA124_MC_VDEDBGW 63 +#define TEGRA124_MC_VDEMBEW 64 +#define TEGRA124_MC_VDETPMW 65 +#define TEGRA124_MC_ISPRA 68 +#define TEGRA124_MC_ISPWA 70 +#define TEGRA124_MC_ISPWB 71 +#define TEGRA124_MC_XUSB_HOSTR 74 +#define TEGRA124_MC_XUSB_HOSTW 75 +#define TEGRA124_MC_XUSB_DEVR 76 +#define TEGRA124_MC_XUSB_DEVW 77 +#define TEGRA124_MC_ISPRAB 78 +#define TEGRA124_MC_ISPWAB 80 +#define TEGRA124_MC_ISPWBB 81 +#define TEGRA124_MC_TSECSRD 84 +#define TEGRA124_MC_TSECSWR 85 +#define TEGRA124_MC_A9AVPSCR 86 +#define TEGRA124_MC_A9AVPSCW 87 +#define TEGRA124_MC_GPUSRD 88 +#define TEGRA124_MC_GPUSWR 89 +#define TEGRA124_MC_DISPLAYT 90 +#define TEGRA124_MC_SDMMCRA 96 +#define TEGRA124_MC_SDMMCRAA 97 +#define TEGRA124_MC_SDMMCR 98 +#define TEGRA124_MC_SDMMCRAB 99 +#define TEGRA124_MC_SDMMCWA 100 +#define 
TEGRA124_MC_SDMMCWAA 101 +#define TEGRA124_MC_SDMMCW 102 +#define TEGRA124_MC_SDMMCWAB 103 +#define TEGRA124_MC_VICSRD 108 +#define TEGRA124_MC_VICSWR 109 +#define TEGRA124_MC_VIW 114 +#define TEGRA124_MC_DISPLAYD 115 + #endif diff --git a/include/dt-bindings/memory/tegra20-mc.h b/include/dt-bindings/memory/tegra20-mc.h index 35e131eee198..6f8829508ad0 100644 --- a/include/dt-bindings/memory/tegra20-mc.h +++ b/include/dt-bindings/memory/tegra20-mc.h @@ -18,4 +18,57 @@ #define TEGRA20_MC_RESET_VDE 13 #define TEGRA20_MC_RESET_VI 14 +#define TEGRA20_MC_DISPLAY0A 0 +#define TEGRA20_MC_DISPLAY0AB 1 +#define TEGRA20_MC_DISPLAY0B 2 +#define TEGRA20_MC_DISPLAY0BB 3 +#define TEGRA20_MC_DISPLAY0C 4 +#define TEGRA20_MC_DISPLAY0CB 5 +#define TEGRA20_MC_DISPLAY1B 6 +#define TEGRA20_MC_DISPLAY1BB 7 +#define TEGRA20_MC_EPPUP 8 +#define TEGRA20_MC_G2PR 9 +#define TEGRA20_MC_G2SR 10 +#define TEGRA20_MC_MPEUNIFBR 11 +#define TEGRA20_MC_VIRUV 12 +#define TEGRA20_MC_AVPCARM7R 13 +#define TEGRA20_MC_DISPLAYHC 14 +#define TEGRA20_MC_DISPLAYHCB 15 +#define TEGRA20_MC_FDCDRD 16 +#define TEGRA20_MC_G2DR 17 +#define TEGRA20_MC_HOST1XDMAR 18 +#define TEGRA20_MC_HOST1XR 19 +#define TEGRA20_MC_IDXSRD 20 +#define TEGRA20_MC_MPCORER 21 +#define TEGRA20_MC_MPE_IPRED 22 +#define TEGRA20_MC_MPEAMEMRD 23 +#define TEGRA20_MC_MPECSRD 24 +#define TEGRA20_MC_PPCSAHBDMAR 25 +#define TEGRA20_MC_PPCSAHBSLVR 26 +#define TEGRA20_MC_TEXSRD 27 +#define TEGRA20_MC_VDEBSEVR 28 +#define TEGRA20_MC_VDEMBER 29 +#define TEGRA20_MC_VDEMCER 30 +#define TEGRA20_MC_VDETPER 31 +#define TEGRA20_MC_EPPU 32 +#define TEGRA20_MC_EPPV 33 +#define TEGRA20_MC_EPPY 34 +#define TEGRA20_MC_MPEUNIFBW 35 +#define TEGRA20_MC_VIWSB 36 +#define TEGRA20_MC_VIWU 37 +#define TEGRA20_MC_VIWV 38 +#define TEGRA20_MC_VIWY 39 +#define TEGRA20_MC_G2DW 40 +#define TEGRA20_MC_AVPCARM7W 41 +#define TEGRA20_MC_FDCDWR 42 +#define TEGRA20_MC_HOST1XW 43 +#define TEGRA20_MC_ISPW 44 +#define TEGRA20_MC_MPCOREW 45 +#define TEGRA20_MC_MPECSWR 46 +#define TEGRA20_MC_PPCSAHBDMAW 47 +#define TEGRA20_MC_PPCSAHBSLVW 48 +#define TEGRA20_MC_VDEBSEVW 49 +#define TEGRA20_MC_VDEMBEW 50 +#define TEGRA20_MC_VDETPMW 51 + #endif diff --git a/include/dt-bindings/memory/tegra210-mc.h b/include/dt-bindings/memory/tegra210-mc.h index cacf05617e03..5e082547f179 100644 --- a/include/dt-bindings/memory/tegra210-mc.h +++ b/include/dt-bindings/memory/tegra210-mc.h @@ -33,6 +33,16 @@ #define TEGRA_SWGROUP_AXIAP 28 #define TEGRA_SWGROUP_ETR 29 #define TEGRA_SWGROUP_TSECB 30 +#define TEGRA_SWGROUP_NV 31 +#define TEGRA_SWGROUP_NV2 32 +#define TEGRA_SWGROUP_PPCS1 33 +#define TEGRA_SWGROUP_DC1 34 +#define TEGRA_SWGROUP_PPCS2 35 +#define TEGRA_SWGROUP_HC1 36 +#define TEGRA_SWGROUP_SE1 37 +#define TEGRA_SWGROUP_TSEC1 38 +#define TEGRA_SWGROUP_TSECB1 39 +#define TEGRA_SWGROUP_NVDEC1 40 #define TEGRA210_MC_RESET_AFI 0 #define TEGRA210_MC_RESET_AVPC 1 diff --git a/include/dt-bindings/memory/tegra30-mc.h b/include/dt-bindings/memory/tegra30-mc.h index 169f005fbc78..930f708aca17 100644 --- a/include/dt-bindings/memory/tegra30-mc.h +++ b/include/dt-bindings/memory/tegra30-mc.h @@ -41,4 +41,71 @@ #define TEGRA30_MC_RESET_VDE 16 #define TEGRA30_MC_RESET_VI 17 +#define TEGRA30_MC_PTCR 0 +#define TEGRA30_MC_DISPLAY0A 1 +#define TEGRA30_MC_DISPLAY0AB 2 +#define TEGRA30_MC_DISPLAY0B 3 +#define TEGRA30_MC_DISPLAY0BB 4 +#define TEGRA30_MC_DISPLAY0C 5 +#define TEGRA30_MC_DISPLAY0CB 6 +#define TEGRA30_MC_DISPLAY1B 7 +#define TEGRA30_MC_DISPLAY1BB 8 +#define TEGRA30_MC_EPPUP 9 +#define TEGRA30_MC_G2PR 10 +#define 
TEGRA30_MC_G2SR 11 +#define TEGRA30_MC_MPEUNIFBR 12 +#define TEGRA30_MC_VIRUV 13 +#define TEGRA30_MC_AFIR 14 +#define TEGRA30_MC_AVPCARM7R 15 +#define TEGRA30_MC_DISPLAYHC 16 +#define TEGRA30_MC_DISPLAYHCB 17 +#define TEGRA30_MC_FDCDRD 18 +#define TEGRA30_MC_FDCDRD2 19 +#define TEGRA30_MC_G2DR 20 +#define TEGRA30_MC_HDAR 21 +#define TEGRA30_MC_HOST1XDMAR 22 +#define TEGRA30_MC_HOST1XR 23 +#define TEGRA30_MC_IDXSRD 24 +#define TEGRA30_MC_IDXSRD2 25 +#define TEGRA30_MC_MPE_IPRED 26 +#define TEGRA30_MC_MPEAMEMRD 27 +#define TEGRA30_MC_MPECSRD 28 +#define TEGRA30_MC_PPCSAHBDMAR 29 +#define TEGRA30_MC_PPCSAHBSLVR 30 +#define TEGRA30_MC_SATAR 31 +#define TEGRA30_MC_TEXSRD 32 +#define TEGRA30_MC_TEXSRD2 33 +#define TEGRA30_MC_VDEBSEVR 34 +#define TEGRA30_MC_VDEMBER 35 +#define TEGRA30_MC_VDEMCER 36 +#define TEGRA30_MC_VDETPER 37 +#define TEGRA30_MC_MPCORELPR 38 +#define TEGRA30_MC_MPCORER 39 +#define TEGRA30_MC_EPPU 40 +#define TEGRA30_MC_EPPV 41 +#define TEGRA30_MC_EPPY 42 +#define TEGRA30_MC_MPEUNIFBW 43 +#define TEGRA30_MC_VIWSB 44 +#define TEGRA30_MC_VIWU 45 +#define TEGRA30_MC_VIWV 46 +#define TEGRA30_MC_VIWY 47 +#define TEGRA30_MC_G2DW 48 +#define TEGRA30_MC_AFIW 49 +#define TEGRA30_MC_AVPCARM7W 50 +#define TEGRA30_MC_FDCDWR 51 +#define TEGRA30_MC_FDCDWR2 52 +#define TEGRA30_MC_HDAW 53 +#define TEGRA30_MC_HOST1XW 54 +#define TEGRA30_MC_ISPW 55 +#define TEGRA30_MC_MPCORELPW 56 +#define TEGRA30_MC_MPCOREW 57 +#define TEGRA30_MC_MPECSWR 58 +#define TEGRA30_MC_PPCSAHBDMAW 59 +#define TEGRA30_MC_PPCSAHBSLVW 60 +#define TEGRA30_MC_SATAW 61 +#define TEGRA30_MC_VDEBSEVW 62 +#define TEGRA30_MC_VDEDBGW 63 +#define TEGRA30_MC_VDEMBEW 64 +#define TEGRA30_MC_VDETPMW 65 + #endif diff --git a/include/dt-bindings/power/mt8183-power.h b/include/dt-bindings/power/mt8183-power.h new file mode 100644 index 000000000000..d1ab387ba8c7 --- /dev/null +++ b/include/dt-bindings/power/mt8183-power.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 MediaTek Inc. + * Author: Weiyi Lu <weiyi.lu@mediatek.com> + */ + +#ifndef _DT_BINDINGS_POWER_MT8183_POWER_H +#define _DT_BINDINGS_POWER_MT8183_POWER_H + +#define MT8183_POWER_DOMAIN_AUDIO 0 +#define MT8183_POWER_DOMAIN_CONN 1 +#define MT8183_POWER_DOMAIN_MFG_ASYNC 2 +#define MT8183_POWER_DOMAIN_MFG 3 +#define MT8183_POWER_DOMAIN_MFG_CORE0 4 +#define MT8183_POWER_DOMAIN_MFG_CORE1 5 +#define MT8183_POWER_DOMAIN_MFG_2D 6 +#define MT8183_POWER_DOMAIN_DISP 7 +#define MT8183_POWER_DOMAIN_CAM 8 +#define MT8183_POWER_DOMAIN_ISP 9 +#define MT8183_POWER_DOMAIN_VDEC 10 +#define MT8183_POWER_DOMAIN_VENC 11 +#define MT8183_POWER_DOMAIN_VPU_TOP 12 +#define MT8183_POWER_DOMAIN_VPU_CORE0 13 +#define MT8183_POWER_DOMAIN_VPU_CORE1 14 + +#endif /* _DT_BINDINGS_POWER_MT8183_POWER_H */ diff --git a/include/dt-bindings/power/mt8192-power.h b/include/dt-bindings/power/mt8192-power.h new file mode 100644 index 000000000000..4eaa53d7270a --- /dev/null +++ b/include/dt-bindings/power/mt8192-power.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2020 MediaTek Inc. 
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com> + */ + +#ifndef _DT_BINDINGS_POWER_MT8192_POWER_H +#define _DT_BINDINGS_POWER_MT8192_POWER_H + +#define MT8192_POWER_DOMAIN_AUDIO 0 +#define MT8192_POWER_DOMAIN_CONN 1 +#define MT8192_POWER_DOMAIN_MFG0 2 +#define MT8192_POWER_DOMAIN_MFG1 3 +#define MT8192_POWER_DOMAIN_MFG2 4 +#define MT8192_POWER_DOMAIN_MFG3 5 +#define MT8192_POWER_DOMAIN_MFG4 6 +#define MT8192_POWER_DOMAIN_MFG5 7 +#define MT8192_POWER_DOMAIN_MFG6 8 +#define MT8192_POWER_DOMAIN_DISP 9 +#define MT8192_POWER_DOMAIN_IPE 10 +#define MT8192_POWER_DOMAIN_ISP 11 +#define MT8192_POWER_DOMAIN_ISP2 12 +#define MT8192_POWER_DOMAIN_MDP 13 +#define MT8192_POWER_DOMAIN_VENC 14 +#define MT8192_POWER_DOMAIN_VDEC 15 +#define MT8192_POWER_DOMAIN_VDEC2 16 +#define MT8192_POWER_DOMAIN_CAM 17 +#define MT8192_POWER_DOMAIN_CAM_RAWA 18 +#define MT8192_POWER_DOMAIN_CAM_RAWB 19 +#define MT8192_POWER_DOMAIN_CAM_RAWC 20 + +#endif /* _DT_BINDINGS_POWER_MT8192_POWER_H */ diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h index 5e61eaf73bdd..7714487ac76b 100644 --- a/include/dt-bindings/power/qcom-rpmpd.h +++ b/include/dt-bindings/power/qcom-rpmpd.h @@ -15,6 +15,11 @@ #define SDM845_GFX 7 #define SDM845_MSS 8 +/* SDX55 Power Domain Indexes */ +#define SDX55_MSS 0 +#define SDX55_MX 1 +#define SDX55_CX 2 + /* SM8150 Power Domain Indexes */ #define SM8150_MSS 0 #define SM8150_EBI 1 @@ -64,6 +69,23 @@ #define RPMH_REGULATOR_LEVEL_TURBO 384 #define RPMH_REGULATOR_LEVEL_TURBO_L1 416 +/* MSM8939 Power Domains */ +#define MSM8939_VDDMDCX 0 +#define MSM8939_VDDMDCX_AO 1 +#define MSM8939_VDDMDCX_VFC 2 +#define MSM8939_VDDCX 3 +#define MSM8939_VDDCX_AO 4 +#define MSM8939_VDDCX_VFC 5 +#define MSM8939_VDDMX 6 +#define MSM8939_VDDMX_AO 7 + +/* MSM8916 Power Domain Indexes */ +#define MSM8916_VDDCX 0 +#define MSM8916_VDDCX_AO 1 +#define MSM8916_VDDCX_VFC 2 +#define MSM8916_VDDMX 3 +#define MSM8916_VDDMX_AO 4 + /* MSM8976 Power Domain Indexes */ #define MSM8976_VDDCX 0 #define MSM8976_VDDCX_AO 1 @@ -102,6 +124,18 @@ #define QCS404_LPIMX 5 #define QCS404_LPIMX_VFL 6 +/* SDM660 Power Domains */ +#define SDM660_VDDCX 0 +#define SDM660_VDDCX_AO 1 +#define SDM660_VDDCX_VFL 2 +#define SDM660_VDDMX 3 +#define SDM660_VDDMX_AO 4 +#define SDM660_VDDMX_VFL 5 +#define SDM660_SSCCX 6 +#define SDM660_SSCCX_VFL 7 +#define SDM660_SSCMX 8 +#define SDM660_SSCMX_VFL 9 + /* RPM SMD Power Domain performance levels */ #define RPM_SMD_LEVEL_RETENTION 16 #define RPM_SMD_LEVEL_RETENTION_PLUS 32 diff --git a/include/dt-bindings/sound/apq8016-lpass.h b/include/dt-bindings/sound/apq8016-lpass.h index 3c3e16c0aadb..dc605c4bc224 100644 --- a/include/dt-bindings/sound/apq8016-lpass.h +++ b/include/dt-bindings/sound/apq8016-lpass.h @@ -2,9 +2,8 @@ #ifndef __DT_APQ8016_LPASS_H #define __DT_APQ8016_LPASS_H -#define MI2S_PRIMARY 0 -#define MI2S_SECONDARY 1 -#define MI2S_TERTIARY 2 -#define MI2S_QUATERNARY 3 +#include <dt-bindings/sound/qcom,lpass.h> + +/* NOTE: Use qcom,lpass.h to define any AIF ID's for LPASS */ #endif /* __DT_APQ8016_LPASS_H */ diff --git a/include/dt-bindings/sound/qcom,lpass.h b/include/dt-bindings/sound/qcom,lpass.h new file mode 100644 index 000000000000..7b0b80b38699 --- /dev/null +++ b/include/dt-bindings/sound/qcom,lpass.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_QCOM_LPASS_H +#define __DT_QCOM_LPASS_H + +#define MI2S_PRIMARY 0 +#define MI2S_SECONDARY 1 +#define MI2S_TERTIARY 2 +#define MI2S_QUATERNARY 3 +#define MI2S_QUINARY 4 + +#define LPASS_DP_RX 5 + +#define 
LPASS_MCLK0 0 + +#endif /* __DT_QCOM_LPASS_H */ diff --git a/include/dt-bindings/sound/sc7180-lpass.h b/include/dt-bindings/sound/sc7180-lpass.h index 56ecaafd2dc6..5c1ee8b36b19 100644 --- a/include/dt-bindings/sound/sc7180-lpass.h +++ b/include/dt-bindings/sound/sc7180-lpass.h @@ -2,10 +2,8 @@ #ifndef __DT_SC7180_LPASS_H #define __DT_SC7180_LPASS_H -#define MI2S_PRIMARY 0 -#define MI2S_SECONDARY 1 -#define LPASS_DP_RX 2 +#include <dt-bindings/sound/qcom,lpass.h> -#define LPASS_MCLK0 0 +/* NOTE: Use qcom,lpass.h to define any AIF ID's for LPASS */ #endif /* __DT_APQ8016_LPASS_H */ diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 1d94acd0bc85..8dcb3e1477bc 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -13,7 +13,7 @@ #define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) #define ARMV8_PMU_MAX_COUNTER_PAIRS ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1) -#ifdef CONFIG_KVM_ARM_PMU +#ifdef CONFIG_HW_PERF_EVENTS struct kvm_pmc { u8 idx; /* index into the pmu->pmc array */ @@ -24,13 +24,11 @@ struct kvm_pmu { int irq_num; struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS); - bool ready; bool created; bool irq_level; struct irq_work overflow_work; }; -#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) #define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS) u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); @@ -61,7 +59,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu); struct kvm_pmu { }; -#define kvm_arm_pmu_v3_ready(v) (false) #define kvm_arm_pmu_irq_initialized(v) (false) static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx) diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index a8d8fdcd3723..3d74f1060bd1 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -402,6 +402,7 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq, struct kvm_kernel_irq_routing_entry *irq_entry); int vgic_v4_load(struct kvm_vcpu *vcpu); +void vgic_v4_commit(struct kvm_vcpu *vcpu); int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db); #endif /* __KVM_ARM_VGIC_H */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 2630c2e953f7..053bf05fb1f7 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -885,6 +885,13 @@ static inline int acpi_device_modalias(struct device *dev, return -ENODEV; } +static inline struct platform_device * +acpi_create_platform_device(struct acpi_device *adev, + struct property_entry *properties) +{ + return NULL; +} + static inline bool acpi_dma_supported(struct acpi_device *adev) { return false; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 47b021952ac7..d705b174d346 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -447,8 +447,8 @@ enum { BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), /* allocate from reserved pool */ BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1), - /* set RQF_PREEMPT */ - BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3), + /* set RQF_PM */ + BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2), }; struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 070de09425ad..f94ee3089e01 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -79,9 +79,6 @@ typedef __u32 __bitwise req_flags_t; #define RQF_MQ_INFLIGHT ((__force 
req_flags_t)(1 << 6)) /* don't call prep for this one */ #define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) -/* set for "ide_preempt" requests and also for requests for which the SCSI - "quiesce" state must be ignored. */ -#define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) /* vaguely specified driver internal error. Ignored by the block layer */ #define RQF_FAILED ((__force req_flags_t)(1 << 10)) /* don't warn about errors */ @@ -430,8 +427,7 @@ struct request_queue { unsigned long queue_flags; /* * Number of contexts that have called blk_set_pm_only(). If this - * counter is above zero then only RQF_PM and RQF_PREEMPT requests are - * processed. + * counter is above zero then only RQF_PM requests are processed. */ atomic_t pm_only; @@ -696,6 +692,18 @@ static inline bool queue_is_mq(struct request_queue *q) return q->mq_ops; } +#ifdef CONFIG_PM +static inline enum rpm_status queue_rpm_status(struct request_queue *q) +{ + return q->rpm_status; +} +#else +static inline enum rpm_status queue_rpm_status(struct request_queue *q) +{ + return RPM_ACTIVE; +} +#endif + static inline enum blk_zoned_model blk_queue_zoned_model(struct request_queue *q) { diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h index 7bb66e15b481..e3a0be2c90ad 100644 --- a/include/linux/build_bug.h +++ b/include/linux/build_bug.h @@ -77,9 +77,4 @@ #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr) #define __static_assert(expr, msg, ...) _Static_assert(expr, msg) -#ifdef __GENKSYMS__ -/* genksyms gets confused by _Static_assert */ -#define _Static_assert(expr, ...) -#endif - #endif /* _LINUX_BUILD_BUG_H */ diff --git a/include/linux/capability.h b/include/linux/capability.h index 1e7fe311cabe..b2f698915c0f 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -270,6 +270,6 @@ static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns) /* audit system wants to get cap info from files as well */ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); -extern int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size); +extern int cap_convert_nscap(struct dentry *dentry, const void **ivalue, size_t size); #endif /* !_LINUX_CAPABILITY_H */ diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h index 6728c2ee0205..71b5d481c653 100644 --- a/include/linux/ceph/auth.h +++ b/include/linux/ceph/auth.h @@ -32,8 +32,6 @@ struct ceph_auth_handshake { }; struct ceph_auth_client_ops { - const char *name; - /* * true if we are authenticated and can connect to * services. 
@@ -53,7 +51,9 @@ struct ceph_auth_client_ops { */ int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end); int (*handle_reply)(struct ceph_auth_client *ac, int result, - void *buf, void *end); + void *buf, void *end, u8 *session_key, + int *session_key_len, u8 *con_secret, + int *con_secret_len); /* * Create authorizer for connecting to a service, and verify @@ -69,7 +69,10 @@ struct ceph_auth_client_ops { void *challenge_buf, int challenge_buf_len); int (*verify_authorizer_reply)(struct ceph_auth_client *ac, - struct ceph_authorizer *a); + struct ceph_authorizer *a, + void *reply, int reply_len, + u8 *session_key, int *session_key_len, + u8 *con_secret, int *con_secret_len); void (*invalidate_authorizer)(struct ceph_auth_client *ac, int peer_type); @@ -95,11 +98,15 @@ struct ceph_auth_client { const struct ceph_crypto_key *key; /* our secret key */ unsigned want_keys; /* which services we want */ + int preferred_mode; /* CEPH_CON_MODE_* */ + int fallback_mode; /* ditto */ + struct mutex mutex; }; -extern struct ceph_auth_client *ceph_auth_init(const char *name, - const struct ceph_crypto_key *key); +struct ceph_auth_client *ceph_auth_init(const char *name, + const struct ceph_crypto_key *key, + const int *con_modes); extern void ceph_auth_destroy(struct ceph_auth_client *ac); extern void ceph_auth_reset(struct ceph_auth_client *ac); @@ -113,21 +120,22 @@ int ceph_auth_entity_name_encode(const char *name, void **p, void *end); extern int ceph_build_auth(struct ceph_auth_client *ac, void *msg_buf, size_t msg_len); - extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); -extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, - int peer_type, - struct ceph_auth_handshake *auth); + +int __ceph_auth_get_authorizer(struct ceph_auth_client *ac, + struct ceph_auth_handshake *auth, + int peer_type, bool force_new, + int *proto, int *pref_mode, int *fallb_mode); void ceph_auth_destroy_authorizer(struct ceph_authorizer *a); -extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, - int peer_type, - struct ceph_auth_handshake *a); int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac, struct ceph_authorizer *a, void *challenge_buf, int challenge_buf_len); -extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, - struct ceph_authorizer *a); +int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, + struct ceph_authorizer *a, + void *reply, int reply_len, + u8 *session_key, int *session_key_len, + u8 *con_secret, int *con_secret_len); extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type); @@ -147,4 +155,34 @@ int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth, return auth->check_message_signature(auth, msg); return 0; } + +int ceph_auth_get_request(struct ceph_auth_client *ac, void *buf, int buf_len); +int ceph_auth_handle_reply_more(struct ceph_auth_client *ac, void *reply, + int reply_len, void *buf, int buf_len); +int ceph_auth_handle_reply_done(struct ceph_auth_client *ac, + u64 global_id, void *reply, int reply_len, + u8 *session_key, int *session_key_len, + u8 *con_secret, int *con_secret_len); +bool ceph_auth_handle_bad_method(struct ceph_auth_client *ac, + int used_proto, int result, + const int *allowed_protos, int proto_cnt, + const int *allowed_modes, int mode_cnt); + +int ceph_auth_get_authorizer(struct ceph_auth_client *ac, + struct ceph_auth_handshake *auth, + int peer_type, void *buf, int *buf_len); +int 
ceph_auth_handle_svc_reply_more(struct ceph_auth_client *ac, + struct ceph_auth_handshake *auth, + void *reply, int reply_len, + void *buf, int *buf_len); +int ceph_auth_handle_svc_reply_done(struct ceph_auth_client *ac, + struct ceph_auth_handshake *auth, + void *reply, int reply_len, + u8 *session_key, int *session_key_len, + u8 *con_secret, int *con_secret_len); +bool ceph_auth_handle_bad_authorizer(struct ceph_auth_client *ac, + int peer_type, int used_proto, int result, + const int *allowed_protos, int proto_cnt, + const int *allowed_modes, int mode_cnt); + #endif diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 999636d53cf2..3a47acd9cc14 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -8,7 +8,8 @@ * feature. Base case is 1 (first use). */ #define CEPH_FEATURE_INCARNATION_1 (0ull) -#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL +#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // SERVER_JEWEL +#define CEPH_FEATURE_INCARNATION_3 ((1ull<<57)|(1ull<<28)) // SERVER_MIMIC #define DEFINE_CEPH_FEATURE(bit, incarnation, name) \ static const uint64_t __maybe_unused CEPH_FEATURE_##name = (1ULL<<bit); \ @@ -75,7 +76,7 @@ DEFINE_CEPH_FEATURE( 0, 1, UID) DEFINE_CEPH_FEATURE( 1, 1, NOSRCADDR) DEFINE_CEPH_FEATURE_RETIRED( 2, 1, MONCLOCKCHECK, JEWEL, LUMINOUS) - +DEFINE_CEPH_FEATURE( 2, 3, SERVER_NAUTILUS) DEFINE_CEPH_FEATURE( 3, 1, FLOCK) DEFINE_CEPH_FEATURE( 4, 1, SUBSCRIBE2) DEFINE_CEPH_FEATURE( 5, 1, MONNAMES) @@ -114,7 +115,7 @@ DEFINE_CEPH_FEATURE(25, 1, CRUSH_TUNABLES2) DEFINE_CEPH_FEATURE(26, 1, CREATEPOOLID) DEFINE_CEPH_FEATURE(27, 1, REPLY_CREATE_INODE) DEFINE_CEPH_FEATURE_RETIRED(28, 1, OSD_HBMSGS, HAMMER, JEWEL) -DEFINE_CEPH_FEATURE(28, 2, SERVER_M) +DEFINE_CEPH_FEATURE(28, 2, SERVER_MIMIC) DEFINE_CEPH_FEATURE(29, 1, MDSENC) DEFINE_CEPH_FEATURE(30, 1, OSDHASHPSPOOL) DEFINE_CEPH_FEATURE(31, 1, MON_SINGLE_PAXOS) // deprecate me @@ -177,13 +178,16 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin */ #define CEPH_FEATURES_SUPPORTED_DEFAULT \ (CEPH_FEATURE_NOSRCADDR | \ + CEPH_FEATURE_SERVER_NAUTILUS | \ CEPH_FEATURE_FLOCK | \ CEPH_FEATURE_SUBSCRIBE2 | \ + CEPH_FEATURE_MONNAMES | \ CEPH_FEATURE_RECONNECT_SEQ | \ CEPH_FEATURE_DIRLAYOUTHASH | \ CEPH_FEATURE_PGID64 | \ CEPH_FEATURE_PGPOOL3 | \ CEPH_FEATURE_OSDENC | \ + CEPH_FEATURE_MONENC | \ CEPH_FEATURE_CRUSH_TUNABLES | \ CEPH_FEATURE_SERVER_LUMINOUS | \ CEPH_FEATURE_RESEND_ON_SPLIT | \ @@ -193,6 +197,7 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin CEPH_FEATURE_MSG_AUTH | \ CEPH_FEATURE_CRUSH_TUNABLES2 | \ CEPH_FEATURE_REPLY_CREATE_INODE | \ + CEPH_FEATURE_SERVER_MIMIC | \ CEPH_FEATURE_MDSENC | \ CEPH_FEATURE_OSDHASHPSPOOL | \ CEPH_FEATURE_OSD_CACHEPOOL | \ diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 455e9b9e2adf..e41a811026f6 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -93,8 +93,19 @@ struct ceph_dir_layout { #define CEPH_AUTH_NONE 0x1 #define CEPH_AUTH_CEPHX 0x2 +#define CEPH_AUTH_MODE_NONE 0 +#define CEPH_AUTH_MODE_AUTHORIZER 1 +#define CEPH_AUTH_MODE_MON 10 + +/* msgr2 protocol modes */ +#define CEPH_CON_MODE_UNKNOWN 0x0 +#define CEPH_CON_MODE_CRC 0x1 +#define CEPH_CON_MODE_SECURE 0x2 + #define CEPH_AUTH_UID_DEFAULT ((__u64) -1) +const char *ceph_auth_proto_name(int proto); +const char *ceph_con_mode_name(int mode); /********************************************* * message layer @@ -424,6 
+435,7 @@ union ceph_mds_request_args { } __attribute__ ((packed)) open; struct { __le32 flags; + __le32 osdmap_epoch; /* used for setting file/dir layouts */ } __attribute__ ((packed)) setxattr; struct { struct ceph_file_layout_legacy layout; @@ -445,11 +457,25 @@ union ceph_mds_request_args { } __attribute__ ((packed)) lookupino; } __attribute__ ((packed)); +union ceph_mds_request_args_ext { + union ceph_mds_request_args old; + struct { + __le32 mode; + __le32 uid; + __le32 gid; + struct ceph_timespec mtime; + struct ceph_timespec atime; + __le64 size, old_size; /* old_size needed by truncate */ + __le32 mask; /* CEPH_SETATTR_* */ + struct ceph_timespec btime; + } __attribute__ ((packed)) setattr_ext; +}; + #define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */ #define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */ #define CEPH_MDS_FLAG_ASYNC 4 /* request is asynchronous */ -struct ceph_mds_request_head { +struct ceph_mds_request_head_old { __le64 oldest_client_tid; __le32 mdsmap_epoch; /* on client */ __le32 flags; /* CEPH_MDS_FLAG_* */ @@ -462,6 +488,22 @@ struct ceph_mds_request_head { union ceph_mds_request_args args; } __attribute__ ((packed)); +#define CEPH_MDS_REQUEST_HEAD_VERSION 1 + +struct ceph_mds_request_head { + __le16 version; /* struct version */ + __le64 oldest_client_tid; + __le32 mdsmap_epoch; /* on client */ + __le32 flags; /* CEPH_MDS_FLAG_* */ + __u8 num_retry, num_fwd; /* count retry, fwd attempts */ + __le16 num_releases; /* # include cap/lease release records */ + __le32 op; /* mds op code */ + __le32 caller_uid, caller_gid; + __le64 ino; /* use this ino for openc, mkdir, mknod, + etc. (if replaying) */ + union ceph_mds_request_args_ext args; +} __attribute__ ((packed)); + /* cap/lease release record */ struct ceph_mds_request_release { __le64 ino, cap_id; /* ino and unique cap id */ diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h index 450384fe487c..04f3ace5787b 100644 --- a/include/linux/ceph/decode.h +++ b/include/linux/ceph/decode.h @@ -220,6 +220,8 @@ static inline void ceph_encode_timespec64(struct ceph_timespec *tv, */ #define CEPH_ENTITY_ADDR_TYPE_NONE 0 #define CEPH_ENTITY_ADDR_TYPE_LEGACY __cpu_to_le32(1) +#define CEPH_ENTITY_ADDR_TYPE_MSGR2 __cpu_to_le32(2) +#define CEPH_ENTITY_ADDR_TYPE_ANY __cpu_to_le32(3) static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a) { @@ -239,6 +241,12 @@ static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a) extern int ceph_decode_entity_addr(void **p, void *end, struct ceph_entity_addr *addr); +int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2, + struct ceph_entity_addr *addr); + +int ceph_entity_addr_encoding_len(const struct ceph_entity_addr *addr); +void ceph_encode_entity_addr(void **p, const struct ceph_entity_addr *addr); + /* * encoders */ diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index c8645f0b797d..eb9008bb3992 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -31,10 +31,10 @@ #define CEPH_OPT_FSID (1<<0) #define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ #define CEPH_OPT_MYIP (1<<2) /* specified my ip */ -#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ +#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes (msgr1) */ #define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */ #define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ -#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */ +#define CEPH_OPT_NOMSGSIGN 
(1<<6) /* don't sign msgs (msgr1) */ #define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */ #define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) @@ -53,6 +53,7 @@ struct ceph_options { unsigned long osd_keepalive_timeout; /* jiffies */ unsigned long osd_request_timeout; /* jiffies */ u32 read_from_replica; /* CEPH_OSD_FLAG_BALANCE/LOCALIZE_READS */ + int con_modes[2]; /* CEPH_CON_MODE_* */ /* * any type that can't be simply compared or doesn't need @@ -83,6 +84,7 @@ struct ceph_options { #define CEPH_MONC_HUNT_BACKOFF 2 #define CEPH_MONC_HUNT_MAX_MULT 10 +#define CEPH_MSG_MAX_CONTROL_LEN (16*1024*1024) #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) @@ -104,6 +106,7 @@ enum { CEPH_MOUNT_UNMOUNTING, CEPH_MOUNT_UNMOUNTED, CEPH_MOUNT_SHUTDOWN, + CEPH_MOUNT_RECOVER, }; static inline unsigned long ceph_timeout_jiffies(unsigned long timeout) @@ -150,6 +153,10 @@ struct ceph_client { #define from_msgr(ms) container_of(ms, struct ceph_client, msgr) +static inline bool ceph_msgr2(struct ceph_client *client) +{ + return client->options->con_modes[0] != CEPH_CON_MODE_UNKNOWN; +} /* * snapshots diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h index 35d385296fbb..523fd0452856 100644 --- a/include/linux/ceph/mdsmap.h +++ b/include/linux/ceph/mdsmap.h @@ -64,7 +64,7 @@ static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w) } extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m); -extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end); +struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2); extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m); extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m); diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 60b324efd1c4..0e6e9ad3c3bf 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -3,6 +3,7 @@ #define __FS_CEPH_MESSENGER_H #include <linux/bvec.h> +#include <linux/crypto.h> #include <linux/kref.h> #include <linux/mutex.h> #include <linux/net.h> @@ -52,6 +53,23 @@ struct ceph_connection_operations { int (*sign_message) (struct ceph_msg *msg); int (*check_message_signature) (struct ceph_msg *msg); + + /* msgr2 authentication exchange */ + int (*get_auth_request)(struct ceph_connection *con, + void *buf, int *buf_len, + void **authorizer, int *authorizer_len); + int (*handle_auth_reply_more)(struct ceph_connection *con, + void *reply, int reply_len, + void *buf, int *buf_len, + void **authorizer, int *authorizer_len); + int (*handle_auth_done)(struct ceph_connection *con, + u64 global_id, void *reply, int reply_len, + u8 *session_key, int *session_key_len, + u8 *con_secret, int *con_secret_len); + int (*handle_auth_bad_method)(struct ceph_connection *con, + int used_proto, int result, + const int *allowed_protos, int proto_cnt, + const int *allowed_modes, int mode_cnt); }; /* use format string %s%lld */ @@ -235,14 +253,171 @@ struct ceph_msg { bool more_to_follow; bool needs_out_seq; int front_alloc_len; - unsigned long ack_stamp; /* tx: when we were acked */ struct ceph_msgpool *pool; }; +/* + * connection states + */ +#define CEPH_CON_S_CLOSED 1 +#define CEPH_CON_S_PREOPEN 2 +#define CEPH_CON_S_V1_BANNER 3 +#define CEPH_CON_S_V1_CONNECT_MSG 4 +#define CEPH_CON_S_V2_BANNER_PREFIX 5 +#define CEPH_CON_S_V2_BANNER_PAYLOAD 6 +#define CEPH_CON_S_V2_HELLO 7 +#define CEPH_CON_S_V2_AUTH 8 +#define CEPH_CON_S_V2_AUTH_SIGNATURE 9 +#define CEPH_CON_S_V2_SESSION_CONNECT 
10 +#define CEPH_CON_S_V2_SESSION_RECONNECT 11 +#define CEPH_CON_S_OPEN 12 +#define CEPH_CON_S_STANDBY 13 + +/* + * ceph_connection flag bits + */ +#define CEPH_CON_F_LOSSYTX 0 /* we can close channel or drop + messages on errors */ +#define CEPH_CON_F_KEEPALIVE_PENDING 1 /* we need to send a keepalive */ +#define CEPH_CON_F_WRITE_PENDING 2 /* we have data ready to send */ +#define CEPH_CON_F_SOCK_CLOSED 3 /* socket state changed to closed */ +#define CEPH_CON_F_BACKOFF 4 /* need to retry queuing delayed + work */ + /* ceph connection fault delay defaults, for exponential backoff */ -#define BASE_DELAY_INTERVAL (HZ/2) -#define MAX_DELAY_INTERVAL (5 * 60 * HZ) +#define BASE_DELAY_INTERVAL (HZ / 4) +#define MAX_DELAY_INTERVAL (15 * HZ) + +struct ceph_connection_v1_info { + struct kvec out_kvec[8], /* sending header/footer data */ + *out_kvec_cur; + int out_kvec_left; /* kvec's left in out_kvec */ + int out_skip; /* skip this many bytes */ + int out_kvec_bytes; /* total bytes left */ + bool out_more; /* there is more data after the kvecs */ + bool out_msg_done; + + struct ceph_auth_handshake *auth; + int auth_retry; /* true if we need a newer authorizer */ + + /* connection negotiation temps */ + u8 in_banner[CEPH_BANNER_MAX_LEN]; + struct ceph_entity_addr actual_peer_addr; + struct ceph_entity_addr peer_addr_for_me; + struct ceph_msg_connect out_connect; + struct ceph_msg_connect_reply in_reply; + + int in_base_pos; /* bytes read */ + + /* message in temps */ + u8 in_tag; /* protocol control byte */ + struct ceph_msg_header in_hdr; + __le64 in_temp_ack; /* for reading an ack */ + + /* message out temps */ + struct ceph_msg_header out_hdr; + __le64 out_temp_ack; /* for writing an ack */ + struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2 + stamp */ + + u32 connect_seq; /* identify the most recent connection + attempt for this session */ + u32 peer_global_seq; /* peer's global seq for this connection */ +}; + +#define CEPH_CRC_LEN 4 +#define CEPH_GCM_KEY_LEN 16 +#define CEPH_GCM_IV_LEN sizeof(struct ceph_gcm_nonce) +#define CEPH_GCM_BLOCK_LEN 16 +#define CEPH_GCM_TAG_LEN 16 + +#define CEPH_PREAMBLE_LEN 32 +#define CEPH_PREAMBLE_INLINE_LEN 48 +#define CEPH_PREAMBLE_PLAIN_LEN CEPH_PREAMBLE_LEN +#define CEPH_PREAMBLE_SECURE_LEN (CEPH_PREAMBLE_LEN + \ + CEPH_PREAMBLE_INLINE_LEN + \ + CEPH_GCM_TAG_LEN) +#define CEPH_EPILOGUE_PLAIN_LEN (1 + 3 * CEPH_CRC_LEN) +#define CEPH_EPILOGUE_SECURE_LEN (CEPH_GCM_BLOCK_LEN + CEPH_GCM_TAG_LEN) + +#define CEPH_FRAME_MAX_SEGMENT_COUNT 4 + +struct ceph_frame_desc { + int fd_tag; /* FRAME_TAG_* */ + int fd_seg_cnt; + int fd_lens[CEPH_FRAME_MAX_SEGMENT_COUNT]; /* logical */ + int fd_aligns[CEPH_FRAME_MAX_SEGMENT_COUNT]; +}; + +struct ceph_gcm_nonce { + __le32 fixed; + __le64 counter __packed; +}; + +struct ceph_connection_v2_info { + struct iov_iter in_iter; + struct kvec in_kvecs[5]; /* recvmsg */ + struct bio_vec in_bvec; /* recvmsg (in_cursor) */ + int in_kvec_cnt; + int in_state; /* IN_S_* */ + + struct iov_iter out_iter; + struct kvec out_kvecs[8]; /* sendmsg */ + struct bio_vec out_bvec; /* sendpage (out_cursor, out_zero), + sendmsg (out_enc_pages) */ + int out_kvec_cnt; + int out_state; /* OUT_S_* */ + + int out_zero; /* # of zero bytes to send */ + bool out_iter_sendpage; /* use sendpage if possible */ + + struct ceph_frame_desc in_desc; + struct ceph_msg_data_cursor in_cursor; + struct ceph_msg_data_cursor out_cursor; + + struct crypto_shash *hmac_tfm; /* post-auth signature */ + struct crypto_aead *gcm_tfm; /* on-wire encryption */ + 
struct aead_request *gcm_req; + struct crypto_wait gcm_wait; + struct ceph_gcm_nonce in_gcm_nonce; + struct ceph_gcm_nonce out_gcm_nonce; + + struct page **out_enc_pages; + int out_enc_page_cnt; + int out_enc_resid; + int out_enc_i; + + int con_mode; /* CEPH_CON_MODE_* */ + + void *conn_bufs[16]; + int conn_buf_cnt; + + struct kvec in_sign_kvecs[8]; + struct kvec out_sign_kvecs[8]; + int in_sign_kvec_cnt; + int out_sign_kvec_cnt; + + u64 client_cookie; + u64 server_cookie; + u64 global_seq; + u64 connect_seq; + u64 peer_global_seq; + + u8 in_buf[CEPH_PREAMBLE_SECURE_LEN]; + u8 out_buf[CEPH_PREAMBLE_SECURE_LEN]; + struct { + u8 late_status; /* FRAME_LATE_STATUS_* */ + union { + struct { + u32 front_crc; + u32 middle_crc; + u32 data_crc; + } __packed; + u8 pad[CEPH_GCM_BLOCK_LEN - 1]; + }; + } out_epil; +}; /* * A single connection with another host. @@ -258,24 +433,16 @@ struct ceph_connection { struct ceph_messenger *msgr; + int state; /* CEPH_CON_S_* */ atomic_t sock_state; struct socket *sock; - struct ceph_entity_addr peer_addr; /* peer address */ - struct ceph_entity_addr peer_addr_for_me; - unsigned long flags; - unsigned long state; + unsigned long flags; /* CEPH_CON_F_* */ const char *error_msg; /* error message, if any */ struct ceph_entity_name peer_name; /* peer name */ - + struct ceph_entity_addr peer_addr; /* peer address */ u64 peer_features; - u32 connect_seq; /* identify the most recent connection - attempt for this connection, client */ - u32 peer_global_seq; /* peer's global seq for this connection */ - - struct ceph_auth_handshake *auth; - int auth_retry; /* true if we need a newer authorizer */ struct mutex mutex; @@ -286,43 +453,80 @@ struct ceph_connection { u64 in_seq, in_seq_acked; /* last message received, acked */ - /* connection negotiation temps */ - char in_banner[CEPH_BANNER_MAX_LEN]; - struct ceph_msg_connect out_connect; - struct ceph_msg_connect_reply in_reply; - struct ceph_entity_addr actual_peer_addr; - - /* message out temps */ - struct ceph_msg_header out_hdr; + struct ceph_msg *in_msg; struct ceph_msg *out_msg; /* sending message (== tail of out_sent) */ - bool out_msg_done; - - struct kvec out_kvec[8], /* sending header/footer data */ - *out_kvec_cur; - int out_kvec_left; /* kvec's left in out_kvec */ - int out_skip; /* skip this many bytes */ - int out_kvec_bytes; /* total bytes left */ - int out_more; /* there is more data after the kvecs */ - __le64 out_temp_ack; /* for writing an ack */ - struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2 - stamp */ - /* message in temps */ - struct ceph_msg_header in_hdr; - struct ceph_msg *in_msg; u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */ - char in_tag; /* protocol control byte */ - int in_base_pos; /* bytes read */ - __le64 in_temp_ack; /* for reading an ack */ - struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */ struct delayed_work work; /* send|recv work */ unsigned long delay; /* current delay interval */ + + union { + struct ceph_connection_v1_info v1; + struct ceph_connection_v2_info v2; + }; }; +extern struct page *ceph_zero_page; + +void ceph_con_flag_clear(struct ceph_connection *con, unsigned long con_flag); +void ceph_con_flag_set(struct ceph_connection *con, unsigned long con_flag); +bool ceph_con_flag_test(struct ceph_connection *con, unsigned long con_flag); +bool ceph_con_flag_test_and_clear(struct ceph_connection *con, + unsigned long con_flag); +bool ceph_con_flag_test_and_set(struct ceph_connection *con, + unsigned long con_flag); + +void 
ceph_encode_my_addr(struct ceph_messenger *msgr); + +int ceph_tcp_connect(struct ceph_connection *con); +int ceph_con_close_socket(struct ceph_connection *con); +void ceph_con_reset_session(struct ceph_connection *con); + +u32 ceph_get_global_seq(struct ceph_messenger *msgr, u32 gt); +void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq); +void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq); + +void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor, + struct ceph_msg *msg, size_t length); +struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor, + size_t *page_offset, size_t *length, + bool *last_piece); +void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes); + +u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset, + unsigned int length); + +bool ceph_addr_is_blank(const struct ceph_entity_addr *addr); +int ceph_addr_port(const struct ceph_entity_addr *addr); +void ceph_addr_set_port(struct ceph_entity_addr *addr, int p); + +void ceph_con_process_message(struct ceph_connection *con); +int ceph_con_in_msg_alloc(struct ceph_connection *con, + struct ceph_msg_header *hdr, int *skip); +void ceph_con_get_out_msg(struct ceph_connection *con); + +/* messenger_v1.c */ +int ceph_con_v1_try_read(struct ceph_connection *con); +int ceph_con_v1_try_write(struct ceph_connection *con); +void ceph_con_v1_revoke(struct ceph_connection *con); +void ceph_con_v1_revoke_incoming(struct ceph_connection *con); +bool ceph_con_v1_opened(struct ceph_connection *con); +void ceph_con_v1_reset_session(struct ceph_connection *con); +void ceph_con_v1_reset_protocol(struct ceph_connection *con); + +/* messenger_v2.c */ +int ceph_con_v2_try_read(struct ceph_connection *con); +int ceph_con_v2_try_write(struct ceph_connection *con); +void ceph_con_v2_revoke(struct ceph_connection *con); +void ceph_con_v2_revoke_incoming(struct ceph_connection *con); +bool ceph_con_v2_opened(struct ceph_connection *con); +void ceph_con_v2_reset_session(struct ceph_connection *con); +void ceph_con_v2_reset_protocol(struct ceph_connection *con); + extern const char *ceph_pr_addr(const struct ceph_entity_addr *addr); @@ -330,7 +534,6 @@ extern int ceph_parse_ips(const char *c, const char *end, struct ceph_entity_addr *addr, int max_count, int *count); - extern int ceph_msgr_init(void); extern void ceph_msgr_exit(void); extern void ceph_msgr_flush(void); diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h index 9e50aede46c8..3989dcb94d3d 100644 --- a/include/linux/ceph/msgr.h +++ b/include/linux/ceph/msgr.h @@ -9,24 +9,45 @@ #define CEPH_MON_PORT 6789 /* default monitor port */ /* - * client-side processes will try to bind to ports in this - * range, simply for the benefit of tools like nmap or wireshark - * that would like to identify the protocol. - */ -#define CEPH_PORT_FIRST 6789 -#define CEPH_PORT_START 6800 /* non-monitors start here */ -#define CEPH_PORT_LAST 6900 - -/* * tcp connection banner. include a protocol version. and adjust * whenever the wire protocol changes. try to keep this string length * constant. */ #define CEPH_BANNER "ceph v027" +#define CEPH_BANNER_LEN 9 #define CEPH_BANNER_MAX_LEN 30 /* + * messenger V2 connection banner prefix. + * The full banner string should have the form: "ceph v2\n<le16>" + * the 2 bytes are the length of the remaining banner. 
+ */ +#define CEPH_BANNER_V2 "ceph v2\n" +#define CEPH_BANNER_V2_LEN 8 +#define CEPH_BANNER_V2_PREFIX_LEN (CEPH_BANNER_V2_LEN + sizeof(__le16)) + +/* + * messenger V2 features + */ +#define CEPH_MSGR2_INCARNATION_1 (0ull) + +#define DEFINE_MSGR2_FEATURE(bit, incarnation, name) \ + static const uint64_t __maybe_unused CEPH_MSGR2_FEATURE_##name = (1ULL << bit); \ + static const uint64_t __maybe_unused CEPH_MSGR2_FEATUREMASK_##name = \ + (1ULL << bit | CEPH_MSGR2_INCARNATION_##incarnation); + +#define HAVE_MSGR2_FEATURE(x, name) \ + (((x) & (CEPH_MSGR2_FEATUREMASK_##name)) == (CEPH_MSGR2_FEATUREMASK_##name)) + +DEFINE_MSGR2_FEATURE( 0, 1, REVISION_1) // msgr2.1 + +#define CEPH_MSGR2_SUPPORTED_FEATURES (CEPH_MSGR2_FEATURE_REVISION_1) + +#define CEPH_MSGR2_REQUIRED_FEATURES (CEPH_MSGR2_FEATURE_REVISION_1) + + +/* * Rollover-safe type and comparator for 32-bit sequence numbers. * Comparator returns -1, 0, or 1. */ @@ -61,11 +82,18 @@ extern const char *ceph_entity_type_name(int type); * entity_addr -- network address */ struct ceph_entity_addr { - __le32 type; + __le32 type; /* CEPH_ENTITY_ADDR_TYPE_* */ __le32 nonce; /* unique id for process (e.g. pid) */ struct sockaddr_storage in_addr; } __attribute__ ((packed)); +static inline bool ceph_addr_equal_no_type(const struct ceph_entity_addr *lhs, + const struct ceph_entity_addr *rhs) +{ + return !memcmp(&lhs->in_addr, &rhs->in_addr, sizeof(lhs->in_addr)) && + lhs->nonce == rhs->nonce; +} + struct ceph_entity_inst { struct ceph_entity_name name; struct ceph_entity_addr addr; @@ -160,6 +188,24 @@ struct ceph_msg_header { __le32 crc; /* header crc32c */ } __attribute__ ((packed)); +struct ceph_msg_header2 { + __le64 seq; /* message seq# for this session */ + __le64 tid; /* transaction id */ + __le16 type; /* message type */ + __le16 priority; /* priority. higher value == higher priority */ + __le16 version; /* version of message encoding */ + + __le32 data_pre_padding_len; + __le16 data_off; /* sender: include full offset; + receiver: mask against ~PAGE_MASK */ + + __le64 ack_seq; + __u8 flags; + /* oldest code we think can decode this. unknown if zero. 
*/ + __le16 compat_version; + __le16 reserved; +} __attribute__ ((packed)); + #define CEPH_MSG_PRIO_LOW 64 #define CEPH_MSG_PRIO_DEFAULT 127 #define CEPH_MSG_PRIO_HIGH 196 diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index cad9acfbc320..5553019c3f07 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -251,8 +251,8 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid) } struct ceph_osdmap *ceph_osdmap_alloc(void); -extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end); -struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, +struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end, bool msgr2); +struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2, struct ceph_osdmap *map); extern void ceph_osdmap_destroy(struct ceph_osdmap *map); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 03a5de5f99f4..e4316890661a 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -639,6 +639,12 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev, const struct clk_parent_data *parent_data, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, const struct clk_div_table *table, spinlock_t *lock); +struct clk_hw *__devm_clk_hw_register_divider(struct device *dev, + struct device_node *np, const char *name, + const char *parent_name, const struct clk_hw *parent_hw, + const struct clk_parent_data *parent_data, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, + const struct clk_div_table *table, spinlock_t *lock); struct clk *clk_register_divider_table(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, @@ -779,6 +785,27 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name, (parent_data), (flags), (reg), (shift), \ (width), (clk_divider_flags), (table), \ (lock)) +/** + * devm_clk_hw_register_divider_table - register a table based divider clock + * with the clock framework (devres variant) + * @dev: device registering this clock + * @name: name of this clock + * @parent_name: name of clock's parent + * @flags: framework-specific flags + * @reg: register address to adjust divider + * @shift: number of bits to shift the bitfield + * @width: width of the bitfield + * @clk_divider_flags: divider-specific flags for this clock + * @table: array of divider/value pairs ending with a div set to 0 + * @lock: shared register lock for this clock + */ +#define devm_clk_hw_register_divider_table(dev, name, parent_name, flags, \ + reg, shift, width, \ + clk_divider_flags, table, lock) \ + __devm_clk_hw_register_divider((dev), NULL, (name), (parent_name), \ + NULL, NULL, (flags), (reg), (shift), \ + (width), (clk_divider_flags), (table), \ + (lock)) void clk_unregister_divider(struct clk *clk); void clk_hw_unregister_divider(struct clk_hw *hw); @@ -1062,6 +1089,13 @@ struct clk_hw *clk_hw_register_composite_pdata(struct device *dev, struct clk_hw *rate_hw, const struct clk_ops *rate_ops, struct clk_hw *gate_hw, const struct clk_ops *gate_ops, unsigned long flags); +struct clk_hw *devm_clk_hw_register_composite_pdata(struct device *dev, + const char *name, const struct clk_parent_data *parent_data, + int num_parents, + struct clk_hw *mux_hw, const struct clk_ops *mux_ops, + struct clk_hw *rate_hw, const struct clk_ops *rate_ops, + struct clk_hw *gate_hw, const struct clk_ops *gate_ops, 
+ unsigned long flags); void clk_hw_unregister_composite(struct clk_hw *hw); struct clk *clk_register(struct device *dev, struct clk_hw *hw); @@ -1088,6 +1122,11 @@ static inline struct clk_hw *__clk_get_hw(struct clk *clk) return (struct clk_hw *)clk; } #endif + +struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id); +struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw, + const char *con_id); + unsigned int clk_hw_get_num_parents(const struct clk_hw *hw); struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw); struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw, diff --git a/include/linux/clk.h b/include/linux/clk.h index 7fd6a1febcf4..31ff1bf1b79f 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -110,6 +110,17 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb); int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb); /** + * devm_clk_notifier_register - register a managed rate-change notifier callback + * @dev: device for clock "consumer" + * @clk: clock whose rate we are interested in + * @nb: notifier block with callback function pointer + * + * Returns 0 on success, -EERROR otherwise + */ +int devm_clk_notifier_register(struct device *dev, struct clk *clk, + struct notifier_block *nb); + +/** * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion) * for a clock source. * @clk: clock source @@ -150,7 +161,7 @@ int clk_get_phase(struct clk *clk); int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den); /** - * clk_get_duty_cycle - return the duty cycle ratio of a clock signal + * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal * @clk: clock signal source * @scale: scaling factor to be applied to represent the ratio as an integer * @@ -186,6 +197,13 @@ static inline int clk_notifier_unregister(struct clk *clk, return -ENOTSUPP; } +static inline int devm_clk_notifier_register(struct device *dev, + struct clk *clk, + struct notifier_block *nb) +{ + return -ENOTSUPP; +} + static inline long clk_get_accuracy(struct clk *clk) { return -ENOTSUPP; diff --git a/include/linux/clk/samsung.h b/include/linux/clk/samsung.h index 79097e365f7f..38b774001712 100644 --- a/include/linux/clk/samsung.h +++ b/include/linux/clk/samsung.h @@ -10,7 +10,7 @@ struct device_node; -#ifdef CONFIG_ARCH_S3C64XX +#ifdef CONFIG_S3C64XX_COMMON_CLK void s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, unsigned long xusbxti_f, bool s3c6400, void __iomem *base); @@ -19,7 +19,7 @@ static inline void s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, unsigned long xusbxti_f, bool s3c6400, void __iomem *base) { } -#endif /* CONFIG_ARCH_S3C64XX */ +#endif /* CONFIG_S3C64XX_COMMON_CLK */ #ifdef CONFIG_S3C2410_COMMON_CLK void s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f, diff --git a/include/linux/compat.h b/include/linux/compat.h index 14d514233e1d..6e65be753603 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -442,6 +442,38 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, #endif } +#ifdef CONFIG_CPU_BIG_ENDIAN +#define unsafe_put_compat_sigset(compat, set, label) do { \ + compat_sigset_t __user *__c = compat; \ + const sigset_t *__s = set; \ + \ + switch (_NSIG_WORDS) { \ + case 4: \ + unsafe_put_user(__s->sig[3] >> 32, &__c->sig[7], label); \ + unsafe_put_user(__s->sig[3], &__c->sig[6], label); \ + fallthrough; \ + case 3: \ + unsafe_put_user(__s->sig[2] >> 32, &__c->sig[5], 
label); \ + unsafe_put_user(__s->sig[2], &__c->sig[4], label); \ + fallthrough; \ + case 2: \ + unsafe_put_user(__s->sig[1] >> 32, &__c->sig[3], label); \ + unsafe_put_user(__s->sig[1], &__c->sig[2], label); \ + fallthrough; \ + case 1: \ + unsafe_put_user(__s->sig[0] >> 32, &__c->sig[1], label); \ + unsafe_put_user(__s->sig[0], &__c->sig[0], label); \ + } \ +} while (0) +#else +#define unsafe_put_compat_sigset(compat, set, label) do { \ + compat_sigset_t __user *__c = compat; \ + const sigset_t *__s = set; \ + \ + unsafe_copy_to_user(__c, __s, sizeof(*__c), label); \ +} while (0) +#endif + extern int compat_ptrace_request(struct task_struct *child, compat_long_t request, compat_ulong_t addr, compat_ulong_t data); @@ -505,6 +537,12 @@ asmlinkage long compat_sys_epoll_pwait(int epfd, int maxevents, int timeout, const compat_sigset_t __user *sigmask, compat_size_t sigsetsize); +asmlinkage long compat_sys_epoll_pwait2(int epfd, + struct epoll_event __user *events, + int maxevents, + const struct __kernel_timespec __user *timeout, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); /* fs/fcntl.c */ asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd, diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 74c6c0486eed..555ab0fddbef 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -13,6 +13,12 @@ /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */ #if GCC_VERSION < 40900 # error Sorry, your version of GCC is too old - please use 4.9 or newer. +#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100 +/* + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293 + * https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk + */ +# error Sorry, your version of GCC is too old - please use 5.1 or newer. #endif /* diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index b2a3f4f641a7..ea5e04e75845 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -273,6 +273,12 @@ #define __used __attribute__((__used__)) /* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result + */ +#define __must_check __attribute__((__warn_unused_result__)) + +/* * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute */ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index bbaa39e98f9f..e5dd5a4ae946 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -121,12 +121,6 @@ struct ftrace_likely_data { unsigned long constant; }; -#ifdef CONFIG_ENABLE_MUST_CHECK -#define __must_check __attribute__((__warn_unused_result__)) -#else -#define __must_check -#endif - #if defined(CC_USING_HOTPATCH) #define notrace __attribute__((hotpatch(0, 0))) #elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY) diff --git a/include/linux/connector.h b/include/linux/connector.h index cb732643471b..8ea860efea37 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -64,14 +64,14 @@ struct cn_dev { * @callback: connector's callback. 
* parameters are %cn_msg and the sender's credentials */ -int cn_add_callback(struct cb_id *id, const char *name, +int cn_add_callback(const struct cb_id *id, const char *name, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); /** * cn_del_callback() - Unregisters new callback with connector core. * * @id: unique connector's user identifier. */ -void cn_del_callback(struct cb_id *id); +void cn_del_callback(const struct cb_id *id); /** @@ -122,14 +122,14 @@ int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask); int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name, - struct cb_id *id, + const struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); -void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); +void cn_queue_del_callback(struct cn_queue_dev *dev, const struct cb_id *id); void cn_queue_release_callback(struct cn_callback_entry *); struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *); void cn_queue_free_dev(struct cn_queue_dev *dev); -int cn_cb_equal(struct cb_id *, struct cb_id *); +int cn_cb_equal(const struct cb_id *, const struct cb_id *); #endif /* __CONNECTOR_H */ diff --git a/include/linux/console.h b/include/linux/console.h index dbe78e8e2602..20874db50bc8 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -186,12 +186,9 @@ extern int braille_register_console(struct console *, int index, extern int braille_unregister_console(struct console *); #ifdef CONFIG_TTY extern void console_sysfs_notify(void); -extern void register_ttynull_console(void); #else static inline void console_sysfs_notify(void) { } -static inline void register_ttynull_console(void) -{ } #endif extern bool console_suspend_enabled; diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 584fccd4fcab..9c8b7437b6cd 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -320,6 +320,15 @@ struct cpufreq_driver { unsigned int index); unsigned int (*fast_switch)(struct cpufreq_policy *policy, unsigned int target_freq); + /* + * ->fast_switch() replacement for drivers that use an internal + * representation of performance levels and can pass hints other than + * the target performance level to the hardware. + */ + void (*adjust_perf)(unsigned int cpu, + unsigned long min_perf, + unsigned long target_perf, + unsigned long capacity); /* * Caches and returns the lowest driver-supported frequency greater than @@ -588,6 +597,11 @@ struct cpufreq_governor { /* Pass a target to the cpufreq driver */ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq); +void cpufreq_driver_adjust_perf(unsigned int cpu, + unsigned long min_perf, + unsigned long target_perf, + unsigned long capacity); +bool cpufreq_driver_has_adjust_perf(void); int cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); diff --git a/include/linux/device.h b/include/linux/device.h index 89bb8b84173e..1779f90eeb4c 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -609,6 +609,18 @@ static inline const char *dev_name(const struct device *dev) return kobject_name(&dev->kobj); } +/** + * dev_bus_name - Return a device's bus/class name, if at all possible + * @dev: struct device to get the bus/class name of + * + * Will return the name of the bus/class the device is attached to. 
If it is + * not attached to a bus/class, an empty string will be returned. + */ +static inline const char *dev_bus_name(const struct device *dev) +{ + return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : ""); +} + __printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...); #ifdef CONFIG_NUMA diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h index 29d255fdd5d6..90bd558a17f5 100644 --- a/include/linux/dm-bufio.h +++ b/include/linux/dm-bufio.h @@ -150,6 +150,7 @@ void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n); unsigned dm_bufio_get_block_size(struct dm_bufio_client *c); sector_t dm_bufio_get_device_size(struct dm_bufio_client *c); +struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c); sector_t dm_bufio_get_block_number(struct dm_buffer *b); void *dm_bufio_get_block_data(struct dm_buffer *b); void *dm_bufio_get_aux_data(struct dm_buffer *b); diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h index 583a3a1f9447..278d489e4bdd 100644 --- a/include/linux/dma-buf-map.h +++ b/include/linux/dma-buf-map.h @@ -122,7 +122,7 @@ struct dma_buf_map { /** * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory - * @vaddr: A system-memory address + * @vaddr_: A system-memory address */ #define DMA_BUF_MAP_INIT_VADDR(vaddr_) \ { \ diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index a5f89fc4d6df..70fcd0f610ea 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -226,6 +226,9 @@ struct page *dma_alloc_from_pool(struct device *dev, size_t size, bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t)); bool dma_free_from_pool(struct device *dev, void *start, size_t size); +int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start, + dma_addr_t dma_start, u64 size); + #ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H #include <asm/dma-coherence.h> #elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ @@ -314,6 +317,20 @@ static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size) void *arch_dma_set_uncached(void *addr, size_t size); void arch_dma_clear_uncached(void *addr, size_t size); +#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT +bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr); +bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle); +bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg, + int nents); +bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg, + int nents); +#else +#define arch_dma_map_page_direct(d, a) (false) +#define arch_dma_unmap_page_direct(d, a) (false) +#define arch_dma_map_sg_direct(d, s, n) (false) +#define arch_dma_unmap_sg_direct(d, s, n) (false) +#endif + #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 2aaed35b556d..2e49996a8f39 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -558,11 +558,4 @@ static inline int dma_mmap_wc(struct device *dev, #define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) #endif -/* - * Legacy interface to set up the dma offset map. Drivers really should not - * actually use it, but we have a few legacy cases left. 
- */ -int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start, - dma_addr_t dma_start, u64 size); - #endif /* _LINUX_DMA_MAPPING_H */ diff --git a/include/linux/dma/k3-event-router.h b/include/linux/dma/k3-event-router.h new file mode 100644 index 000000000000..e3f88b2f87be --- /dev/null +++ b/include/linux/dma/k3-event-router.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com + */ + +#ifndef K3_EVENT_ROUTER_ +#define K3_EVENT_ROUTER_ + +#include <linux/types.h> + +struct k3_event_route_data { + void *priv; + int (*set_event)(void *priv, u32 event); +}; + +#endif /* K3_EVENT_ROUTER_ */ diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h index 1962f75fa2d3..36e22c5a0f29 100644 --- a/include/linux/dma/k3-psil.h +++ b/include/linux/dma/k3-psil.h @@ -50,6 +50,15 @@ enum psil_endpoint_type { * @channel_tpl: Desired throughput level for the channel * @pdma_acc32: ACC32 must be enabled on the PDMA side * @pdma_burst: BURST must be enabled on the PDMA side + * @mapped_channel_id: PKTDMA thread to channel mapping for mapped channels. + * The thread must be serviced by the specified channel if + * mapped_channel_id is >= 0 in case of PKTDMA + * @flow_start: PKDMA flow range start of mapped channel. Unmapped + * channels use flow_id == chan_id + * @flow_num: PKDMA flow count of mapped channel. Unmapped channels + * use flow_id == chan_id + * @default_flow_id: PKDMA default (r)flow index of mapped channel. + * Must be within the flow range of the mapped channel. */ struct psil_endpoint_config { enum psil_endpoint_type ep_type; @@ -63,6 +72,13 @@ struct psil_endpoint_config { /* PDMA properties, valid for PSIL_EP_PDMA_* */ unsigned pdma_acc32:1; unsigned pdma_burst:1; + + /* PKDMA mapped channel */ + int mapped_channel_id; + /* PKTDMA tflow and rflow ranges for mapped channel */ + u16 flow_start; + u16 flow_num; + u16 default_flow_id; }; int psil_set_new_ep_config(struct device *dev, const char *name, diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h index 5eb34ad973a7..e443be4d3b4b 100644 --- a/include/linux/dma/k3-udma-glue.h +++ b/include/linux/dma/k3-udma-glue.h @@ -41,6 +41,12 @@ void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn); u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn); int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn); +struct device * + k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn); +void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn, + dma_addr_t *addr); +void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn, + dma_addr_t *addr); enum { K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0, @@ -130,5 +136,11 @@ int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_idx); int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_idx); +struct device * + k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn); +void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn, + dma_addr_t *addr); +void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn, + dma_addr_t *addr); #endif /* K3_UDMA_GLUE_H_ */ diff --git a/include/linux/dma/qcom-gpi-dma.h b/include/linux/dma/qcom-gpi-dma.h new file mode 100644 index 000000000000..f46dc3372f11 --- /dev/null +++ 
b/include/linux/dma/qcom-gpi-dma.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020, Linaro Limited + */ + +#ifndef QCOM_GPI_DMA_H +#define QCOM_GPI_DMA_H + +/** + * enum spi_transfer_cmd - spi transfer commands + */ +enum spi_transfer_cmd { + SPI_TX = 1, + SPI_RX, + SPI_DUPLEX, +}; + +/** + * struct gpi_spi_config - spi config for peripheral + * + * @loopback_en: spi loopback enable when set + * @clock_pol_high: clock polarity + * @data_pol_high: data polarity + * @pack_en: process tx/rx buffers as packed + * @word_len: spi word length + * @clk_div: source clock divider + * @clk_src: serial clock + * @cmd: spi cmd + * @fragmentation: keep CS assserted at end of sequence + * @cs: chip select toggle + * @set_config: set peripheral config + * @rx_len: receive length for buffer + */ +struct gpi_spi_config { + u8 set_config; + u8 loopback_en; + u8 clock_pol_high; + u8 data_pol_high; + u8 pack_en; + u8 word_len; + u8 fragmentation; + u8 cs; + u32 clk_div; + u32 clk_src; + enum spi_transfer_cmd cmd; + u32 rx_len; +}; + +enum i2c_op { + I2C_WRITE = 1, + I2C_READ, +}; + +/** + * struct gpi_i2c_config - i2c config for peripheral + * + * @pack_enable: process tx/rx buffers as packed + * @cycle_count: clock cycles to be sent + * @high_count: high period of clock + * @low_count: low period of clock + * @clk_div: source clock divider + * @addr: i2c bus address + * @stretch: stretch the clock at eot + * @set_config: set peripheral config + * @rx_len: receive length for buffer + * @op: i2c cmd + * @muli-msg: is part of multi i2c r-w msgs + */ +struct gpi_i2c_config { + u8 set_config; + u8 pack_enable; + u8 cycle_count; + u8 high_count; + u8 low_count; + u8 addr; + u8 stretch; + u16 clk_div; + u32 rx_len; + enum i2c_op op; + bool multi_msg; +}; + +#endif /* QCOM_GPI_DMA_H */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index dd357a747780..68130f5f599e 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -357,11 +357,14 @@ struct dma_chan { * @chan: driver channel device * @device: sysfs device * @dev_id: parent dma_device dev_id + * @chan_dma_dev: The channel is using custom/different dma-mapping + * compared to the parent dma_device */ struct dma_chan_dev { struct dma_chan *chan; struct device device; int dev_id; + bool chan_dma_dev; }; /** @@ -418,6 +421,9 @@ enum dma_slave_buswidth { * @slave_id: Slave requester id. Only valid for slave channels. The dma * slave peripheral will have unique id as dma requester which need to be * pass as slave config. + * @peripheral_config: peripheral configuration for programming peripheral + * for dmaengine transfer + * @peripheral_size: peripheral configuration buffer size * * This struct is passed in as configuration data to a DMA engine * in order to set up a certain channel for DMA transport at runtime. 
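The two fields documented in the hunk above, and added to struct dma_slave_config in the next hunk, give client drivers a generic way to hand a peripheral-specific setup block (for example the GPI I2C/SPI structures introduced in qcom-gpi-dma.h above) to the DMA engine through the existing dmaengine_slave_config() call. A minimal sketch, assuming a client that already holds a GPI DMA channel; the example_ function name, I2C address and clock divider are placeholders:

#include <linux/dmaengine.h>
#include <linux/dma/qcom-gpi-dma.h>

static int example_gpi_i2c_config(struct dma_chan *chan)
{
        /* Peripheral-specific block defined in the new qcom-gpi-dma.h. */
        struct gpi_i2c_config i2c_cfg = {
                .set_config = 1,        /* ask GPI to program the peripheral */
                .op         = I2C_WRITE,
                .addr       = 0x50,     /* placeholder i2c bus address */
                .clk_div    = 2,        /* placeholder source clock divider */
        };
        /* The generic config now carries an opaque pointer plus its size. */
        struct dma_slave_config cfg = {
                .peripheral_config = &i2c_cfg,
                .peripheral_size   = sizeof(i2c_cfg),
        };

        /* Reaches the DMA driver through its device_config() callback. */
        return dmaengine_slave_config(chan, &cfg);
}

Keeping the pointer/size pair opaque means the dmaengine core never has to know each vendor's layout; only the matching DMA driver interprets peripheral_config.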
@@ -443,6 +449,8 @@ struct dma_slave_config { u32 dst_port_window_size; bool device_fc; unsigned int slave_id; + void *peripheral_config; + size_t peripheral_size; }; /** @@ -800,6 +808,7 @@ struct dma_filter { * by tx_status * @device_alloc_chan_resources: allocate resources and return the * number of allocated descriptors + * @device_router_config: optional callback for DMA router configuration * @device_free_chan_resources: release DMA channel's resources * @device_prep_dma_memcpy: prepares a memcpy operation * @device_prep_dma_xor: prepares a xor operation @@ -874,6 +883,7 @@ struct dma_device { enum dma_residue_granularity residue_granularity; int (*device_alloc_chan_resources)(struct dma_chan *chan); + int (*device_router_config)(struct dma_chan *chan); void (*device_free_chan_resources)(struct dma_chan *chan); struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( @@ -1611,4 +1621,13 @@ dmaengine_get_direction_text(enum dma_transfer_direction dir) return "invalid"; } } + +static inline struct device *dmaengine_get_dma_device(struct dma_chan *chan) +{ + if (chan->dev->chan_dma_dev) + return &chan->dev->device; + + return chan->device->dev; +} + #endif /* DMAENGINE_H */ diff --git a/include/linux/efi.h b/include/linux/efi.h index d7c0e73af2b9..763b816ba19c 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -817,12 +817,6 @@ static inline bool efi_enabled(int feature) static inline void efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {} -static inline bool -efi_capsule_pending(int *reset_type) -{ - return false; -} - static inline bool efi_soft_reserve_enabled(void) { return false; @@ -1038,6 +1032,7 @@ bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, size_t len); +#if IS_ENABLED(CONFIG_EFI_CAPSULE_LOADER) extern bool efi_capsule_pending(int *reset_type); extern int efi_capsule_supported(efi_guid_t guid, u32 flags, @@ -1045,6 +1040,9 @@ extern int efi_capsule_supported(efi_guid_t guid, u32 flags, extern int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages); +#else +static inline bool efi_capsule_pending(int *reset_type) { return false; } +#endif #ifdef CONFIG_EFI_RUNTIME_MAP int efi_runtime_map_init(struct kobject *); @@ -1089,7 +1087,28 @@ enum efi_secureboot_mode { efi_secureboot_mode_disabled, efi_secureboot_mode_enabled, }; -enum efi_secureboot_mode efi_get_secureboot(void); + +static inline +enum efi_secureboot_mode efi_get_secureboot_mode(efi_get_variable_t *get_var) +{ + u8 secboot, setupmode = 0; + efi_status_t status; + unsigned long size; + + size = sizeof(secboot); + status = get_var(L"SecureBoot", &EFI_GLOBAL_VARIABLE_GUID, NULL, &size, + &secboot); + if (status == EFI_NOT_FOUND) + return efi_secureboot_mode_disabled; + if (status != EFI_SUCCESS) + return efi_secureboot_mode_unknown; + + size = sizeof(setupmode); + get_var(L"SetupMode", &EFI_GLOBAL_VARIABLE_GUID, NULL, &size, &setupmode); + if (secboot == 0 || setupmode == 1) + return efi_secureboot_mode_disabled; + return efi_secureboot_mode_enabled; +} #ifdef CONFIG_RESET_ATTACK_MITIGATION void efi_enable_reset_attack_mitigation(void); diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index dc4fd8a6644d..fa0a524baed0 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -41,6 +41,7 @@ struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); int eventfd_ctx_remove_wait_queue(struct eventfd_ctx 
*ctx, wait_queue_entry_t *wait, __u64 *cnt); +void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt); DECLARE_PER_CPU(int, eventfd_wake_count); @@ -82,6 +83,11 @@ static inline bool eventfd_signal_count(void) return false; } +static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) +{ + +} + #endif #endif /* _LINUX_EVENTFD_H */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index a5dbb57a687f..7dc2a06cf19a 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -273,7 +273,7 @@ struct f2fs_inode { __le64 i_compr_blocks; /* # of compressed blocks */ __u8 i_compress_algorithm; /* compress algorithm */ __u8 i_log_cluster_size; /* log of cluster size */ - __le16 i_padding; /* padding */ + __le16 i_compress_flag; /* compress flag */ __le32 i_extra_end[0]; /* for attribute size calculation */ } __packed; __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ diff --git a/include/linux/firmware/imx/dsp.h b/include/linux/firmware/imx/dsp.h index 7562099c9e46..4f7895a3b73c 100644 --- a/include/linux/firmware/imx/dsp.h +++ b/include/linux/firmware/imx/dsp.h @@ -55,6 +55,9 @@ static inline void *imx_dsp_get_data(struct imx_dsp_ipc *ipc) int imx_dsp_ring_doorbell(struct imx_dsp_ipc *dsp, unsigned int chan_idx); +struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *ipc, int idx); +void imx_dsp_free_channel(struct imx_dsp_ipc *ipc, int idx); + #else static inline int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc, @@ -63,5 +66,12 @@ static inline int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc, return -ENOTSUPP; } +struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *ipc, int idx) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +void imx_dsp_free_channel(struct imx_dsp_ipc *ipc, int idx) { } + #endif #endif /* _IMX_DSP_IPC_H */ diff --git a/include/linux/firmware/imx/ipc.h b/include/linux/firmware/imx/ipc.h index 891057434858..0b4643571625 100644 --- a/include/linux/firmware/imx/ipc.h +++ b/include/linux/firmware/imx/ipc.h @@ -34,6 +34,7 @@ struct imx_sc_rpc_msg { uint8_t func; }; +#ifdef CONFIG_IMX_SCU /* * This is an function to send an RPC message over an IPC channel. * It is called by client-side SCFW API function shims. 
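Guarding the imx_scu_call_rpc()/imx_scu_get_handle() declarations with CONFIG_IMX_SCU here, together with the -ENOTSUPP stubs added in the next hunk, lets SCU consumers build on non-SCU configurations without their own Kconfig guards. A short sketch of the caller side, assuming a driver that only wants to know whether the SCU firmware interface is usable; the helper name is hypothetical:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/firmware/imx/ipc.h>

/* Hypothetical helper: compiles whether or not CONFIG_IMX_SCU is enabled. */
static bool example_scu_available(struct imx_sc_ipc **ipc)
{
        /* With CONFIG_IMX_SCU disabled this resolves to the new static
         * inline stub and returns -ENOTSUPP, so the caller can simply
         * skip SCU-backed features instead of being compiled out. */
        return imx_scu_get_handle(ipc) == 0;
}

The same pattern is repeated below for the sci.h and svc/misc.h helpers, so drivers that are still useful without SCU firmware stay buildable everywhere.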
@@ -55,4 +56,16 @@ int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp); * @return Returns an error code (0 = success, failed if < 0) */ int imx_scu_get_handle(struct imx_sc_ipc **ipc); +#else +static inline int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg, + bool have_resp) +{ + return -ENOTSUPP; +} + +static inline int imx_scu_get_handle(struct imx_sc_ipc **ipc) +{ + return -ENOTSUPP; +} +#endif #endif /* _SC_IPC_H */ diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h index 22c76571a294..5cc63fe7e84d 100644 --- a/include/linux/firmware/imx/sci.h +++ b/include/linux/firmware/imx/sci.h @@ -16,9 +16,36 @@ #include <linux/firmware/imx/svc/pm.h> #include <linux/firmware/imx/svc/rm.h> +#if IS_ENABLED(CONFIG_IMX_SCU) int imx_scu_enable_general_irq_channel(struct device *dev); int imx_scu_irq_register_notifier(struct notifier_block *nb); int imx_scu_irq_unregister_notifier(struct notifier_block *nb); int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable); int imx_scu_soc_init(struct device *dev); +#else +static inline int imx_scu_soc_init(struct device *dev) +{ + return -ENOTSUPP; +} + +static inline int imx_scu_enable_general_irq_channel(struct device *dev) +{ + return -ENOTSUPP; +} + +static inline int imx_scu_irq_register_notifier(struct notifier_block *nb) +{ + return -ENOTSUPP; +} + +static inline int imx_scu_irq_unregister_notifier(struct notifier_block *nb) +{ + return -ENOTSUPP; +} + +static inline int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable) +{ + return -ENOTSUPP; +} +#endif #endif /* _SC_SCI_H */ diff --git a/include/linux/firmware/imx/svc/misc.h b/include/linux/firmware/imx/svc/misc.h index 031dd4d3c766..760db08a67fc 100644 --- a/include/linux/firmware/imx/svc/misc.h +++ b/include/linux/firmware/imx/svc/misc.h @@ -46,6 +46,7 @@ enum imx_misc_func { * Control Functions */ +#ifdef CONFIG_IMX_SCU int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource, u8 ctrl, u32 val); @@ -54,5 +55,23 @@ int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource, int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource, bool enable, u64 phys_addr); +#else +static inline int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, + u32 resource, u8 ctrl, u32 val) +{ + return -ENOTSUPP; +} +static inline int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, + u32 resource, u8 ctrl, u32 *val) +{ + return -ENOTSUPP; +} + +static inline int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource, + bool enable, u64 phys_addr) +{ + return -ENOTSUPP; +} +#endif #endif /* _SC_MISC_API_H */ diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 41a1bab98b7e..2a0da841c942 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -13,6 +13,8 @@ #ifndef __FIRMWARE_ZYNQMP_H__ #define __FIRMWARE_ZYNQMP_H__ +#include <linux/err.h> + #define ZYNQMP_PM_VERSION_MAJOR 1 #define ZYNQMP_PM_VERSION_MINOR 0 @@ -310,7 +312,6 @@ struct zynqmp_pm_query_data { u32 arg3; }; - int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 *ret_payload); @@ -358,147 +359,181 @@ static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void) { return ERR_PTR(-ENODEV); } + static inline int zynqmp_pm_get_api_version(u32 *version) { return -ENODEV; } + static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version) { return -ENODEV; } + static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out) { return -ENODEV; } + static 
inline int zynqmp_pm_clock_enable(u32 clock_id) { return -ENODEV; } + static inline int zynqmp_pm_clock_disable(u32 clock_id) { return -ENODEV; } + static inline int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state) { return -ENODEV; } + static inline int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider) { return -ENODEV; } + static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider) { return -ENODEV; } + static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate) { return -ENODEV; } + static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate) { return -ENODEV; } + static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id) { return -ENODEV; } + static inline int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id) { return -ENODEV; } + static inline int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode) { return -ENODEV; } + static inline int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode) { return -ENODEV; } + static inline int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data) { return -ENODEV; } + static inline int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data) { return -ENODEV; } + static inline int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value) { return -ENODEV; } + static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type) { return -ENODEV; } + static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, - const enum zynqmp_pm_reset_action assert_flag) + const enum zynqmp_pm_reset_action assert_flag) { return -ENODEV; } + static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status) { return -ENODEV; } + static inline int zynqmp_pm_init_finalize(void) { return -ENODEV; } + static inline int zynqmp_pm_set_suspend_mode(u32 mode) { return -ENODEV; } + static inline int zynqmp_pm_request_node(const u32 node, const u32 capabilities, const u32 qos, const enum zynqmp_pm_request_ack ack) { return -ENODEV; } + static inline int zynqmp_pm_release_node(const u32 node) { return -ENODEV; } + static inline int zynqmp_pm_set_requirement(const u32 node, - const u32 capabilities, - const u32 qos, - const enum zynqmp_pm_request_ack ack) + const u32 capabilities, + const u32 qos, + const enum zynqmp_pm_request_ack ack) { return -ENODEV; } + static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out) { return -ENODEV; } + static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags) { return -ENODEV; } + static inline int zynqmp_pm_fpga_get_status(u32 *value) { return -ENODEV; } + static inline int zynqmp_pm_write_ggs(u32 index, u32 value) { return -ENODEV; } + static inline int zynqmp_pm_read_ggs(u32 index, u32 *value) { return -ENODEV; } + static inline int zynqmp_pm_write_pggs(u32 index, u32 value) { return -ENODEV; } + static inline int zynqmp_pm_read_pggs(u32 index, u32 *value) { return -ENODEV; } + static inline int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype) { return -ENODEV; } + static inline int zynqmp_pm_set_boot_health_status(u32 value) { return -ENODEV; diff --git a/include/linux/fs.h b/include/linux/fs.h index 59bba648147f..fd47deea7c17 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2876,8 +2876,7 @@ extern int inode_needs_sync(struct inode *inode); extern int generic_delete_inode(struct inode *inode); static inline int generic_drop_inode(struct inode *inode) { - return !inode->i_nlink || inode_unhashed(inode) || - (inode->i_state & I_DONTCACHE); + return !inode->i_nlink || inode_unhashed(inode); } extern void 
d_mark_dontcache(struct inode *inode); @@ -3198,6 +3197,7 @@ extern int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str); extern int generic_ci_d_compare(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); #endif +extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry); #ifdef CONFIG_MIGRATION extern int buffer_migrate_page(struct address_space *, diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index d23156d1ac94..2ea1387bb497 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -757,8 +757,11 @@ static inline int fscrypt_prepare_rename(struct inode *old_dir, * key is available, then the lookup is assumed to be by plaintext name; * otherwise, it is assumed to be by no-key name. * - * This also installs a custom ->d_revalidate() method which will invalidate the - * dentry if it was created without the key and the key is later added. + * This will set DCACHE_NOKEY_NAME on the dentry if the lookup is by no-key + * name. In this case the filesystem must assign the dentry a dentry_operations + * which contains fscrypt_d_revalidate (or contains a d_revalidate method that + * calls fscrypt_d_revalidate), so that the dentry will be invalidated if the + * directory's encryption key is later added. * * Return: 0 on success; -ENOENT if the directory's key is unavailable but the * filename isn't a valid no-key name, so a negative dentry should be created; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index f8529a3a2923..a2e42d3cd87c 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -137,6 +137,7 @@ struct mem_cgroup; * if @file_name is not NULL, this is the directory that * @file_name is relative to. * @file_name: optional file name associated with event + * @cookie: inotify rename cookie * * free_group_priv - called when a group refcnt hits 0 to clean up the private union * freeing_mark - called when a mark is being destroyed for some reason. 
The group @@ -151,7 +152,7 @@ struct fsnotify_ops { struct fsnotify_iter_info *iter_info); int (*handle_inode_event)(struct fsnotify_mark *mark, u32 mask, struct inode *inode, struct inode *dir, - const struct qstr *file_name); + const struct qstr *file_name, u32 cookie); void (*free_group_priv)(struct fsnotify_group *group); void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); void (*free_event)(struct fsnotify_event *event); @@ -277,7 +278,7 @@ static inline const struct path *fsnotify_data_path(const void *data, enum fsnotify_obj_type { FSNOTIFY_OBJ_TYPE_INODE, - FSNOTIFY_OBJ_TYPE_CHILD, + FSNOTIFY_OBJ_TYPE_PARENT, FSNOTIFY_OBJ_TYPE_VFSMOUNT, FSNOTIFY_OBJ_TYPE_SB, FSNOTIFY_OBJ_TYPE_COUNT, @@ -285,7 +286,7 @@ enum fsnotify_obj_type { }; #define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE) -#define FSNOTIFY_OBJ_TYPE_CHILD_FL (1U << FSNOTIFY_OBJ_TYPE_CHILD) +#define FSNOTIFY_OBJ_TYPE_PARENT_FL (1U << FSNOTIFY_OBJ_TYPE_PARENT) #define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT) #define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB) #define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1) @@ -330,7 +331,7 @@ static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \ } FSNOTIFY_ITER_FUNCS(inode, INODE) -FSNOTIFY_ITER_FUNCS(child, CHILD) +FSNOTIFY_ITER_FUNCS(parent, PARENT) FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT) FSNOTIFY_ITER_FUNCS(sb, SB) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 1bd3a0356ae4..9a8ce28e4485 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -7,6 +7,7 @@ #ifndef _LINUX_FTRACE_H #define _LINUX_FTRACE_H +#include <linux/trace_recursion.h> #include <linux/trace_clock.h> #include <linux/kallsyms.h> #include <linux/linkage.h> @@ -89,15 +90,39 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, struct ftrace_ops; +#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS + +struct ftrace_regs { + struct pt_regs regs; +}; +#define arch_ftrace_get_regs(fregs) (&(fregs)->regs) + +/* + * ftrace_instruction_pointer_set() is to be defined by the architecture + * if to allow setting of the instruction pointer from the ftrace_regs + * when HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports + * live kernel patching. + */ +#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0) +#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ + +static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs) +{ + if (!fregs) + return NULL; + + return arch_ftrace_get_regs(fregs); +} + typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops *op, struct pt_regs *regs); + struct ftrace_ops *op, struct ftrace_regs *fregs); ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); /* * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are * set in the flags member. - * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and + * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and * IPMODIFY are a kind of attribute flags which can be set only before * registering the ftrace_ops, and can not be modified while registered. * Changing those attribute flags after registering ftrace_ops will @@ -120,10 +145,10 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); * passing regs to the handler. * Note, if this flag is set, the SAVE_REGS flag will automatically * get set upon registering the ftrace_ops, if the arch supports it. 
- * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure - * that the call back has its own recursion protection. If it does - * not set this, then the ftrace infrastructure will add recursion - * protection for the caller. + * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure + * that the call back needs recursion protection. If it does + * not set this, then the ftrace infrastructure will assume + * that the callback can handle recursion on its own. * STUB - The ftrace_ops is just a place holder. * INITIALIZED - The ftrace_ops has already been initialized (first use time * register_ftrace_function() is called, it will initialized the ops) @@ -155,7 +180,7 @@ enum { FTRACE_OPS_FL_DYNAMIC = BIT(1), FTRACE_OPS_FL_SAVE_REGS = BIT(2), FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = BIT(3), - FTRACE_OPS_FL_RECURSION_SAFE = BIT(4), + FTRACE_OPS_FL_RECURSION = BIT(4), FTRACE_OPS_FL_STUB = BIT(5), FTRACE_OPS_FL_INITIALIZED = BIT(6), FTRACE_OPS_FL_DELETED = BIT(7), @@ -258,7 +283,7 @@ int register_ftrace_function(struct ftrace_ops *ops); int unregister_ftrace_function(struct ftrace_ops *ops); extern void ftrace_stub(unsigned long a0, unsigned long a1, - struct ftrace_ops *op, struct pt_regs *regs); + struct ftrace_ops *op, struct ftrace_regs *fregs); #else /* !CONFIG_FUNCTION_TRACER */ /* @@ -863,11 +888,11 @@ struct ftrace_graph_ent { */ struct ftrace_graph_ret { unsigned long func; /* Current function */ + int depth; /* Number of functions that overran the depth limit for current task */ - unsigned long overrun; + unsigned int overrun; unsigned long long calltime; unsigned long long rettime; - int depth; } __packed; /* Type of the callback handlers for tracing function graph*/ diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 901aab89d025..ef49307611d2 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -158,7 +158,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap); int gpiod_set_config(struct gpio_desc *desc, unsigned long config); -int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); +int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce); int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); void gpiod_toggle_active_low(struct gpio_desc *desc); @@ -481,7 +481,7 @@ static inline int gpiod_set_config(struct gpio_desc *desc, unsigned long config) return -ENOSYS; } -static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) +static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce) { /* GPIO can never have been requested */ WARN_ON(desc); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 4a7e295c3640..286de0520574 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -621,83 +621,12 @@ int gpiochip_irq_domain_activate(struct irq_domain *domain, void gpiochip_irq_domain_deactivate(struct irq_domain *domain, struct irq_data *data); -void gpiochip_set_nested_irqchip(struct gpio_chip *gc, - struct irq_chip *irqchip, - unsigned int parent_irq); - -int gpiochip_irqchip_add_key(struct gpio_chip *gc, - struct irq_chip *irqchip, - unsigned int first_irq, - irq_flow_handler_t handler, - unsigned int type, - bool threaded, - struct lock_class_key *lock_key, - struct lock_class_key *request_key); - bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc, unsigned int offset); int gpiochip_irqchip_add_domain(struct 
gpio_chip *gc, struct irq_domain *domain); -#ifdef CONFIG_LOCKDEP - -/* - * Lockdep requires that each irqchip instance be created with a - * unique key so as to avoid unnecessary warnings. This upfront - * boilerplate static inlines provides such a key for each - * unique instance. - */ -static inline int gpiochip_irqchip_add(struct gpio_chip *gc, - struct irq_chip *irqchip, - unsigned int first_irq, - irq_flow_handler_t handler, - unsigned int type) -{ - static struct lock_class_key lock_key; - static struct lock_class_key request_key; - - return gpiochip_irqchip_add_key(gc, irqchip, first_irq, - handler, type, false, - &lock_key, &request_key); -} - -static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc, - struct irq_chip *irqchip, - unsigned int first_irq, - irq_flow_handler_t handler, - unsigned int type) -{ - - static struct lock_class_key lock_key; - static struct lock_class_key request_key; - - return gpiochip_irqchip_add_key(gc, irqchip, first_irq, - handler, type, true, - &lock_key, &request_key); -} -#else /* ! CONFIG_LOCKDEP */ -static inline int gpiochip_irqchip_add(struct gpio_chip *gc, - struct irq_chip *irqchip, - unsigned int first_irq, - irq_flow_handler_t handler, - unsigned int type) -{ - return gpiochip_irqchip_add_key(gc, irqchip, first_irq, - handler, type, false, NULL, NULL); -} - -static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc, - struct irq_chip *irqchip, - unsigned int first_irq, - irq_flow_handler_t handler, - unsigned int type) -{ - return gpiochip_irqchip_add_key(gc, irqchip, first_irq, - handler, type, true, NULL, NULL); -} -#endif /* CONFIG_LOCKDEP */ - int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset); void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset); int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset, diff --git a/include/linux/init.h b/include/linux/init.h index 7b53cb3092ee..e668832ef66a 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -255,7 +255,7 @@ struct obs_kernel_param { __aligned(1) = str; \ static struct obs_kernel_param __setup_##unique_id \ __used __section(".init.setup") \ - __attribute__((aligned((sizeof(long))))) \ + __aligned(__alignof__(struct obs_kernel_param)) \ = { __setup_str_##unique_id, fn, early } #define __setup(str, fn) \ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index d956987ed032..09c6a0bf3892 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -533,11 +533,10 @@ struct dmar_domain { /* Domain ids per IOMMU. 
Use u16 since * domain ids are 16 bit wide according * to VT-d spec, section 9.3 */ - unsigned int auxd_refcnt; /* Refcount of auxiliary attaching */ bool has_iotlb_device; struct list_head devices; /* all devices' list */ - struct list_head auxd; /* link to device's auxiliary list */ + struct list_head subdevices; /* all subdevices' list */ struct iova_domain iovad; /* iova's that belong to this domain */ struct dma_pte *pgd; /* virtual address */ @@ -610,14 +609,21 @@ struct intel_iommu { struct dmar_drhd_unit *drhd; }; +/* Per subdevice private data */ +struct subdev_domain_info { + struct list_head link_phys; /* link to phys device siblings */ + struct list_head link_domain; /* link to domain siblings */ + struct device *pdev; /* physical device derived from */ + struct dmar_domain *domain; /* aux-domain */ + int users; /* user count */ +}; + /* PCI domain-device relationship */ struct device_domain_info { struct list_head link; /* link to domain siblings */ struct list_head global; /* link to global list */ struct list_head table; /* link to pasid table */ - struct list_head auxiliary_domains; /* auxiliary domains - * attached to this device - */ + struct list_head subdevices; /* subdevices sibling */ u32 segment; /* PCI segment number */ u8 bus; /* PCI bus number */ u8 devfn; /* PCI devfn number */ @@ -758,6 +764,7 @@ struct intel_svm_dev { struct list_head list; struct rcu_head rcu; struct device *dev; + struct intel_iommu *iommu; struct svm_dev_ops *ops; struct iommu_sva sva; u32 pasid; @@ -771,7 +778,6 @@ struct intel_svm { struct mmu_notifier notifier; struct mm_struct *mm; - struct intel_iommu *iommu; unsigned int flags; u32 pasid; int gpasid; /* In case that guest PASID is different from host PASID */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 870b3251e174..bb8ff9083e7d 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -232,6 +232,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); # define local_irq_enable_in_hardirq() local_irq_enable() #endif +bool irq_has_action(unsigned int irq); extern void disable_irq_nosync(unsigned int irq); extern bool disable_hardirq(unsigned int irq); extern void disable_irq(unsigned int irq); diff --git a/include/linux/irq.h b/include/linux/irq.h index c332871d59da..4aeb1c4c7e07 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -906,6 +906,13 @@ struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) } #endif +static inline struct cpumask *irq_get_effective_affinity_mask(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + + return d ? 
irq_data_get_effective_affinity_mask(d) : NULL; +} + unsigned int arch_dynirq_lower_bound(unsigned int from); int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 6976b8331b60..943c3411ca10 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -39,6 +39,8 @@ struct its_vpe { irq_hw_number_t vpe_db_lpi; /* VPE resident */ bool resident; + /* VPT parse complete */ + bool ready; union { /* GICv4.0 implementations */ struct { @@ -104,6 +106,7 @@ enum its_vcpu_info_cmd_type { PROP_UPDATE_AND_INV_VLPI, SCHEDULE_VPE, DESCHEDULE_VPE, + COMMIT_VPE, INVALL_VPE, PROP_UPDATE_VSGI, }; @@ -129,6 +132,7 @@ int its_alloc_vcpu_irqs(struct its_vm *vm); void its_free_vcpu_irqs(struct its_vm *vm); int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en); int its_make_vpe_non_resident(struct its_vpe *vpe, bool db); +int its_commit_vpe(struct its_vpe *vpe); int its_invall_vpe(struct its_vpe *vpe); int its_map_vlpi(int irq, struct its_vlpi_map *map); int its_get_vlpi(int irq, struct its_vlpi_map *map); diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 5745491303e0..891b323266df 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -113,6 +113,12 @@ static inline void irq_unlock_sparse(void) { } extern struct irq_desc irq_desc[NR_IRQS]; #endif +static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc, + unsigned int cpu) +{ + return desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; +} + static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) { return container_of(data->common, struct irq_desc, irq_common_data); @@ -179,12 +185,7 @@ int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, /* Test to see if a driver has successfully requested an irq */ static inline int irq_desc_has_action(struct irq_desc *desc) { - return desc->action != NULL; -} - -static inline int irq_has_action(unsigned int irq) -{ - return irq_desc_has_action(irq_to_desc(irq)); + return desc && desc->action != NULL; } /** @@ -228,40 +229,31 @@ irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip, data->chip = chip; } +bool irq_check_status_bit(unsigned int irq, unsigned int bitmask); + static inline bool irq_balancing_disabled(unsigned int irq) { - struct irq_desc *desc; - - desc = irq_to_desc(irq); - return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; + return irq_check_status_bit(irq, IRQ_NO_BALANCING_MASK); } static inline bool irq_is_percpu(unsigned int irq) { - struct irq_desc *desc; - - desc = irq_to_desc(irq); - return desc->status_use_accessors & IRQ_PER_CPU; + return irq_check_status_bit(irq, IRQ_PER_CPU); } static inline bool irq_is_percpu_devid(unsigned int irq) { - struct irq_desc *desc; - - desc = irq_to_desc(irq); - return desc->status_use_accessors & IRQ_PER_CPU_DEVID; + return irq_check_status_bit(irq, IRQ_PER_CPU_DEVID); } +void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, + struct lock_class_key *request_class); static inline void irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, struct lock_class_key *request_class) { - struct irq_desc *desc = irq_to_desc(irq); - - if (desc) { - lockdep_set_class(&desc->lock, lock_class); - lockdep_set_class(&desc->request_mutex, request_class); - } + if (IS_ENABLED(CONFIG_LOCKDEP)) + __irq_set_lockdep_class(irq, lock_class, request_class); } #endif diff 
--git a/include/linux/jbd2.h b/include/linux/jbd2.h index 578ff196b3ce..99d3cd051ac3 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -68,7 +68,7 @@ extern void *jbd2_alloc(size_t size, gfp_t flags); extern void jbd2_free(void *ptr, size_t size); #define JBD2_MIN_JOURNAL_BLOCKS 1024 -#define JBD2_MIN_FC_BLOCKS 256 +#define JBD2_DEFAULT_FAST_COMMIT_BLOCKS 256 #ifdef __KERNEL__ @@ -538,6 +538,7 @@ struct transaction_chp_stats_s { * The transaction keeps track of all of the buffers modified by a * running transaction, and all of the buffers committed but not yet * flushed to home for finished transactions. + * (Locking Documentation improved by LockDoc) */ /* @@ -658,12 +659,12 @@ struct transaction_s unsigned long t_start; /* - * When commit was requested + * When commit was requested [j_state_lock] */ unsigned long t_requested; /* - * Checkpointing stats [j_checkpoint_sem] + * Checkpointing stats [j_list_lock] */ struct transaction_chp_stats_s t_chp_stats; @@ -1691,6 +1692,13 @@ static inline int jbd2_journal_has_csum_v2or3(journal_t *journal) return journal->j_chksum_driver != NULL; } +static inline int jbd2_journal_get_num_fc_blks(journal_superblock_t *jsb) +{ + int num_fc_blocks = be32_to_cpu(jsb->s_num_fc_blks); + + return num_fc_blocks ? num_fc_blocks : JBD2_DEFAULT_FAST_COMMIT_BLOCKS; +} + /* * Return number of free blocks in the log. Must be called under j_state_lock. */ diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h index ac6aba632f2d..ca5e89fb10d3 100644 --- a/include/linux/kasan-checks.h +++ b/include/linux/kasan-checks.h @@ -9,7 +9,7 @@ * even in compilation units that selectively disable KASAN, but must use KASAN * to validate access to an address. Never use these in header files! */ -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) bool __kasan_check_read(const volatile void *p, unsigned int size); bool __kasan_check_write(const volatile void *p, unsigned int size); #else diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 30d343b4a40a..fe1ae73ff8b5 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -2,6 +2,7 @@ #ifndef _LINUX_KASAN_H #define _LINUX_KASAN_H +#include <linux/static_key.h> #include <linux/types.h> struct kmem_cache; @@ -11,7 +12,7 @@ struct task_struct; #ifdef CONFIG_KASAN -#include <linux/pgtable.h> +#include <linux/linkage.h> #include <asm/kasan.h> /* kasan_data struct is used in KUnit tests for KASAN expected failures */ @@ -20,8 +21,26 @@ struct kunit_kasan_expectation { bool report_found; }; +#endif + +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) + +#include <linux/pgtable.h> + +/* Software KASAN implementations use shadow memory. 
*/ + +#ifdef CONFIG_KASAN_SW_TAGS +#define KASAN_SHADOW_INIT 0xFF +#else +#define KASAN_SHADOW_INIT 0 +#endif + +#ifndef PTE_HWTABLE_PTRS +#define PTE_HWTABLE_PTRS 0 +#endif + extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; -extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE]; +extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]; extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD]; extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD]; extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D]; @@ -35,88 +54,219 @@ static inline void *kasan_mem_to_shadow(const void *addr) + KASAN_SHADOW_OFFSET; } +int kasan_add_zero_shadow(void *start, unsigned long size); +void kasan_remove_zero_shadow(void *start, unsigned long size); + /* Enable reporting bugs after kasan_disable_current() */ extern void kasan_enable_current(void); /* Disable reporting bugs for current task */ extern void kasan_disable_current(void); -void kasan_unpoison_shadow(const void *address, size_t size); - -void kasan_unpoison_task_stack(struct task_struct *task); +#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ -void kasan_alloc_pages(struct page *page, unsigned int order); -void kasan_free_pages(struct page *page, unsigned int order); - -void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, - slab_flags_t *flags); +static inline int kasan_add_zero_shadow(void *start, unsigned long size) +{ + return 0; +} +static inline void kasan_remove_zero_shadow(void *start, + unsigned long size) +{} -void kasan_poison_slab(struct page *page); -void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); -void kasan_poison_object_data(struct kmem_cache *cache, void *object); -void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, - const void *object); +static inline void kasan_enable_current(void) {} +static inline void kasan_disable_current(void) {} -void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, - gfp_t flags); -void kasan_kfree_large(void *ptr, unsigned long ip); -void kasan_poison_kfree(void *ptr, unsigned long ip); -void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object, - size_t size, gfp_t flags); -void * __must_check kasan_krealloc(const void *object, size_t new_size, - gfp_t flags); +#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ -void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object, - gfp_t flags); -bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); +#ifdef CONFIG_KASAN struct kasan_cache { int alloc_meta_offset; int free_meta_offset; }; -/* - * These functions provide a special case to support backing module - * allocations with real shadow memory. With KASAN vmalloc, the special - * case is unnecessary, as the work is handled in the generic case. 
- */ -#ifndef CONFIG_KASAN_VMALLOC -int kasan_module_alloc(void *addr, size_t size); -void kasan_free_shadow(const struct vm_struct *vm); -#else -static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } -static inline void kasan_free_shadow(const struct vm_struct *vm) {} -#endif +#ifdef CONFIG_KASAN_HW_TAGS -int kasan_add_zero_shadow(void *start, unsigned long size); -void kasan_remove_zero_shadow(void *start, unsigned long size); +DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); -size_t __ksize(const void *); -static inline void kasan_unpoison_slab(const void *ptr) +static __always_inline bool kasan_enabled(void) { - kasan_unpoison_shadow(ptr, __ksize(ptr)); + return static_branch_likely(&kasan_flag_enabled); } -size_t kasan_metadata_size(struct kmem_cache *cache); -bool kasan_save_enable_multi_shot(void); -void kasan_restore_multi_shot(bool enabled); +#else /* CONFIG_KASAN_HW_TAGS */ -#else /* CONFIG_KASAN */ +static inline bool kasan_enabled(void) +{ + return true; +} -static inline void kasan_unpoison_shadow(const void *address, size_t size) {} +#endif /* CONFIG_KASAN_HW_TAGS */ -static inline void kasan_unpoison_task_stack(struct task_struct *task) {} +slab_flags_t __kasan_never_merge(void); +static __always_inline slab_flags_t kasan_never_merge(void) +{ + if (kasan_enabled()) + return __kasan_never_merge(); + return 0; +} -static inline void kasan_enable_current(void) {} -static inline void kasan_disable_current(void) {} +void __kasan_unpoison_range(const void *addr, size_t size); +static __always_inline void kasan_unpoison_range(const void *addr, size_t size) +{ + if (kasan_enabled()) + __kasan_unpoison_range(addr, size); +} + +void __kasan_alloc_pages(struct page *page, unsigned int order); +static __always_inline void kasan_alloc_pages(struct page *page, + unsigned int order) +{ + if (kasan_enabled()) + __kasan_alloc_pages(page, order); +} + +void __kasan_free_pages(struct page *page, unsigned int order); +static __always_inline void kasan_free_pages(struct page *page, + unsigned int order) +{ + if (kasan_enabled()) + __kasan_free_pages(page, order); +} + +void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size, + slab_flags_t *flags); +static __always_inline void kasan_cache_create(struct kmem_cache *cache, + unsigned int *size, slab_flags_t *flags) +{ + if (kasan_enabled()) + __kasan_cache_create(cache, size, flags); +} + +size_t __kasan_metadata_size(struct kmem_cache *cache); +static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache) +{ + if (kasan_enabled()) + return __kasan_metadata_size(cache); + return 0; +} + +void __kasan_poison_slab(struct page *page); +static __always_inline void kasan_poison_slab(struct page *page) +{ + if (kasan_enabled()) + __kasan_poison_slab(page); +} + +void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object); +static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache, + void *object) +{ + if (kasan_enabled()) + __kasan_unpoison_object_data(cache, object); +} + +void __kasan_poison_object_data(struct kmem_cache *cache, void *object); +static __always_inline void kasan_poison_object_data(struct kmem_cache *cache, + void *object) +{ + if (kasan_enabled()) + __kasan_poison_object_data(cache, object); +} + +void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache, + const void *object); +static __always_inline void * __must_check kasan_init_slab_obj( + struct kmem_cache *cache, const void *object) +{ + if (kasan_enabled()) + return 
__kasan_init_slab_obj(cache, object); + return (void *)object; +} + +bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); +static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object, + unsigned long ip) +{ + if (kasan_enabled()) + return __kasan_slab_free(s, object, ip); + return false; +} + +void __kasan_slab_free_mempool(void *ptr, unsigned long ip); +static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) +{ + if (kasan_enabled()) + __kasan_slab_free_mempool(ptr, ip); +} + +void * __must_check __kasan_slab_alloc(struct kmem_cache *s, + void *object, gfp_t flags); +static __always_inline void * __must_check kasan_slab_alloc( + struct kmem_cache *s, void *object, gfp_t flags) +{ + if (kasan_enabled()) + return __kasan_slab_alloc(s, object, flags); + return object; +} + +void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object, + size_t size, gfp_t flags); +static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s, + const void *object, size_t size, gfp_t flags) +{ + if (kasan_enabled()) + return __kasan_kmalloc(s, object, size, flags); + return (void *)object; +} + +void * __must_check __kasan_kmalloc_large(const void *ptr, + size_t size, gfp_t flags); +static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr, + size_t size, gfp_t flags) +{ + if (kasan_enabled()) + return __kasan_kmalloc_large(ptr, size, flags); + return (void *)ptr; +} +void * __must_check __kasan_krealloc(const void *object, + size_t new_size, gfp_t flags); +static __always_inline void * __must_check kasan_krealloc(const void *object, + size_t new_size, gfp_t flags) +{ + if (kasan_enabled()) + return __kasan_krealloc(object, new_size, flags); + return (void *)object; +} + +void __kasan_kfree_large(void *ptr, unsigned long ip); +static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip) +{ + if (kasan_enabled()) + __kasan_kfree_large(ptr, ip); +} + +bool kasan_save_enable_multi_shot(void); +void kasan_restore_multi_shot(bool enabled); + +#else /* CONFIG_KASAN */ + +static inline bool kasan_enabled(void) +{ + return false; +} +static inline slab_flags_t kasan_never_merge(void) +{ + return 0; +} +static inline void kasan_unpoison_range(const void *address, size_t size) {} static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} static inline void kasan_free_pages(struct page *page, unsigned int order) {} - static inline void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags) {} - +static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) {} @@ -127,54 +277,42 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache, { return (void *)object; } - -static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) +static inline bool kasan_slab_free(struct kmem_cache *s, void *object, + unsigned long ip) +{ + return false; +} +static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {} +static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, + gfp_t flags) { - return ptr; + return object; } -static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} -static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object, 
size_t size, gfp_t flags) { return (void *)object; } +static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) +{ + return (void *)ptr; +} static inline void *kasan_krealloc(const void *object, size_t new_size, gfp_t flags) { return (void *)object; } - -static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, - gfp_t flags) -{ - return object; -} -static inline bool kasan_slab_free(struct kmem_cache *s, void *object, - unsigned long ip) -{ - return false; -} - -static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } -static inline void kasan_free_shadow(const struct vm_struct *vm) {} - -static inline int kasan_add_zero_shadow(void *start, unsigned long size) -{ - return 0; -} -static inline void kasan_remove_zero_shadow(void *start, - unsigned long size) -{} - -static inline void kasan_unpoison_slab(const void *ptr) { } -static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } +static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} #endif /* CONFIG_KASAN */ -#ifdef CONFIG_KASAN_GENERIC +#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK +void kasan_unpoison_task_stack(struct task_struct *task); +#else +static inline void kasan_unpoison_task_stack(struct task_struct *task) {} +#endif -#define KASAN_SHADOW_INIT 0 +#ifdef CONFIG_KASAN_GENERIC void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache); @@ -188,36 +326,50 @@ static inline void kasan_record_aux_stack(void *ptr) {} #endif /* CONFIG_KASAN_GENERIC */ -#ifdef CONFIG_KASAN_SW_TAGS - -#define KASAN_SHADOW_INIT 0xFF - -void kasan_init_tags(void); +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) -void *kasan_reset_tag(const void *addr); +static inline void *kasan_reset_tag(const void *addr) +{ + return (void *)arch_kasan_reset_tag(addr); +} bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); -#else /* CONFIG_KASAN_SW_TAGS */ - -static inline void kasan_init_tags(void) { } +#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ static inline void *kasan_reset_tag(const void *addr) { return (void *)addr; } -#endif /* CONFIG_KASAN_SW_TAGS */ +#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/ + +#ifdef CONFIG_KASAN_SW_TAGS +void __init kasan_init_sw_tags(void); +#else +static inline void kasan_init_sw_tags(void) { } +#endif + +#ifdef CONFIG_KASAN_HW_TAGS +void kasan_init_hw_tags_cpu(void); +void __init kasan_init_hw_tags(void); +#else +static inline void kasan_init_hw_tags_cpu(void) { } +static inline void kasan_init_hw_tags(void) { } +#endif #ifdef CONFIG_KASAN_VMALLOC + int kasan_populate_vmalloc(unsigned long addr, unsigned long size); void kasan_poison_vmalloc(const void *start, unsigned long size); void kasan_unpoison_vmalloc(const void *start, unsigned long size); void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end); -#else + +#else /* CONFIG_KASAN_VMALLOC */ + static inline int kasan_populate_vmalloc(unsigned long start, unsigned long size) { @@ -232,7 +384,26 @@ static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end) {} -#endif + +#endif /* CONFIG_KASAN_VMALLOC */ + +#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ + !defined(CONFIG_KASAN_VMALLOC) + +/* + * These functions provide a special case to support backing module + * allocations with real 
shadow memory. With KASAN vmalloc, the special + * case is unnecessary, as the work is handled in the generic case. + */ +int kasan_module_alloc(void *addr, size_t size); +void kasan_free_shadow(const struct vm_struct *vm); + +#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ + +static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } +static inline void kasan_free_shadow(const struct vm_struct *vm) {} + +#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ #ifdef CONFIG_KASAN_INLINE void kasan_non_canonical_hook(unsigned long addr); diff --git a/include/linux/kcov.h b/include/linux/kcov.h index a10e84707d82..4e3037dc1204 100644 --- a/include/linux/kcov.h +++ b/include/linux/kcov.h @@ -52,6 +52,25 @@ static inline void kcov_remote_start_usb(u64 id) kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, id)); } +/* + * The softirq flavor of kcov_remote_*() functions is introduced as a temporary + * work around for kcov's lack of nested remote coverage sections support in + * task context. Adding support for nested sections is tracked in: + * https://bugzilla.kernel.org/show_bug.cgi?id=210337 + */ + +static inline void kcov_remote_start_usb_softirq(u64 id) +{ + if (in_serving_softirq()) + kcov_remote_start_usb(id); +} + +static inline void kcov_remote_stop_softirq(void) +{ + if (in_serving_softirq()) + kcov_remote_stop(); +} + #else static inline void kcov_task_init(struct task_struct *t) {} @@ -66,6 +85,8 @@ static inline u64 kcov_common_handle(void) } static inline void kcov_remote_start_common(u64 id) {} static inline void kcov_remote_start_usb(u64 id) {} +static inline void kcov_remote_start_usb_softirq(u64 id) {} +static inline void kcov_remote_stop_softirq(void) {} #endif /* CONFIG_KCOV */ #endif /* _LINUX_KCOV_H */ diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h index 85b5151911cf..4856706fbfeb 100644 --- a/include/linux/kdev_t.h +++ b/include/linux/kdev_t.h @@ -21,61 +21,61 @@ }) /* acceptable for old filesystems */ -static inline bool old_valid_dev(dev_t dev) +static __always_inline bool old_valid_dev(dev_t dev) { return MAJOR(dev) < 256 && MINOR(dev) < 256; } -static inline u16 old_encode_dev(dev_t dev) +static __always_inline u16 old_encode_dev(dev_t dev) { return (MAJOR(dev) << 8) | MINOR(dev); } -static inline dev_t old_decode_dev(u16 val) +static __always_inline dev_t old_decode_dev(u16 val) { return MKDEV((val >> 8) & 255, val & 255); } -static inline u32 new_encode_dev(dev_t dev) +static __always_inline u32 new_encode_dev(dev_t dev) { unsigned major = MAJOR(dev); unsigned minor = MINOR(dev); return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); } -static inline dev_t new_decode_dev(u32 dev) +static __always_inline dev_t new_decode_dev(u32 dev) { unsigned major = (dev & 0xfff00) >> 8; unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00); return MKDEV(major, minor); } -static inline u64 huge_encode_dev(dev_t dev) +static __always_inline u64 huge_encode_dev(dev_t dev) { return new_encode_dev(dev); } -static inline dev_t huge_decode_dev(u64 dev) +static __always_inline dev_t huge_decode_dev(u64 dev) { return new_decode_dev(dev); } -static inline int sysv_valid_dev(dev_t dev) +static __always_inline int sysv_valid_dev(dev_t dev) { return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18); } -static inline u32 sysv_encode_dev(dev_t dev) +static __always_inline u32 sysv_encode_dev(dev_t dev) { return MINOR(dev) | (MAJOR(dev) << 18); } -static inline unsigned sysv_major(u32 dev)
+static __always_inline unsigned sysv_major(u32 dev) { return (dev >> 18) & 0x3fff; } -static inline unsigned sysv_minor(u32 dev) +static __always_inline unsigned sysv_minor(u32 dev) { return dev & 0x3ffff; } diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 89f0745c096d..44ae1a7eb9e3 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -67,7 +67,6 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) /* * Number of interrupts per specific IRQ source, since bootup */ -extern unsigned int kstat_irqs(unsigned int irq); extern unsigned int kstat_irqs_usr(unsigned int irq); /* diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index a79404433812..b3a36b0cfc81 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -360,7 +360,7 @@ static inline void wait_for_kprobe_optimizer(void) { } #endif /* CONFIG_OPTPROBES */ #ifdef CONFIG_KPROBES_ON_FTRACE extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops *ops, struct pt_regs *regs); + struct ftrace_ops *ops, struct ftrace_regs *fregs); extern int arch_prepare_kprobe_ftrace(struct kprobe *p); #endif diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 65b81e0c494d..2484ed97e72f 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -33,6 +33,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), unsigned int cpu, const char *namefmt); +void kthread_set_per_cpu(struct task_struct *k, int cpu); +bool kthread_is_per_cpu(struct task_struct *k); + /** * kthread_run - create and wake a thread. * @threadfn: the function to run until signal_pending(current). diff --git a/include/linux/ktime.h b/include/linux/ktime.h index a12b5523cc18..73f20deb497d 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -230,6 +230,5 @@ static inline ktime_t ms_to_ktime(u64 ms) } # include <linux/timekeeping.h> -# include <linux/timekeeping32.h> #endif diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h new file mode 100644 index 000000000000..120e5e90fa1d --- /dev/null +++ b/include/linux/kvm_dirty_ring.h @@ -0,0 +1,103 @@ +#ifndef KVM_DIRTY_RING_H +#define KVM_DIRTY_RING_H + +#include <linux/kvm.h> + +/** + * kvm_dirty_ring: KVM internal dirty ring structure + * + * @dirty_index: free running counter that points to the next slot in + * dirty_ring->dirty_gfns, where a new dirty page should go + * @reset_index: free running counter that points to the next dirty page + * in dirty_ring->dirty_gfns for which dirty trap needs to + * be reenabled + * @size: size of the compact list, dirty_ring->dirty_gfns + * @soft_limit: when the number of dirty pages in the list reaches this + * limit, vcpu that owns this ring should exit to userspace + * to allow userspace to harvest all the dirty pages + * @dirty_gfns: the array to keep the dirty gfns + * @index: index of this dirty ring + */ +struct kvm_dirty_ring { + u32 dirty_index; + u32 reset_index; + u32 size; + u32 soft_limit; + struct kvm_dirty_gfn *dirty_gfns; + int index; +}; + +#if (KVM_DIRTY_LOG_PAGE_OFFSET == 0) +/* + * If KVM_DIRTY_LOG_PAGE_OFFSET not defined, kvm_dirty_ring.o should + * not be included as well, so define these nop functions for the arch. 
+ */ +static inline u32 kvm_dirty_ring_get_rsvd_entries(void) +{ + return 0; +} + +static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, + int index, u32 size) +{ + return 0; +} + +static inline struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm) +{ + return NULL; +} + +static inline int kvm_dirty_ring_reset(struct kvm *kvm, + struct kvm_dirty_ring *ring) +{ + return 0; +} + +static inline void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, + u32 slot, u64 offset) +{ +} + +static inline struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, + u32 offset) +{ + return NULL; +} + +static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring) +{ +} + +static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring) +{ + return true; +} + +#else /* KVM_DIRTY_LOG_PAGE_OFFSET == 0 */ + +u32 kvm_dirty_ring_get_rsvd_entries(void); +int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size); +struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm); + +/* + * called with kvm->slots_lock held, returns the number of + * processed pages. + */ +int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring); + +/* + * returns =0: successfully pushed + * <0: unable to push, need to wait + */ +void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset); + +/* for use in vm_operations_struct */ +struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset); + +void kvm_dirty_ring_free(struct kvm_dirty_ring *ring); +bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring); + +#endif /* KVM_DIRTY_LOG_PAGE_OFFSET == 0 */ + +#endif /* KVM_DIRTY_RING_H */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 7f2e2a09ebbd..f3b1013fb22c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -34,6 +34,7 @@ #include <linux/kvm_types.h> #include <asm/kvm_host.h> +#include <linux/kvm_dirty_ring.h> #ifndef KVM_MAX_VCPU_ID #define KVM_MAX_VCPU_ID KVM_MAX_VCPUS @@ -319,6 +320,7 @@ struct kvm_vcpu { bool preempted; bool ready; struct kvm_vcpu_arch arch; + struct kvm_dirty_ring dirty_ring; }; static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) @@ -349,6 +351,11 @@ struct kvm_memory_slot { u16 as_id; }; +static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot) +{ + return slot->flags & KVM_MEM_LOG_DIRTY_PAGES; +} + static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) { return ALIGN(memslot->npages, BITS_PER_LONG) / 8; @@ -505,6 +512,7 @@ struct kvm { struct srcu_struct irq_srcu; pid_t userspace_pid; unsigned int max_halt_poll_ns; + u32 dirty_ring_size; }; #define kvm_err(fmt, ...) 
\ @@ -792,13 +800,12 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, offset_in_page(__gpa), v); \ }) -int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); -void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn); +void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); @@ -1478,4 +1485,14 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu) } #endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */ +/* + * This defines how many reserved entries we want to keep before we + * kick the vcpu to the userspace to avoid dirty ring full. This + * value can be tuned to higher if e.g. PML is enabled on the host. + */ +#define KVM_DIRTY_RING_RSVD_ENTRIES 64 + +/* Max number of entries allowed for each kvm dirty ring */ +#define KVM_DIRTY_RING_MAX_ENTRIES 65536 + #endif diff --git a/include/linux/litex.h b/include/linux/litex.h new file mode 100644 index 000000000000..40f5be503593 --- /dev/null +++ b/include/linux/litex.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common LiteX header providing + * helper functions for accessing CSRs. + * + * Implementation of the functions is provided by + * the LiteX SoC Controller driver. + * + * Copyright (C) 2019-2020 Antmicro <www.antmicro.com> + */ + +#ifndef _LINUX_LITEX_H +#define _LINUX_LITEX_H + +#include <linux/io.h> +#include <linux/types.h> +#include <linux/compiler_types.h> + +/* + * The parameters below are true for LiteX SoCs configured for 8-bit CSR Bus, + * 32-bit aligned. + * + * Supporting other configurations will require extending the logic in this + * header and in the LiteX SoC controller driver. 
+ */ +#define LITEX_REG_SIZE 0x4 +#define LITEX_SUBREG_SIZE 0x1 +#define LITEX_SUBREG_SIZE_BIT (LITEX_SUBREG_SIZE * 8) + +#define WRITE_LITEX_SUBREGISTER(val, base_offset, subreg_id) \ + writel((u32 __force)cpu_to_le32(val), base_offset + (LITEX_REG_SIZE * subreg_id)) + +#define READ_LITEX_SUBREGISTER(base_offset, subreg_id) \ + le32_to_cpu((__le32 __force)readl(base_offset + (LITEX_REG_SIZE * subreg_id))) + +void litex_set_reg(void __iomem *reg, unsigned long reg_sz, unsigned long val); + +unsigned long litex_get_reg(void __iomem *reg, unsigned long reg_sz); + +static inline void litex_write8(void __iomem *reg, u8 val) +{ + WRITE_LITEX_SUBREGISTER(val, reg, 0); +} + +static inline void litex_write16(void __iomem *reg, u16 val) +{ + WRITE_LITEX_SUBREGISTER(val >> 8, reg, 0); + WRITE_LITEX_SUBREGISTER(val, reg, 1); +} + +static inline void litex_write32(void __iomem *reg, u32 val) +{ + WRITE_LITEX_SUBREGISTER(val >> 24, reg, 0); + WRITE_LITEX_SUBREGISTER(val >> 16, reg, 1); + WRITE_LITEX_SUBREGISTER(val >> 8, reg, 2); + WRITE_LITEX_SUBREGISTER(val, reg, 3); +} + +static inline void litex_write64(void __iomem *reg, u64 val) +{ + WRITE_LITEX_SUBREGISTER(val >> 56, reg, 0); + WRITE_LITEX_SUBREGISTER(val >> 48, reg, 1); + WRITE_LITEX_SUBREGISTER(val >> 40, reg, 2); + WRITE_LITEX_SUBREGISTER(val >> 32, reg, 3); + WRITE_LITEX_SUBREGISTER(val >> 24, reg, 4); + WRITE_LITEX_SUBREGISTER(val >> 16, reg, 5); + WRITE_LITEX_SUBREGISTER(val >> 8, reg, 6); + WRITE_LITEX_SUBREGISTER(val, reg, 7); +} + +static inline u8 litex_read8(void __iomem *reg) +{ + return READ_LITEX_SUBREGISTER(reg, 0); +} + +static inline u16 litex_read16(void __iomem *reg) +{ + return (READ_LITEX_SUBREGISTER(reg, 0) << 8) + | (READ_LITEX_SUBREGISTER(reg, 1)); +} + +static inline u32 litex_read32(void __iomem *reg) +{ + return (READ_LITEX_SUBREGISTER(reg, 0) << 24) + | (READ_LITEX_SUBREGISTER(reg, 1) << 16) + | (READ_LITEX_SUBREGISTER(reg, 2) << 8) + | (READ_LITEX_SUBREGISTER(reg, 3)); +} + +static inline u64 litex_read64(void __iomem *reg) +{ + return ((u64)READ_LITEX_SUBREGISTER(reg, 0) << 56) + | ((u64)READ_LITEX_SUBREGISTER(reg, 1) << 48) + | ((u64)READ_LITEX_SUBREGISTER(reg, 2) << 40) + | ((u64)READ_LITEX_SUBREGISTER(reg, 3) << 32) + | ((u64)READ_LITEX_SUBREGISTER(reg, 4) << 24) + | ((u64)READ_LITEX_SUBREGISTER(reg, 5) << 16) + | ((u64)READ_LITEX_SUBREGISTER(reg, 6) << 8) + | ((u64)READ_LITEX_SUBREGISTER(reg, 7)); +} + +#endif /* _LINUX_LITEX_H */ diff --git a/include/linux/mailbox/arm_mhuv2_message.h b/include/linux/mailbox/arm_mhuv2_message.h new file mode 100644 index 000000000000..821b9d96daa4 --- /dev/null +++ b/include/linux/mailbox/arm_mhuv2_message.h @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM MHUv2 Mailbox Message + * + * Copyright (C) 2020 Arm Ltd. + * Copyright (C) 2020 Linaro Ltd. 
+ */ + +#ifndef _LINUX_ARM_MHUV2_MESSAGE_H_ +#define _LINUX_ARM_MHUV2_MESSAGE_H_ + +#include <linux/types.h> + +/* Data structure for data-transfer protocol */ +struct arm_mhuv2_mbox_msg { + void *data; + size_t len; +}; + +#endif /* _LINUX_ARM_MHUV2_MESSAGE_H_ */ diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 0ce30ca78db0..9004375c462e 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -72,6 +72,9 @@ struct device *mdev_get_iommu_device(struct device *dev); * @mmap: mmap callback * @mdev: mediated device structure * @vma: vma structure + * @request: request callback to release device + * @mdev: mediated device structure + * @count: request sequence number * Parent device that support mediated device should be registered with mdev * module with mdev_parent_ops structure. **/ @@ -92,6 +95,7 @@ struct mdev_parent_ops { long (*ioctl)(struct mdev_device *mdev, unsigned int cmd, unsigned long arg); int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma); + void (*request)(struct mdev_device *mdev, unsigned int count); }; /* interface for exporting mdev supported type attributes */ diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h index 5d71e8a8500f..aca4dc037b70 100644 --- a/include/linux/mdio-bitbang.h +++ b/include/linux/mdio-bitbang.h @@ -35,6 +35,9 @@ struct mdiobb_ctrl { const struct mdiobb_ops *ops; }; +int mdiobb_read(struct mii_bus *bus, int phy, int reg); +int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val); + /* The returned bus is not yet registered with the phy layer. */ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 08ed57e02b73..eeb0b52203e9 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -620,9 +620,10 @@ mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid) /** * mem_cgroup_lruvec - get the lru list vector for a memcg & node * @memcg: memcg of the wanted lruvec + * @pgdat: pglist_data * * Returns the lru list vector holding pages for a given @memcg & - * @node combination. This can be the node lruvec, if the memory + * @pgdat combination. This can be the node lruvec, if the memory * controller is disabled. */ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, @@ -652,7 +653,21 @@ out: return lruvec; } -struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *); +/** + * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page + * @page: the page + * @pgdat: pgdat of the page + * + * This function relies on page->mem_cgroup being stable. 
+ */ +static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page, + struct pglist_data *pgdat) +{ + struct mem_cgroup *memcg = page_memcg(page); + + VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page); + return mem_cgroup_lruvec(memcg, pgdat); +} static inline bool lruvec_holds_page_lru_lock(struct page *page, struct lruvec *lruvec) @@ -913,41 +928,6 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg, local_irq_restore(flags); } -/** - * mod_memcg_page_state - update page state statistics - * @page: the page - * @idx: page state item to account - * @val: number of pages (positive or negative) - * - * The @page must be locked or the caller must use lock_page_memcg() - * to prevent double accounting when the page is concurrently being - * moved to another memcg: - * - * lock_page(page) or lock_page_memcg(page) - * if (TestClearPageState(page)) - * mod_memcg_page_state(page, state, -1); - * unlock_page(page) or unlock_page_memcg(page) - * - * Kernel pages are an exception to this, since they'll never move. - */ -static inline void __mod_memcg_page_state(struct page *page, - int idx, int val) -{ - struct mem_cgroup *memcg = page_memcg(page); - - if (memcg) - __mod_memcg_state(memcg, idx, val); -} - -static inline void mod_memcg_page_state(struct page *page, - int idx, int val) -{ - struct mem_cgroup *memcg = page_memcg(page); - - if (memcg) - mod_memcg_state(memcg, idx, val); -} - static inline unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx) { @@ -1395,18 +1375,6 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg, { } -static inline void __mod_memcg_page_state(struct page *page, - int idx, - int nr) -{ -} - -static inline void mod_memcg_page_state(struct page *page, - int idx, - int nr) -{ -} - static inline unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx) { @@ -1479,34 +1447,6 @@ static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) } #endif /* CONFIG_MEMCG */ -/* idx can be of type enum memcg_stat_item or node_stat_item */ -static inline void __inc_memcg_state(struct mem_cgroup *memcg, - int idx) -{ - __mod_memcg_state(memcg, idx, 1); -} - -/* idx can be of type enum memcg_stat_item or node_stat_item */ -static inline void __dec_memcg_state(struct mem_cgroup *memcg, - int idx) -{ - __mod_memcg_state(memcg, idx, -1); -} - -/* idx can be of type enum memcg_stat_item or node_stat_item */ -static inline void __inc_memcg_page_state(struct page *page, - int idx) -{ - __mod_memcg_page_state(page, idx, 1); -} - -/* idx can be of type enum memcg_stat_item or node_stat_item */ -static inline void __dec_memcg_page_state(struct page *page, - int idx) -{ - __mod_memcg_page_state(page, idx, -1); -} - static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx) { __mod_lruvec_kmem_state(p, idx, 1); @@ -1517,34 +1457,6 @@ static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx) __mod_lruvec_kmem_state(p, idx, -1); } -/* idx can be of type enum memcg_stat_item or node_stat_item */ -static inline void inc_memcg_state(struct mem_cgroup *memcg, - int idx) -{ - mod_memcg_state(memcg, idx, 1); -} - -/* idx can be of type enum memcg_stat_item or node_stat_item */ -static inline void dec_memcg_state(struct mem_cgroup *memcg, - int idx) -{ - mod_memcg_state(memcg, idx, -1); -} - -/* idx can be of type enum memcg_stat_item or node_stat_item */ -static inline void inc_memcg_page_state(struct page *page, - int idx) -{ - mod_memcg_page_state(page, idx, 
1); -} - -/* idx can be of type enum memcg_stat_item or node_stat_item */ -static inline void dec_memcg_page_state(struct page *page, - int idx) -{ - mod_memcg_page_state(page, idx, -1); -} - static inline struct lruvec *parent_lruvec(struct lruvec *lruvec) { struct mem_cgroup *memcg; @@ -1733,21 +1645,6 @@ static inline void memcg_kmem_uncharge_page(struct page *page, int order) __memcg_kmem_uncharge_page(page, order); } -static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, - unsigned int nr_pages) -{ - if (memcg_kmem_enabled()) - return __memcg_kmem_charge(memcg, gfp, nr_pages); - return 0; -} - -static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg, - unsigned int nr_pages) -{ - if (memcg_kmem_enabled()) - __memcg_kmem_uncharge(memcg, nr_pages); -} - /* * A helper for accessing memcg's kmem_id, used for getting * corresponding LRU lists. diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 551093b74596..15acce5ab106 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -361,6 +361,9 @@ extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, unsigned long nr_pages); +extern int arch_create_linear_mapping(int nid, u64 start, u64 size, + struct mhp_params *params); +void arch_remove_linear_mapping(u64 start, u64 size); #endif /* CONFIG_MEMORY_HOTPLUG */ #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/mfd/syscon/xlnx-vcu.h b/include/linux/mfd/syscon/xlnx-vcu.h new file mode 100644 index 000000000000..ff7bc3656f6e --- /dev/null +++ b/include/linux/mfd/syscon/xlnx-vcu.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Pengutronix, Michael Tretter <kernel@pengutronix.de> + */ + +#ifndef __XLNX_VCU_H +#define __XLNX_VCU_H + +#define VCU_ECODER_ENABLE 0x00 +#define VCU_DECODER_ENABLE 0x04 +#define VCU_MEMORY_DEPTH 0x08 +#define VCU_ENC_COLOR_DEPTH 0x0c +#define VCU_ENC_VERTICAL_RANGE 0x10 +#define VCU_ENC_FRAME_SIZE_X 0x14 +#define VCU_ENC_FRAME_SIZE_Y 0x18 +#define VCU_ENC_COLOR_FORMAT 0x1c +#define VCU_ENC_FPS 0x20 +#define VCU_MCU_CLK 0x24 +#define VCU_CORE_CLK 0x28 +#define VCU_PLL_BYPASS 0x2c +#define VCU_ENC_CLK 0x30 +#define VCU_PLL_CLK 0x34 +#define VCU_ENC_VIDEO_STANDARD 0x38 +#define VCU_STATUS 0x3c +#define VCU_AXI_ENC_CLK 0x40 +#define VCU_AXI_DEC_CLK 0x44 +#define VCU_AXI_MCU_CLK 0x48 +#define VCU_DEC_VIDEO_STANDARD 0x4c +#define VCU_DEC_FRAME_SIZE_X 0x50 +#define VCU_DEC_FRAME_SIZE_Y 0x54 +#define VCU_DEC_FPS 0x58 +#define VCU_BUFFER_B_FRAME 0x5c +#define VCU_WPP_EN 0x60 +#define VCU_PLL_CLK_DEC 0x64 +#define VCU_NUM_CORE 0x6c +#define VCU_GASKET_INIT 0x74 +#define VCU_GASKET_VALUE 0x03 + +#endif /* __XLNX_VCU_H */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 0d6e287d614f..442c0160caab 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1280,7 +1280,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 ece_support[0x1]; u8 reserved_at_a4[0x7]; u8 log_max_srq[0x5]; - u8 reserved_at_b0[0x2]; + u8 reserved_at_b0[0x1]; + u8 uplink_follow[0x1]; u8 ts_cqe_to_dest_cqn[0x1]; u8 reserved_at_b3[0xd]; @@ -10711,9 +10712,9 @@ struct mlx5_ifc_affiliated_event_header_bits { }; enum { - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc), - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT(0x13), - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT(0x20), + 
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT_ULL(0xc), + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT_ULL(0x13), + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT_ULL(0x20), }; enum { diff --git a/include/linux/mm.h b/include/linux/mm.h index 855161080f18..ecdf8a8cd6ae 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -31,6 +31,7 @@ #include <linux/sizes.h> #include <linux/sched.h> #include <linux/pgtable.h> +#include <linux/kasan.h> struct mempolicy; struct anon_vma; @@ -215,6 +216,13 @@ int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *, loff_t *); int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, loff_t *); +/* + * Any attempt to mark this function as static leads to build failure + * when CONFIG_DEBUG_INFO_BTF is enabled because __add_to_page_cache_locked() + * is referred to by BPF code. This must be visible for error injection. + */ +int __add_to_page_cache_locked(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp, void **shadowp); #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) @@ -1421,23 +1429,31 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) } #endif /* CONFIG_NUMA_BALANCING */ -#ifdef CONFIG_KASAN_SW_TAGS +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) + static inline u8 page_kasan_tag(const struct page *page) { - return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; + if (kasan_enabled()) + return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; + return 0xff; } static inline void page_kasan_tag_set(struct page *page, u8 tag) { - page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); - page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; + if (kasan_enabled()) { + page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); + page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; + } } static inline void page_kasan_tag_reset(struct page *page) { - page_kasan_tag_set(page, 0xff); + if (kasan_enabled()) + page_kasan_tag_set(page, 0xff); } -#else + +#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ + static inline u8 page_kasan_tag(const struct page *page) { return 0xff; @@ -1445,7 +1461,8 @@ static inline u8 page_kasan_tag(const struct page *page) static inline void page_kasan_tag_set(struct page *page, u8 tag) { } static inline void page_kasan_tag_reset(struct page *page) { } -#endif + +#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ static inline struct zone *page_zone(const struct page *page) { @@ -2422,8 +2439,9 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn); #endif extern void set_dma_reserve(unsigned long new_dma_reserve); -extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, - enum meminit_context, struct vmem_altmap *, int migratetype); +extern void memmap_init_zone(unsigned long, int, unsigned long, + unsigned long, unsigned long, enum meminit_context, + struct vmem_altmap *, int migratetype); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); @@ -2702,6 +2720,8 @@ static inline void vma_set_page_prot(struct vm_area_struct *vma) } #endif +void vma_set_file(struct vm_area_struct *vma, struct file *file); + #ifdef CONFIG_NUMA_BALANCING unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long start, unsigned long end); diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 2ad72d2c8cc5..5d0767cb424a 100644 --- a/include/linux/mmdebug.h +++ 
b/include/linux/mmdebug.h @@ -37,6 +37,18 @@ void dump_mm(const struct mm_struct *mm); BUG(); \ } \ } while (0) +#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \ + static bool __section(".data.once") __warned; \ + int __ret_warn_once = !!(cond); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\ + __warned = true; \ + WARN_ON(1); \ + } \ + unlikely(__ret_warn_once); \ +}) + #define VM_WARN_ON(cond) (void)WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) #define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format) @@ -48,6 +60,7 @@ void dump_mm(const struct mm_struct *mm); #define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond) #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond) #endif diff --git a/include/linux/module.h b/include/linux/module.h index c4e7a887f469..7a0bcb5b1ffc 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -66,7 +66,7 @@ struct module_version_attribute { struct module_attribute mattr; const char *module_name; const char *version; -} __attribute__ ((__aligned__(sizeof(void *)))); +}; extern ssize_t __modver_version_show(struct module_attribute *, struct module_kobject *, char *); @@ -266,20 +266,20 @@ extern typeof(name) __mod_##type##__##name##_device_table \ #else #define MODULE_VERSION(_version) \ MODULE_INFO(version, _version); \ - static struct module_version_attribute ___modver_attr = { \ - .mattr = { \ - .attr = { \ - .name = "version", \ - .mode = S_IRUGO, \ + static struct module_version_attribute __modver_attr \ + __used __section("__modver") \ + __aligned(__alignof__(struct module_version_attribute)) \ + = { \ + .mattr = { \ + .attr = { \ + .name = "version", \ + .mode = S_IRUGO, \ + }, \ + .show = __modver_version_show, \ }, \ - .show = __modver_version_show, \ - }, \ - .module_name = KBUILD_MODNAME, \ - .version = _version, \ - }; \ - static const struct module_version_attribute \ - __used __section("__modver") \ - * __moduleparam_const __modver_attr = &___modver_attr + .module_name = KBUILD_MODNAME, \ + .version = _version, \ + } #endif /* Optional firmware file (or files) needed by the module diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index 4fa67a8b2265..9e09d11ffe5b 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -96,7 +96,8 @@ void module_arch_cleanup(struct module *mod); /* Any cleanup before freeing mod->module_init */ void module_arch_freeing_init(struct module *mod); -#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC) +#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ + !defined(CONFIG_KASAN_VMALLOC) #include <linux/kasan.h> #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) #else diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 6388eb9734a5..eed280fae433 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -21,12 +21,12 @@ #define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long)) #define __MODULE_INFO(tag, name, info) \ -static const char __UNIQUE_ID(name)[] \ - __used __section(".modinfo") __attribute__((unused, aligned(1))) \ - = __MODULE_INFO_PREFIX __stringify(tag) "=" info + static const char __UNIQUE_ID(name)[] \ + __used 
__section(".modinfo") __aligned(1) \ + = __MODULE_INFO_PREFIX __stringify(tag) "=" info #define __MODULE_PARM_TYPE(name, _type) \ - __MODULE_INFO(parmtype, name##type, #name ":" _type) + __MODULE_INFO(parmtype, name##type, #name ":" _type) /* One for each parameter, describing how to use it. Some files do multiple of these per line, so can't just use MODULE_INFO. */ @@ -288,8 +288,8 @@ struct kparam_array /* Default value instead of permissions? */ \ static const char __param_str_##name[] = prefix #name; \ static struct kernel_param __moduleparam_const __param_##name \ - __used \ - __section("__param") __attribute__ ((unused, aligned(sizeof(void *)))) \ + __used __section("__param") \ + __aligned(__alignof__(struct kernel_param)) \ = { __param_str_##name, THIS_MODULE, ops, \ VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7bf167993c05..259be67644e3 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4547,6 +4547,7 @@ void __dev_set_rx_mode(struct net_device *dev); int dev_set_promiscuity(struct net_device *dev, int inc); int dev_set_allmulti(struct net_device *dev, int inc); void netdev_state_change(struct net_device *dev); +void __netdev_notify_peers(struct net_device *dev); void netdev_notify_peers(struct net_device *dev); void netdev_features_change(struct net_device *dev); /* Load a device via the kmod */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index a2c6455ea3fa..681ed98e4ba8 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -46,6 +46,11 @@ #define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) /* + * Size of the NFS directory verifier + */ +#define NFS_DIR_VERIFIER_SIZE 2 + +/* * NFSv3/v4 Access mode cache entry */ struct nfs_access_entry { @@ -88,8 +93,8 @@ struct nfs_open_context { struct nfs_open_dir_context { struct list_head list; - const struct cred *cred; unsigned long attr_gencount; + __be32 verf[NFS_DIR_VERIFIER_SIZE]; __u64 dir_cookie; __u64 dup_cookie; signed char duped; @@ -157,7 +162,7 @@ struct nfs_inode { * This is the cookie verifier used for NFSv3 readdir * operations */ - __be32 cookieverf[2]; + __be32 cookieverf[NFS_DIR_VERIFIER_SIZE]; atomic_long_t nrequests; struct nfs_mds_commit_info commit_info; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index d63cb862d58e..3327239fa2f9 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -750,6 +750,20 @@ struct nfs_entry { struct nfs_server * server; }; +struct nfs_readdir_arg { + struct dentry *dentry; + const struct cred *cred; + __be32 *verf; + u64 cookie; + struct page **pages; + unsigned int page_len; + bool plus; +}; + +struct nfs_readdir_res { + __be32 *verf; +}; + /* * The following types are for NFSv2 only. 
*/ @@ -1744,8 +1758,7 @@ struct nfs_rpc_ops { unsigned int, struct iattr *); int (*mkdir) (struct inode *, struct dentry *, struct iattr *); int (*rmdir) (struct inode *, const struct qstr *); - int (*readdir) (struct dentry *, const struct cred *, - u64, struct page **, unsigned int, bool); + int (*readdir) (struct nfs_readdir_arg *, struct nfs_readdir_res *); int (*mknod) (struct inode *, struct dentry *, struct iattr *, dev_t); int (*statfs) (struct nfs_server *, struct nfs_fh *, diff --git a/include/linux/nvme.h b/include/linux/nvme.h index d92535997687..bfed36e342cc 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -116,6 +116,9 @@ enum { NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer * Location */ + NVME_REG_CMBMSC = 0x0050, /* Controller Memory Buffer Memory + * Space Control + */ NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */ NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */ NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */ @@ -135,6 +138,7 @@ enum { #define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff) #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) #define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) +#define NVME_CAP_CMBS(cap) (((cap) >> 57) & 0x1) #define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7) #define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff) @@ -192,6 +196,8 @@ enum { NVME_CSTS_SHST_OCCUR = 1 << 2, NVME_CSTS_SHST_CMPLT = 2 << 2, NVME_CSTS_SHST_MASK = 3 << 2, + NVME_CMBMSC_CRE = 1 << 0, + NVME_CMBMSC_CMSE = 1 << 1, }; struct nvme_id_power_state { diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index e200eef6a7fd..7d4ec26d8a3e 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -77,7 +77,7 @@ #define LAST_CPUPID_SHIFT 0 #endif -#ifdef CONFIG_KASAN_SW_TAGS +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) #define KASAN_TAG_WIDTH 8 #else #define KASAN_TAG_WIDTH 0 diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index bf7966776c55..505480217cf1 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -163,8 +163,6 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn); static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; } #endif -bool arm_pmu_irq_is_nmi(void); - /* Internal functions only for core arm_pmu code */ struct arm_pmu *armpmu_alloc(void); struct arm_pmu *armpmu_alloc_atomic(void); diff --git a/include/linux/phy.h b/include/linux/phy.h index 381a95732b6a..9effb511acde 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -743,7 +743,8 @@ struct phy_driver { /** @read_status: Determines the negotiated speed and duplex */ int (*read_status)(struct phy_device *phydev); - /** @config_intr: Enables or disables interrupts. + /** + * @config_intr: Enables or disables interrupts. * It should also clear any pending interrupts prior to enabling the * IRQs and after disabling them. 
*/ diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h index 99e6069c5fd8..73f63be509c4 100644 --- a/include/linux/platform_data/atmel.h +++ b/include/linux/platform_data/atmel.h @@ -6,18 +6,6 @@ #ifndef __ATMEL_H__ #define __ATMEL_H__ - /* Compact Flash */ -struct at91_cf_data { - int irq_pin; /* I/O IRQ */ - int det_pin; /* Card detect */ - int vcc_pin; /* power switching */ - int rst_pin; /* card reset */ - u8 chipselect; /* EBI Chip Select number */ - u8 flags; -#define AT91_CF_TRUE_IDE 0x01 -#define AT91_IDE_SWAP_A0_A2 0x02 -}; - /* FIXME: this needs a better location, but gets stuff building again */ #ifdef CONFIG_ATMEL_PM extern int at91_suspend_entering_slow_clock(void); diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index a3a9a878415f..86376779ab31 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -1284,6 +1284,8 @@ enum ec_feature_code { EC_FEATURE_SCP = 39, /* The MCU is an Integrated Sensor Hub */ EC_FEATURE_ISH = 40, + /* New TCPMv2 TYPEC_ prefaced commands supported */ + EC_FEATURE_TYPEC_CMD = 41, }; #define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32) @@ -5528,6 +5530,159 @@ struct ec_response_regulator_get_voltage { uint32_t voltage_mv; } __ec_align4; +/* + * Gather all discovery information for the given port and partner type. + * + * Note that if discovery has not yet completed, only the currently completed + * responses will be filled in. If the discovery data structures are changed + * in the process of the command running, BUSY will be returned. + * + * VDO field sizes are set to the maximum possible number of VDOs a VDM may + * contain, while the number of SVIDs here is selected to fit within the PROTO2 + * maximum parameter size. + */ +#define EC_CMD_TYPEC_DISCOVERY 0x0131 + +enum typec_partner_type { + TYPEC_PARTNER_SOP = 0, + TYPEC_PARTNER_SOP_PRIME = 1, +}; + +struct ec_params_typec_discovery { + uint8_t port; + uint8_t partner_type; /* enum typec_partner_type */ +} __ec_align1; + +struct svid_mode_info { + uint16_t svid; + uint16_t mode_count; /* Number of modes partner sent */ + uint32_t mode_vdo[6]; /* Max VDOs allowed after VDM header is 6 */ +}; + +struct ec_response_typec_discovery { + uint8_t identity_count; /* Number of identity VDOs partner sent */ + uint8_t svid_count; /* Number of SVIDs partner sent */ + uint16_t reserved; + uint32_t discovery_vdo[6]; /* Max VDOs allowed after VDM header is 6 */ + struct svid_mode_info svids[0]; +} __ec_align1; + +/* + * Gather all status information for a port. + * + * Note: this covers many of the return fields from the deprecated + * EC_CMD_USB_PD_CONTROL command, except those that are redundant with the + * discovery data. The "enum pd_cc_states" is defined with the deprecated + * EC_CMD_USB_PD_CONTROL command. + * + * This also combines in the EC_CMD_USB_PD_MUX_INFO flags. + */ +#define EC_CMD_TYPEC_STATUS 0x0133 + +/* + * Power role. + * + * Note this is also used for PD header creation, and values align to those in + * the Power Delivery Specification Revision 3.0 (See + * 6.2.1.1.4 Port Power Role). + */ +enum pd_power_role { + PD_ROLE_SINK = 0, + PD_ROLE_SOURCE = 1 +}; + +/* + * Data role. + * + * Note this is also used for PD header creation, and the first two values + * align to those in the Power Delivery Specification Revision 3.0 (See + * 6.2.1.1.6 Port Data Role). 
+ */ +enum pd_data_role { + PD_ROLE_UFP = 0, + PD_ROLE_DFP = 1, + PD_ROLE_DISCONNECTED = 2, +}; + +enum pd_vconn_role { + PD_ROLE_VCONN_OFF = 0, + PD_ROLE_VCONN_SRC = 1, +}; + +/* + * Note: BIT(0) may be used to determine whether the polarity is CC1 or CC2, + * regardless of whether a debug accessory is connected. + */ +enum tcpc_cc_polarity { + /* + * _CCx: is used to indicate the polarity while not connected to + * a Debug Accessory. Only one CC line will assert a resistor and + * the other will be open. + */ + POLARITY_CC1 = 0, + POLARITY_CC2 = 1, + + /* + * _CCx_DTS is used to indicate the polarity while connected to a + * SRC Debug Accessory. Assert resistors on both lines. + */ + POLARITY_CC1_DTS = 2, + POLARITY_CC2_DTS = 3, + + /* + * The current TCPC code relies on these specific POLARITY values. + * Adding in a check to verify if the list grows for any reason + * that this will give a hint that other places need to be + * adjusted. + */ + POLARITY_COUNT +}; + +#define PD_STATUS_EVENT_SOP_DISC_DONE BIT(0) +#define PD_STATUS_EVENT_SOP_PRIME_DISC_DONE BIT(1) + +struct ec_params_typec_status { + uint8_t port; +} __ec_align1; + +struct ec_response_typec_status { + uint8_t pd_enabled; /* PD communication enabled - bool */ + uint8_t dev_connected; /* Device connected - bool */ + uint8_t sop_connected; /* Device is SOP PD capable - bool */ + uint8_t source_cap_count; /* Number of Source Cap PDOs */ + + uint8_t power_role; /* enum pd_power_role */ + uint8_t data_role; /* enum pd_data_role */ + uint8_t vconn_role; /* enum pd_vconn_role */ + uint8_t sink_cap_count; /* Number of Sink Cap PDOs */ + + uint8_t polarity; /* enum tcpc_cc_polarity */ + uint8_t cc_state; /* enum pd_cc_states */ + uint8_t dp_pin; /* DP pin mode (MODE_DP_IN_[A-E]) */ + uint8_t mux_state; /* USB_PD_MUX* - encoded mux state */ + + char tc_state[32]; /* TC state name */ + + uint32_t events; /* PD_STATUS_EVENT bitmask */ + + /* + * BCD PD revisions for partners + * + * The format has the PD major reversion in the upper nibble, and PD + * minor version in the next nibble. Following two nibbles are + * currently 0. + * ex. PD 3.2 would map to 0x3200 + * + * PD major/minor will be 0 if no PD device is connected. + */ + uint16_t sop_revision; + uint16_t sop_prime_revision; + + uint32_t source_cap_pdos[7]; /* Max 7 PDOs can be present */ + + uint32_t sink_cap_pdos[7]; /* Max 7 PDOs can be present */ +} __ec_align1; + /*****************************************************************************/ /* The command range 0x200-0x2FF is reserved for Rotor. */ diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 240dce553a0b..fafc1beea504 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -50,6 +50,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_QUIRK_GPMC_DEBUG BIT(26) #define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25) #define SYSC_MODULE_QUIRK_PRUSS BIT(24) #define SYSC_MODULE_QUIRK_DSS_RESET BIT(23) diff --git a/include/linux/power/generic-adc-battery.h b/include/linux/power/generic-adc-battery.h index 40f9c7628f7b..c68cbf34cd34 100644 --- a/include/linux/power/generic-adc-battery.h +++ b/include/linux/power/generic-adc-battery.h @@ -11,16 +11,12 @@ * @battery_info: recommended structure to specify static power supply * parameters * @cal_charge: calculate charge level. - * @gpio_charge_finished: gpio for the charger. 
- * @gpio_inverted: Should be 1 if the GPIO is active low otherwise 0 * @jitter_delay: delay required after the interrupt to check battery * status.Default set is 10ms. */ struct gab_platform_data { struct power_supply_info battery_info; int (*cal_charge)(long value); - int gpio_charge_finished; - bool gpio_inverted; int jitter_delay; }; diff --git a/include/linux/psci.h b/include/linux/psci.h index 2a1bfb890e58..4ca0060a3fc4 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -34,6 +34,15 @@ struct psci_operations { extern struct psci_operations psci_ops; +struct psci_0_1_function_ids { + u32 cpu_suspend; + u32 cpu_on; + u32 cpu_off; + u32 migrate; +}; + +struct psci_0_1_function_ids get_psci_0_1_function_ids(void); + #if defined(CONFIG_ARM_PSCI_FW) int __init psci_dt_init(void); #else diff --git a/include/linux/pwm.h b/include/linux/pwm.h index a13ff383fa1d..e4d84d4db293 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -473,11 +473,6 @@ static inline int pwmchip_add(struct pwm_chip *chip) return -EINVAL; } -static inline int pwmchip_add_inversed(struct pwm_chip *chip) -{ - return -EINVAL; -} - static inline int pwmchip_remove(struct pwm_chip *chip) { return -EINVAL; diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index f7bbea3f09ca..ec2ad4b0fe14 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -48,7 +48,6 @@ struct geni_icc_path { * @clk_perf_tbl: Table of clock frequency input to serial engine clock * @icc_paths: Array of ICC paths for SE * @opp_table: Pointer to the OPP table - * @has_opp_table: Specifies if the SE has an OPP table */ struct geni_se { void __iomem *base; @@ -59,7 +58,6 @@ struct geni_se { unsigned long *clk_perf_tbl; struct geni_icc_path icc_paths[3]; struct opp_table *opp_table; - bool has_opp_table; }; /* Common SE registers */ diff --git a/include/linux/quota.h b/include/linux/quota.h index 27aab84fcbaa..18ebd39c9487 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -448,17 +448,18 @@ struct quota_format_type { }; /** - * Quota state flags - they actually come in two flavors - for users and groups. + * Quota state flags - they come in three flavors - for users, groups and projects. 
* * Actual typed flags layout: - * USRQUOTA GRPQUOTA - * DQUOT_USAGE_ENABLED 0x0001 0x0002 - * DQUOT_LIMITS_ENABLED 0x0004 0x0008 - * DQUOT_SUSPENDED 0x0010 0x0020 + * USRQUOTA GRPQUOTA PRJQUOTA + * DQUOT_USAGE_ENABLED 0x0001 0x0002 0x0004 + * DQUOT_LIMITS_ENABLED 0x0008 0x0010 0x0020 + * DQUOT_SUSPENDED 0x0040 0x0080 0x0100 * * Following bits are used for non-typed flags: - * DQUOT_QUOTA_SYS_FILE 0x0040 - * DQUOT_NEGATIVE_USAGE 0x0080 + * DQUOT_QUOTA_SYS_FILE 0x0200 + * DQUOT_NEGATIVE_USAGE 0x0400 + * DQUOT_NOLIST_DIRTY 0x0800 */ enum { _DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index de0826411311..fd02c5fa60cb 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -86,6 +86,12 @@ void rcu_sched_clock_irq(int user); void rcu_report_dead(unsigned int cpu); void rcutree_migrate_callbacks(int cpu); +#ifdef CONFIG_TASKS_RCU_GENERIC +void rcu_init_tasks_generic(void); +#else +static inline void rcu_init_tasks_generic(void) { } +#endif + #ifdef CONFIG_RCU_STALL_COMMON void rcu_sysrq_start(void); void rcu_sysrq_end(void); diff --git a/include/linux/regset.h b/include/linux/regset.h index c3403f328257..a00765f0e8cf 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -46,6 +46,18 @@ static inline int membuf_write(struct membuf *s, const void *v, size_t size) return s->left; } +static inline struct membuf membuf_at(const struct membuf *s, size_t offs) +{ + struct membuf n = *s; + + if (offs > n.left) + offs = n.left; + n.p += offs; + n.left -= offs; + + return n; +} + /* current s->p must be aligned for v; v must be a scalar */ #define membuf_store(s, v) \ ({ \ diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 2024944fd2f7..20e84a84fb77 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -332,6 +332,12 @@ regulator_get_exclusive(struct device *dev, const char *id) } static inline struct regulator *__must_check +devm_regulator_get_exclusive(struct device *dev, const char *id) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct regulator *__must_check regulator_get_optional(struct device *dev, const char *id) { return ERR_PTR(-ENODEV); @@ -486,6 +492,11 @@ static inline int regulator_get_voltage(struct regulator *regulator) return -EINVAL; } +static inline int regulator_sync_voltage(struct regulator *regulator) +{ + return -EINVAL; +} + static inline int regulator_is_supported_voltage(struct regulator *regulator, int min_uV, int max_uV) { @@ -578,6 +589,25 @@ static inline int devm_regulator_unregister_notifier(struct regulator *regulator return 0; } +static inline int regulator_suspend_enable(struct regulator_dev *rdev, + suspend_state_t state) +{ + return -EINVAL; +} + +static inline int regulator_suspend_disable(struct regulator_dev *rdev, + suspend_state_t state) +{ + return -EINVAL; +} + +static inline int regulator_set_suspend_voltage(struct regulator *regulator, + int min_uV, int max_uV, + suspend_state_t state) +{ + return -EINVAL; +} + static inline void *regulator_get_drvdata(struct regulator *regulator) { return NULL; diff --git a/include/linux/reset.h b/include/linux/reset.h index 05aa9f440f48..439fec7112a9 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -13,6 +13,7 @@ struct reset_control; #ifdef CONFIG_RESET_CONTROLLER int reset_control_reset(struct reset_control *rstc); +int reset_control_rearm(struct reset_control *rstc); int reset_control_assert(struct reset_control 
*rstc); int reset_control_deassert(struct reset_control *rstc); int reset_control_status(struct reset_control *rstc); diff --git a/include/linux/rtc.h b/include/linux/rtc.h index b829382de6c3..568909449c13 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -141,12 +141,6 @@ struct rtc_device { */ unsigned long set_offset_nsec; - bool registered; - - /* Old ABI support */ - bool nvram_old_abi; - struct bin_attribute *nvram; - time64_t range_min; timeu64_t range_max; time64_t start_secs; @@ -184,7 +178,7 @@ extern struct rtc_device *devm_rtc_device_register(struct device *dev, const struct rtc_class_ops *ops, struct module *owner); struct rtc_device *devm_rtc_allocate_device(struct device *dev); -int __rtc_register_device(struct module *owner, struct rtc_device *rtc); +int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc); extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); @@ -227,8 +221,8 @@ static inline bool is_leap_year(unsigned int year) return (!(year % 4) && (year % 100)) || !(year % 400); } -#define rtc_register_device(device) \ - __rtc_register_device(THIS_MODULE, device) +#define devm_rtc_register_device(device) \ + __devm_rtc_register_device(THIS_MODULE, device) #ifdef CONFIG_RTC_HCTOSYS_DEVICE extern int rtc_hctosys_ret; @@ -237,16 +231,14 @@ extern int rtc_hctosys_ret; #endif #ifdef CONFIG_RTC_NVMEM -int rtc_nvmem_register(struct rtc_device *rtc, - struct nvmem_config *nvmem_config); -void rtc_nvmem_unregister(struct rtc_device *rtc); +int devm_rtc_nvmem_register(struct rtc_device *rtc, + struct nvmem_config *nvmem_config); #else -static inline int rtc_nvmem_register(struct rtc_device *rtc, - struct nvmem_config *nvmem_config) +static inline int devm_rtc_nvmem_register(struct rtc_device *rtc, + struct nvmem_config *nvmem_config) { return 0; } -static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {} #endif #ifdef CONFIG_RTC_INTF_SYSFS diff --git a/include/linux/s3c_adc_battery.h b/include/linux/s3c_adc_battery.h index 833871dcf6fd..57f982c375f8 100644 --- a/include/linux/s3c_adc_battery.h +++ b/include/linux/s3c_adc_battery.h @@ -14,9 +14,6 @@ struct s3c_adc_bat_pdata { void (*enable_charger)(void); void (*disable_charger)(void); - int gpio_charge_finished; - int gpio_inverted; - const struct s3c_adc_bat_thresh *lut_noac; unsigned int lut_noac_cnt; const struct s3c_adc_bat_thresh *lut_acin; diff --git a/include/linux/sched.h b/include/linux/sched.h index 51d535b69bd6..6e3a5eeec509 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1234,7 +1234,7 @@ struct task_struct { u64 timer_slack_ns; u64 default_timer_slack_ns; -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) unsigned int kasan_depth; #endif diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h index 3ed5aa18593f..6205578ab6ee 100644 --- a/include/linux/sched/cpufreq.h +++ b/include/linux/sched/cpufreq.h @@ -28,6 +28,11 @@ static inline unsigned long map_util_freq(unsigned long util, { return (freq + (freq >> 2)) * util / cap; } + +static inline unsigned long map_util_perf(unsigned long util) +{ + return util + (util >> 2); +} #endif /* CONFIG_CPU_FREQ */ #endif /* _LINUX_SCHED_CPUFREQ_H */ diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index c49c5888e854..ecb3aad1a964 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -8,6 +8,7 @@ #ifndef 
_LINUX_SCMI_PROTOCOL_H #define _LINUX_SCMI_PROTOCOL_H +#include <linux/bitfield.h> #include <linux/device.h> #include <linux/notifier.h> #include <linux/types.h> @@ -149,13 +150,180 @@ struct scmi_power_ops { u32 *state); }; +/** + * scmi_sensor_reading - represent a timestamped read + * + * Used by @reading_get_timestamped method. + * + * @value: The signed value sensor read. + * @timestamp: An unsigned timestamp for the sensor read, as provided by + * SCMI platform. Set to zero when not available. + */ +struct scmi_sensor_reading { + long long value; + unsigned long long timestamp; +}; + +/** + * scmi_range_attrs - specifies a sensor or axis values' range + * @min_range: The minimum value which can be represented by the sensor/axis. + * @max_range: The maximum value which can be represented by the sensor/axis. + */ +struct scmi_range_attrs { + long long min_range; + long long max_range; +}; + +/** + * scmi_sensor_axis_info - describes one sensor axes + * @id: The axes ID. + * @type: Axes type. Chosen amongst one of @enum scmi_sensor_class. + * @scale: Power-of-10 multiplier applied to the axis unit. + * @name: NULL-terminated string representing axes name as advertised by + * SCMI platform. + * @extended_attrs: Flag to indicate the presence of additional extended + * attributes for this axes. + * @resolution: Extended attribute representing the resolution of the axes. + * Set to 0 if not reported by this axes. + * @exponent: Extended attribute representing the power-of-10 multiplier that + * is applied to the resolution field. Set to 0 if not reported by + * this axes. + * @attrs: Extended attributes representing minimum and maximum values + * measurable by this axes. Set to 0 if not reported by this sensor. + */ +struct scmi_sensor_axis_info { + unsigned int id; + unsigned int type; + int scale; + char name[SCMI_MAX_STR_SIZE]; + bool extended_attrs; + unsigned int resolution; + int exponent; + struct scmi_range_attrs attrs; +}; + +/** + * scmi_sensor_intervals_info - describes number and type of available update + * intervals + * @segmented: Flag for segmented intervals' representation. When True there + * will be exactly 3 intervals in @desc, with each entry + * representing a member of a segment in this order: + * {lowest update interval, highest update interval, step size} + * @count: Number of intervals described in @desc. + * @desc: Array of @count interval descriptor bitmask represented as detailed in + * the SCMI specification: it can be accessed using the accompanying + * macros. + * @prealloc_pool: A minimal preallocated pool of desc entries used to avoid + * lesser-than-64-bytes dynamic allocation for small @count + * values. + */ +struct scmi_sensor_intervals_info { + bool segmented; + unsigned int count; +#define SCMI_SENS_INTVL_SEGMENT_LOW 0 +#define SCMI_SENS_INTVL_SEGMENT_HIGH 1 +#define SCMI_SENS_INTVL_SEGMENT_STEP 2 + unsigned int *desc; +#define SCMI_SENS_INTVL_GET_SECS(x) FIELD_GET(GENMASK(20, 5), (x)) +#define SCMI_SENS_INTVL_GET_EXP(x) \ + ({ \ + int __signed_exp = FIELD_GET(GENMASK(4, 0), (x)); \ + \ + if (__signed_exp & BIT(4)) \ + __signed_exp |= GENMASK(31, 5); \ + __signed_exp; \ + }) +#define SCMI_MAX_PREALLOC_POOL 16 + unsigned int prealloc_pool[SCMI_MAX_PREALLOC_POOL]; +}; + +/** + * struct scmi_sensor_info - represents information related to one of the + * available sensors. + * @id: Sensor ID. + * @type: Sensor type. Chosen amongst one of @enum scmi_sensor_class. + * @scale: Power-of-10 multiplier applied to the sensor unit. 
+ * @num_trip_points: Number of maximum configurable trip points. + * @async: Flag for asynchronous read support. + * @update: Flag for continuouos update notification support. + * @timestamped: Flag for timestamped read support. + * @tstamp_scale: Power-of-10 multiplier applied to the sensor timestamps to + * represent it in seconds. + * @num_axis: Number of supported axis if any. Reported as 0 for scalar sensors. + * @axis: Pointer to an array of @num_axis descriptors. + * @intervals: Descriptor of available update intervals. + * @sensor_config: A bitmask reporting the current sensor configuration as + * detailed in the SCMI specification: it can accessed and + * modified through the accompanying macros. + * @name: NULL-terminated string representing sensor name as advertised by + * SCMI platform. + * @extended_scalar_attrs: Flag to indicate the presence of additional extended + * attributes for this sensor. + * @sensor_power: Extended attribute representing the average power + * consumed by the sensor in microwatts (uW) when it is active. + * Reported here only for scalar sensors. + * Set to 0 if not reported by this sensor. + * @resolution: Extended attribute representing the resolution of the sensor. + * Reported here only for scalar sensors. + * Set to 0 if not reported by this sensor. + * @exponent: Extended attribute representing the power-of-10 multiplier that is + * applied to the resolution field. + * Reported here only for scalar sensors. + * Set to 0 if not reported by this sensor. + * @scalar_attrs: Extended attributes representing minimum and maximum + * measurable values by this sensor. + * Reported here only for scalar sensors. + * Set to 0 if not reported by this sensor. + */ struct scmi_sensor_info { - u32 id; - u8 type; - s8 scale; - u8 num_trip_points; + unsigned int id; + unsigned int type; + int scale; + unsigned int num_trip_points; bool async; + bool update; + bool timestamped; + int tstamp_scale; + unsigned int num_axis; + struct scmi_sensor_axis_info *axis; + struct scmi_sensor_intervals_info intervals; + unsigned int sensor_config; +#define SCMI_SENS_CFG_UPDATE_SECS_MASK GENMASK(31, 16) +#define SCMI_SENS_CFG_GET_UPDATE_SECS(x) \ + FIELD_GET(SCMI_SENS_CFG_UPDATE_SECS_MASK, (x)) + +#define SCMI_SENS_CFG_UPDATE_EXP_MASK GENMASK(15, 11) +#define SCMI_SENS_CFG_GET_UPDATE_EXP(x) \ + ({ \ + int __signed_exp = \ + FIELD_GET(SCMI_SENS_CFG_UPDATE_EXP_MASK, (x)); \ + \ + if (__signed_exp & BIT(4)) \ + __signed_exp |= GENMASK(31, 5); \ + __signed_exp; \ + }) + +#define SCMI_SENS_CFG_ROUND_MASK GENMASK(10, 9) +#define SCMI_SENS_CFG_ROUND_AUTO 2 +#define SCMI_SENS_CFG_ROUND_UP 1 +#define SCMI_SENS_CFG_ROUND_DOWN 0 + +#define SCMI_SENS_CFG_TSTAMP_ENABLED_MASK BIT(1) +#define SCMI_SENS_CFG_TSTAMP_ENABLE 1 +#define SCMI_SENS_CFG_TSTAMP_DISABLE 0 +#define SCMI_SENS_CFG_IS_TSTAMP_ENABLED(x) \ + FIELD_GET(SCMI_SENS_CFG_TSTAMP_ENABLED_MASK, (x)) + +#define SCMI_SENS_CFG_SENSOR_ENABLED_MASK BIT(0) +#define SCMI_SENS_CFG_SENSOR_ENABLE 1 +#define SCMI_SENS_CFG_SENSOR_DISABLE 0 char name[SCMI_MAX_STR_SIZE]; +#define SCMI_SENS_CFG_IS_ENABLED(x) FIELD_GET(BIT(0), (x)) + bool extended_scalar_attrs; + unsigned int sensor_power; + unsigned int resolution; + int exponent; + struct scmi_range_attrs scalar_attrs; }; /* @@ -164,11 +332,100 @@ struct scmi_sensor_info { */ enum scmi_sensor_class { NONE = 0x0, + UNSPEC = 0x1, TEMPERATURE_C = 0x2, + TEMPERATURE_F = 0x3, + TEMPERATURE_K = 0x4, VOLTAGE = 0x5, CURRENT = 0x6, POWER = 0x7, ENERGY = 0x8, + CHARGE = 0x9, + VOLTAMPERE = 0xA, + NITS = 0xB, 
+ LUMENS = 0xC, + LUX = 0xD, + CANDELAS = 0xE, + KPA = 0xF, + PSI = 0x10, + NEWTON = 0x11, + CFM = 0x12, + RPM = 0x13, + HERTZ = 0x14, + SECS = 0x15, + MINS = 0x16, + HOURS = 0x17, + DAYS = 0x18, + WEEKS = 0x19, + MILS = 0x1A, + INCHES = 0x1B, + FEET = 0x1C, + CUBIC_INCHES = 0x1D, + CUBIC_FEET = 0x1E, + METERS = 0x1F, + CUBIC_CM = 0x20, + CUBIC_METERS = 0x21, + LITERS = 0x22, + FLUID_OUNCES = 0x23, + RADIANS = 0x24, + STERADIANS = 0x25, + REVOLUTIONS = 0x26, + CYCLES = 0x27, + GRAVITIES = 0x28, + OUNCES = 0x29, + POUNDS = 0x2A, + FOOT_POUNDS = 0x2B, + OUNCE_INCHES = 0x2C, + GAUSS = 0x2D, + GILBERTS = 0x2E, + HENRIES = 0x2F, + FARADS = 0x30, + OHMS = 0x31, + SIEMENS = 0x32, + MOLES = 0x33, + BECQUERELS = 0x34, + PPM = 0x35, + DECIBELS = 0x36, + DBA = 0x37, + DBC = 0x38, + GRAYS = 0x39, + SIEVERTS = 0x3A, + COLOR_TEMP_K = 0x3B, + BITS = 0x3C, + BYTES = 0x3D, + WORDS = 0x3E, + DWORDS = 0x3F, + QWORDS = 0x40, + PERCENTAGE = 0x41, + PASCALS = 0x42, + COUNTS = 0x43, + GRAMS = 0x44, + NEWTON_METERS = 0x45, + HITS = 0x46, + MISSES = 0x47, + RETRIES = 0x48, + OVERRUNS = 0x49, + UNDERRUNS = 0x4A, + COLLISIONS = 0x4B, + PACKETS = 0x4C, + MESSAGES = 0x4D, + CHARS = 0x4E, + ERRORS = 0x4F, + CORRECTED_ERRS = 0x50, + UNCORRECTABLE_ERRS = 0x51, + SQ_MILS = 0x52, + SQ_INCHES = 0x53, + SQ_FEET = 0x54, + SQ_CM = 0x55, + SQ_METERS = 0x56, + RADIANS_SEC = 0x57, + BPM = 0x58, + METERS_SEC_SQUARED = 0x59, + METERS_SEC = 0x5A, + CUBIC_METERS_SEC = 0x5B, + MM_MERCURY = 0x5C, + RADIANS_SEC_SQUARED = 0x5D, + OEM_UNIT = 0xFF }; /** @@ -179,6 +436,13 @@ enum scmi_sensor_class { * @info_get: get the information of the specified sensor * @trip_point_config: selects and configures a trip-point of interest * @reading_get: gets the current value of the sensor + * @reading_get_timestamped: gets the current value and timestamp, when + * available, of the sensor. 
(as of v3.0 spec) + * Supports multi-axis sensors for sensors which + * supports it and if the @reading array size of + * @count entry equals the sensor num_axis + * @config_get: Get sensor current configuration + * @config_set: Set sensor current configuration */ struct scmi_sensor_ops { int (*count_get)(const struct scmi_handle *handle); @@ -188,6 +452,13 @@ struct scmi_sensor_ops { u32 sensor_id, u8 trip_id, u64 trip_value); int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id, u64 *value); + int (*reading_get_timestamped)(const struct scmi_handle *handle, + u32 sensor_id, u8 count, + struct scmi_sensor_reading *readings); + int (*config_get)(const struct scmi_handle *handle, + u32 sensor_id, u32 *sensor_config); + int (*config_set)(const struct scmi_handle *handle, + u32 sensor_id, u32 sensor_config); }; /** @@ -451,6 +722,7 @@ enum scmi_notification_events { SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0x0, SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1, SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0x0, + SCMI_EVENT_SENSOR_UPDATE = 0x1, SCMI_EVENT_RESET_ISSUED = 0x0, SCMI_EVENT_BASE_ERROR_EVENT = 0x0, SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER = 0x0, @@ -492,6 +764,14 @@ struct scmi_sensor_trip_point_report { unsigned int trip_point_desc; }; +struct scmi_sensor_update_report { + ktime_t timestamp; + unsigned int agent_id; + unsigned int sensor_id; + unsigned int readings_count; + struct scmi_sensor_reading readings[]; +}; + struct scmi_reset_issued_report { ktime_t timestamp; unsigned int agent_id; diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h index fb0205d87d3c..9d6c28cc4d8f 100644 --- a/include/linux/seq_buf.h +++ b/include/linux/seq_buf.h @@ -30,7 +30,7 @@ static inline void seq_buf_clear(struct seq_buf *s) } static inline void -seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size) +seq_buf_init(struct seq_buf *s, char *buf, unsigned int size) { s->buffer = buf; s->size = size; diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h index 463ed28d2b27..ca2c5393dc6b 100644 --- a/include/linux/serial_s3c.h +++ b/include/linux/serial_s3c.h @@ -254,7 +254,7 @@ * serial port * * the pointer is setup by the machine specific initialisation from the - * arch/arm/mach-s3c2410/ directory. + * arch/arm/mach-s3c/ directory. 
*/ struct s3c2410_uartcfg { diff --git a/include/linux/sizes.h b/include/linux/sizes.h index 9874f6f67537..1ac79bcee2bb 100644 --- a/include/linux/sizes.h +++ b/include/linux/sizes.h @@ -44,6 +44,9 @@ #define SZ_2G 0x80000000 #define SZ_4G _AC(0x100000000, ULL) +#define SZ_8G _AC(0x200000000, ULL) +#define SZ_16G _AC(0x400000000, ULL) +#define SZ_32G _AC(0x800000000, ULL) #define SZ_64T _AC(0x400000000000, ULL) #endif /* __LINUX_SIZES_H__ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 333bcdc39635..5f60c9e907c9 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -366,7 +366,7 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) static inline bool skb_frag_must_loop(struct page *p) { #if defined(CONFIG_HIGHMEM) - if (PageHighMem(p)) + if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p)) return true; #endif return false; @@ -1203,6 +1203,7 @@ struct skb_seq_state { struct sk_buff *root_skb; struct sk_buff *cur_skb; __u8 *frag_data; + __u32 frag_off; }; void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index 233463d789c6..e7842debc05d 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -2,6 +2,108 @@ #ifndef __SOC_MEDIATEK_INFRACFG_H #define __SOC_MEDIATEK_INFRACFG_H +#define MT8192_TOP_AXI_PROT_EN_STA1 0x228 +#define MT8192_TOP_AXI_PROT_EN_1_STA1 0x258 +#define MT8192_TOP_AXI_PROT_EN_SET 0x2a0 +#define MT8192_TOP_AXI_PROT_EN_CLR 0x2a4 +#define MT8192_TOP_AXI_PROT_EN_1_SET 0x2a8 +#define MT8192_TOP_AXI_PROT_EN_1_CLR 0x2ac +#define MT8192_TOP_AXI_PROT_EN_MM_SET 0x2d4 +#define MT8192_TOP_AXI_PROT_EN_MM_CLR 0x2d8 +#define MT8192_TOP_AXI_PROT_EN_MM_STA1 0x2ec +#define MT8192_TOP_AXI_PROT_EN_2_SET 0x714 +#define MT8192_TOP_AXI_PROT_EN_2_CLR 0x718 +#define MT8192_TOP_AXI_PROT_EN_2_STA1 0x724 +#define MT8192_TOP_AXI_PROT_EN_VDNR_SET 0xb84 +#define MT8192_TOP_AXI_PROT_EN_VDNR_CLR 0xb88 +#define MT8192_TOP_AXI_PROT_EN_VDNR_STA1 0xb90 +#define MT8192_TOP_AXI_PROT_EN_MM_2_SET 0xdcc +#define MT8192_TOP_AXI_PROT_EN_MM_2_CLR 0xdd0 +#define MT8192_TOP_AXI_PROT_EN_MM_2_STA1 0xdd8 + +#define MT8192_TOP_AXI_PROT_EN_DISP (BIT(6) | BIT(23)) +#define MT8192_TOP_AXI_PROT_EN_CONN (BIT(13) | BIT(18)) +#define MT8192_TOP_AXI_PROT_EN_CONN_2ND BIT(14) +#define MT8192_TOP_AXI_PROT_EN_MFG1 GENMASK(22, 21) +#define MT8192_TOP_AXI_PROT_EN_1_CONN BIT(10) +#define MT8192_TOP_AXI_PROT_EN_1_MFG1 BIT(21) +#define MT8192_TOP_AXI_PROT_EN_1_CAM BIT(22) +#define MT8192_TOP_AXI_PROT_EN_2_CAM BIT(0) +#define MT8192_TOP_AXI_PROT_EN_2_ADSP BIT(3) +#define MT8192_TOP_AXI_PROT_EN_2_AUDIO BIT(4) +#define MT8192_TOP_AXI_PROT_EN_2_MFG1 GENMASK(6, 5) +#define MT8192_TOP_AXI_PROT_EN_2_MFG1_2ND BIT(7) +#define MT8192_TOP_AXI_PROT_EN_MM_CAM (BIT(0) | BIT(2)) +#define MT8192_TOP_AXI_PROT_EN_MM_DISP (BIT(0) | BIT(2) | \ + BIT(10) | BIT(12) | \ + BIT(14) | BIT(16) | \ + BIT(24) | BIT(26)) +#define MT8192_TOP_AXI_PROT_EN_MM_CAM_2ND (BIT(1) | BIT(3)) +#define MT8192_TOP_AXI_PROT_EN_MM_DISP_2ND (BIT(1) | BIT(3) | \ + BIT(15) | BIT(17) | \ + BIT(25) | BIT(27)) +#define MT8192_TOP_AXI_PROT_EN_MM_ISP2 BIT(14) +#define MT8192_TOP_AXI_PROT_EN_MM_ISP2_2ND BIT(15) +#define MT8192_TOP_AXI_PROT_EN_MM_IPE BIT(16) +#define MT8192_TOP_AXI_PROT_EN_MM_IPE_2ND BIT(17) +#define MT8192_TOP_AXI_PROT_EN_MM_VDEC BIT(24) +#define MT8192_TOP_AXI_PROT_EN_MM_VDEC_2ND BIT(25) +#define MT8192_TOP_AXI_PROT_EN_MM_VENC BIT(26) +#define 
MT8192_TOP_AXI_PROT_EN_MM_VENC_2ND BIT(27) +#define MT8192_TOP_AXI_PROT_EN_MM_2_ISP BIT(8) +#define MT8192_TOP_AXI_PROT_EN_MM_2_DISP (BIT(8) | BIT(12)) +#define MT8192_TOP_AXI_PROT_EN_MM_2_ISP_2ND BIT(9) +#define MT8192_TOP_AXI_PROT_EN_MM_2_DISP_2ND (BIT(9) | BIT(13)) +#define MT8192_TOP_AXI_PROT_EN_MM_2_MDP BIT(12) +#define MT8192_TOP_AXI_PROT_EN_MM_2_MDP_2ND BIT(13) +#define MT8192_TOP_AXI_PROT_EN_VDNR_CAM BIT(21) + +#define MT8183_TOP_AXI_PROT_EN_STA1 0x228 +#define MT8183_TOP_AXI_PROT_EN_STA1_1 0x258 +#define MT8183_TOP_AXI_PROT_EN_SET 0x2a0 +#define MT8183_TOP_AXI_PROT_EN_CLR 0x2a4 +#define MT8183_TOP_AXI_PROT_EN_1_SET 0x2a8 +#define MT8183_TOP_AXI_PROT_EN_1_CLR 0x2ac +#define MT8183_TOP_AXI_PROT_EN_MCU_SET 0x2c4 +#define MT8183_TOP_AXI_PROT_EN_MCU_CLR 0x2c8 +#define MT8183_TOP_AXI_PROT_EN_MCU_STA1 0x2e4 +#define MT8183_TOP_AXI_PROT_EN_MM_SET 0x2d4 +#define MT8183_TOP_AXI_PROT_EN_MM_CLR 0x2d8 +#define MT8183_TOP_AXI_PROT_EN_MM_STA1 0x2ec + +#define MT8183_TOP_AXI_PROT_EN_DISP (BIT(10) | BIT(11)) +#define MT8183_TOP_AXI_PROT_EN_CONN (BIT(13) | BIT(14)) +#define MT8183_TOP_AXI_PROT_EN_MFG (BIT(21) | BIT(22)) +#define MT8183_TOP_AXI_PROT_EN_CAM BIT(28) +#define MT8183_TOP_AXI_PROT_EN_VPU_TOP BIT(27) +#define MT8183_TOP_AXI_PROT_EN_1_DISP (BIT(16) | BIT(17)) +#define MT8183_TOP_AXI_PROT_EN_1_MFG GENMASK(21, 19) +#define MT8183_TOP_AXI_PROT_EN_MM_ISP (BIT(3) | BIT(8)) +#define MT8183_TOP_AXI_PROT_EN_MM_ISP_2ND BIT(10) +#define MT8183_TOP_AXI_PROT_EN_MM_CAM (BIT(4) | BIT(5) | \ + BIT(9) | BIT(13)) +#define MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP (GENMASK(9, 6) | \ + BIT(12)) +#define MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP_2ND (BIT(10) | BIT(11)) +#define MT8183_TOP_AXI_PROT_EN_MM_CAM_2ND BIT(11) +#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0_2ND (BIT(0) | BIT(2) | \ + BIT(4)) +#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1_2ND (BIT(1) | BIT(3) | \ + BIT(5)) +#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0 BIT(6) +#define MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1 BIT(7) + +#define MT8183_SMI_COMMON_CLAMP_EN 0x3c0 +#define MT8183_SMI_COMMON_CLAMP_EN_SET 0x3c4 +#define MT8183_SMI_COMMON_CLAMP_EN_CLR 0x3c8 + +#define MT8183_SMI_COMMON_SMI_CLAMP_DISP GENMASK(7, 0) +#define MT8183_SMI_COMMON_SMI_CLAMP_VENC BIT(1) +#define MT8183_SMI_COMMON_SMI_CLAMP_ISP BIT(2) +#define MT8183_SMI_COMMON_SMI_CLAMP_CAM (BIT(3) | BIT(4)) +#define MT8183_SMI_COMMON_SMI_CLAMP_VPU_TOP (BIT(5) | BIT(6)) +#define MT8183_SMI_COMMON_SMI_CLAMP_VDEC BIT(7) + #define MT8173_TOP_AXI_PROT_EN_MCI_M2 BIT(0) #define MT8173_TOP_AXI_PROT_EN_MM_M0 BIT(1) #define MT8173_TOP_AXI_PROT_EN_MM_M1 BIT(2) @@ -32,6 +134,11 @@ #define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \ BIT(7) | BIT(8)) +#define INFRA_TOPAXI_PROTECTEN 0x0220 +#define INFRA_TOPAXI_PROTECTSTA1 0x0228 +#define INFRA_TOPAXI_PROTECTEN_SET 0x0260 +#define INFRA_TOPAXI_PROTECTEN_CLR 0x0264 + #define REG_INFRA_MISC 0xf00 #define F_DDR_4GB_SUPPORT_EN BIT(13) diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 960704d75994..8e9996610978 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -11,7 +11,6 @@ #include <linux/mailbox/mtk-cmdq-mailbox.h> #include <linux/timer.h> -#define CMDQ_NO_TIMEOUT 0xffffffffu #define CMDQ_ADDR_HIGH(addr) ((u32)(((addr) >> 16) & GENMASK(31, 0))) #define CMDQ_ADDR_LOW(addr) ((u16)(addr) | BIT(1)) @@ -24,12 +23,8 @@ struct cmdq_client_reg { }; struct cmdq_client { - spinlock_t lock; - u32 pkt_cnt; struct mbox_client client; struct mbox_chan *chan; - struct timer_list timer; - u32 
timeout_ms; /* in unit of microsecond */ }; /** @@ -51,13 +46,10 @@ int cmdq_dev_get_client_reg(struct device *dev, * cmdq_mbox_create() - create CMDQ mailbox client and channel * @dev: device of CMDQ mailbox client * @index: index of CMDQ mailbox channel - * @timeout: timeout of a pkt execution by GCE, in unit of microsecond, set - * CMDQ_NO_TIMEOUT if a timer is not used. * * Return: CMDQ mailbox client pointer */ -struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, - u32 timeout); +struct cmdq_client *cmdq_mbox_create(struct device *dev, int index); /** * cmdq_mbox_destroy() - destroy CMDQ mailbox client and channel diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h index 7bab5d9a3d31..2228bf6133da 100644 --- a/include/linux/soc/mediatek/mtk-mmsys.h +++ b/include/linux/soc/mediatek/mtk-mmsys.h @@ -9,6 +9,39 @@ enum mtk_ddp_comp_id; struct device; +enum mtk_ddp_comp_id { + DDP_COMPONENT_AAL0, + DDP_COMPONENT_AAL1, + DDP_COMPONENT_BLS, + DDP_COMPONENT_CCORR, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_COLOR1, + DDP_COMPONENT_DITHER, + DDP_COMPONENT_DPI0, + DDP_COMPONENT_DPI1, + DDP_COMPONENT_DSI0, + DDP_COMPONENT_DSI1, + DDP_COMPONENT_DSI2, + DDP_COMPONENT_DSI3, + DDP_COMPONENT_GAMMA, + DDP_COMPONENT_OD0, + DDP_COMPONENT_OD1, + DDP_COMPONENT_OVL0, + DDP_COMPONENT_OVL_2L0, + DDP_COMPONENT_OVL_2L1, + DDP_COMPONENT_OVL1, + DDP_COMPONENT_PWM0, + DDP_COMPONENT_PWM1, + DDP_COMPONENT_PWM2, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_RDMA1, + DDP_COMPONENT_RDMA2, + DDP_COMPONENT_UFOE, + DDP_COMPONENT_WDMA0, + DDP_COMPONENT_WDMA1, + DDP_COMPONENT_ID_MAX, +}; + void mtk_mmsys_ddp_connect(struct device *dev, enum mtk_ddp_comp_id cur, enum mtk_ddp_comp_id next); diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 90b864655822..3db6797ba6ff 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -16,6 +16,7 @@ #define LLCC_AUDIO 6 #define LLCC_MDMHPGRW 7 #define LLCC_MDM 8 +#define LLCC_MODHW 9 #define LLCC_CMPT 10 #define LLCC_GPUHTW 11 #define LLCC_GPU 12 @@ -26,6 +27,11 @@ #define LLCC_MDMHPFX 20 #define LLCC_MDMPNG 21 #define LLCC_AUDHW 22 +#define LLCC_NPU 23 +#define LLCC_WLHW 24 +#define LLCC_MODPE 29 +#define LLCC_APTCM 30 +#define LLCC_WRCACHE 31 /** * llcc_slice_desc - Cache slice descriptor diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h index 5a472eca5ee4..39b022b92598 100644 --- a/include/linux/soc/ti/k3-ringacc.h +++ b/include/linux/soc/ti/k3-ringacc.h @@ -67,6 +67,10 @@ struct k3_ring; * few times. It's usable when the same ring is used as Free Host PD ring * for different flows, for example. * Note: Locking should be done by consumer if required + * @dma_dev: Master device which is using and accessing to the ring + * memory when the mode is K3_RINGACC_RING_MODE_RING. Memory allocations + * should be done using this device. 
+ * @asel: Address Space Select value for physical addresses */ struct k3_ring_cfg { u32 size; @@ -74,6 +78,9 @@ struct k3_ring_cfg { enum k3_ring_mode mode; #define K3_RINGACC_RING_SHARED BIT(1) u32 flags; + + struct device *dma_dev; + u32 asel; }; #define K3_RINGACC_RING_ID_ANY (-1) @@ -245,4 +252,19 @@ int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem); u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring); +/* DMA ring support */ +struct ti_sci_handle; + +/** + * struct struct k3_ringacc_init_data - Initialization data for DMA rings + */ +struct k3_ringacc_init_data { + const struct ti_sci_handle *tisci; + u32 tisci_dev_id; + u32 num_rings; +}; + +struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev, + struct k3_ringacc_init_data *data); + #endif /* __SOC_TI_K3_RINGACC_API_H_ */ diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index cf27b080e148..0aad7009b50e 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -196,6 +196,22 @@ struct ti_sci_clk_ops { }; /** + * struct ti_sci_resource_desc - Description of TI SCI resource instance range. + * @start: Start index of the first resource range. + * @num: Number of resources in the first range. + * @start_sec: Start index of the second resource range. + * @num_sec: Number of resources in the second range. + * @res_map: Bitmap to manage the allocation of these resources. + */ +struct ti_sci_resource_desc { + u16 start; + u16 num; + u16 start_sec; + u16 num_sec; + unsigned long *res_map; +}; + +/** * struct ti_sci_rm_core_ops - Resource management core operations * @get_range: Get a range of resources belonging to ti sci host. * @get_rage_from_shost: Get a range of resources belonging to @@ -209,15 +225,15 @@ struct ti_sci_clk_ops { * - dev_id: TISCI device ID. * - subtype: Resource assignment subtype that is being requested * from the given device. 
- * - range_start: Start index of the resource range - * - range_end: Number of resources in the range + * - desc: Pointer to ti_sci_resource_desc to be updated with the resource + * range start index and number of resources */ struct ti_sci_rm_core_ops { int (*get_range)(const struct ti_sci_handle *handle, u32 dev_id, - u8 subtype, u16 *range_start, u16 *range_num); + u8 subtype, struct ti_sci_resource_desc *desc); int (*get_range_from_shost)(const struct ti_sci_handle *handle, u32 dev_id, u8 subtype, u8 s_host, - u16 *range_start, u16 *range_num); + struct ti_sci_resource_desc *desc); }; #define TI_SCI_RESASG_SUBTYPE_IR_OUTPUT 0 @@ -259,30 +275,46 @@ struct ti_sci_rm_irq_ops { #define TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID BIT(4) /* RA config.order_id parameter is valid for RM ring configure TISCI message */ #define TI_SCI_MSG_VALUE_RM_RING_ORDER_ID_VALID BIT(5) +/* RA config.virtid parameter is valid for RM ring configure TISCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_VIRTID_VALID BIT(6) +/* RA config.asel parameter is valid for RM ring configure TISCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_ASEL_VALID BIT(7) #define TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER \ (TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID | \ TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID | \ TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID | \ TI_SCI_MSG_VALUE_RM_RING_MODE_VALID | \ - TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID) + TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID | \ + TI_SCI_MSG_VALUE_RM_RING_ASEL_VALID) + +/** + * struct ti_sci_msg_rm_ring_cfg - Ring configuration + * + * Parameters for Navigator Subsystem ring configuration + * See @ti_sci_msg_rm_ring_cfg_req + */ +struct ti_sci_msg_rm_ring_cfg { + u32 valid_params; + u16 nav_id; + u16 index; + u32 addr_lo; + u32 addr_hi; + u32 count; + u8 mode; + u8 size; + u8 order_id; + u16 virtid; + u8 asel; +}; /** * struct ti_sci_rm_ringacc_ops - Ring Accelerator Management operations - * @config: configure the SoC Navigator Subsystem Ring Accelerator ring - * @get_config: get the SoC Navigator Subsystem Ring Accelerator ring - * configuration + * @set_cfg: configure the SoC Navigator Subsystem Ring Accelerator ring */ struct ti_sci_rm_ringacc_ops { - int (*config)(const struct ti_sci_handle *handle, - u32 valid_params, u16 nav_id, u16 index, - u32 addr_lo, u32 addr_hi, u32 count, u8 mode, - u8 size, u8 order_id - ); - int (*get_config)(const struct ti_sci_handle *handle, - u32 nav_id, u32 index, u8 *mode, - u32 *addr_lo, u32 *addr_hi, u32 *count, - u8 *size, u8 *order_id); + int (*set_cfg)(const struct ti_sci_handle *handle, + const struct ti_sci_msg_rm_ring_cfg *params); }; /** @@ -320,6 +352,9 @@ struct ti_sci_rm_psil_ops { #define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES 2 #define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES 3 +#define TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_TCHAN 0 +#define TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN 1 + /* UDMAP TX/RX channel valid_params common declarations */ #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID BIT(0) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID BIT(1) @@ -345,6 +380,8 @@ struct ti_sci_msg_rm_udmap_tx_ch_cfg { #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID BIT(11) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_CREDIT_COUNT_VALID BIT(12) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FDEPTH_VALID BIT(13) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID BIT(15) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID BIT(16) u16 nav_id; u16 index; u8 tx_pause_on_err; @@ -362,6 +399,8 @@ struct ti_sci_msg_rm_udmap_tx_ch_cfg { u16 fdepth; u8 
tx_sched_priority; u8 tx_burst_size; + u8 tx_tdtype; + u8 extended_ch_type; }; /** @@ -521,18 +560,6 @@ struct ti_sci_handle { #define TI_SCI_RESOURCE_NULL 0xffff /** - * struct ti_sci_resource_desc - Description of TI SCI resource instance range. - * @start: Start index of the resource. - * @num: Number of resources. - * @res_map: Bitmap to manage the allocation of these resources. - */ -struct ti_sci_resource_desc { - u16 start; - u16 num; - unsigned long *res_map; -}; - -/** * struct ti_sci_resource - Structure representing a resource assigned * to a device. * @sets: Number of sets available from this resource type diff --git a/include/linux/string.h b/include/linux/string.h index 1cd63a8a23ab..4fcfb56abcf5 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -267,7 +267,7 @@ void __write_overflow(void) __compiletime_error("detected write beyond size of o #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 9b35ce50cf2b..19b6dea27367 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -128,8 +128,8 @@ __be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *); void xdr_inline_pages(struct xdr_buf *, unsigned int, struct page **, unsigned int, unsigned int); -void xdr_terminate_string(struct xdr_buf *, const u32); -size_t xdr_buf_pagecount(struct xdr_buf *buf); +void xdr_terminate_string(const struct xdr_buf *, const u32); +size_t xdr_buf_pagecount(const struct xdr_buf *buf); int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp); void xdr_free_bvec(struct xdr_buf *buf); @@ -182,15 +182,14 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) * XDR buffer helper functions */ extern void xdr_shift_buf(struct xdr_buf *, size_t); -extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); -extern int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf, - unsigned int base, unsigned int len); +extern void xdr_buf_from_iov(const struct kvec *, struct xdr_buf *); +extern int xdr_buf_subsegment(const struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); extern void xdr_buf_trim(struct xdr_buf *, unsigned int); -extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); -extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); +extern int read_bytes_from_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int); +extern int write_bytes_to_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int); -extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32); -extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *); +extern int xdr_encode_word(const struct xdr_buf *, unsigned int, u32); +extern int xdr_decode_word(const struct xdr_buf *, unsigned int, u32 *); struct xdr_array2_desc; typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem); @@ -201,9 +200,9 @@ struct xdr_array2_desc { xdr_xcode_elem_t xcode; }; -extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base, +extern int xdr_decode_array2(const struct xdr_buf *buf, unsigned 
int base, struct xdr_array2_desc *desc); -extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base, +extern int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base, struct xdr_array2_desc *desc); extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len); @@ -251,9 +250,9 @@ extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); -extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); -extern uint64_t xdr_align_data(struct xdr_stream *, uint64_t, uint32_t); -extern uint64_t xdr_expand_hole(struct xdr_stream *, uint64_t, uint64_t); +extern int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); +extern unsigned int xdr_align_data(struct xdr_stream *, unsigned int offset, unsigned int length); +extern unsigned int xdr_expand_hole(struct xdr_stream *, unsigned int offset, unsigned int length); extern bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf, unsigned int len); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index a603d48d2b2c..d2e97ee802af 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -330,6 +330,7 @@ struct xprt_class { struct rpc_xprt * (*setup)(struct xprt_create *); struct module *owner; char name[32]; + const char * netid[]; }; /* @@ -384,7 +385,7 @@ xprt_disable_swap(struct rpc_xprt *xprt) */ int xprt_register_transport(struct xprt_class *type); int xprt_unregister_transport(struct xprt_class *type); -int xprt_load_transport(const char *); +int xprt_find_transport_ident(const char *); void xprt_wait_for_reply_request_def(struct rpc_task *task); void xprt_wait_for_reply_request_rtt(struct rpc_task *task); void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index df0c3c74609e..7688bc983de5 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -251,6 +251,30 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event) static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) #endif /* __SYSCALL_DEFINEx */ +/* For split 64-bit arguments on 32-bit architectures */ +#ifdef __LITTLE_ENDIAN +#define SC_ARG64(name) u32, name##_lo, u32, name##_hi +#else +#define SC_ARG64(name) u32, name##_hi, u32, name##_lo +#endif +#define SC_VAL64(type, name) ((type) name##_hi << 32 | name##_lo) + +#ifdef CONFIG_COMPAT +#define SYSCALL32_DEFINE1 COMPAT_SYSCALL_DEFINE1 +#define SYSCALL32_DEFINE2 COMPAT_SYSCALL_DEFINE2 +#define SYSCALL32_DEFINE3 COMPAT_SYSCALL_DEFINE3 +#define SYSCALL32_DEFINE4 COMPAT_SYSCALL_DEFINE4 +#define SYSCALL32_DEFINE5 COMPAT_SYSCALL_DEFINE5 +#define SYSCALL32_DEFINE6 COMPAT_SYSCALL_DEFINE6 +#else +#define SYSCALL32_DEFINE1 SYSCALL_DEFINE1 +#define SYSCALL32_DEFINE2 SYSCALL_DEFINE2 +#define SYSCALL32_DEFINE3 SYSCALL_DEFINE3 +#define SYSCALL32_DEFINE4 SYSCALL_DEFINE4 +#define SYSCALL32_DEFINE5 SYSCALL_DEFINE5 +#define SYSCALL32_DEFINE6 SYSCALL_DEFINE6 +#endif + /* * Called before coming back to user-mode. Returning to user-mode with an * address limit different than USER_DS can allow to overwrite kernel memory. 
@@ -362,6 +386,11 @@ asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events, int maxevents, int timeout, const sigset_t __user *sigmask, size_t sigsetsize); +asmlinkage long sys_epoll_pwait2(int epfd, struct epoll_event __user *events, + int maxevents, + const struct __kernel_timespec __user *timeout, + const sigset_t __user *sigmask, + size_t sigsetsize); /* fs/fcntl.c */ asmlinkage long sys_dup(unsigned int fildes); diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h deleted file mode 100644 index 266017fc9ee9..000000000000 --- a/include/linux/timekeeping32.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _LINUX_TIMEKEEPING32_H -#define _LINUX_TIMEKEEPING32_H -/* - * These interfaces are all based on the old timespec type - * and should get replaced with the timespec64 based versions - * over time so we can remove the file here. - */ - -static inline unsigned long get_seconds(void) -{ - return ktime_get_real_seconds(); -} - -#endif diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h new file mode 100644 index 000000000000..a9f9c5714e65 --- /dev/null +++ b/include/linux/trace_recursion.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_TRACE_RECURSION_H +#define _LINUX_TRACE_RECURSION_H + +#include <linux/interrupt.h> +#include <linux/sched.h> + +#ifdef CONFIG_TRACING + +/* Only current can touch trace_recursion */ + +/* + * For function tracing recursion: + * The order of these bits are important. + * + * When function tracing occurs, the following steps are made: + * If arch does not support a ftrace feature: + * call internal function (uses INTERNAL bits) which calls... + * If callback is registered to the "global" list, the list + * function is called and recursion checks the GLOBAL bits. + * then this function calls... + * The function callback, which can use the FTRACE bits to + * check for recursion. + * + * Now if the arch does not support a feature, and it calls + * the global list function which calls the ftrace callback + * all three of these steps will do a recursion protection. + * There's no reason to do one if the previous caller already + * did. The recursion that we are protecting against will + * go through the same steps again. + * + * To prevent the multiple recursion checks, if a recursion + * bit is set that is higher than the MAX bit of the current + * check, then we know that the check was made by the previous + * caller, and we can skip the current check. + */ +enum { + /* Function recursion bits */ + TRACE_FTRACE_BIT, + TRACE_FTRACE_NMI_BIT, + TRACE_FTRACE_IRQ_BIT, + TRACE_FTRACE_SIRQ_BIT, + + /* INTERNAL_BITs must be greater than FTRACE_BITs */ + TRACE_INTERNAL_BIT, + TRACE_INTERNAL_NMI_BIT, + TRACE_INTERNAL_IRQ_BIT, + TRACE_INTERNAL_SIRQ_BIT, + + TRACE_BRANCH_BIT, +/* + * Abuse of the trace_recursion. + * As we need a way to maintain state if we are tracing the function + * graph in irq because we want to trace a particular function that + * was called in irq context but we have irq tracing off. Since this + * can only be modified by current, we can reuse trace_recursion. + */ + TRACE_IRQ_BIT, + + /* Set if the function is in the set_graph_function file */ + TRACE_GRAPH_BIT, + + /* + * In the very unlikely case that an interrupt came in + * at a start of graph tracing, and we want to trace + * the function in that interrupt, the depth can be greater + * than zero, because of the preempted start of a previous + * trace. 
In an even more unlikely case, depth could be 2 + * if a softirq interrupted the start of graph tracing, + * followed by an interrupt preempting a start of graph + * tracing in the softirq, and depth can even be 3 + * if an NMI came in at the start of an interrupt function + * that preempted a softirq start of a function that + * preempted normal context!!!! Luckily, it can't be + * greater than 3, so the next two bits are a mask + * of what the depth is when we set TRACE_GRAPH_BIT + */ + + TRACE_GRAPH_DEPTH_START_BIT, + TRACE_GRAPH_DEPTH_END_BIT, + + /* + * To implement set_graph_notrace, if this bit is set, we ignore + * function graph tracing of called functions, until the return + * function is called to clear it. + */ + TRACE_GRAPH_NOTRACE_BIT, + + /* + * When transitioning between context, the preempt_count() may + * not be correct. Allow for a single recursion to cover this case. + */ + TRACE_TRANSITION_BIT, + + /* Used to prevent recursion recording from recursing. */ + TRACE_RECORD_RECURSION_BIT, +}; + +#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) +#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0) +#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit))) + +#define trace_recursion_depth() \ + (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3) +#define trace_recursion_set_depth(depth) \ + do { \ + current->trace_recursion &= \ + ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \ + current->trace_recursion |= \ + ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \ + } while (0) + +#define TRACE_CONTEXT_BITS 4 + +#define TRACE_FTRACE_START TRACE_FTRACE_BIT +#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1) + +#define TRACE_LIST_START TRACE_INTERNAL_BIT +#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1) + +#define TRACE_CONTEXT_MASK TRACE_LIST_MAX + +/* + * Used for setting context + * NMI = 0 + * IRQ = 1 + * SOFTIRQ = 2 + * NORMAL = 3 + */ +enum { + TRACE_CTX_NMI, + TRACE_CTX_IRQ, + TRACE_CTX_SOFTIRQ, + TRACE_CTX_NORMAL, +}; + +static __always_inline int trace_get_context_bit(void) +{ + unsigned long pc = preempt_count(); + + if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) + return TRACE_CTX_NORMAL; + else + return pc & NMI_MASK ? TRACE_CTX_NMI : + pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ; +} + +#ifdef CONFIG_FTRACE_RECORD_RECURSION +extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip); +# define do_ftrace_record_recursion(ip, pip) \ + do { \ + if (!trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \ + trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \ + ftrace_record_recursion(ip, pip); \ + trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \ + } \ + } while (0) +#else +# define do_ftrace_record_recursion(ip, pip) do { } while (0) +#endif + +static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip, + int start, int max) +{ + unsigned int val = READ_ONCE(current->trace_recursion); + int bit; + + /* A previous recursion check was made */ + if ((val & TRACE_CONTEXT_MASK) > max) + return 0; + + bit = trace_get_context_bit() + start; + if (unlikely(val & (1 << bit))) { + /* + * It could be that preempt_count has not been updated during + * a switch between contexts. Allow for a single recursion. 
+ */ + bit = TRACE_TRANSITION_BIT; + if (val & (1 << bit)) { + do_ftrace_record_recursion(ip, pip); + return -1; + } + } else { + /* Normal check passed, clear the transition to allow it again */ + val &= ~(1 << TRACE_TRANSITION_BIT); + } + + val |= 1 << bit; + current->trace_recursion = val; + barrier(); + + return bit + 1; +} + +static __always_inline void trace_clear_recursion(int bit) +{ + if (!bit) + return; + + barrier(); + bit--; + trace_recursion_clear(bit); +} + +/** + * ftrace_test_recursion_trylock - tests for recursion in same context + * + * Use this for ftrace callbacks. This will detect if the function + * tracing recursed in the same context (normal vs interrupt), + * + * Returns: -1 if a recursion happened. + * >= 0 if no recursion + */ +static __always_inline int ftrace_test_recursion_trylock(unsigned long ip, + unsigned long parent_ip) +{ + return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START, TRACE_FTRACE_MAX); +} + +/** + * ftrace_test_recursion_unlock - called when function callback is complete + * @bit: The return of a successful ftrace_test_recursion_trylock() + * + * This is used at the end of a ftrace callback. + */ +static __always_inline void ftrace_test_recursion_unlock(int bit) +{ + trace_clear_recursion(bit); +} + +#endif /* CONFIG_TRACING */ +#endif /* _LINUX_TRACE_RECURSION_H */ diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index 6c30508fca19..5a2c650d9e1c 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -12,7 +12,7 @@ */ struct trace_seq { - unsigned char buffer[PAGE_SIZE]; + char buffer[PAGE_SIZE]; struct seq_buf seq; int full; }; @@ -51,7 +51,7 @@ static inline int trace_seq_used(struct trace_seq *s) * that is about to be written to and then return the result * of that write. */ -static inline unsigned char * +static inline char * trace_seq_buffer_ptr(struct trace_seq *s) { return s->buffer + seq_buf_used(&s->seq); diff --git a/include/linux/tty.h b/include/linux/tty.h index c873f475f0a7..37803f3e6d49 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -421,6 +421,7 @@ extern void tty_kclose(struct tty_struct *tty); extern int tty_dev_name_to_number(const char *name, dev_t *number); extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout); extern void tty_ldisc_unlock(struct tty_struct *tty); +extern ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *); #else static inline void tty_kref_put(struct tty_struct *tty) { } diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 88a7673894d5..cfbfd6fe01df 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -81,6 +81,8 @@ struct usbnet { # define EVENT_LINK_CHANGE 11 # define EVENT_SET_RX_MODE 12 # define EVENT_NO_IP_ALIGN 13 + u32 rx_speed; /* in bps - NOT Mbps */ + u32 tx_speed; /* in bps - NOT Mbps */ }; static inline struct usb_driver *driver_of(struct usb_interface *intf) diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 30bc7a7223bb..0fefeb976877 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -42,6 +42,7 @@ struct vdpa_vq_state { * @config: the configuration ops for this device. * @index: device index * @features_valid: were features initialized? 
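The helpers at the end of the new trace_recursion.h are meant to bracket an ftrace callback so the same context cannot re-enter it. A minimal sketch of the calling pattern, with a hypothetical callback name and its work elided (the prototype shown matches recent ftrace_func_t; older trees pass struct pt_regs * as the last argument):

#include <linux/ftrace.h>

static void example_ftrace_callback(unsigned long ip, unsigned long parent_ip,
                                    struct ftrace_ops *op,
                                    struct ftrace_regs *fregs)
{
        int bit;

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return; /* recursion detected in this context, bail out */

        /* ... the callback's actual tracing work goes here ... */

        ftrace_test_recursion_unlock(bit);
}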
for legacy guests + * @nvqs: maximum number of supported virtqueues */ struct vdpa_device { struct device dev; diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 38d3c6a8dc7e..f45940b38a02 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -90,6 +90,8 @@ struct vfio_iommu_driver_ops { struct notifier_block *nb); int (*dma_rw)(void *iommu_data, dma_addr_t user_iova, void *data, size_t count, bool write); + struct iommu_domain *(*group_iommu_domain)(void *iommu_data, + struct iommu_group *group); }; extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); @@ -126,6 +128,8 @@ extern int vfio_group_unpin_pages(struct vfio_group *group, extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova, void *data, size_t len, bool write); +extern struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group); + /* each type has independent events */ enum vfio_notify_type { VFIO_IOMMU_NOTIFY = 0, diff --git a/include/linux/wait.h b/include/linux/wait.h index 27fb99cfeb02..fe10e8570a52 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -22,6 +22,7 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int #define WQ_FLAG_BOOKMARK 0x04 #define WQ_FLAG_CUSTOM 0x08 #define WQ_FLAG_DONE 0x10 +#define WQ_FLAG_PRIORITY 0x20 /* * A single wait-queue entry structure: @@ -164,11 +165,20 @@ static inline bool wq_has_sleeper(struct wait_queue_head *wq_head) extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); +extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { - list_add(&wq_entry->entry, &wq_head->head); + struct list_head *head = &wq_head->head; + struct wait_queue_entry *wq; + + list_for_each_entry(wq, &wq_head->head, entry) { + if (!(wq->flags & WQ_FLAG_PRIORITY)) + break; + head = &wq->entry; + } + list_add(&wq_entry->entry, head); } /* diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index be36cbdcc1bd..3eb202259e8c 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -520,7 +520,7 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat, u32 width, u32 height); /** - * v4l2_get_link_rate - Get link rate from transmitter + * v4l2_get_link_freq - Get link rate from transmitter * * @handler: The transmitter's control handler * @mul: The multiplier between pixel rate and link frequency. 
Bits per pixel on @@ -537,7 +537,7 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat, * -ENOENT: Link frequency or pixel rate control not found * -EINVAL: Invalid link frequency value */ -s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul, +s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul, unsigned int div); static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf) diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h index 9ad136682c47..14cfd036268a 100644 --- a/include/memory/renesas-rpc-if.h +++ b/include/memory/renesas-rpc-if.h @@ -10,6 +10,7 @@ #ifndef __RENESAS_RPC_IF_H #define __RENESAS_RPC_IF_H +#include <linux/pm_runtime.h> #include <linux/types.h> enum rpcif_data_dir { @@ -77,11 +78,19 @@ struct rpcif { int rpcif_sw_init(struct rpcif *rpc, struct device *dev); void rpcif_hw_init(struct rpcif *rpc, bool hyperflash); -void rpcif_enable_rpm(struct rpcif *rpc); -void rpcif_disable_rpm(struct rpcif *rpc); void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs, size_t *len); int rpcif_manual_xfer(struct rpcif *rpc); ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf); +static inline void rpcif_enable_rpm(struct rpcif *rpc) +{ + pm_runtime_enable(rpc->dev); +} + +static inline void rpcif_disable_rpm(struct rpcif *rpc) +{ + pm_runtime_disable(rpc->dev); +} + #endif // __RENESAS_RPC_IF_H diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h index e013736e275d..3ed736da02c8 100644 --- a/include/misc/ocxl.h +++ b/include/misc/ocxl.h @@ -447,7 +447,7 @@ void ocxl_link_release(struct pci_dev *dev, void *link_handle); * defined */ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr, - u64 amr, struct mm_struct *mm, + u64 amr, u16 bdf, struct mm_struct *mm, void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr), void *xsl_err_data); diff --git a/include/net/9p/client.h b/include/net/9p/client.h index dd5b5bd781a4..e1c308d8d288 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -140,10 +140,16 @@ struct p9_client { * * TODO: This needs lots of explanation. */ +enum fid_source { + FID_FROM_OTHER, + FID_FROM_INODE, + FID_FROM_DENTRY, +}; struct p9_fid { struct p9_client *clnt; u32 fid; + refcount_t count; int mode; struct p9_qid qid; u32 iounit; @@ -152,6 +158,7 @@ struct p9_fid { void *rdir; struct hlist_node dlist; /* list of all fids attached to a dentry */ + struct hlist_node ilist; }; /** diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 9a4bbccddc7f..0d6f7ec86061 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1756,7 +1756,7 @@ struct cfg80211_sar_specs { /** - * @struct cfg80211_sar_chan_ranges - sar frequency ranges + * struct cfg80211_sar_freq_ranges - sar frequency ranges * @start_freq: start range edge frequency * @end_freq: end range edge frequency */ @@ -3972,6 +3972,8 @@ struct mgmt_frame_regs { * This callback may sleep. * @reset_tid_config: Reset TID specific configuration for the peer, for the * given TIDs. This callback may sleep. + * + * @set_sar_specs: Update the SAR (TX power) settings. 
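The wait.h hunk above adds WQ_FLAG_PRIORITY, declares add_wait_queue_priority(), and teaches __add_wait_queue() to queue ordinary entries behind any priority entries, so a priority waiter's wake function runs first. A minimal sketch of registering such a waiter, using hypothetical example_*() names (the body of add_wait_queue_priority() lives in kernel/sched/wait.c and is not shown in this diff):

#include <linux/wait.h>

static int example_wake(struct wait_queue_entry *wq_entry, unsigned int mode,
                        int sync, void *key)
{
        /* Called ahead of non-priority waiters on this wait queue head. */
        return 0;
}

static void example_attach(struct wait_queue_head *wqh,
                           struct wait_queue_entry *wait)
{
        init_waitqueue_func_entry(wait, example_wake);
        add_wait_queue_priority(wqh, wait);
}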
*/ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -4929,6 +4931,7 @@ struct wiphy_iftype_akm_suites { * @max_data_retry_count: maximum supported per TID retry count for * configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and * %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes + * @sar_capa: SAR control capabilities */ struct wiphy { /* assign these fields before you register the wiphy */ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 7338b3865a2a..111d7771b208 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -76,6 +76,8 @@ struct inet_connection_sock_af_ops { * @icsk_ext_hdr_len: Network protocol overhead (IP/IPv6 options) * @icsk_ack: Delayed ACK control data * @icsk_mtup; MTU probing control data + * @icsk_probes_tstamp: Probe timestamp (cleared by non-zero window ack) + * @icsk_user_timeout: TCP_USER_TIMEOUT value */ struct inet_connection_sock { /* inet_sock has to be the first member! */ @@ -129,6 +131,7 @@ struct inet_connection_sock { u32 probe_timestamp; } icsk_mtup; + u32 icsk_probes_tstamp; u32 icsk_user_timeout; u64 icsk_ca_priv[104 / sizeof(u64)]; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index d315740581f1..2bdbf62f4ecd 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3880,6 +3880,7 @@ enum ieee80211_reconfig_type { * This callback may sleep. * @sta_set_4addr: Called to notify the driver when a station starts/stops using * 4-address mode + * @set_sar_specs: Update the SAR (TX power) settings. */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, diff --git a/include/net/red.h b/include/net/red.h index fc455445f4b2..932f0d79d60c 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -168,12 +168,14 @@ static inline void red_set_vars(struct red_vars *v) v->qcount = -1; } -static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog) +static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log) { if (fls(qth_min) + Wlog > 32) return false; if (fls(qth_max) + Wlog > 32) return false; + if (Scell_log >= 32) + return false; if (qth_max < qth_min) return false; return true; diff --git a/include/net/sock.h b/include/net/sock.h index bdc4323ce53c..129d200bccb4 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1921,10 +1921,13 @@ static inline void sk_set_txhash(struct sock *sk) sk->sk_txhash = net_tx_rndhash(); } -static inline void sk_rethink_txhash(struct sock *sk) +static inline bool sk_rethink_txhash(struct sock *sk) { - if (sk->sk_txhash) + if (sk->sk_txhash) { sk_set_txhash(sk); + return true; + } + return false; } static inline struct dst_entry * @@ -1947,12 +1950,10 @@ sk_dst_get(struct sock *sk) return dst; } -static inline void dst_negative_advice(struct sock *sk) +static inline void __dst_negative_advice(struct sock *sk) { struct dst_entry *ndst, *dst = __sk_dst_get(sk); - sk_rethink_txhash(sk); - if (dst && dst->ops->negative_advice) { ndst = dst->ops->negative_advice(dst); @@ -1964,6 +1965,12 @@ static inline void dst_negative_advice(struct sock *sk) } } +static inline void dst_negative_advice(struct sock *sk) +{ + sk_rethink_txhash(sk); + __dst_negative_advice(sk); +} + static inline void __sk_dst_set(struct sock *sk, struct dst_entry *dst) { diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 4f4e93bf814c..cc17bc957548 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -58,10 +58,6 @@ struct xdp_sock { struct 
xsk_queue *tx ____cacheline_aligned_in_smp; struct list_head tx_list; - /* Mutual exclusion of NAPI TX thread and sendmsg error paths - * in the SKB destructor callback. - */ - spinlock_t tx_completion_lock; /* Protects generic receive. */ spinlock_t rx_lock; diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index 01755b838c74..eaa8386dbc63 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -73,6 +73,11 @@ struct xsk_buff_pool { bool dma_need_sync; bool unaligned; void *addrs; + /* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect: + * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when + * sockets share a single cq when the same netdev and queue id is shared. + */ + spinlock_t cq_lock; struct xdp_buff_xsk *free_heads[]; }; diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h deleted file mode 100644 index 8c18dc6d3fde..000000000000 --- a/include/soc/nps/common.h +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright (c) 2016, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef SOC_NPS_COMMON_H -#define SOC_NPS_COMMON_H - -#ifdef CONFIG_SMP -#define NPS_IPI_IRQ 5 -#endif - -#define NPS_HOST_REG_BASE 0xF6000000 - -#define NPS_MSU_BLKID 0x018 - -#define CTOP_INST_RSPI_GIC_0_R12 0x3C56117E -#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60 -#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422 - -#ifndef AUX_IENABLE -#define AUX_IENABLE 0x40c -#endif - -#define CTOP_AUX_IACK (0xFFFFF800 + 0x088) - -#ifndef __ASSEMBLY__ - -/* In order to increase compilation test coverage */ -#ifdef CONFIG_ARC -static inline void nps_ack_gic(void) -{ - __asm__ __volatile__ ( - " .word %0\n" - : - : "i"(CTOP_INST_RSPI_GIC_0_R12) - : "memory"); -} -#else -static inline void nps_ack_gic(void) { } -#define write_aux_reg(r, v) -#define read_aux_reg(r) 0 -#endif - -/* CPU global ID */ -struct global_id { - union { - struct { -#ifdef CONFIG_EZNPS_MTM_EXT - u32 __reserved:20, cluster:4, core:4, thread:4; -#else - u32 __reserved:24, cluster:4, core:4; -#endif - }; - u32 value; - }; -}; - -/* - * Convert logical to physical CPU IDs - * - * The conversion swap bits 1 and 2 of cluster id (out of 4 bits) - * Now quad of logical clusters id's are adjacent physically, - * and not like the id's physically came with each cluster. - * Below table is 4x4 mesh of core clusters as it layout on chip. - * Cluster ids are in format: logical (physical) - * - * ----------------- ------------------ - * 3 | 5 (3) 7 (7) | | 13 (11) 15 (15)| - * - * 2 | 4 (2) 6 (6) | | 12 (10) 14 (14)| - * ----------------- ------------------ - * 1 | 1 (1) 3 (5) | | 9 (9) 11 (13)| - * - * 0 | 0 (0) 2 (4) | | 8 (8) 10 (12)| - * ----------------- ------------------ - * 0 1 2 3 - */ -static inline int nps_cluster_logic_to_phys(int cluster) -{ -#ifdef __arc__ - __asm__ __volatile__( - " mov r3,%0\n" - " .short %1\n" - " .word %2\n" - " mov %0,r3\n" - : "+r"(cluster) - : "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST), - "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM) - : "r3"); -#endif - - return cluster; -} - -#define NPS_CPU_TO_CLUSTER_NUM(cpu) \ - ({ struct global_id gid; gid.value = cpu; \ - nps_cluster_logic_to_phys(gid.cluster); }) - -struct nps_host_reg_address { - union { - struct { - u32 base:8, cl_x:4, cl_y:4, - blkid:6, reg:8, __reserved:2; - }; - u32 value; - }; -}; - -struct nps_host_reg_address_non_cl { - union { - struct { - u32 base:7, blkid:11, reg:12, __reserved:2; - }; - u32 value; - }; -}; - -static inline void *nps_host_reg_non_cl(u32 blkid, u32 reg) -{ - struct nps_host_reg_address_non_cl reg_address; - - reg_address.value = NPS_HOST_REG_BASE; - reg_address.blkid = blkid; - reg_address.reg = reg; - - return (void *)reg_address.value; -} - -static inline void *nps_host_reg(u32 cpu, u32 blkid, u32 reg) -{ - struct nps_host_reg_address reg_address; - u32 cl = NPS_CPU_TO_CLUSTER_NUM(cpu); - - reg_address.value = NPS_HOST_REG_BASE; - reg_address.cl_x = (cl >> 2) & 0x3; - reg_address.cl_y = cl & 0x3; - reg_address.blkid = blkid; - reg_address.reg = reg; - - return (void *)reg_address.value; -} -#endif /* __ASSEMBLY__ */ - -#endif /* SOC_NPS_COMMON_H */ diff --git a/include/soc/nps/mtm.h b/include/soc/nps/mtm.h deleted file mode 100644 index d2f5e7e3703e..000000000000 --- a/include/soc/nps/mtm.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2016, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef SOC_NPS_MTM_H -#define SOC_NPS_MTM_H - -#define CTOP_INST_HWSCHD_OFF_R3 0x3B6F00BF -#define CTOP_INST_HWSCHD_RESTORE_R3 0x3E6F70C3 - -static inline void hw_schd_save(unsigned int *flags) -{ - __asm__ __volatile__( - " .word %1\n" - " st r3,[%0]\n" - : - : "r"(flags), "i"(CTOP_INST_HWSCHD_OFF_R3) - : "r3", "memory"); -} - -static inline void hw_schd_restore(unsigned int flags) -{ - __asm__ __volatile__( - " mov r3, %0\n" - " .word %1\n" - : - : "r"(flags), "i"(CTOP_INST_HWSCHD_RESTORE_R3) - : "r3"); -} - -#endif /* SOC_NPS_MTM_H */ diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index 1238e35653d1..d731407e23bb 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -6,7 +6,9 @@ #ifndef __SOC_TEGRA_MC_H__ #define __SOC_TEGRA_MC_H__ +#include <linux/bits.h> #include <linux/err.h> +#include <linux/interconnect-provider.h> #include <linux/reset-controller.h> #include <linux/types.h> @@ -141,6 +143,17 @@ struct tegra_mc_reset_ops { const struct tegra_mc_reset *rst); }; +#define TEGRA_MC_ICC_TAG_DEFAULT 0 +#define TEGRA_MC_ICC_TAG_ISO BIT(0) + +struct tegra_mc_icc_ops { + int (*set)(struct icc_node *src, struct icc_node *dst); + int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw, + u32 peak_bw, u32 *agg_avg, u32 *agg_peak); + struct icc_node_data *(*xlate_extended)(struct of_phandle_args *spec, + void *data); +}; + struct tegra_mc_soc { const struct tegra_mc_client *clients; unsigned int num_clients; @@ -160,6 +173,8 @@ struct tegra_mc_soc { const struct tegra_mc_reset_ops *reset_ops; const struct tegra_mc_reset *resets; unsigned int num_resets; + + const struct tegra_mc_icc_ops *icc_ops; }; struct tegra_mc { @@ -178,10 +193,22 @@ struct tegra_mc { struct reset_controller_dev reset; + struct icc_provider provider; + spinlock_t lock; }; int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate); unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc); +#ifdef CONFIG_TEGRA_MC +struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev); +#else +static inline struct tegra_mc * +devm_tegra_memory_controller_get(struct device *dev) +{ + return ERR_PTR(-ENODEV); +} +#endif + #endif /* __SOC_TEGRA_MC_H__ */ diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 2336bf9243e1..2e1200d17d0c 
100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -229,7 +229,7 @@ typedef int (*snd_pcm_hw_rule_func_t)(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule { unsigned int cond; int var; - int deps[4]; + int deps[5]; snd_pcm_hw_rule_func_t func; void *private; diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 4eef374d4413..4a5cc8c64be3 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -231,6 +231,7 @@ enum afs_file_error { afs_file_error_dir_bad_magic, afs_file_error_dir_big, afs_file_error_dir_missing_page, + afs_file_error_dir_name_too_long, afs_file_error_dir_over_end, afs_file_error_dir_small, afs_file_error_dir_unmarked_ext, @@ -488,6 +489,7 @@ enum afs_cb_break_reason { EM(afs_file_error_dir_bad_magic, "DIR_BAD_MAGIC") \ EM(afs_file_error_dir_big, "DIR_BIG") \ EM(afs_file_error_dir_missing_page, "DIR_MISSING_PAGE") \ + EM(afs_file_error_dir_name_too_long, "DIR_NAME_TOO_LONG") \ EM(afs_file_error_dir_over_end, "DIR_ENT_OVER_END") \ EM(afs_file_error_dir_small, "DIR_SMALL") \ EM(afs_file_error_dir_unmarked_ext, "DIR_UNMARKED_EXT") \ diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h index cb1aea25c199..e19edc63ee95 100644 --- a/include/trace/events/clk.h +++ b/include/trace/events/clk.h @@ -118,6 +118,50 @@ DEFINE_EVENT(clk_rate, clk_set_rate_complete, TP_ARGS(core, rate) ); +DEFINE_EVENT(clk_rate, clk_set_min_rate, + + TP_PROTO(struct clk_core *core, unsigned long rate), + + TP_ARGS(core, rate) +); + +DEFINE_EVENT(clk_rate, clk_set_max_rate, + + TP_PROTO(struct clk_core *core, unsigned long rate), + + TP_ARGS(core, rate) +); + +DECLARE_EVENT_CLASS(clk_rate_range, + + TP_PROTO(struct clk_core *core, unsigned long min, unsigned long max), + + TP_ARGS(core, min, max), + + TP_STRUCT__entry( + __string( name, core->name ) + __field(unsigned long, min ) + __field(unsigned long, max ) + ), + + TP_fast_assign( + __assign_str(name, core->name); + __entry->min = min; + __entry->max = max; + ), + + TP_printk("%s min %lu max %lu", __get_str(name), + (unsigned long)__entry->min, + (unsigned long)__entry->max) +); + +DEFINE_EVENT(clk_rate_range, clk_set_rate_range, + + TP_PROTO(struct clk_core *core, unsigned long min, unsigned long max), + + TP_ARGS(core, min, max) +); + DECLARE_EVENT_CLASS(clk_parent, TP_PROTO(struct clk_core *core, struct clk_core *parent), diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index f8f1e85ff130..56b113e3cd6a 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -6,6 +6,7 @@ #define _TRACE_F2FS_H #include <linux/tracepoint.h> +#include <uapi/linux/f2fs.h> #define show_dev(dev) MAJOR(dev), MINOR(dev) #define show_dev_ino(entry) show_dev(entry->dev), (unsigned long)entry->ino diff --git a/include/trace/events/iocost.h b/include/trace/events/iocost.h index 0b6869980ba2..e282ce02fa2d 100644 --- a/include/trace/events/iocost.h +++ b/include/trace/events/iocost.h @@ -11,7 +11,7 @@ struct ioc_gq; #include <linux/tracepoint.h> -TRACE_EVENT(iocost_iocg_activate, +DECLARE_EVENT_CLASS(iocost_iocg_state, TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now, u64 last_period, u64 cur_period, u64 vtime), @@ -59,6 +59,20 @@ TRACE_EVENT(iocost_iocg_activate, ) ); +DEFINE_EVENT(iocost_iocg_state, iocost_iocg_activate, + TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now, + u64 last_period, u64 cur_period, u64 vtime), + + TP_ARGS(iocg, path, now, last_period, cur_period, vtime) +); + +DEFINE_EVENT(iocost_iocg_state, 
iocost_iocg_idle, + TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now, + u64 last_period, u64 cur_period, u64 vtime), + + TP_ARGS(iocg, path, now, last_period, cur_period, vtime) +); + DECLARE_EVENT_CLASS(iocg_inuse_update, TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now, diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 26cfb0fa8e7e..49d7d0fe29f6 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -399,6 +399,69 @@ TRACE_EVENT(kvm_halt_poll_ns, #define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \ trace_kvm_halt_poll_ns(false, vcpu_id, new, old) +TRACE_EVENT(kvm_dirty_ring_push, + TP_PROTO(struct kvm_dirty_ring *ring, u32 slot, u64 offset), + TP_ARGS(ring, slot, offset), + + TP_STRUCT__entry( + __field(int, index) + __field(u32, dirty_index) + __field(u32, reset_index) + __field(u32, slot) + __field(u64, offset) + ), + + TP_fast_assign( + __entry->index = ring->index; + __entry->dirty_index = ring->dirty_index; + __entry->reset_index = ring->reset_index; + __entry->slot = slot; + __entry->offset = offset; + ), + + TP_printk("ring %d: dirty 0x%x reset 0x%x " + "slot %u offset 0x%llx (used %u)", + __entry->index, __entry->dirty_index, + __entry->reset_index, __entry->slot, __entry->offset, + __entry->dirty_index - __entry->reset_index) +); + +TRACE_EVENT(kvm_dirty_ring_reset, + TP_PROTO(struct kvm_dirty_ring *ring), + TP_ARGS(ring), + + TP_STRUCT__entry( + __field(int, index) + __field(u32, dirty_index) + __field(u32, reset_index) + ), + + TP_fast_assign( + __entry->index = ring->index; + __entry->dirty_index = ring->dirty_index; + __entry->reset_index = ring->reset_index; + ), + + TP_printk("ring %d: dirty 0x%x reset 0x%x (used %u)", + __entry->index, __entry->dirty_index, __entry->reset_index, + __entry->dirty_index - __entry->reset_index) +); + +TRACE_EVENT(kvm_dirty_ring_exit, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu), + + TP_STRUCT__entry( + __field(int, vcpu_id) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu->vcpu_id; + ), + + TP_printk("vcpu %d", __entry->vcpu_id) +); + #endif /* _TRACE_KVM_MAIN_H */ /* This part must be outside protection */ diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 896aafc37b09..76e85e16854b 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -60,7 +60,7 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class, ), \ TP_ARGS(wc, cid)) -DECLARE_EVENT_CLASS(xprtrdma_reply_event, +DECLARE_EVENT_CLASS(xprtrdma_reply_class, TP_PROTO( const struct rpcrdma_rep *rep ), @@ -68,29 +68,30 @@ DECLARE_EVENT_CLASS(xprtrdma_reply_event, TP_ARGS(rep), TP_STRUCT__entry( - __field(const void *, rep) - __field(const void *, r_xprt) __field(u32, xid) __field(u32, version) __field(u32, proc) + __string(addr, rpcrdma_addrstr(rep->rr_rxprt)) + __string(port, rpcrdma_portstr(rep->rr_rxprt)) ), TP_fast_assign( - __entry->rep = rep; - __entry->r_xprt = rep->rr_rxprt; __entry->xid = be32_to_cpu(rep->rr_xid); __entry->version = be32_to_cpu(rep->rr_vers); __entry->proc = be32_to_cpu(rep->rr_proc); + __assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt)); + __assign_str(port, rpcrdma_portstr(rep->rr_rxprt)); ), - TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u", - __entry->r_xprt, __entry->xid, __entry->rep, - __entry->version, __entry->proc + TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u", + __get_str(addr), __get_str(port), + __entry->xid, __entry->version, __entry->proc ) ); #define DEFINE_REPLY_EVENT(name) \ - 
DEFINE_EVENT(xprtrdma_reply_event, name, \ + DEFINE_EVENT(xprtrdma_reply_class, \ + xprtrdma_reply_##name##_err, \ TP_PROTO( \ const struct rpcrdma_rep *rep \ ), \ @@ -261,54 +262,67 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event, ), \ TP_ARGS(task, mr, nsegs)) -DECLARE_EVENT_CLASS(xprtrdma_frwr_done, +TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL); +TRACE_DEFINE_ENUM(DMA_TO_DEVICE); +TRACE_DEFINE_ENUM(DMA_FROM_DEVICE); +TRACE_DEFINE_ENUM(DMA_NONE); + +#define xprtrdma_show_direction(x) \ + __print_symbolic(x, \ + { DMA_BIDIRECTIONAL, "BIDIR" }, \ + { DMA_TO_DEVICE, "TO_DEVICE" }, \ + { DMA_FROM_DEVICE, "FROM_DEVICE" }, \ + { DMA_NONE, "NONE" }) + +DECLARE_EVENT_CLASS(xprtrdma_mr_class, TP_PROTO( - const struct ib_wc *wc, - const struct rpcrdma_frwr *frwr + const struct rpcrdma_mr *mr ), - TP_ARGS(wc, frwr), + TP_ARGS(mr), TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) __field(u32, mr_id) - __field(unsigned int, status) - __field(unsigned int, vendor_err) + __field(int, nents) + __field(u32, handle) + __field(u32, length) + __field(u64, offset) + __field(u32, dir) ), TP_fast_assign( - __entry->mr_id = frwr->fr_mr->res.id; - __entry->status = wc->status; - __entry->vendor_err = __entry->status ? wc->vendor_err : 0; + const struct rpcrdma_req *req = mr->mr_req; + const struct rpc_task *task = req->rl_slot.rq_task; + + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; + __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->nents = mr->mr_nents; + __entry->handle = mr->mr_handle; + __entry->length = mr->mr_length; + __entry->offset = mr->mr_offset; + __entry->dir = mr->mr_dir; ), - TP_printk( - "mr.id=%u: %s (%u/0x%x)", - __entry->mr_id, rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err + TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)", + __entry->task_id, __entry->client_id, + __entry->mr_id, __entry->nents, __entry->length, + (unsigned long long)__entry->offset, __entry->handle, + xprtrdma_show_direction(__entry->dir) ) ); -#define DEFINE_FRWR_DONE_EVENT(name) \ - DEFINE_EVENT(xprtrdma_frwr_done, name, \ +#define DEFINE_MR_EVENT(name) \ + DEFINE_EVENT(xprtrdma_mr_class, \ + xprtrdma_mr_##name, \ TP_PROTO( \ - const struct ib_wc *wc, \ - const struct rpcrdma_frwr *frwr \ + const struct rpcrdma_mr *mr \ ), \ - TP_ARGS(wc, frwr)) - -TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL); -TRACE_DEFINE_ENUM(DMA_TO_DEVICE); -TRACE_DEFINE_ENUM(DMA_FROM_DEVICE); -TRACE_DEFINE_ENUM(DMA_NONE); - -#define xprtrdma_show_direction(x) \ - __print_symbolic(x, \ - { DMA_BIDIRECTIONAL, "BIDIR" }, \ - { DMA_TO_DEVICE, "TO_DEVICE" }, \ - { DMA_FROM_DEVICE, "FROM_DEVICE" }, \ - { DMA_NONE, "NONE" }) + TP_ARGS(mr)) -DECLARE_EVENT_CLASS(xprtrdma_mr, +DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class, TP_PROTO( const struct rpcrdma_mr *mr ), @@ -340,45 +354,47 @@ DECLARE_EVENT_CLASS(xprtrdma_mr, ) ); -#define DEFINE_MR_EVENT(name) \ - DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \ - TP_PROTO( \ - const struct rpcrdma_mr *mr \ - ), \ +#define DEFINE_ANON_MR_EVENT(name) \ + DEFINE_EVENT(xprtrdma_anonymous_mr_class, \ + xprtrdma_mr_##name, \ + TP_PROTO( \ + const struct rpcrdma_mr *mr \ + ), \ TP_ARGS(mr)) -DECLARE_EVENT_CLASS(xprtrdma_cb_event, +DECLARE_EVENT_CLASS(xprtrdma_callback_class, TP_PROTO( + const struct rpcrdma_xprt *r_xprt, const struct rpc_rqst *rqst ), - TP_ARGS(rqst), + TP_ARGS(r_xprt, rqst), TP_STRUCT__entry( - __field(const void *, rqst) - __field(const void *, rep) - __field(const void *, req) __field(u32, xid) + 
__string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) ), TP_fast_assign( - __entry->rqst = rqst; - __entry->req = rpcr_to_rdmar(rqst); - __entry->rep = rpcr_to_rdmar(rqst)->rl_reply; __entry->xid = be32_to_cpu(rqst->rq_xid); + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p", - __entry->xid, __entry->rqst, __entry->req, __entry->rep + TP_printk("peer=[%s]:%s xid=0x%08x", + __get_str(addr), __get_str(port), __entry->xid ) ); -#define DEFINE_CB_EVENT(name) \ - DEFINE_EVENT(xprtrdma_cb_event, name, \ +#define DEFINE_CALLBACK_EVENT(name) \ + DEFINE_EVENT(xprtrdma_callback_class, \ + xprtrdma_cb_##name, \ TP_PROTO( \ + const struct rpcrdma_xprt *r_xprt, \ const struct rpc_rqst *rqst \ ), \ - TP_ARGS(rqst)) + TP_ARGS(r_xprt, rqst)) /** ** Connection events @@ -549,61 +565,33 @@ TRACE_EVENT(xprtrdma_createmrs, ) ); -TRACE_EVENT(xprtrdma_mr_get, - TP_PROTO( - const struct rpcrdma_req *req - ), - - TP_ARGS(req), - - TP_STRUCT__entry( - __field(const void *, req) - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(u32, xid) - ), - - TP_fast_assign( - const struct rpc_rqst *rqst = &req->rl_slot; - - __entry->req = req; - __entry->task_id = rqst->rq_task->tk_pid; - __entry->client_id = rqst->rq_task->tk_client->cl_clid; - __entry->xid = be32_to_cpu(rqst->rq_xid); - ), - - TP_printk("task:%u@%u xid=0x%08x req=%p", - __entry->task_id, __entry->client_id, __entry->xid, - __entry->req - ) -); - -TRACE_EVENT(xprtrdma_nomrs, +TRACE_EVENT(xprtrdma_nomrs_err, TP_PROTO( + const struct rpcrdma_xprt *r_xprt, const struct rpcrdma_req *req ), - TP_ARGS(req), + TP_ARGS(r_xprt, req), TP_STRUCT__entry( - __field(const void *, req) __field(unsigned int, task_id) __field(unsigned int, client_id) - __field(u32, xid) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) ), TP_fast_assign( const struct rpc_rqst *rqst = &req->rl_slot; - __entry->req = req; __entry->task_id = rqst->rq_task->tk_pid; __entry->client_id = rqst->rq_task->tk_client->cl_clid; - __entry->xid = be32_to_cpu(rqst->rq_xid); + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("task:%u@%u xid=0x%08x req=%p", - __entry->task_id, __entry->client_id, __entry->xid, - __entry->req + TP_printk("peer=[%s]:%s task:%u@%u", + __get_str(addr), __get_str(port), + __entry->task_id, __entry->client_id ) ); @@ -735,8 +723,8 @@ TRACE_EVENT(xprtrdma_post_send, TP_ARGS(req), TP_STRUCT__entry( - __field(const void *, req) - __field(const void *, sc) + __field(u32, cq_id) + __field(int, completion_id) __field(unsigned int, task_id) __field(unsigned int, client_id) __field(int, num_sge) @@ -745,20 +733,21 @@ TRACE_EVENT(xprtrdma_post_send, TP_fast_assign( const struct rpc_rqst *rqst = &req->rl_slot; + const struct rpcrdma_sendctx *sc = req->rl_sendctx; + __entry->cq_id = sc->sc_cid.ci_queue_id; + __entry->completion_id = sc->sc_cid.ci_completion_id; __entry->task_id = rqst->rq_task->tk_pid; __entry->client_id = rqst->rq_task->tk_client ? 
rqst->rq_task->tk_client->cl_clid : -1; - __entry->req = req; - __entry->sc = req->rl_sendctx; __entry->num_sge = req->rl_wr.num_sge; __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED; ), - TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s", + TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s", __entry->task_id, __entry->client_id, - __entry->req, __entry->sc, __entry->num_sge, - (__entry->num_sge == 1 ? "" : "s"), + __entry->cq_id, __entry->completion_id, + __entry->num_sge, (__entry->num_sge == 1 ? "" : "s"), (__entry->signaled ? "signaled" : "") ) ); @@ -771,15 +760,17 @@ TRACE_EVENT(xprtrdma_post_recv, TP_ARGS(rep), TP_STRUCT__entry( - __field(const void *, rep) + __field(u32, cq_id) + __field(int, completion_id) ), TP_fast_assign( - __entry->rep = rep; + __entry->cq_id = rep->rr_cid.ci_queue_id; + __entry->completion_id = rep->rr_cid.ci_completion_id; ), - TP_printk("rep=%p", - __entry->rep + TP_printk("cq.id=%d cid=%d", + __entry->cq_id, __entry->completion_id ) ); @@ -816,7 +807,7 @@ TRACE_EVENT(xprtrdma_post_recvs, ) ); -TRACE_EVENT(xprtrdma_post_linv, +TRACE_EVENT(xprtrdma_post_linv_err, TP_PROTO( const struct rpcrdma_req *req, int status @@ -825,19 +816,21 @@ TRACE_EVENT(xprtrdma_post_linv, TP_ARGS(req, status), TP_STRUCT__entry( - __field(const void *, req) + __field(unsigned int, task_id) + __field(unsigned int, client_id) __field(int, status) - __field(u32, xid) ), TP_fast_assign( - __entry->req = req; + const struct rpc_task *task = req->rl_slot.rq_task; + + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; __entry->status = status; - __entry->xid = be32_to_cpu(req->rl_slot.rq_xid); ), - TP_printk("req=%p xid=0x%08x status=%d", - __entry->req, __entry->xid, __entry->status + TP_printk("task:%u@%u status=%d", + __entry->task_id, __entry->client_id, __entry->status ) ); @@ -845,75 +838,12 @@ TRACE_EVENT(xprtrdma_post_linv, ** Completion events **/ -TRACE_EVENT(xprtrdma_wc_send, - TP_PROTO( - const struct rpcrdma_sendctx *sc, - const struct ib_wc *wc - ), - - TP_ARGS(sc, wc), - - TP_STRUCT__entry( - __field(const void *, req) - __field(const void *, sc) - __field(unsigned int, unmap_count) - __field(unsigned int, status) - __field(unsigned int, vendor_err) - ), - - TP_fast_assign( - __entry->req = sc->sc_req; - __entry->sc = sc; - __entry->unmap_count = sc->sc_unmap_count; - __entry->status = wc->status; - __entry->vendor_err = __entry->status ? 
wc->vendor_err : 0; - ), - - TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)", - __entry->req, __entry->sc, __entry->unmap_count, - rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err - ) -); - -TRACE_EVENT(xprtrdma_wc_receive, - TP_PROTO( - const struct ib_wc *wc - ), - - TP_ARGS(wc), - - TP_STRUCT__entry( - __field(const void *, rep) - __field(u32, byte_len) - __field(unsigned int, status) - __field(u32, vendor_err) - ), - - TP_fast_assign( - __entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep, - rr_cqe); - __entry->status = wc->status; - if (wc->status) { - __entry->byte_len = 0; - __entry->vendor_err = wc->vendor_err; - } else { - __entry->byte_len = wc->byte_len; - __entry->vendor_err = 0; - } - ), - - TP_printk("rep=%p %u bytes: %s (%u/0x%x)", - __entry->rep, __entry->byte_len, - rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err - ) -); - -DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg); -DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li); -DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake); -DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_send); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_li); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake); +DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done); TRACE_EVENT(xprtrdma_frwr_alloc, TP_PROTO( @@ -1036,9 +966,9 @@ TRACE_EVENT(xprtrdma_frwr_maperr, DEFINE_MR_EVENT(localinv); DEFINE_MR_EVENT(map); -DEFINE_MR_EVENT(unmap); -DEFINE_MR_EVENT(reminv); -DEFINE_MR_EVENT(recycle); + +DEFINE_ANON_MR_EVENT(unmap); +DEFINE_ANON_MR_EVENT(recycle); TRACE_EVENT(xprtrdma_dma_maperr, TP_PROTO( @@ -1066,17 +996,14 @@ TRACE_EVENT(xprtrdma_reply, TP_PROTO( const struct rpc_task *task, const struct rpcrdma_rep *rep, - const struct rpcrdma_req *req, unsigned int credits ), - TP_ARGS(task, rep, req, credits), + TP_ARGS(task, rep, credits), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) - __field(const void *, rep) - __field(const void *, req) __field(u32, xid) __field(unsigned int, credits) ), @@ -1084,49 +1011,102 @@ TRACE_EVENT(xprtrdma_reply, TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; - __entry->rep = rep; - __entry->req = req; __entry->xid = be32_to_cpu(rep->rr_xid); __entry->credits = credits; ), - TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p", + TP_printk("task:%u@%u xid=0x%08x credits=%u", __entry->task_id, __entry->client_id, __entry->xid, - __entry->credits, __entry->rep, __entry->req + __entry->credits ) ); -TRACE_EVENT(xprtrdma_defer_cmp, +DEFINE_REPLY_EVENT(vers); +DEFINE_REPLY_EVENT(rqst); +DEFINE_REPLY_EVENT(short); +DEFINE_REPLY_EVENT(hdr); + +TRACE_EVENT(xprtrdma_err_vers, TP_PROTO( - const struct rpcrdma_rep *rep + const struct rpc_rqst *rqst, + __be32 *min, + __be32 *max ), - TP_ARGS(rep), + TP_ARGS(rqst, min, max), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) - __field(const void *, rep) __field(u32, xid) + __field(u32, min) + __field(u32, max) ), TP_fast_assign( - __entry->task_id = rep->rr_rqst->rq_task->tk_pid; - __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid; - __entry->rep = rep; - __entry->xid = be32_to_cpu(rep->rr_xid); + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + __entry->min = be32_to_cpup(min); + __entry->max = be32_to_cpup(max); 
), - TP_printk("task:%u@%u xid=0x%08x rep=%p", + TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]", __entry->task_id, __entry->client_id, __entry->xid, - __entry->rep + __entry->min, __entry->max + ) +); + +TRACE_EVENT(xprtrdma_err_chunk, + TP_PROTO( + const struct rpc_rqst *rqst + ), + + TP_ARGS(rqst), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + ), + + TP_fast_assign( + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + ), + + TP_printk("task:%u@%u xid=0x%08x", + __entry->task_id, __entry->client_id, __entry->xid ) ); -DEFINE_REPLY_EVENT(xprtrdma_reply_vers); -DEFINE_REPLY_EVENT(xprtrdma_reply_rqst); -DEFINE_REPLY_EVENT(xprtrdma_reply_short); -DEFINE_REPLY_EVENT(xprtrdma_reply_hdr); +TRACE_EVENT(xprtrdma_err_unrecognized, + TP_PROTO( + const struct rpc_rqst *rqst, + __be32 *procedure + ), + + TP_ARGS(rqst, procedure), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + __field(u32, procedure) + ), + + TP_fast_assign( + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->procedure = be32_to_cpup(procedure); + ), + + TP_printk("task:%u@%u xid=0x%08x procedure=%u", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->procedure + ) +); TRACE_EVENT(xprtrdma_fixup, TP_PROTO( @@ -1187,6 +1167,28 @@ TRACE_EVENT(xprtrdma_decode_seg, ) ); +TRACE_EVENT(xprtrdma_mrs_zap, + TP_PROTO( + const struct rpc_task *task + ), + + TP_ARGS(task), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + ), + + TP_fast_assign( + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; + ), + + TP_printk("task:%u@%u", + __entry->task_id, __entry->client_id + ) +); + /** ** Callback events **/ @@ -1219,36 +1221,8 @@ TRACE_EVENT(xprtrdma_cb_setup, ) ); -DEFINE_CB_EVENT(xprtrdma_cb_call); -DEFINE_CB_EVENT(xprtrdma_cb_reply); - -TRACE_EVENT(xprtrdma_leaked_rep, - TP_PROTO( - const struct rpc_rqst *rqst, - const struct rpcrdma_rep *rep - ), - - TP_ARGS(rqst, rep), - - TP_STRUCT__entry( - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(u32, xid) - __field(const void *, rep) - ), - - TP_fast_assign( - __entry->task_id = rqst->rq_task->tk_pid; - __entry->client_id = rqst->rq_task->tk_client->cl_clid; - __entry->xid = be32_to_cpu(rqst->rq_xid); - __entry->rep = rep; - ), - - TP_printk("task:%u@%u xid=0x%08x rep=%p", - __entry->task_id, __entry->client_id, __entry->xid, - __entry->rep - ) -); +DEFINE_CALLBACK_EVENT(call); +DEFINE_CALLBACK_EVENT(reply); /** ** Server-side RPC/RDMA events diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 5039af667645..cbe3e152d24c 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -366,7 +366,7 @@ TRACE_EVENT(sched_process_wait, ); /* - * Tracepoint for do_fork: + * Tracepoint for kernel_clone: */ TRACE_EVENT(sched_process_fork, diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 58994e013022..6f89c27265f5 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1424,13 +1424,61 @@ TRACE_EVENT(rpcb_unregister, ) ); +/* Record an xdr_buf containing a fully-formed RPC message */ +DECLARE_EVENT_CLASS(svc_xdr_msg_class, + TP_PROTO( + const struct xdr_buf *xdr + ), + + TP_ARGS(xdr), + + TP_STRUCT__entry( + 
__field(u32, xid) + __field(const void *, head_base) + __field(size_t, head_len) + __field(const void *, tail_base) + __field(size_t, tail_len) + __field(unsigned int, page_len) + __field(unsigned int, msg_len) + ), + + TP_fast_assign( + __be32 *p = (__be32 *)xdr->head[0].iov_base; + + __entry->xid = be32_to_cpu(*p); + __entry->head_base = p; + __entry->head_len = xdr->head[0].iov_len; + __entry->tail_base = xdr->tail[0].iov_base; + __entry->tail_len = xdr->tail[0].iov_len; + __entry->page_len = xdr->page_len; + __entry->msg_len = xdr->len; + ), + + TP_printk("xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u", + __entry->xid, + __entry->head_base, __entry->head_len, __entry->page_len, + __entry->tail_base, __entry->tail_len, __entry->msg_len + ) +); + +#define DEFINE_SVCXDRMSG_EVENT(name) \ + DEFINE_EVENT(svc_xdr_msg_class, \ + svc_xdr_##name, \ + TP_PROTO( \ + const struct xdr_buf *xdr \ + ), \ + TP_ARGS(xdr)) + +DEFINE_SVCXDRMSG_EVENT(recvfrom); + +/* Record an xdr_buf containing arbitrary data, tagged with an XID */ DECLARE_EVENT_CLASS(svc_xdr_buf_class, TP_PROTO( - const struct svc_rqst *rqst, + __be32 xid, const struct xdr_buf *xdr ), - TP_ARGS(rqst, xdr), + TP_ARGS(xid, xdr), TP_STRUCT__entry( __field(u32, xid) @@ -1443,7 +1491,7 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class, ), TP_fast_assign( - __entry->xid = be32_to_cpu(rqst->rq_xid); + __entry->xid = be32_to_cpu(xid); __entry->head_base = xdr->head[0].iov_base; __entry->head_len = xdr->head[0].iov_len; __entry->tail_base = xdr->tail[0].iov_base; @@ -1463,12 +1511,11 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class, DEFINE_EVENT(svc_xdr_buf_class, \ svc_xdr_##name, \ TP_PROTO( \ - const struct svc_rqst *rqst, \ + __be32 xid, \ const struct xdr_buf *xdr \ ), \ - TP_ARGS(rqst, xdr)) + TP_ARGS(xid, xdr)) -DEFINE_SVCXDRBUF_EVENT(recvfrom); DEFINE_SVCXDRBUF_EVENT(sendto); /* diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index fc48c64700eb..728752917785 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -859,9 +859,11 @@ __SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd) __SYSCALL(__NR_faccessat2, sys_faccessat2) #define __NR_process_madvise 440 __SYSCALL(__NR_process_madvise, sys_process_madvise) +#define __NR_epoll_pwait2 441 +__SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2) #undef __NR_syscalls -#define __NR_syscalls 441 +#define __NR_syscalls 442 /* * 32 bit systems traditionally used different diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 5ad10ab2a577..b49fbf2bdc40 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -218,6 +218,27 @@ extern "C" { #define DRM_MODE_CONTENT_PROTECTION_DESIRED 1 #define DRM_MODE_CONTENT_PROTECTION_ENABLED 2 +/** + * struct drm_mode_modeinfo - Display mode information. + * @clock: pixel clock in kHz + * @hdisplay: horizontal display size + * @hsync_start: horizontal sync start + * @hsync_end: horizontal sync end + * @htotal: horizontal total size + * @hskew: horizontal skew + * @vdisplay: vertical display size + * @vsync_start: vertical sync start + * @vsync_end: vertical sync end + * @vtotal: vertical total size + * @vscan: vertical scan + * @vrefresh: approximate vertical refresh rate in Hz + * @flags: bitmask of misc. flags, see DRM_MODE_FLAG_* defines + * @type: bitmask of type flags, see DRM_MODE_TYPE_* defines + * @name: string describing the mode resolution + * + * This is the user-space API display mode information structure. 
For the + * kernel version see struct drm_display_mode. + */ struct drm_mode_modeinfo { __u32 clock; __u16 hdisplay; @@ -368,27 +389,95 @@ enum drm_mode_subconnector { #define DRM_MODE_CONNECTOR_WRITEBACK 18 #define DRM_MODE_CONNECTOR_SPI 19 +/** + * struct drm_mode_get_connector - Get connector metadata. + * + * User-space can perform a GETCONNECTOR ioctl to retrieve information about a + * connector. User-space is expected to retrieve encoders, modes and properties + * by performing this ioctl at least twice: the first time to retrieve the + * number of elements, the second time to retrieve the elements themselves. + * + * To retrieve the number of elements, set @count_props and @count_encoders to + * zero, set @count_modes to 1, and set @modes_ptr to a temporary struct + * drm_mode_modeinfo element. + * + * To retrieve the elements, allocate arrays for @encoders_ptr, @modes_ptr, + * @props_ptr and @prop_values_ptr, then set @count_modes, @count_props and + * @count_encoders to their capacity. + * + * Performing the ioctl only twice may be racy: the number of elements may have + * changed with a hotplug event in-between the two ioctls. User-space is + * expected to retry the last ioctl until the number of elements stabilizes. + * The kernel won't fill any array which doesn't have the expected length. + * + * **Force-probing a connector** + * + * If the @count_modes field is set to zero, the kernel will perform a forced + * probe on the connector to refresh the connector status, modes and EDID. + * A forced-probe can be slow and the ioctl will block. A force-probe can cause + * flickering and temporary freezes, so it should not be performed + * automatically. + * + * User-space shouldn't need to force-probe connectors in general: the kernel + * will automatically take care of probing connectors that don't support + * hot-plug detection when appropriate. However, user-space may force-probe + * connectors on user request (e.g. clicking a "Scan connectors" button, or + * opening a UI to manage screens). + */ struct drm_mode_get_connector { - + /** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */ __u64 encoders_ptr; + /** @modes_ptr: Pointer to struct drm_mode_modeinfo array. */ __u64 modes_ptr; + /** @props_ptr: Pointer to ``__u32`` array of property IDs. */ __u64 props_ptr; + /** @prop_values_ptr: Pointer to ``__u64`` array of property values. */ __u64 prop_values_ptr; + /** @count_modes: Number of modes. */ __u32 count_modes; + /** @count_props: Number of properties. */ __u32 count_props; + /** @count_encoders: Number of encoders. */ __u32 count_encoders; - __u32 encoder_id; /**< Current Encoder */ - __u32 connector_id; /**< Id */ + /** @encoder_id: Object ID of the current encoder. */ + __u32 encoder_id; + /** @connector_id: Object ID of the connector. */ + __u32 connector_id; + /** + * @connector_type: Type of the connector. + * + * See DRM_MODE_CONNECTOR_* defines. + */ __u32 connector_type; + /** + * @connector_type_id: Type-specific connector number. + * + * This is not an object ID. This is a per-type connector number. Each + * (type, type_id) combination is unique across all connectors of a DRM + * device. + */ __u32 connector_type_id; + /** + * @connection: Status of the connector. + * + * See enum drm_connector_status. + */ __u32 connection; - __u32 mm_width; /**< width in millimeters */ - __u32 mm_height; /**< height in millimeters */ + /** @mm_width: Width of the connected sink in millimeters. 
*/ + __u32 mm_width; + /** @mm_height: Height of the connected sink in millimeters. */ + __u32 mm_height; + /** + * @subpixel: Subpixel order of the connected sink. + * + * See enum subpixel_order. + */ __u32 subpixel; + /** @pad: Padding, must be zero. */ __u32 pad; }; @@ -905,24 +994,23 @@ struct drm_format_modifier { /** * struct drm_mode_create_blob - Create New block property - * @data: Pointer to data to copy. - * @length: Length of data to copy. - * @blob_id: new property ID. + * * Create a new 'blob' data property, copying length bytes from data pointer, * and returning new blob ID. */ struct drm_mode_create_blob { - /** Pointer to data to copy. */ + /** @data: Pointer to data to copy. */ __u64 data; - /** Length of data to copy. */ + /** @length: Length of data to copy. */ __u32 length; - /** Return: new property ID. */ + /** @blob_id: Return: new property ID. */ __u32 blob_id; }; /** * struct drm_mode_destroy_blob - Destroy user blob * @blob_id: blob_id to destroy + * * Destroy a user-created blob property. * * User-space can release blobs as soon as they do not need to refer to them by @@ -937,36 +1025,32 @@ struct drm_mode_destroy_blob { /** * struct drm_mode_create_lease - Create lease - * @object_ids: Pointer to array of object ids. - * @object_count: Number of object ids. - * @flags: flags for new FD. - * @lessee_id: unique identifier for lessee. - * @fd: file descriptor to new drm_master file. + * * Lease mode resources, creating another drm_master. */ struct drm_mode_create_lease { - /** Pointer to array of object ids (__u32) */ + /** @object_ids: Pointer to array of object ids (__u32) */ __u64 object_ids; - /** Number of object ids */ + /** @object_count: Number of object ids */ __u32 object_count; - /** flags for new FD (O_CLOEXEC, etc) */ + /** @flags: flags for new FD (O_CLOEXEC, etc) */ __u32 flags; - /** Return: unique identifier for lessee. */ + /** @lessee_id: Return: unique identifier for lessee. */ __u32 lessee_id; - /** Return: file descriptor to new drm_master file */ + /** @fd: Return: file descriptor to new drm_master file */ __u32 fd; }; /** * struct drm_mode_list_lessees - List lessees - * @count_lessees: Number of lessees. - * @pad: pad. - * @lessees_ptr: Pointer to lessess. - * List lesses from a drm_master + * + * List lesses from a drm_master. */ struct drm_mode_list_lessees { - /** Number of lessees. + /** + * @count_lessees: Number of lessees. + * * On input, provides length of the array. * On output, provides total number. No * more than the input number will be written @@ -974,23 +1058,26 @@ struct drm_mode_list_lessees { * the size and then the data. */ __u32 count_lessees; + /** @pad: Padding. */ __u32 pad; - /** Pointer to lessees. - * pointer to __u64 array of lessee ids + /** + * @lessees_ptr: Pointer to lessees. + * + * Pointer to __u64 array of lessee ids */ __u64 lessees_ptr; }; /** * struct drm_mode_get_lease - Get Lease - * @count_objects: Number of leased objects. - * @pad: pad. - * @objects_ptr: Pointer to objects. - * Get leased objects + * + * Get leased objects. */ struct drm_mode_get_lease { - /** Number of leased objects. + /** + * @count_objects: Number of leased objects. + * * On input, provides length of the array. * On output, provides total number. No * more than the input number will be written @@ -998,22 +1085,22 @@ struct drm_mode_get_lease { * the size and then the data. */ __u32 count_objects; + /** @pad: Padding. */ __u32 pad; - /** Pointer to objects. 
- * pointer to __u32 array of object ids + /** + * @objects_ptr: Pointer to objects. + * + * Pointer to __u32 array of object ids. */ __u64 objects_ptr; }; /** * struct drm_mode_revoke_lease - Revoke lease - * @lessee_id: Unique ID of lessee. - * Revoke lease */ struct drm_mode_revoke_lease { - /** Unique ID of lessee - */ + /** @lessee_id: Unique ID of lessee */ __u32 lessee_id; }; diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h index 52e8bcb33981..cf7399f03b71 100644 --- a/include/uapi/linux/bcache.h +++ b/include/uapi/linux/bcache.h @@ -213,7 +213,7 @@ struct cache_sb_disk { __le16 keys; }; __le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */ - __le16 bucket_size_hi; + __le16 obso_bucket_size_hi; /* obsoleted */ }; /* diff --git a/include/uapi/linux/cifs/cifs_netlink.h b/include/uapi/linux/cifs/cifs_netlink.h new file mode 100644 index 000000000000..da3107582f49 --- /dev/null +++ b/include/uapi/linux/cifs/cifs_netlink.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */ +/* + * Netlink routines for CIFS + * + * Copyright (c) 2020 Samuel Cabrero <scabrero@suse.de> + */ + + +#ifndef _UAPILINUX_CIFS_NETLINK_H +#define _UAPILINUX_CIFS_NETLINK_H + +#define CIFS_GENL_NAME "cifs" +#define CIFS_GENL_VERSION 0x1 + +#define CIFS_GENL_MCGRP_SWN_NAME "cifs_mcgrp_swn" + +enum cifs_genl_multicast_groups { + CIFS_GENL_MCGRP_SWN, +}; + +enum cifs_genl_attributes { + CIFS_GENL_ATTR_UNSPEC, + CIFS_GENL_ATTR_SWN_REGISTRATION_ID, + CIFS_GENL_ATTR_SWN_NET_NAME, + CIFS_GENL_ATTR_SWN_SHARE_NAME, + CIFS_GENL_ATTR_SWN_IP, + CIFS_GENL_ATTR_SWN_NET_NAME_NOTIFY, + CIFS_GENL_ATTR_SWN_SHARE_NAME_NOTIFY, + CIFS_GENL_ATTR_SWN_IP_NOTIFY, + CIFS_GENL_ATTR_SWN_KRB_AUTH, + CIFS_GENL_ATTR_SWN_USER_NAME, + CIFS_GENL_ATTR_SWN_PASSWORD, + CIFS_GENL_ATTR_SWN_DOMAIN_NAME, + CIFS_GENL_ATTR_SWN_NOTIFICATION_TYPE, + CIFS_GENL_ATTR_SWN_RESOURCE_STATE, + CIFS_GENL_ATTR_SWN_RESOURCE_NAME, + __CIFS_GENL_ATTR_MAX, +}; +#define CIFS_GENL_ATTR_MAX (__CIFS_GENL_ATTR_MAX - 1) + +enum cifs_genl_commands { + CIFS_GENL_CMD_UNSPEC, + CIFS_GENL_CMD_SWN_REGISTER, + CIFS_GENL_CMD_SWN_UNREGISTER, + CIFS_GENL_CMD_SWN_NOTIFY, + __CIFS_GENL_CMD_MAX +}; +#define CIFS_GENL_CMD_MAX (__CIFS_GENL_CMD_MAX - 1) + +enum cifs_swn_notification_type { + CIFS_SWN_NOTIFICATION_RESOURCE_CHANGE = 0x01, + CIFS_SWN_NOTIFICATION_CLIENT_MOVE = 0x02, + CIFS_SWN_NOTIFICATION_SHARE_MOVE = 0x03, + CIFS_SWN_NOTIFICATION_IP_CHANGE = 0x04, +}; + +enum cifs_swn_resource_state { + CIFS_SWN_RESOURCE_STATE_UNKNOWN = 0x00, + CIFS_SWN_RESOURCE_STATE_AVAILABLE = 0x01, + CIFS_SWN_RESOURCE_STATE_UNAVAILABLE = 0xFF +}; + +#endif /* _UAPILINUX_CIFS_NETLINK_H */ diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 5203f54a2be1..cf89c318f2ac 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -322,7 +322,7 @@ enum devlink_reload_limit { DEVLINK_RELOAD_LIMIT_MAX = __DEVLINK_RELOAD_LIMIT_MAX - 1 }; -#define DEVLINK_RELOAD_LIMITS_VALID_MASK (BIT(__DEVLINK_RELOAD_LIMIT_MAX) - 1) +#define DEVLINK_RELOAD_LIMITS_VALID_MASK (_BITUL(__DEVLINK_RELOAD_LIMIT_MAX) - 1) enum devlink_attr { /* don't change the order or add anything between, this is ABI! 
*/ diff --git a/include/uapi/linux/f2fs.h b/include/uapi/linux/f2fs.h new file mode 100644 index 000000000000..352a822d4370 --- /dev/null +++ b/include/uapi/linux/f2fs.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _UAPI_LINUX_F2FS_H +#define _UAPI_LINUX_F2FS_H +#include <linux/types.h> +#include <linux/ioctl.h> + +/* + * f2fs-specific ioctl commands + */ +#define F2FS_IOCTL_MAGIC 0xf5 +#define F2FS_IOC_START_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 1) +#define F2FS_IOC_COMMIT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 2) +#define F2FS_IOC_START_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 3) +#define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4) +#define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5) +#define F2FS_IOC_GARBAGE_COLLECT _IOW(F2FS_IOCTL_MAGIC, 6, __u32) +#define F2FS_IOC_WRITE_CHECKPOINT _IO(F2FS_IOCTL_MAGIC, 7) +#define F2FS_IOC_DEFRAGMENT _IOWR(F2FS_IOCTL_MAGIC, 8, \ + struct f2fs_defragment) +#define F2FS_IOC_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \ + struct f2fs_move_range) +#define F2FS_IOC_FLUSH_DEVICE _IOW(F2FS_IOCTL_MAGIC, 10, \ + struct f2fs_flush_device) +#define F2FS_IOC_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11, \ + struct f2fs_gc_range) +#define F2FS_IOC_GET_FEATURES _IOR(F2FS_IOCTL_MAGIC, 12, __u32) +#define F2FS_IOC_SET_PIN_FILE _IOW(F2FS_IOCTL_MAGIC, 13, __u32) +#define F2FS_IOC_GET_PIN_FILE _IOR(F2FS_IOCTL_MAGIC, 14, __u32) +#define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15) +#define F2FS_IOC_RESIZE_FS _IOW(F2FS_IOCTL_MAGIC, 16, __u64) +#define F2FS_IOC_GET_COMPRESS_BLOCKS _IOR(F2FS_IOCTL_MAGIC, 17, __u64) +#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS \ + _IOR(F2FS_IOCTL_MAGIC, 18, __u64) +#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS \ + _IOR(F2FS_IOCTL_MAGIC, 19, __u64) +#define F2FS_IOC_SEC_TRIM_FILE _IOW(F2FS_IOCTL_MAGIC, 20, \ + struct f2fs_sectrim_range) +#define F2FS_IOC_GET_COMPRESS_OPTION _IOR(F2FS_IOCTL_MAGIC, 21, \ + struct f2fs_comp_option) +#define F2FS_IOC_SET_COMPRESS_OPTION _IOW(F2FS_IOCTL_MAGIC, 22, \ + struct f2fs_comp_option) +#define F2FS_IOC_DECOMPRESS_FILE _IO(F2FS_IOCTL_MAGIC, 23) +#define F2FS_IOC_COMPRESS_FILE _IO(F2FS_IOCTL_MAGIC, 24) + +/* + * should be same as XFS_IOC_GOINGDOWN. 
+ * Flags for going down operation used by FS_IOC_GOINGDOWN + */ +#define F2FS_IOC_SHUTDOWN _IOR('X', 125, __u32) /* Shutdown */ +#define F2FS_GOING_DOWN_FULLSYNC 0x0 /* going down with full sync */ +#define F2FS_GOING_DOWN_METASYNC 0x1 /* going down with metadata */ +#define F2FS_GOING_DOWN_NOSYNC 0x2 /* going down */ +#define F2FS_GOING_DOWN_METAFLUSH 0x3 /* going down with meta flush */ +#define F2FS_GOING_DOWN_NEED_FSCK 0x4 /* going down to trigger fsck */ + +/* + * Flags used by F2FS_IOC_SEC_TRIM_FILE + */ +#define F2FS_TRIM_FILE_DISCARD 0x1 /* send discard command */ +#define F2FS_TRIM_FILE_ZEROOUT 0x2 /* zero out */ +#define F2FS_TRIM_FILE_MASK 0x3 + +struct f2fs_gc_range { + __u32 sync; + __u64 start; + __u64 len; +}; + +struct f2fs_defragment { + __u64 start; + __u64 len; +}; + +struct f2fs_move_range { + __u32 dst_fd; /* destination fd */ + __u64 pos_in; /* start position in src_fd */ + __u64 pos_out; /* start position in dst_fd */ + __u64 len; /* size to move */ +}; + +struct f2fs_flush_device { + __u32 dev_num; /* device number to flush */ + __u32 segments; /* # of segments to flush */ +}; + +struct f2fs_sectrim_range { + __u64 start; + __u64 len; + __u64 flags; +}; + +struct f2fs_comp_option { + __u8 algorithm; + __u8 log_cluster_size; +}; + +#endif /* _UAPI_LINUX_F2FS_H */ diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 7233502ea991..98ca64d1beb6 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -175,6 +175,10 @@ * * 7.32 * - add flags to fuse_attr, add FUSE_ATTR_SUBMOUNT, add FUSE_SUBMOUNTS + * + * 7.33 + * - add FUSE_HANDLE_KILLPRIV_V2, FUSE_WRITE_KILL_SUIDGID, FATTR_KILL_SUIDGID + * - add FUSE_OPEN_KILL_SUIDGID */ #ifndef _LINUX_FUSE_H @@ -210,7 +214,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 32 +#define FUSE_KERNEL_MINOR_VERSION 33 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -271,6 +275,7 @@ struct fuse_file_lock { #define FATTR_MTIME_NOW (1 << 8) #define FATTR_LOCKOWNER (1 << 9) #define FATTR_CTIME (1 << 10) +#define FATTR_KILL_SUIDGID (1 << 11) /** * Flags returned by the OPEN request @@ -320,6 +325,11 @@ struct fuse_file_lock { * foffset and moffset fields in struct * fuse_setupmapping_out and fuse_removemapping_one. * FUSE_SUBMOUNTS: kernel supports auto-mounting directory submounts + * FUSE_HANDLE_KILLPRIV_V2: fs kills suid/sgid/cap on write/chown/trunc. + * Upon write/truncate suid/sgid is only killed if caller + * does not have CAP_FSETID. Additionally upon + * write/truncate sgid is killed only if file has group + * execute permission. (Same as Linux VFS behavior). */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -349,6 +359,7 @@ struct fuse_file_lock { #define FUSE_EXPLICIT_INVAL_DATA (1 << 25) #define FUSE_MAP_ALIGNMENT (1 << 26) #define FUSE_SUBMOUNTS (1 << 27) +#define FUSE_HANDLE_KILLPRIV_V2 (1 << 28) /** * CUSE INIT request/reply flags @@ -378,11 +389,14 @@ struct fuse_file_lock { * * FUSE_WRITE_CACHE: delayed write from page cache, file handle is guessed * FUSE_WRITE_LOCKOWNER: lock_owner field is valid - * FUSE_WRITE_KILL_PRIV: kill suid and sgid bits + * FUSE_WRITE_KILL_SUIDGID: kill suid and sgid bits */ #define FUSE_WRITE_CACHE (1 << 0) #define FUSE_WRITE_LOCKOWNER (1 << 1) -#define FUSE_WRITE_KILL_PRIV (1 << 2) +#define FUSE_WRITE_KILL_SUIDGID (1 << 2) + +/* Obsolete alias; this flag implies killing suid/sgid only. 
*/ +#define FUSE_WRITE_KILL_PRIV FUSE_WRITE_KILL_SUIDGID /** * Read flags @@ -431,6 +445,12 @@ struct fuse_file_lock { */ #define FUSE_ATTR_SUBMOUNT (1 << 0) +/** + * Open flags + * FUSE_OPEN_KILL_SUIDGID: Kill suid and sgid if executable + */ +#define FUSE_OPEN_KILL_SUIDGID (1 << 0) + enum fuse_opcode { FUSE_LOOKUP = 1, FUSE_FORGET = 2, /* no reply */ @@ -592,14 +612,14 @@ struct fuse_setattr_in { struct fuse_open_in { uint32_t flags; - uint32_t unused; + uint32_t open_flags; /* FUSE_OPEN_... */ }; struct fuse_create_in { uint32_t flags; uint32_t mode; uint32_t umask; - uint32_t padding; + uint32_t open_flags; /* FUSE_OPEN_... */ }; struct fuse_open_out { diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h index 2072c260f5d0..e4eb0b8c5cf9 100644 --- a/include/uapi/linux/gpio.h +++ b/include/uapi/linux/gpio.h @@ -65,6 +65,7 @@ struct gpiochip_info { * @GPIO_V2_LINE_FLAG_BIAS_PULL_UP: line has pull-up bias enabled * @GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN: line has pull-down bias enabled * @GPIO_V2_LINE_FLAG_BIAS_DISABLED: line has bias disabled + * @GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME: line events contain REALTIME timestamps */ enum gpio_v2_line_flag { GPIO_V2_LINE_FLAG_USED = _BITULL(0), @@ -78,6 +79,7 @@ enum gpio_v2_line_flag { GPIO_V2_LINE_FLAG_BIAS_PULL_UP = _BITULL(8), GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN = _BITULL(9), GPIO_V2_LINE_FLAG_BIAS_DISABLED = _BITULL(10), + GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME = _BITULL(11), }; /** @@ -270,9 +272,6 @@ enum gpio_v2_line_event_id { /** * struct gpio_v2_line_event - The actual event being pushed to userspace * @timestamp_ns: best estimate of time of event occurrence, in nanoseconds. - * The @timestamp_ns is read from %CLOCK_MONOTONIC and is intended to allow - * the accurate measurement of the time between events. It does not provide - * the wall-clock time. * @id: event identifier with value from &enum gpio_v2_line_event_id * @offset: the offset of the line that triggered the event * @seqno: the sequence number for this event in the sequence of events for @@ -280,6 +279,13 @@ enum gpio_v2_line_event_id { * @line_seqno: the sequence number for this event in the sequence of * events on this particular line * @padding: reserved for future use + * + * By default the @timestamp_ns is read from %CLOCK_MONOTONIC and is + * intended to allow the accurate measurement of the time between events. + * It does not provide the wall-clock time. + * + * If the %GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME flag is set then the + * @timestamp_ns is read from %CLOCK_REALTIME. 
*/ struct gpio_v2_line_event { __aligned_u64 timestamp_ns; diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index fdcdfe414223..236d437947bc 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -26,6 +26,9 @@ #define IDXD_OP_FLAG_DRDBK 0x4000 #define IDXD_OP_FLAG_DSTS 0x8000 +/* IAX */ +#define IDXD_OP_FLAG_RD_SRC2_AECS 0x010000 + /* Opcode */ enum dsa_opcode { DSA_OPCODE_NOOP = 0, @@ -47,6 +50,14 @@ enum dsa_opcode { DSA_OPCODE_CFLUSH = 0x20, }; +enum iax_opcode { + IAX_OPCODE_NOOP = 0, + IAX_OPCODE_DRAIN = 2, + IAX_OPCODE_MEMMOVE, + IAX_OPCODE_DECOMPRESS = 0x42, + IAX_OPCODE_COMPRESS, +}; + /* Completion record status */ enum dsa_completion_status { DSA_COMP_NONE = 0, @@ -80,6 +91,33 @@ enum dsa_completion_status { DSA_COMP_TRANSLATION_FAIL, }; +enum iax_completion_status { + IAX_COMP_NONE = 0, + IAX_COMP_SUCCESS, + IAX_COMP_PAGE_FAULT_IR = 0x04, + IAX_COMP_OUTBUF_OVERFLOW, + IAX_COMP_BAD_OPCODE = 0x10, + IAX_COMP_INVALID_FLAGS, + IAX_COMP_NOZERO_RESERVE, + IAX_COMP_INVALID_SIZE, + IAX_COMP_OVERLAP_BUFFERS = 0x16, + IAX_COMP_INT_HANDLE_INVAL = 0x19, + IAX_COMP_CRA_XLAT, + IAX_COMP_CRA_ALIGN, + IAX_COMP_ADDR_ALIGN, + IAX_COMP_PRIV_BAD, + IAX_COMP_TRAFFIC_CLASS_CONF, + IAX_COMP_PFAULT_RDBA, + IAX_COMP_HW_ERR1, + IAX_COMP_HW_ERR_DRB, + IAX_COMP_TRANSLATION_FAIL, + IAX_COMP_PRS_TIMEOUT, + IAX_COMP_WATCHDOG, + IAX_COMP_INVALID_COMP_FLAG = 0x30, + IAX_COMP_INVALID_FILTER_FLAG, + IAX_COMP_INVALID_NUM_ELEMS = 0x33, +}; + #define DSA_COMP_STATUS_MASK 0x7f #define DSA_COMP_STATUS_WRITE 0x80 @@ -163,6 +201,28 @@ struct dsa_hw_desc { }; } __attribute__((packed)); +struct iax_hw_desc { + uint32_t pasid:20; + uint32_t rsvd:11; + uint32_t priv:1; + uint32_t flags:24; + uint32_t opcode:8; + uint64_t completion_addr; + uint64_t src1_addr; + uint64_t dst_addr; + uint32_t src1_size; + uint16_t int_handle; + union { + uint16_t compr_flags; + uint16_t decompr_flags; + }; + uint64_t src2_addr; + uint32_t max_dst_size; + uint32_t src2_size; + uint32_t filter_flags; + uint32_t num_inputs; +} __attribute__((packed)); + struct dsa_raw_desc { uint64_t field[8]; } __attribute__((packed)); @@ -223,4 +283,23 @@ struct dsa_raw_completion_record { uint64_t field[4]; } __attribute__((packed)); +struct iax_completion_record { + volatile uint8_t status; + uint8_t error_code; + uint16_t rsvd; + uint32_t bytes_completed; + uint64_t fault_addr; + uint32_t invalid_flags; + uint32_t rsvd2; + uint32_t output_size; + uint8_t output_bits; + uint8_t rsvd3; + uint16_t rsvd4; + uint64_t rsvd5[4]; +} __attribute__((packed)); + +struct iax_raw_completion_record { + uint64_t field[8]; +} __attribute__((packed)); + #endif diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 874cc12a34d9..82708c6db432 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -75,8 +75,9 @@ struct rtnl_link_stats { * * @rx_dropped: Number of packets received but not processed, * e.g. due to lack of resources or unsupported protocol. - * For hardware interfaces this counter should not include packets - * dropped by the device which are counted separately in + * For hardware interfaces this counter may include packets discarded + * due to L2 address filtering but should not include packets dropped + * by the device due to buffer exhaustion which are counted separately in * @rx_missed_errors (since procfs folds those two counters together). 
* * @tx_dropped: Number of packets dropped on their way to transmission, diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index ca41220b40b8..374c67875cdb 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -250,6 +250,8 @@ struct kvm_hyperv_exit { #define KVM_EXIT_ARM_NISV 28 #define KVM_EXIT_X86_RDMSR 29 #define KVM_EXIT_X86_WRMSR 30 +#define KVM_EXIT_DIRTY_RING_FULL 31 +#define KVM_EXIT_AP_RESET_HOLD 32 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -572,6 +574,7 @@ struct kvm_vapic_addr { #define KVM_MP_STATE_CHECK_STOP 6 #define KVM_MP_STATE_OPERATING 7 #define KVM_MP_STATE_LOAD 8 +#define KVM_MP_STATE_AP_RESET_HOLD 9 struct kvm_mp_state { __u32 mp_state; @@ -1053,6 +1056,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_X86_USER_SPACE_MSR 188 #define KVM_CAP_X86_MSR_FILTER 189 #define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190 +#define KVM_CAP_SYS_HYPERV_CPUID 191 +#define KVM_CAP_DIRTY_LOG_RING 192 #ifdef KVM_CAP_IRQ_ROUTING @@ -1511,7 +1516,7 @@ struct kvm_enc_region { /* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT_2 */ #define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log) -/* Available with KVM_CAP_HYPERV_CPUID */ +/* Available with KVM_CAP_HYPERV_CPUID (vcpu) / KVM_CAP_SYS_HYPERV_CPUID (system) */ #define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2) /* Available with KVM_CAP_ARM_SVE */ @@ -1557,6 +1562,9 @@ struct kvm_pv_cmd { /* Available with KVM_CAP_X86_MSR_FILTER */ #define KVM_X86_SET_MSR_FILTER _IOW(KVMIO, 0xc6, struct kvm_msr_filter) +/* Available with KVM_CAP_DIRTY_LOG_RING */ +#define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7) + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ @@ -1710,4 +1718,52 @@ struct kvm_hyperv_eventfd { #define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0) #define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1) +/* + * Arch needs to define the macro after implementing the dirty ring + * feature. KVM_DIRTY_LOG_PAGE_OFFSET should be defined as the + * starting page offset of the dirty ring structures. + */ +#ifndef KVM_DIRTY_LOG_PAGE_OFFSET +#define KVM_DIRTY_LOG_PAGE_OFFSET 0 +#endif + +/* + * KVM dirty GFN flags, defined as: + * + * |---------------+---------------+--------------| + * | bit 1 (reset) | bit 0 (dirty) | Status | + * |---------------+---------------+--------------| + * | 0 | 0 | Invalid GFN | + * | 0 | 1 | Dirty GFN | + * | 1 | X | GFN to reset | + * |---------------+---------------+--------------| + * + * Lifecycle of a dirty GFN goes like: + * + * dirtied harvested reset + * 00 -----------> 01 -------------> 1X -------+ + * ^ | + * | | + * +------------------------------------------+ + * + * The userspace program is only responsible for the 01->1X state + * conversion after harvesting an entry. Also, it must not skip any + * dirty bits, so that dirty bits are always harvested in sequence. + */ +#define KVM_DIRTY_GFN_F_DIRTY BIT(0) +#define KVM_DIRTY_GFN_F_RESET BIT(1) +#define KVM_DIRTY_GFN_F_MASK 0x3 + +/* + * KVM dirty rings should be mapped at KVM_DIRTY_LOG_PAGE_OFFSET of + * per-vcpu mmaped regions as an array of struct kvm_dirty_gfn. The + * size of the gfn buffer is decided by the first argument when + * enabling KVM_CAP_DIRTY_LOG_RING. 
+ */ +struct kvm_dirty_gfn { + __u32 flags; + __u32 slot; + __u64 offset; +}; + #endif /* __LINUX_KVM_H */ diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 28b6ee53305f..b1633e7ba529 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -293,6 +293,7 @@ enum nft_rule_compat_attributes { * @NFT_SET_EVAL: set can be updated from the evaluation path * @NFT_SET_OBJECT: set contains stateful objects * @NFT_SET_CONCAT: set contains a concatenation + * @NFT_SET_EXPR: set contains expressions */ enum nft_set_flags { NFT_SET_ANONYMOUS = 0x1, @@ -303,6 +304,7 @@ enum nft_set_flags { NFT_SET_EVAL = 0x20, NFT_SET_OBJECT = 0x40, NFT_SET_CONCAT = 0x80, + NFT_SET_EXPR = 0x100, }; /** @@ -706,6 +708,7 @@ enum nft_dynset_ops { enum nft_dynset_flags { NFT_DYNSET_F_INV = (1 << 0), + NFT_DYNSET_F_EXPR = (1 << 1), }; /** diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h index 8dbecb3ad036..1cc5ce0ae062 100644 --- a/include/uapi/linux/ppp-ioctl.h +++ b/include/uapi/linux/ppp-ioctl.h @@ -116,7 +116,7 @@ struct pppol2tp_ioc_stats { #define PPPIOCGCHAN _IOR('t', 55, int) /* get ppp channel number */ #define PPPIOCGL2TPSTATS _IOR('t', 54, struct pppol2tp_ioc_stats) #define PPPIOCBRIDGECHAN _IOW('t', 53, int) /* bridge one channel to another */ -#define PPPIOCUNBRIDGECHAN _IO('t', 54) /* unbridge channel */ +#define PPPIOCUNBRIDGECHAN _IO('t', 52) /* unbridge channel */ #define SIOCGPPPSTATS (SIOCDEVPRIVATE + 0) #define SIOCGPPPVER (SIOCDEVPRIVATE + 1) /* NEVER change this!! */ diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h index 00850b98078a..a38454d9e0f5 100644 --- a/include/uapi/linux/v4l2-subdev.h +++ b/include/uapi/linux/v4l2-subdev.h @@ -176,7 +176,7 @@ struct v4l2_subdev_capability { }; /* The v4l2 sub-device video device node is registered in read-only mode. */ -#define V4L2_SUBDEV_CAP_RO_SUBDEV BIT(0) +#define V4L2_SUBDEV_CAP_RO_SUBDEV 0x00000001 /* Backwards compatibility define --- to be removed */ #define v4l2_subdev_edid v4l2_edid diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 2f313a238a8f..d1812777139f 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -820,6 +820,7 @@ enum { enum { VFIO_CCW_IO_IRQ_INDEX, VFIO_CCW_CRW_IRQ_INDEX, + VFIO_CCW_REQ_IRQ_INDEX, VFIO_CCW_NUM_IRQS }; diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h index 0ec6b610402c..97523a95781d 100644 --- a/include/uapi/linux/virtio_gpu.h +++ b/include/uapi/linux/virtio_gpu.h @@ -115,6 +115,10 @@ enum virtio_gpu_ctrl_type { enum virtio_gpu_shm_id { VIRTIO_GPU_SHM_ID_UNDEFINED = 0, + /* + * VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB + * VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB + */ VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1 }; diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h index b052355ac7a3..bc1c0621f5ed 100644 --- a/include/uapi/linux/virtio_ids.h +++ b/include/uapi/linux/virtio_ids.h @@ -29,24 +29,30 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ -#define VIRTIO_ID_NET 1 /* virtio net */ -#define VIRTIO_ID_BLOCK 2 /* virtio block */ -#define VIRTIO_ID_CONSOLE 3 /* virtio console */ -#define VIRTIO_ID_RNG 4 /* virtio rng */ -#define VIRTIO_ID_BALLOON 5 /* virtio balloon */ -#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */ -#define VIRTIO_ID_SCSI 8 /* virtio scsi */ -#define VIRTIO_ID_9P 9 /* 9p virtio console */ -#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */ -#define VIRTIO_ID_CAIF 12 /* Virtio caif */ -#define VIRTIO_ID_GPU 16 /* virtio GPU */ -#define VIRTIO_ID_INPUT 18 /* virtio input */ -#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */ -#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */ -#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */ -#define VIRTIO_ID_MEM 24 /* virtio mem */ -#define VIRTIO_ID_FS 26 /* virtio filesystem */ -#define VIRTIO_ID_PMEM 27 /* virtio pmem */ -#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */ +#define VIRTIO_ID_NET 1 /* virtio net */ +#define VIRTIO_ID_BLOCK 2 /* virtio block */ +#define VIRTIO_ID_CONSOLE 3 /* virtio console */ +#define VIRTIO_ID_RNG 4 /* virtio rng */ +#define VIRTIO_ID_BALLOON 5 /* virtio balloon */ +#define VIRTIO_ID_IOMEM 6 /* virtio ioMemory */ +#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */ +#define VIRTIO_ID_SCSI 8 /* virtio scsi */ +#define VIRTIO_ID_9P 9 /* 9p virtio console */ +#define VIRTIO_ID_MAC80211_WLAN 10 /* virtio WLAN MAC */ +#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */ +#define VIRTIO_ID_CAIF 12 /* Virtio caif */ +#define VIRTIO_ID_MEMORY_BALLOON 13 /* virtio memory balloon */ +#define VIRTIO_ID_GPU 16 /* virtio GPU */ +#define VIRTIO_ID_CLOCK 17 /* virtio clock/timer */ +#define VIRTIO_ID_INPUT 18 /* virtio input */ +#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */ +#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */ +#define VIRTIO_ID_SIGNAL_DIST 21 /* virtio signal distribution device */ +#define VIRTIO_ID_PSTORE 22 /* virtio pstore device */ +#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */ +#define VIRTIO_ID_MEM 24 /* virtio mem */ +#define VIRTIO_ID_FS 26 /* virtio filesystem */ +#define VIRTIO_ID_PMEM 27 /* virtio pmem */ +#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */ #endif /* _LINUX_VIRTIO_IDS_H */ diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 8c15a7d336a0..dba3827c43ca 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -279,6 +279,7 @@ enum hl_device_status { * HL_INFO_CLK_THROTTLE_REASON - Retrieve clock throttling reason * HL_INFO_SYNC_MANAGER - Retrieve sync manager info per dcore * HL_INFO_TOTAL_ENERGY - Retrieve total energy consumption + * HL_INFO_PLL_FREQUENCY - Retrieve PLL frequency */ #define HL_INFO_HW_IP_INFO 0 #define HL_INFO_HW_EVENTS 1 @@ -425,6 +426,8 @@ struct hl_info_sync_manager { * @ctx_device_in_reset_drop_cnt: context dropped due to device in reset * @total_max_cs_in_flight_drop_cnt: total dropped due to maximum CS in-flight * @ctx_max_cs_in_flight_drop_cnt: context dropped due to maximum CS in-flight + * @total_validation_drop_cnt: total dropped due to validation error + * @ctx_validation_drop_cnt: context dropped due to validation error */ struct hl_info_cs_counters { __u64 total_out_of_mem_drop_cnt; @@ -437,6 +440,8 @@ struct hl_info_cs_counters { __u64 ctx_device_in_reset_drop_cnt; __u64 total_max_cs_in_flight_drop_cnt; __u64 ctx_max_cs_in_flight_drop_cnt; + __u64 total_validation_drop_cnt; + __u64 ctx_validation_drop_cnt; }; enum 
gaudi_dcores { diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index 00c7235ae93e..2c43b0ef1e4d 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -192,7 +192,7 @@ void xs_suspend_cancel(void); struct work_struct; -void xenbus_probe(struct work_struct *); +void xenbus_probe(void); #define XENBUS_IS_ERR_READ(str) ({ \ if (!IS_ERR(str) && strlen(str) == 0) { \
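
To illustrate a few of the UAPI additions above: the unistd.h hunk wires up epoll_pwait2 as syscall number 441, which takes a struct timespec timeout instead of epoll_pwait's millisecond integer. Below is a minimal sketch of calling it; it assumes no libc wrapper is available yet and therefore goes through syscall(2), and the 250 ms timeout and the fallback definition of __NR_epoll_pwait2 are illustrative only.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/epoll.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#ifndef __NR_epoll_pwait2
#define __NR_epoll_pwait2 441		/* number added in the hunk above */
#endif

int main(void)
{
	struct epoll_event events[8];
	/* Nanosecond-resolution timeout is the point of the new call; here 250 ms.
	 * (struct timespec matches the kernel's __kernel_timespec on 64-bit ABIs.) */
	struct timespec timeout = { .tv_sec = 0, .tv_nsec = 250 * 1000 * 1000 };
	int epfd = epoll_create1(0);
	int n;

	/* Raw syscall: (epfd, events, maxevents, timeout, sigmask, sigsetsize).
	 * No signal mask here, so NULL/0; older kernels return ENOSYS. */
	n = syscall(__NR_epoll_pwait2, epfd, events, 8, &timeout, NULL, (size_t)0);
	if (n < 0)
		perror("epoll_pwait2");
	else
		printf("%d event(s) ready\n", n);

	close(epfd);
	return 0;
}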
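
The new kernel-doc for struct drm_mode_get_connector spells out the two-pass GETCONNECTOR retrieval pattern and the count_modes=1 trick for avoiding a forced probe. The sketch below follows that description; fetch_connector(), the three-attempt retry limit and the header path <drm/drm.h> are assumptions of this example, not part of the patch (libdrm ships the same structures via its own headers).

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int fetch_connector(int drm_fd, uint32_t connector_id)
{
	struct drm_mode_modeinfo tmp;
	int attempts = 3;

	while (attempts--) {
		struct drm_mode_get_connector conn;
		struct drm_mode_modeinfo *modes;
		uint64_t *prop_values;
		uint32_t *props, *encoders;
		uint32_t n_modes, n_props, n_encoders;
		int ok;

		/* Pass 1: learn the element counts.  Keeping count_modes at 1
		 * (with a throwaway element) avoids the slow forced probe that
		 * a zero count would trigger. */
		memset(&conn, 0, sizeof(conn));
		conn.connector_id = connector_id;
		conn.count_modes = 1;
		conn.modes_ptr = (uintptr_t)&tmp;
		if (ioctl(drm_fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn) < 0)
			return -1;

		n_modes = conn.count_modes ? conn.count_modes : 1;
		n_props = conn.count_props;
		n_encoders = conn.count_encoders;

		/* Pass 2: allocate to capacity and fetch the elements. */
		modes = calloc(n_modes, sizeof(*modes));
		props = calloc(n_props, sizeof(*props));
		prop_values = calloc(n_props, sizeof(*prop_values));
		encoders = calloc(n_encoders, sizeof(*encoders));

		memset(&conn, 0, sizeof(conn));
		conn.connector_id = connector_id;
		conn.count_modes = n_modes;
		conn.modes_ptr = (uintptr_t)modes;
		conn.count_props = n_props;
		conn.props_ptr = (uintptr_t)props;
		conn.prop_values_ptr = (uintptr_t)prop_values;
		conn.count_encoders = n_encoders;
		conn.encoders_ptr = (uintptr_t)encoders;

		ok = ioctl(drm_fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn) == 0 &&
		     conn.count_modes <= n_modes &&
		     conn.count_props <= n_props &&
		     conn.count_encoders <= n_encoders;

		/* A real caller would consume modes/props/encoders here. */
		free(modes);
		free(props);
		free(prop_values);
		free(encoders);

		if (ok)
			return 0;
		/* Counts changed (hotplug race) or the ioctl failed: retry. */
	}
	return -1;
}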
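
The gpio.h hunk adds GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME, switching line-event timestamps from CLOCK_MONOTONIC to CLOCK_REALTIME. A hedged sketch of requesting a line with that flag through GPIO_V2_GET_LINE_IOCTL follows; /dev/gpiochip0, line offset 17 and the consumer label are placeholders.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpio_v2_line_request req;
	struct gpio_v2_line_event ev;
	int chip_fd = open("/dev/gpiochip0", O_RDONLY);

	memset(&req, 0, sizeof(req));
	req.offsets[0] = 17;				/* placeholder line */
	req.num_lines = 1;
	strncpy(req.consumer, "realtime-demo", sizeof(req.consumer) - 1);
	req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
			   GPIO_V2_LINE_FLAG_EDGE_RISING |
			   GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;

	if (chip_fd < 0 || ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req) < 0) {
		perror("GPIO_V2_GET_LINE_IOCTL");
		return 1;
	}

	/* Each event read from req.fd now carries a wall-clock timestamp. */
	if (read(req.fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev))
		printf("edge at %llu ns (CLOCK_REALTIME), line %u\n",
		       (unsigned long long)ev.timestamp_ns, ev.offset);

	close(req.fd);
	close(chip_fd);
	return 0;
}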
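
The kvm.h hunk documents the dirty-GFN lifecycle (00 dirtied to 01, harvested to 1X, then reset). The sketch below walks one harvesting pass over an already-mmapped ring in that order; the ring pointer, entry count and fetch index are assumed to be managed by the caller, the flag bits are written out numerically (they match KVM_DIRTY_GFN_F_DIRTY and KVM_DIRTY_GFN_F_RESET above), and the acquire/release atomics are a simplification of what a real VMM would do.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Bit 0 = dirty, bit 1 = reset, per the table in the hunk above. */
#define DIRTY_GFN_DIRTY 0x1u
#define DIRTY_GFN_RESET 0x2u

/*
 * One harvesting pass: collect published entries in order, mark each one
 * for reset (01 -> 1X), then ask KVM to recycle them with the VM ioctl.
 * Returns the updated fetch index for the caller to remember.
 */
uint32_t harvest_dirty_ring(int vm_fd, struct kvm_dirty_gfn *ring,
			    uint32_t nr_entries, uint32_t fetch)
{
	uint32_t collected = 0;

	for (;;) {
		struct kvm_dirty_gfn *gfn = &ring[fetch % nr_entries];
		uint32_t flags = __atomic_load_n(&gfn->flags, __ATOMIC_ACQUIRE);

		if (!(flags & DIRTY_GFN_DIRTY))
			break;	/* no further entries published yet */

		/* gfn->slot and gfn->offset identify the dirtied page here. */

		__atomic_store_n(&gfn->flags, flags | DIRTY_GFN_RESET,
				 __ATOMIC_RELEASE);
		fetch++;
		collected++;
	}

	if (collected)
		ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);	/* VM-wide recycle */

	return fetch;
}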