author    Jiri Kosina <jkosina@suse.com>    2024-11-18 21:51:47 +0100
committer Jiri Kosina <jkosina@suse.com>    2024-11-18 21:51:47 +0100
commit    65578513c3a996cc0fa23526050cddeed08d8d64 (patch)
tree      c622e3629e080f60c6ff21a308e7bf4e1b02cbd5 /drivers
parent    873c578324c7082677303e2921b71fe0f5737ccc (diff)
parent    e8a0581914bd2e28f7af8d333ddc73fd78b1ef84 (diff)
Merge branch 'for-6.13/multitouch-v2' into for-linus
- code cleanup for mt_set_mode() (Dmitry Torokhov)
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/accel/ivpu/ivpu_debugfs.c | 9
-rw-r--r-- drivers/accel/ivpu/ivpu_hw.c | 1
-rw-r--r-- drivers/accel/ivpu/ivpu_hw.h | 1
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_ip.c | 5
-rw-r--r-- drivers/acpi/button.c | 11
-rw-r--r-- drivers/acpi/cppc_acpi.c | 31
-rw-r--r-- drivers/acpi/prmt.c | 29
-rw-r--r-- drivers/acpi/resource.c | 7
-rw-r--r-- drivers/ata/libata-eh.c | 1
-rw-r--r-- drivers/base/core.c | 48
-rw-r--r-- drivers/base/module.c | 4
-rw-r--r-- drivers/char/tpm/tpm-chip.c | 14
-rw-r--r-- drivers/char/tpm/tpm-dev-common.c | 3
-rw-r--r-- drivers/char/tpm/tpm-interface.c | 30
-rw-r--r-- drivers/char/tpm/tpm2-sessions.c | 100
-rw-r--r-- drivers/cxl/Kconfig | 1
-rw-r--r-- drivers/cxl/Makefile | 20
-rw-r--r-- drivers/cxl/acpi.c | 7
-rw-r--r-- drivers/cxl/core/cdat.c | 3
-rw-r--r-- drivers/cxl/core/hdm.c | 50
-rw-r--r-- drivers/cxl/core/port.c | 13
-rw-r--r-- drivers/cxl/core/region.c | 91
-rw-r--r-- drivers/cxl/core/trace.h | 17
-rw-r--r-- drivers/cxl/cxl.h | 3
-rw-r--r-- drivers/cxl/port.c | 17
-rw-r--r-- drivers/dma/sh/rz-dmac.c | 25
-rw-r--r-- drivers/dma/ti/k3-udma.c | 62
-rw-r--r-- drivers/edac/qcom_edac.c | 8
-rw-r--r-- drivers/firewire/core-topology.c | 2
-rw-r--r-- drivers/firmware/arm_scmi/bus.c | 7
-rw-r--r-- drivers/firmware/arm_scmi/common.h | 2
-rw-r--r-- drivers/firmware/arm_scmi/driver.c | 10
-rw-r--r-- drivers/firmware/arm_sdei.c | 2
-rw-r--r-- drivers/firmware/microchip/mpfs-auto-update.c | 42
-rw-r--r-- drivers/firmware/qcom/qcom_scm.c | 17
-rw-r--r-- drivers/gpio/gpio-sloppy-logic-analyzer.c | 4
-rw-r--r-- drivers/gpio/gpiolib-swnode.c | 2
-rw-r--r-- drivers/gpio/gpiolib.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 11
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h | 132
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 6
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 66
-rw-r--r-- drivers/gpu/drm/bridge/aux-bridge.c | 3
-rw-r--r-- drivers/gpu/drm/bridge/tc358767.c | 1
-rw-r--r-- drivers/gpu/drm/i915/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_ddp_comp.c | 2
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_ddp_comp.h | 10
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_drv.h | 2
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 74
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c | 7
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dp.c | 85
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_ethdr.c | 7
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_ethdr.h | 1
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_plane.c | 15
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_plane.h | 4
-rw-r--r-- drivers/gpu/drm/panthor/panthor_fw.c | 4
-rw-r--r-- drivers/gpu/drm/panthor/panthor_gem.c | 11
-rw-r--r-- drivers/gpu/drm/panthor/panthor_mmu.c | 16
-rw-r--r-- drivers/gpu/drm/panthor/panthor_mmu.h | 1
-rw-r--r-- drivers/gpu/drm/panthor/panthor_sched.c | 20
-rw-r--r-- drivers/gpu/drm/scheduler/sched_main.c | 5
-rw-r--r-- drivers/gpu/drm/tegra/drm.c | 4
-rw-r--r-- drivers/gpu/drm/tests/drm_connector_test.c | 24
-rw-r--r-- drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c | 8
-rw-r--r-- drivers/gpu/drm/tests/drm_kunit_helpers.c | 42
-rw-r--r-- drivers/gpu/drm/xe/display/xe_display.c | 65
-rw-r--r-- drivers/gpu/drm/xe/display/xe_display.h | 8
-rw-r--r-- drivers/gpu/drm/xe/xe_device.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_force_wake.c | 12
-rw-r--r-- drivers/gpu/drm/xe/xe_ggtt.c | 10
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_ct.c | 18
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_submit.c | 32
-rw-r--r-- drivers/gpu/drm/xe/xe_pm.c | 6
-rw-r--r-- drivers/gpu/drm/xe/xe_sync.c | 3
-rw-r--r-- drivers/hid/hid-multitouch.c | 30
-rw-r--r-- drivers/iio/adc/ad7124.c | 2
-rw-r--r-- drivers/iio/adc/ad7380.c | 136
-rw-r--r-- drivers/iio/dac/Kconfig | 2
-rw-r--r-- drivers/iio/industrialio-gts-helper.c | 4
-rw-r--r-- drivers/iio/light/veml6030.c | 2
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 4
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 38
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 2
-rw-r--r-- drivers/infiniband/hw/cxgb4/provider.c | 1
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 4
-rw-r--r-- drivers/input/input.c | 134
-rw-r--r-- drivers/input/keyboard/adp5588-keys.c | 6
-rw-r--r-- drivers/input/touchscreen/edt-ft5x06.c | 19
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 14
-rw-r--r-- drivers/md/md.c | 24
-rw-r--r-- drivers/md/raid10.c | 7
-rw-r--r-- drivers/misc/mei/client.c | 4
-rw-r--r-- drivers/mmc/host/sdhci-pci-gli.c | 38
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.c | 21
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.h | 6
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.c | 1
-rw-r--r-- drivers/net/dsa/mv88e6xxx/ptp.c | 108
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 22
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 70
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 12
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 10
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.c | 68
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.h | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 59
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 33
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 45
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 40
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c | 9
-rw-r--r-- drivers/net/ethernet/i825xx/sun3_82586.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/devlink/devlink_port.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dpll.c | 70
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 21
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_rx.c | 82
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed_wo.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 9
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 22
-rw-r--r-- drivers/net/gtp.c | 22
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 30
-rw-r--r-- drivers/net/macsec.c | 3
-rw-r--r-- drivers/net/mctp/mctp-i2c.c | 3
-rw-r--r-- drivers/net/netdevsim/fib.c | 4
-rw-r--r-- drivers/net/phy/dp83822.c | 4
-rw-r--r-- drivers/net/plip/plip.c | 2
-rw-r--r-- drivers/net/pse-pd/pse_core.c | 4
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 2
-rw-r--r-- drivers/net/usb/r8152.c | 1
-rw-r--r-- drivers/net/usb/usbnet.c | 3
-rw-r--r-- drivers/net/virtio_net.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi-tlv.c | 7
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath11k/dp_rx.c | 7
-rw-r--r-- drivers/net/wireless/ath/wil6210/txrx.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/Kconfig | 1
-rw-r--r-- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 2
-rw-r--r-- drivers/net/wireless/intel/ipw2x00/ipw2200.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/common.c | 15
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/common.h | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 96
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/init.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 28
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-drv.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c | 34
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 6
-rw-r--r-- drivers/net/wireless/marvell/libertas/radiotap.h | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mcu.c | 7
-rw-r--r-- drivers/net/wireless/microchip/wilc1000/mon.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw88/usb.c | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw89/coex.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw89/pci.c | 48
-rw-r--r-- drivers/net/wireless/virtual/mac80211_hwsim.c | 4
-rw-r--r-- drivers/net/wwan/wwan_core.c | 2
-rw-r--r-- drivers/nvme/host/core.c | 56
-rw-r--r-- drivers/nvme/host/ioctl.c | 7
-rw-r--r-- drivers/nvme/target/auth.c | 1
-rw-r--r-- drivers/pci/pci.c | 14
-rw-r--r-- drivers/pci/probe.c | 2
-rw-r--r-- drivers/pci/pwrctl/pci-pwrctl-pwrseq.c | 55
-rw-r--r-- drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c | 12
-rw-r--r-- drivers/phy/broadcom/phy-brcm-usb-init.c | 2
-rw-r--r-- drivers/phy/cadence/phy-cadence-sierra.c | 21
-rw-r--r-- drivers/phy/freescale/phy-fsl-imx8m-pcie.c | 10
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-combo.c | 3
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-pcie.c | 8
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c | 1
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-usb.c | 1
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-usbc.c | 1
-rw-r--r-- drivers/phy/rockchip/Kconfig | 1
-rw-r--r-- drivers/phy/starfive/phy-jh7110-usb.c | 16
-rw-r--r-- drivers/phy/tegra/xusb.c | 2
-rw-r--r-- drivers/phy/ti/phy-j721e-wiz.c | 4
-rw-r--r-- drivers/platform/x86/asus-wmi.c | 10
-rw-r--r-- drivers/platform/x86/dell/dell-wmi-base.c | 9
-rw-r--r-- drivers/platform/x86/intel/pmc/adl.c | 2
-rw-r--r-- drivers/platform/x86/intel/pmc/cnp.c | 2
-rw-r--r-- drivers/platform/x86/intel/pmc/core.c | 46
-rw-r--r-- drivers/platform/x86/intel/pmc/core.h | 8
-rw-r--r-- drivers/platform/x86/intel/pmc/core_ssram.c | 4
-rw-r--r-- drivers/platform/x86/intel/pmc/icl.c | 2
-rw-r--r-- drivers/platform/x86/intel/pmc/mtl.c | 2
-rw-r--r-- drivers/platform/x86/intel/pmc/tgl.c | 2
-rw-r--r-- drivers/powercap/dtpm_devfreq.c | 2
-rw-r--r-- drivers/rpmsg/qcom_glink_native.c | 10
-rw-r--r-- drivers/scsi/scsi_debug.c | 10
-rw-r--r-- drivers/soc/qcom/llcc-qcom.c | 3
-rw-r--r-- drivers/soc/qcom/pmic_glink.c | 25
-rw-r--r-- drivers/soc/qcom/socinfo.c | 8
-rw-r--r-- drivers/soundwire/intel_ace2x.c | 19
-rw-r--r-- drivers/spi/spi-fsl-dspi.c | 6
-rw-r--r-- drivers/spi/spi-geni-qcom.c | 8
-rw-r--r-- drivers/spi/spi-mtk-snfi.c | 2
-rw-r--r-- drivers/spi/spi-stm32.c | 1
-rw-r--r-- drivers/staging/iio/frequency/ad9832.c | 7
-rw-r--r-- drivers/thunderbolt/retimer.c | 5
-rw-r--r-- drivers/thunderbolt/tb.c | 48
-rw-r--r-- drivers/ufs/core/ufshcd.c | 2
-rw-r--r-- drivers/usb/core/usb-acpi.c | 4
-rw-r--r-- drivers/usb/dwc2/params.c | 1
-rw-r--r-- drivers/usb/host/xhci-pci.c | 6
-rw-r--r-- drivers/usb/host/xhci-ring.c | 16
-rw-r--r-- drivers/usb/phy/phy.c | 2
-rw-r--r-- drivers/usb/typec/class.c | 6
-rw-r--r-- drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c | 10
-rw-r--r-- drivers/usb/typec/tcpm/tcpm.c | 10
-rw-r--r-- drivers/video/fbdev/Kconfig | 15
-rw-r--r-- drivers/video/fbdev/Makefile | 1
-rw-r--r-- drivers/video/fbdev/bw2.c | 2
-rw-r--r-- drivers/video/fbdev/cg14.c | 2
-rw-r--r-- drivers/video/fbdev/cg3.c | 2
-rw-r--r-- drivers/video/fbdev/cg6.c | 2
-rw-r--r-- drivers/video/fbdev/da8xx-fb.c | 1665
-rw-r--r-- drivers/video/fbdev/ffb.c | 2
-rw-r--r-- drivers/video/fbdev/leo.c | 2
-rw-r--r-- drivers/video/fbdev/nvidia/nv_hw.c | 8
-rw-r--r-- drivers/video/fbdev/p9100.c | 2
-rw-r--r-- drivers/video/fbdev/sbuslib.c | 2
-rw-r--r-- drivers/video/fbdev/sbuslib.h | 2
-rw-r--r-- drivers/video/fbdev/sstfb.c | 9
-rw-r--r-- drivers/video/fbdev/tcx.c | 2
243 files changed, 2602 insertions, 2794 deletions
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index 6f86f8df30db..8d50981594d1 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -108,6 +108,14 @@ static int reset_pending_show(struct seq_file *s, void *v)
return 0;
}
+static int firewall_irq_counter_show(struct seq_file *s, void *v)
+{
+ struct ivpu_device *vdev = seq_to_ivpu(s);
+
+ seq_printf(s, "%d\n", atomic_read(&vdev->hw->firewall_irq_counter));
+ return 0;
+}
+
static const struct drm_debugfs_info vdev_debugfs_list[] = {
{"bo_list", bo_list_show, 0},
{"fw_name", fw_name_show, 0},
@@ -116,6 +124,7 @@ static const struct drm_debugfs_info vdev_debugfs_list[] = {
{"last_bootmode", last_bootmode_show, 0},
{"reset_counter", reset_counter_show, 0},
{"reset_pending", reset_pending_show, 0},
+ {"firewall_irq_counter", firewall_irq_counter_show, 0},
};
static ssize_t
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index 27f0fe4d54e0..e69c0613513f 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -249,6 +249,7 @@ int ivpu_hw_init(struct ivpu_device *vdev)
platform_init(vdev);
wa_init(vdev);
timeouts_init(vdev);
+ atomic_set(&vdev->hw->firewall_irq_counter, 0);
return 0;
}
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index 1c0c98e3afb8..a96a05b2acda 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -52,6 +52,7 @@ struct ivpu_hw_info {
int dma_bits;
ktime_t d0i3_entry_host_ts;
u64 d0i3_entry_vpu_ts;
+ atomic_t firewall_irq_counter;
};
int ivpu_hw_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
index dfd2f4a5b526..60b33fc59d96 100644
--- a/drivers/accel/ivpu/ivpu_hw_ip.c
+++ b/drivers/accel/ivpu/ivpu_hw_ip.c
@@ -1062,7 +1062,10 @@ static void irq_wdt_mss_handler(struct ivpu_device *vdev)
static void irq_noc_firewall_handler(struct ivpu_device *vdev)
{
- ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
+ atomic_inc(&vdev->hw->firewall_irq_counter);
+
+ ivpu_dbg(vdev, IRQ, "NOC Firewall interrupt detected, counter %d\n",
+ atomic_read(&vdev->hw->firewall_irq_counter));
}
/* Handler for IRQs from NPU core */
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 51470208e6da..7773e6b860e7 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -130,6 +130,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
+ {
+ /*
+ * Samsung galaxybook2 ,initial _LID device notification returns
+ * lid closed.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "750XED"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
{}
};
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index b73b3aa92f3f..1a40f0514eaa 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -867,7 +867,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
- spin_lock_init(&cpc_ptr->rmw_lock);
+ raw_spin_lock_init(&cpc_ptr->rmw_lock);
/* Parse PSD data for this CPU */
ret = acpi_get_psd(cpc_ptr, handle);
@@ -1087,6 +1087,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
struct cpc_desc *cpc_desc;
+ unsigned long flags;
size = GET_BIT_WIDTH(reg);
@@ -1126,7 +1127,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return -ENODEV;
}
- spin_lock(&cpc_desc->rmw_lock);
+ raw_spin_lock_irqsave(&cpc_desc->rmw_lock, flags);
switch (size) {
case 8:
prev_val = readb_relaxed(vaddr);
@@ -1141,7 +1142,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
prev_val = readq_relaxed(vaddr);
break;
default:
- spin_unlock(&cpc_desc->rmw_lock);
+ raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
return -EFAULT;
}
val = MASK_VAL_WRITE(reg, prev_val, val);
@@ -1174,7 +1175,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- spin_unlock(&cpc_desc->rmw_lock);
+ raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
return ret_val;
}
@@ -1916,9 +1917,15 @@ unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_freq - caps->lowest_freq;
+ /* Avoid special case when nominal_freq is equal to lowest_freq */
+ if (caps->lowest_freq == caps->nominal_freq) {
+ mul = caps->nominal_freq;
+ div = caps->nominal_perf;
+ } else {
+ mul = caps->nominal_freq - caps->lowest_freq;
+ div = caps->nominal_perf - caps->lowest_perf;
+ }
mul *= KHZ_PER_MHZ;
- div = caps->nominal_perf - caps->lowest_perf;
offset = caps->nominal_freq * KHZ_PER_MHZ -
div64_u64(caps->nominal_perf * mul, div);
} else {
@@ -1939,11 +1946,17 @@ unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
{
s64 retval, offset = 0;
static u64 max_khz;
- u64 mul, div;
+ u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_perf - caps->lowest_perf;
- div = caps->nominal_freq - caps->lowest_freq;
+ /* Avoid special case when nominal_freq is equal to lowest_freq */
+ if (caps->lowest_freq == caps->nominal_freq) {
+ mul = caps->nominal_perf;
+ div = caps->nominal_freq;
+ } else {
+ mul = caps->nominal_perf - caps->lowest_perf;
+ div = caps->nominal_freq - caps->lowest_freq;
+ }
/*
* We don't need to convert to kHz for computing offset and can
* directly use nominal_freq and lowest_freq as the div64_u64
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 1cfaa5957ac4..747f83f7114d 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -52,7 +52,7 @@ struct prm_context_buffer {
static LIST_HEAD(prm_module_list);
struct prm_handler_info {
- guid_t guid;
+ efi_guid_t guid;
efi_status_t (__efiapi *handler_addr)(u64, void *);
u64 static_data_buffer_addr;
u64 acpi_param_buffer_addr;
@@ -72,17 +72,21 @@ struct prm_module_info {
struct prm_handler_info handlers[] __counted_by(handler_count);
};
-static u64 efi_pa_va_lookup(u64 pa)
+static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa)
{
efi_memory_desc_t *md;
u64 pa_offset = pa & ~PAGE_MASK;
u64 page = pa & PAGE_MASK;
for_each_efi_memory_desc(md) {
- if (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)
+ if ((md->attribute & EFI_MEMORY_RUNTIME) &&
+ (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)) {
return pa_offset + md->virt_addr + page - md->phys_addr;
+ }
}
+ pr_warn("Failed to find VA for GUID: %pUL, PA: 0x%llx", guid, pa);
+
return 0;
}
@@ -148,9 +152,15 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
th = &tm->handlers[cur_handler];
guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
- th->handler_addr = (void *)efi_pa_va_lookup(handler_info->handler_address);
- th->static_data_buffer_addr = efi_pa_va_lookup(handler_info->static_data_buffer_address);
- th->acpi_param_buffer_addr = efi_pa_va_lookup(handler_info->acpi_param_buffer_address);
+ th->handler_addr =
+ (void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address);
+
+ th->static_data_buffer_addr =
+ efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address);
+
+ th->acpi_param_buffer_addr =
+ efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address);
+
} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
return 0;
@@ -277,6 +287,13 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
if (!handler || !module)
goto invalid_guid;
+ if (!handler->handler_addr ||
+ !handler->static_data_buffer_addr ||
+ !handler->acpi_param_buffer_addr) {
+ buffer->prm_status = PRM_HANDLER_ERROR;
+ return AE_OK;
+ }
+
ACPI_COPY_NAMESEG(context.signature, "PRMC");
context.revision = 0x0;
context.reserved = 0x0;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 129bceb1f4a2..7fe842dae1ec 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -503,6 +503,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "17U70P"),
},
},
+ {
+ /* LG Electronics 16T90SP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+ DMI_MATCH(DMI_BOARD_NAME, "16T90SP"),
+ },
+ },
{ }
};
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index fa41ea57a978..3b303d4ae37a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -651,6 +651,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
/* the scmd has an associated qc */
if (!(qc->flags & ATA_QCFLAG_EH)) {
/* which hasn't failed yet, timeout */
+ set_host_byte(scmd, DID_TIME_OUT);
qc->err_mask |= AC_ERR_TIMEOUT;
qc->flags |= ATA_QCFLAG_EH;
nr_timedout++;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index a4c853411a6b..048ff98dbdfd 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -26,7 +26,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
-#include <linux/rcupdate.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
@@ -2634,7 +2633,6 @@ static const char *dev_uevent_name(const struct kobject *kobj)
static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
const struct device *dev = kobj_to_dev(kobj);
- struct device_driver *driver;
int retval = 0;
/* add device node properties if present */
@@ -2663,12 +2661,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
if (dev->type && dev->type->name)
add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
- /* Synchronize with module_remove_driver() */
- rcu_read_lock();
- driver = READ_ONCE(dev->driver);
- if (driver)
- add_uevent_var(env, "DRIVER=%s", driver->name);
- rcu_read_unlock();
+ if (dev->driver)
+ add_uevent_var(env, "DRIVER=%s", dev->driver->name);
/* Add common DT information about the device */
of_device_uevent(dev, env);
@@ -2738,8 +2732,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env)
return -ENOMEM;
+ /* Synchronize with really_probe() */
+ device_lock(dev);
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
+ device_unlock(dev);
if (retval)
goto out;
@@ -4038,6 +4035,41 @@ int device_for_each_child_reverse(struct device *parent, void *data,
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
/**
+ * device_for_each_child_reverse_from - device child iterator in reversed order.
+ * @parent: parent struct device.
+ * @from: optional starting point in child list
+ * @fn: function to be called for each device.
+ * @data: data for the callback.
+ *
+ * Iterate over @parent's child devices, starting at @from, and call @fn
+ * for each, passing it @data. This helper is identical to
+ * device_for_each_child_reverse() when @from is NULL.
+ *
+ * @fn is checked each iteration. If it returns anything other than 0,
+ * iteration stop and that value is returned to the caller of
+ * device_for_each_child_reverse_from();
+ */
+int device_for_each_child_reverse_from(struct device *parent,
+ struct device *from, const void *data,
+ int (*fn)(struct device *, const void *))
+{
+ struct klist_iter i;
+ struct device *child;
+ int error = 0;
+
+ if (!parent->p)
+ return 0;
+
+ klist_iter_init_node(&parent->p->klist_children, &i,
+ (from ? &from->p->knode_parent : NULL));
+ while ((child = prev_device(&i)) && !error)
+ error = fn(child, data);
+ klist_iter_exit(&i);
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
+
+/**
* device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
* @match: Callback function to check device
diff --git a/drivers/base/module.c b/drivers/base/module.c
index c4eaa1158d54..5bc71bea883a 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -7,7 +7,6 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/rcupdate.h>
#include "base.h"
static char *make_driver_name(const struct device_driver *drv)
@@ -102,9 +101,6 @@ void module_remove_driver(const struct device_driver *drv)
if (!drv)
return;
- /* Synchronize with dev_uevent() */
- synchronize_rcu();
-
sysfs_remove_link(&drv->p->kobj, "module");
if (drv->owner)
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 854546000c92..7df7abaf3e52 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -525,10 +525,6 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
- /* Give back zero bytes, as TPM chip has not yet fully resumed: */
- if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
- return 0;
-
return tpm_get_random(chip, data, max);
}
@@ -674,6 +670,16 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
*/
void tpm_chip_unregister(struct tpm_chip *chip)
{
+#ifdef CONFIG_TCG_TPM2_HMAC
+ int rc;
+
+ rc = tpm_try_get_ops(chip);
+ if (!rc) {
+ tpm2_end_auth_session(chip);
+ tpm_put_ops(chip);
+ }
+#endif
+
tpm_del_legacy_sysfs(chip);
if (tpm_is_hwrng_enabled(chip))
hwrng_unregister(&chip->hwrng);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index c3fbbf4d3db7..48ff87444f85 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -27,6 +27,9 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
struct tpm_header *header = (void *)buf;
ssize_t ret, len;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ tpm2_end_auth_session(chip);
+
ret = tpm2_prepare_space(chip, space, buf, bufsiz);
/* If the command is not implemented by the TPM, synthesize a
* response with a TPM2_RC_COMMAND_CODE return for user-space.
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 5da134f12c9a..b1daa0d7b341 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -370,6 +370,13 @@ int tpm_pm_suspend(struct device *dev)
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc) {
+ /* Can be safely set out of locks, as no action cannot race: */
+ chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+ goto out;
+ }
+
if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
goto suspended;
@@ -377,19 +384,19 @@ int tpm_pm_suspend(struct device *dev)
!pm_suspend_via_firmware())
goto suspended;
- rc = tpm_try_get_ops(chip);
- if (!rc) {
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- tpm2_shutdown(chip, TPM2_SU_STATE);
- else
- rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
-
- tpm_put_ops(chip);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ tpm2_end_auth_session(chip);
+ tpm2_shutdown(chip, TPM2_SU_STATE);
+ goto suspended;
}
+ rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+
suspended:
chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+ tpm_put_ops(chip);
+out:
if (rc)
dev_err(dev, "Ignoring error %d while suspending\n", rc);
return 0;
@@ -438,11 +445,18 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
if (!chip)
return -ENODEV;
+ /* Give back zero bytes, as TPM chip has not yet fully resumed: */
+ if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) {
+ rc = 0;
+ goto out;
+ }
+
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_get_random(chip, out, max);
else
rc = tpm1_get_random(chip, out, max);
+out:
tpm_put_ops(chip);
return rc;
}
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index 511c67061728..0739830904b2 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -333,6 +333,9 @@ void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
}
#ifdef CONFIG_TCG_TPM2_HMAC
+ /* The first write to /dev/tpm{rm0} will flush the session. */
+ attributes |= TPM2_SA_CONTINUE_SESSION;
+
/*
* The Architecture Guide requires us to strip trailing zeros
* before computing the HMAC
@@ -484,7 +487,8 @@ static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v,
sha256_final(&sctx, out);
}
-static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
+static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip,
+ struct tpm2_auth *auth)
{
struct crypto_kpp *kpp;
struct kpp_request *req;
@@ -543,7 +547,7 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
sg_set_buf(&s[0], chip->null_ec_key_x, EC_PT_SZ);
sg_set_buf(&s[1], chip->null_ec_key_y, EC_PT_SZ);
kpp_request_set_input(req, s, EC_PT_SZ*2);
- sg_init_one(d, chip->auth->salt, EC_PT_SZ);
+ sg_init_one(d, auth->salt, EC_PT_SZ);
kpp_request_set_output(req, d, EC_PT_SZ);
crypto_kpp_compute_shared_secret(req);
kpp_request_free(req);
@@ -554,8 +558,7 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
* This works because KDFe fully consumes the secret before it
* writes the salt
*/
- tpm2_KDFe(chip->auth->salt, "SECRET", x, chip->null_ec_key_x,
- chip->auth->salt);
+ tpm2_KDFe(auth->salt, "SECRET", x, chip->null_ec_key_x, auth->salt);
out:
crypto_free_kpp(kpp);
@@ -853,7 +856,9 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
if (rc)
/* manually close the session if it wasn't consumed */
tpm2_flush_context(chip, auth->handle);
- memzero_explicit(auth, sizeof(*auth));
+
+ kfree_sensitive(auth);
+ chip->auth = NULL;
} else {
/* reset for next use */
auth->session = TPM_HEADER_SIZE;
@@ -881,7 +886,8 @@ void tpm2_end_auth_session(struct tpm_chip *chip)
return;
tpm2_flush_context(chip, auth->handle);
- memzero_explicit(auth, sizeof(*auth));
+ kfree_sensitive(auth);
+ chip->auth = NULL;
}
EXPORT_SYMBOL(tpm2_end_auth_session);
@@ -915,33 +921,37 @@ static int tpm2_parse_start_auth_session(struct tpm2_auth *auth,
static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
{
- int rc;
unsigned int offset = 0; /* dummy offset for null seed context */
u8 name[SHA256_DIGEST_SIZE + 2];
+ u32 tmp_null_key;
+ int rc;
rc = tpm2_load_context(chip, chip->null_key_context, &offset,
- null_key);
- if (rc != -EINVAL)
- return rc;
+ &tmp_null_key);
+ if (rc != -EINVAL) {
+ if (!rc)
+ *null_key = tmp_null_key;
+ goto err;
+ }
- /* an integrity failure may mean the TPM has been reset */
- dev_err(&chip->dev, "NULL key integrity failure!\n");
- /* check the null name against what we know */
- tpm2_create_primary(chip, TPM2_RH_NULL, NULL, name);
- if (memcmp(name, chip->null_key_name, sizeof(name)) == 0)
- /* name unchanged, assume transient integrity failure */
- return rc;
- /*
- * Fatal TPM failure: the NULL seed has actually changed, so
- * the TPM must have been illegally reset. All in-kernel TPM
- * operations will fail because the NULL primary can't be
- * loaded to salt the sessions, but disable the TPM anyway so
- * userspace programmes can't be compromised by it.
- */
- dev_err(&chip->dev, "NULL name has changed, disabling TPM due to interference\n");
+ /* Try to re-create null key, given the integrity failure: */
+ rc = tpm2_create_primary(chip, TPM2_RH_NULL, &tmp_null_key, name);
+ if (rc)
+ goto err;
+
+ /* Return null key if the name has not been changed: */
+ if (!memcmp(name, chip->null_key_name, sizeof(name))) {
+ *null_key = tmp_null_key;
+ return 0;
+ }
+
+ /* Deduce from the name change TPM interference: */
+ dev_err(&chip->dev, "null key integrity check failed\n");
+ tpm2_flush_context(chip, tmp_null_key);
chip->flags |= TPM_CHIP_FLAG_DISABLE;
- return rc;
+err:
+ return rc ? -ENODEV : 0;
}
/**
@@ -958,16 +968,20 @@ static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
*/
int tpm2_start_auth_session(struct tpm_chip *chip)
{
+ struct tpm2_auth *auth;
struct tpm_buf buf;
- struct tpm2_auth *auth = chip->auth;
- int rc;
u32 null_key;
+ int rc;
- if (!auth) {
- dev_warn_once(&chip->dev, "auth session is not active\n");
+ if (chip->auth) {
+ dev_warn_once(&chip->dev, "auth session is active\n");
return 0;
}
+ auth = kzalloc(sizeof(*auth), GFP_KERNEL);
+ if (!auth)
+ return -ENOMEM;
+
rc = tpm2_load_null(chip, &null_key);
if (rc)
goto out;
@@ -988,7 +1002,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
tpm_buf_append(&buf, auth->our_nonce, sizeof(auth->our_nonce));
/* append encrypted salt and squirrel away unencrypted in auth */
- tpm_buf_append_salt(&buf, chip);
+ tpm_buf_append_salt(&buf, chip, auth);
/* session type (HMAC, audit or policy) */
tpm_buf_append_u8(&buf, TPM2_SE_HMAC);
@@ -1010,10 +1024,13 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
tpm_buf_destroy(&buf);
- if (rc)
- goto out;
+ if (rc == TPM2_RC_SUCCESS) {
+ chip->auth = auth;
+ return 0;
+ }
- out:
+out:
+ kfree_sensitive(auth);
return rc;
}
EXPORT_SYMBOL(tpm2_start_auth_session);
@@ -1347,18 +1364,21 @@ static int tpm2_create_null_primary(struct tpm_chip *chip)
*
* Derive and context save the null primary and allocate memory in the
* struct tpm_chip for the authorizations.
+ *
+ * Return:
+ * * 0 - OK
+ * * -errno - A system error
+ * * TPM_RC - A TPM error
*/
int tpm2_sessions_init(struct tpm_chip *chip)
{
int rc;
rc = tpm2_create_null_primary(chip);
- if (rc)
- dev_err(&chip->dev, "TPM: security failed (NULL seed derivation): %d\n", rc);
-
- chip->auth = kmalloc(sizeof(*chip->auth), GFP_KERNEL);
- if (!chip->auth)
- return -ENOMEM;
+ if (rc) {
+ dev_err(&chip->dev, "null key creation failed with %d\n", rc);
+ return rc;
+ }
return rc;
}
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 29c192f20082..876469e23f7a 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -60,6 +60,7 @@ config CXL_ACPI
default CXL_BUS
select ACPI_TABLE_LIB
select ACPI_HMAT
+ select CXL_PORT
help
Enable support for host managed device memory (HDM) resources
published by a platform's ACPI CXL memory layout description. See
diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
index db321f48ba52..2caa90fa4bf2 100644
--- a/drivers/cxl/Makefile
+++ b/drivers/cxl/Makefile
@@ -1,13 +1,21 @@
# SPDX-License-Identifier: GPL-2.0
+
+# Order is important here for the built-in case:
+# - 'core' first for fundamental init
+# - 'port' before platform root drivers like 'acpi' so that CXL-root ports
+# are immediately enabled
+# - 'mem' and 'pmem' before endpoint drivers so that memdevs are
+# immediately enabled
+# - 'pci' last, also mirrors the hardware enumeration hierarchy
obj-y += core/
-obj-$(CONFIG_CXL_PCI) += cxl_pci.o
-obj-$(CONFIG_CXL_MEM) += cxl_mem.o
+obj-$(CONFIG_CXL_PORT) += cxl_port.o
obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o
-obj-$(CONFIG_CXL_PORT) += cxl_port.o
+obj-$(CONFIG_CXL_MEM) += cxl_mem.o
+obj-$(CONFIG_CXL_PCI) += cxl_pci.o
-cxl_mem-y := mem.o
-cxl_pci-y := pci.o
+cxl_port-y := port.o
cxl_acpi-y := acpi.o
cxl_pmem-y := pmem.o security.o
-cxl_port-y := port.o
+cxl_mem-y := mem.o
+cxl_pci-y := pci.o
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 82b78e331d8e..432b7cfd12a8 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -924,6 +924,13 @@ static void __exit cxl_acpi_exit(void)
/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);
+
+/*
+ * Arrange for host-bridge ports to be active synchronous with
+ * cxl_acpi_probe() exit.
+ */
+MODULE_SOFTDEP("pre: cxl_port");
+
module_exit(cxl_acpi_exit);
MODULE_DESCRIPTION("CXL ACPI: Platform Support");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index ef1621d40f05..e9cd7939c407 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -641,6 +641,9 @@ static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr,
void *ptr;
int rc;
+ if (!dev_is_pci(cxlds->dev))
+ return -ENODEV;
+
if (cxlds->rcd)
return -ENODEV;
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 3df10517a327..223c273c0cd1 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -712,7 +712,44 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
return 0;
}
-static int cxl_decoder_reset(struct cxl_decoder *cxld)
+static int commit_reap(struct device *dev, const void *data)
+{
+ struct cxl_port *port = to_cxl_port(dev->parent);
+ struct cxl_decoder *cxld;
+
+ if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+ if (port->commit_end == cxld->id &&
+ ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
+ port->commit_end--;
+ dev_dbg(&port->dev, "reap: %s commit_end: %d\n",
+ dev_name(&cxld->dev), port->commit_end);
+ }
+
+ return 0;
+}
+
+void cxl_port_commit_reap(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ /*
+ * Once the highest committed decoder is disabled, free any other
+ * decoders that were pinned allocated by out-of-order release.
+ */
+ port->commit_end--;
+ dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev),
+ port->commit_end);
+ device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL,
+ commit_reap);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, CXL);
+
+static void cxl_decoder_reset(struct cxl_decoder *cxld)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
@@ -721,14 +758,14 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
u32 ctrl;
if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
- return 0;
+ return;
- if (port->commit_end != id) {
+ if (port->commit_end == id)
+ cxl_port_commit_reap(cxld);
+ else
dev_dbg(&port->dev,
"%s: out of order reset, expected decoder%d.%d\n",
dev_name(&cxld->dev), port->id, port->commit_end);
- return -EBUSY;
- }
down_read(&cxl_dpa_rwsem);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
@@ -741,7 +778,6 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
up_read(&cxl_dpa_rwsem);
- port->commit_end--;
cxld->flags &= ~CXL_DECODER_F_ENABLE;
/* Userspace is now responsible for reconfiguring this decoder */
@@ -751,8 +787,6 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
cxled = to_cxl_endpoint_decoder(&cxld->dev);
cxled->state = CXL_DECODER_STATE_MANUAL;
}
-
- return 0;
}
static int cxl_setup_hdm_decoder_from_dvsec(
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index e666ec6a9085..af92c67bc954 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -2084,11 +2084,18 @@ static void cxl_bus_remove(struct device *dev)
static struct workqueue_struct *cxl_bus_wq;
-static void cxl_bus_rescan_queue(struct work_struct *w)
+static int cxl_rescan_attach(struct device *dev, void *data)
{
- int rc = bus_rescan_devices(&cxl_bus_type);
+ int rc = device_attach(dev);
+
+ dev_vdbg(dev, "rescan: %s\n", rc ? "attach" : "detached");
- pr_debug("CXL bus rescan result: %d\n", rc);
+ return 0;
+}
+
+static void cxl_bus_rescan_queue(struct work_struct *w)
+{
+ bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_rescan_attach);
}
void cxl_bus_rescan(void)
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index e701e4b04032..dff618c708dc 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -232,8 +232,8 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
return 0;
} else {
- dev_err(&cxlr->dev,
- "Failed to synchronize CPU cache state\n");
+ dev_WARN(&cxlr->dev,
+ "Failed to synchronize CPU cache state\n");
return -ENXIO;
}
}
@@ -242,19 +242,17 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
return 0;
}
-static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+static void cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
struct cxl_region_params *p = &cxlr->params;
- int i, rc = 0;
+ int i;
/*
- * Before region teardown attempt to flush, and if the flush
- * fails cancel the region teardown for data consistency
- * concerns
+ * Before region teardown attempt to flush, evict any data cached for
+ * this region, or scream loudly about missing arch / platform support
+ * for CXL teardown.
*/
- rc = cxl_region_invalidate_memregion(cxlr);
- if (rc)
- return rc;
+ cxl_region_invalidate_memregion(cxlr);
for (i = count - 1; i >= 0; i--) {
struct cxl_endpoint_decoder *cxled = p->targets[i];
@@ -277,23 +275,17 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
if (cxld->reset)
- rc = cxld->reset(cxld);
- if (rc)
- return rc;
+ cxld->reset(cxld);
set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}
endpoint_reset:
- rc = cxled->cxld.reset(&cxled->cxld);
- if (rc)
- return rc;
+ cxled->cxld.reset(&cxled->cxld);
set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}
/* all decoders associated with this region have been torn down */
clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
-
- return 0;
}
static int commit_decoder(struct cxl_decoder *cxld)
@@ -409,16 +401,8 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
* still pending.
*/
if (p->state == CXL_CONFIG_RESET_PENDING) {
- rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
- /*
- * Revert to committed since there may still be active
- * decoders associated with this region, or move forward
- * to active to mark the reset successful
- */
- if (rc)
- p->state = CXL_CONFIG_COMMIT;
- else
- p->state = CXL_CONFIG_ACTIVE;
+ cxl_region_decode_reset(cxlr, p->interleave_ways);
+ p->state = CXL_CONFIG_ACTIVE;
}
}
@@ -794,26 +778,50 @@ out:
return rc;
}
+static int check_commit_order(struct device *dev, const void *data)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ /*
+ * if port->commit_end is not the only free decoder, then out of
+ * order shutdown has occurred, block further allocations until
+ * that is resolved
+ */
+ if (((cxld->flags & CXL_DECODER_F_ENABLE) == 0))
+ return -EBUSY;
+ return 0;
+}
+
static int match_free_decoder(struct device *dev, void *data)
{
+ struct cxl_port *port = to_cxl_port(dev->parent);
struct cxl_decoder *cxld;
- int *id = data;
+ int rc;
if (!is_switch_decoder(dev))
return 0;
cxld = to_cxl_decoder(dev);
- /* enforce ordered allocation */
- if (cxld->id != *id)
+ if (cxld->id != port->commit_end + 1)
return 0;
- if (!cxld->region)
- return 1;
-
- (*id)++;
+ if (cxld->region) {
+ dev_dbg(dev->parent,
+ "next decoder to commit (%s) is already reserved (%s)\n",
+ dev_name(dev), dev_name(&cxld->region->dev));
+ return 0;
+ }
- return 0;
+ rc = device_for_each_child_reverse_from(dev->parent, dev, NULL,
+ check_commit_order);
+ if (rc) {
+ dev_dbg(dev->parent,
+ "unable to allocate %s due to out of order shutdown\n",
+ dev_name(dev));
+ return 0;
+ }
+ return 1;
}
static int match_auto_decoder(struct device *dev, void *data)
@@ -840,7 +848,6 @@ cxl_region_find_decoder(struct cxl_port *port,
struct cxl_region *cxlr)
{
struct device *dev;
- int id = 0;
if (port == cxled_to_port(cxled))
return &cxled->cxld;
@@ -849,7 +856,7 @@ cxl_region_find_decoder(struct cxl_port *port,
dev = device_find_child(&port->dev, &cxlr->params,
match_auto_decoder);
else
- dev = device_find_child(&port->dev, &id, match_free_decoder);
+ dev = device_find_child(&port->dev, NULL, match_free_decoder);
if (!dev)
return NULL;
/*
@@ -2054,13 +2061,7 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
get_device(&cxlr->dev);
if (p->state > CXL_CONFIG_ACTIVE) {
- /*
- * TODO: tear down all impacted regions if a device is
- * removed out of order
- */
- rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
- if (rc)
- goto out;
+ cxl_region_decode_reset(cxlr, p->interleave_ways);
p->state = CXL_CONFIG_ACTIVE;
}
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index 8672b42ee4d1..8389a94adb1a 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -279,7 +279,7 @@ TRACE_EVENT(cxl_generic_event,
#define CXL_GMER_MEM_EVT_TYPE_ECC_ERROR 0x00
#define CXL_GMER_MEM_EVT_TYPE_INV_ADDR 0x01
#define CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x02
-#define show_mem_event_type(type) __print_symbolic(type, \
+#define show_gmer_mem_event_type(type) __print_symbolic(type, \
{ CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
{ CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
{ CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
@@ -373,7 +373,7 @@ TRACE_EVENT(cxl_general_media,
"hpa=%llx region=%s region_uuid=%pUb",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
- show_mem_event_type(__entry->type),
+ show_gmer_mem_event_type(__entry->type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->device,
__print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
@@ -391,6 +391,17 @@ TRACE_EVENT(cxl_general_media,
* DRAM Event Record defines many fields the same as the General Media Event
* Record. Reuse those definitions as appropriate.
*/
+#define CXL_DER_MEM_EVT_TYPE_ECC_ERROR 0x00
+#define CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x01
+#define CXL_DER_MEM_EVT_TYPE_INV_ADDR 0x02
+#define CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x03
+#define show_dram_mem_event_type(type) __print_symbolic(type, \
+ { CXL_DER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
+ { CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
+)
+
#define CXL_DER_VALID_CHANNEL BIT(0)
#define CXL_DER_VALID_RANK BIT(1)
#define CXL_DER_VALID_NIBBLE BIT(2)
@@ -477,7 +488,7 @@ TRACE_EVENT(cxl_dram,
"hpa=%llx region=%s region_uuid=%pUb",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
- show_mem_event_type(__entry->type),
+ show_dram_mem_event_type(__entry->type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->nibble_mask,
__entry->bank_group, __entry->bank,
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 0d8b810a51f0..5406e3ab3d4a 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -359,7 +359,7 @@ struct cxl_decoder {
struct cxl_region *region;
unsigned long flags;
int (*commit)(struct cxl_decoder *cxld);
- int (*reset)(struct cxl_decoder *cxld);
+ void (*reset)(struct cxl_decoder *cxld);
};
/*
@@ -730,6 +730,7 @@ static inline bool is_cxl_root(struct cxl_port *port)
int cxl_num_decoders_committed(struct cxl_port *port);
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
+void cxl_port_commit_reap(struct cxl_decoder *cxld);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
struct pci_bus *bus);
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 861dde65768f..9dc394295e1f 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -208,7 +208,22 @@ static struct cxl_driver cxl_port_driver = {
},
};
-module_cxl_driver(cxl_port_driver);
+static int __init cxl_port_init(void)
+{
+ return cxl_driver_register(&cxl_port_driver);
+}
+/*
+ * Be ready to immediately enable ports emitted by the platform CXL root
+ * (e.g. cxl_acpi) when CONFIG_CXL_PORT=y.
+ */
+subsys_initcall(cxl_port_init);
+
+static void __exit cxl_port_exit(void)
+{
+ cxl_driver_unregister(&cxl_port_driver);
+}
+module_exit(cxl_port_exit);
+
MODULE_DESCRIPTION("CXL: Port enumeration and services");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 65a27c5a7bce..811389fc9cb8 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -601,22 +601,25 @@ static int rz_dmac_config(struct dma_chan *chan,
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
u32 val;
- channel->src_per_address = config->src_addr;
channel->dst_per_address = config->dst_addr;
-
- val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
- if (val == CHCFG_DS_INVALID)
- return -EINVAL;
-
channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
- channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
+ if (channel->dst_per_address) {
+ val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
- val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
- if (val == CHCFG_DS_INVALID)
- return -EINVAL;
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
+ }
+ channel->src_per_address = config->src_addr;
channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
- channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
+ if (channel->src_per_address) {
+ val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
+
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
+ }
return 0;
}
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 406ee199c2ac..b3f27b3f9209 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -3185,27 +3185,40 @@ static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
d->static_tr.elcnt = elcnt;
- /*
- * PDMA must to close the packet when the channel is in packet mode.
- * For TR mode when the channel is not cyclic we also need PDMA to close
- * the packet otherwise the transfer will stall because PDMA holds on
- * the data it has received from the peripheral.
- */
if (uc->config.pkt_mode || !uc->cyclic) {
+ /*
+ * PDMA must close the packet when the channel is in packet mode.
+ * For TR mode when the channel is not cyclic we also need PDMA
+ * to close the packet otherwise the transfer will stall because
+ * PDMA holds on the data it has received from the peripheral.
+ */
unsigned int div = dev_width * elcnt;
if (uc->cyclic)
d->static_tr.bstcnt = d->residue / d->sglen / div;
else
d->static_tr.bstcnt = d->residue / div;
+ } else if (uc->ud->match_data->type == DMA_TYPE_BCDMA &&
+ uc->config.dir == DMA_DEV_TO_MEM &&
+ uc->cyclic) {
+ /*
+ * For cyclic mode with BCDMA we have to set EOP in each TR to
+ * prevent short packet errors seen on channel teardown. So the
+ * PDMA must close the packet after every TR transfer by setting
+ * burst count equal to the number of bytes transferred.
+ */
+ struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base;
- if (uc->config.dir == DMA_DEV_TO_MEM &&
- d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
- return -EINVAL;
+ d->static_tr.bstcnt =
+ (tr_req->icnt0 * tr_req->icnt1) / dev_width;
} else {
d->static_tr.bstcnt = 0;
}
+ if (uc->config.dir == DMA_DEV_TO_MEM &&
+ d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+ return -EINVAL;
+
return 0;
}
@@ -3450,8 +3463,9 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@@ -3476,6 +3490,7 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
unsigned int i;
int num_tr;
+ u32 period_csf = 0;
num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
&tr0_cnt1, &tr1_cnt0);
@@ -3498,6 +3513,20 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
period_addr = buf_addr |
((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
+ /*
+ * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the
+ * last TR of a descriptor, to mark the packet as complete.
+ * This is required for getting the teardown completion message in case
+ * of TX, and to avoid short-packet error in case of RX.
+ *
+ * As we are in cyclic mode, we do not know which period might be the
+ * last one, so set the flag for each period.
+ */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
+ uc->ud->match_data->type == DMA_TYPE_BCDMA) {
+ period_csf = CPPI5_TR_CSF_EOP;
+ }
+
for (i = 0; i < periods; i++) {
int tr_idx = i * num_tr;
@@ -3525,8 +3554,10 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
}
if (!(flags & DMA_PREP_INTERRUPT))
- cppi5_tr_csf_set(&tr_req[tr_idx].flags,
- CPPI5_TR_CSF_SUPR_EVT);
+ period_csf |= CPPI5_TR_CSF_SUPR_EVT;
+
+ if (period_csf)
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, period_csf);
period_addr += period_len;
}
@@ -3655,8 +3686,9 @@ udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index d3cd4cc54ace..a9a8ba067007 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -342,9 +342,11 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
int ecc_irq;
int rc;
- rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
- if (rc)
- return rc;
+ if (!llcc_driv_data->ecc_irq_configured) {
+ rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
+ if (rc)
+ return rc;
+ }
/* Allocate edac control info */
edev_ctl = edac_device_alloc_ctl_info(0, "qcom-llcc", 1, "bank",
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 6adadb11962e..892b94cfd626 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -204,7 +204,7 @@ static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self
// the node->ports array where the parent node should be. Later,
// when we handle the parent node, we fix up the reference.
++parent_count;
- node->color = i;
+ node->color = port_index;
break;
case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD:
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 96b2e5f9a8ef..157172a5f2b5 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -325,7 +325,10 @@ EXPORT_SYMBOL_GPL(scmi_driver_unregister);
static void scmi_device_release(struct device *dev)
{
- kfree(to_scmi_dev(dev));
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ kfree_const(scmi_dev->name);
+ kfree(scmi_dev);
}
static void __scmi_device_destroy(struct scmi_device *scmi_dev)
@@ -338,7 +341,6 @@ static void __scmi_device_destroy(struct scmi_device *scmi_dev)
if (scmi_dev->protocol_id == SCMI_PROTOCOL_SYSTEM)
atomic_set(&scmi_syspower_registered, 0);
- kfree_const(scmi_dev->name);
ida_free(&scmi_bus_id, scmi_dev->id);
device_unregister(&scmi_dev->dev);
}
@@ -410,7 +412,6 @@ __scmi_device_create(struct device_node *np, struct device *parent,
return scmi_dev;
put_dev:
- kfree_const(scmi_dev->name);
put_device(&scmi_dev->dev);
ida_free(&scmi_bus_id, id);
return NULL;
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index c4b8e7ff88aa..cdec50a698a1 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -163,6 +163,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
* used to initialize this channel
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
+ * @is_p2a: A flag to identify a channel as P2A (RX)
* @rx_timeout_ms: The configured RX timeout in milliseconds.
* @handle: Pointer to SCMI entity handle
* @no_completion_irq: Flag to indicate that this channel has no completion
@@ -174,6 +175,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
struct scmi_chan_info {
int id;
struct device *dev;
+ bool is_p2a;
unsigned int rx_timeout_ms;
struct scmi_handle *handle;
bool no_completion_irq;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index a477b5ade38d..f8934d049d68 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -1048,6 +1048,11 @@ static inline void scmi_xfer_command_release(struct scmi_info *info,
static inline void scmi_clear_channel(struct scmi_info *info,
struct scmi_chan_info *cinfo)
{
+ if (!cinfo->is_p2a) {
+ dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n");
+ return;
+ }
+
if (info->desc->ops->clear_channel)
info->desc->ops->clear_channel(cinfo);
}
@@ -2638,6 +2643,7 @@ static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
if (!cinfo)
return -ENOMEM;
+ cinfo->is_p2a = !tx;
cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
/* Create a unique name for this transport device */
@@ -3042,10 +3048,10 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
- ret = of_property_read_u32(dev->of_node, "max-rx-timeout-ms",
+ ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
&trans->desc->max_rx_timeout_ms);
if (ret && ret != -EINVAL)
- dev_err(dev, "Malformed max-rx-timeout-ms DT property.\n");
+ dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
dev_info(dev, "SCMI max-rx-timeout: %dms\n",
trans->desc->max_rx_timeout_ms);
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 285fe7ad490d..3e8051fe8296 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -763,7 +763,7 @@ static int sdei_device_freeze(struct device *dev)
int err;
/* unregister private events */
- cpuhp_remove_state(sdei_entry_point);
+ cpuhp_remove_state(sdei_hp_state);
err = sdei_unregister_shared();
if (err)
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index 9ca5ee58edbd..0f7ec8848202 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -76,14 +76,11 @@
#define AUTO_UPDATE_INFO_SIZE SZ_1M
#define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_INFO_SIZE)
-#define AUTO_UPDATE_TIMEOUT_MS 60000
-
struct mpfs_auto_update_priv {
struct mpfs_sys_controller *sys_controller;
struct device *dev;
struct mtd_info *flash;
struct fw_upload *fw_uploader;
- struct completion programming_complete;
size_t size_per_bitstream;
bool cancel_request;
};
@@ -156,19 +153,6 @@ static void mpfs_auto_update_cancel(struct fw_upload *fw_uploader)
static enum fw_upload_err mpfs_auto_update_poll_complete(struct fw_upload *fw_uploader)
{
- struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
- int ret;
-
- /*
- * There is no meaningful way to get the status of the programming while
- * it is in progress, so attempting anything other than waiting for it
- * to complete would be misplaced.
- */
- ret = wait_for_completion_timeout(&priv->programming_complete,
- msecs_to_jiffies(AUTO_UPDATE_TIMEOUT_MS));
- if (!ret)
- return FW_UPLOAD_ERR_TIMEOUT;
-
return FW_UPLOAD_ERR_NONE;
}
@@ -349,33 +333,23 @@ static enum fw_upload_err mpfs_auto_update_write(struct fw_upload *fw_uploader,
u32 offset, u32 size, u32 *written)
{
struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
- enum fw_upload_err err = FW_UPLOAD_ERR_NONE;
int ret;
- reinit_completion(&priv->programming_complete);
-
ret = mpfs_auto_update_write_bitstream(fw_uploader, data, offset, size, written);
- if (ret) {
- err = FW_UPLOAD_ERR_RW_ERROR;
- goto out;
- }
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
- if (priv->cancel_request) {
- err = FW_UPLOAD_ERR_CANCELED;
- goto out;
- }
+ if (priv->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
if (mpfs_auto_update_is_bitstream_info(data, size))
- goto out;
+ return FW_UPLOAD_ERR_NONE;
ret = mpfs_auto_update_verify_image(fw_uploader);
if (ret)
- err = FW_UPLOAD_ERR_FW_INVALID;
+ return FW_UPLOAD_ERR_FW_INVALID;
-out:
- complete(&priv->programming_complete);
-
- return err;
+ return FW_UPLOAD_ERR_NONE;
}
static const struct fw_upload_ops mpfs_auto_update_ops = {
@@ -461,8 +435,6 @@ static int mpfs_auto_update_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"The current bitstream does not support auto-update\n");
- init_completion(&priv->programming_complete);
-
fw_uploader = firmware_upload_register(THIS_MODULE, dev, "mpfs-auto-update",
&mpfs_auto_update_ops, priv);
if (IS_ERR(fw_uploader))
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 10986cb11ec0..2e4260ba5f79 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -112,6 +112,7 @@ enum qcom_scm_qseecom_tz_cmd_info {
};
#define QSEECOM_MAX_APP_NAME_SIZE 64
+#define SHMBRIDGE_RESULT_NOTSUPP 4
/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
@@ -216,7 +217,7 @@ static DEFINE_SPINLOCK(scm_query_lock);
struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
- return __scm->mempool;
+ return __scm ? __scm->mempool : NULL;
}
static enum qcom_scm_convention __get_convention(void)
@@ -545,7 +546,7 @@ static void qcom_scm_set_download_mode(u32 dload_mode)
} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
- } else {
+ } else if (dload_mode) {
dev_err(__scm->dev,
"No available mechanism for setting download mode\n");
}
@@ -1361,6 +1362,8 @@ EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
int qcom_scm_shm_bridge_enable(void)
{
+ int ret;
+
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_MP,
.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
@@ -1373,7 +1376,15 @@ int qcom_scm_shm_bridge_enable(void)
QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
return -EOPNOTSUPP;
- return qcom_scm_call(__scm->dev, &desc, &res) ?: res.result[0];
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ if (ret)
+ return ret;
+
+ if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
+ return -EOPNOTSUPP;
+
+ return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);
diff --git a/drivers/gpio/gpio-sloppy-logic-analyzer.c b/drivers/gpio/gpio-sloppy-logic-analyzer.c
index 07e0d7180579..59a8f3a5c4e4 100644
--- a/drivers/gpio/gpio-sloppy-logic-analyzer.c
+++ b/drivers/gpio/gpio-sloppy-logic-analyzer.c
@@ -234,7 +234,9 @@ static int gpio_la_poll_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- devm_mutex_init(dev, &priv->blob_lock);
+ ret = devm_mutex_init(dev, &priv->blob_lock);
+ if (ret)
+ return ret;
fops_buf_size_set(priv, GPIO_LA_DEFAULT_BUF_SIZE);
diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c
index 2b2dd7e92211..51d2475c05c5 100644
--- a/drivers/gpio/gpiolib-swnode.c
+++ b/drivers/gpio/gpiolib-swnode.c
@@ -64,7 +64,7 @@ struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode,
struct fwnode_reference_args args;
struct gpio_desc *desc;
char propname[32]; /* 32 is max size of property name */
- int ret;
+ int ret = 0;
swnode = to_software_node(fwnode);
if (!swnode)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index d5952ab7752c..2b02655abb56 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -4926,6 +4926,8 @@ static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
return NULL;
s->private = priv;
+ if (*pos > 0)
+ priv->newline = true;
priv->idx = srcu_read_lock(&gpio_devices_srcu);
list_for_each_entry_srcu(gdev, &gpio_devices, list,
@@ -4969,7 +4971,7 @@ static int gpiolib_seq_show(struct seq_file *s, void *v)
gc = srcu_dereference(gdev->chip, &gdev->srcu);
if (!gc) {
- seq_printf(s, "%s%s: (dangling chip)",
+ seq_printf(s, "%s%s: (dangling chip)\n",
priv->newline ? "\n" : "",
dev_name(&gdev->dev));
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index f85ace0384d2..1f5a296f5ed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -147,6 +147,7 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
struct acpi_buffer *params)
{
acpi_status status;
+ union acpi_object *obj;
union acpi_object atif_arg_elements[2];
struct acpi_object_list atif_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -169,16 +170,24 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
&buffer);
+ obj = (union acpi_object *)buffer.pointer;
- /* Fail only if calling the method fails and ATIF is supported */
+ /* Fail if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
acpi_format_exception(status));
- kfree(buffer.pointer);
+ kfree(obj);
return NULL;
}
- return buffer.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n",
+ obj->type);
+ kfree(obj);
+ return NULL;
+ }
+
+ return obj;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index a8763496aed3..9288f37a3cc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -51,6 +51,12 @@ MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+/*define for compression field for sdma7*/
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_offset 0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask 0x00000001
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift 16
+#define SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift)
+
static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG),
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG),
@@ -1724,7 +1730,8 @@ static void sdma_v7_0_emit_fill_buffer(struct amdgpu_ib *ib,
uint64_t dst_offset,
uint32_t byte_count)
{
- ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL);
+ ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL) |
+ SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(1);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6b5e2206e687..13421a58210d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8374,7 +8374,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
IP_VERSION(3, 5, 0) ||
acrtc_state->stream->link->psr_settings.psr_version <
- DC_PSR_VERSION_UNSUPPORTED) {
+ DC_PSR_VERSION_UNSUPPORTED ||
+ !(adev->flags & AMD_IS_APU)) {
timing = &acrtc_state->stream->timing;
/* at least 2 frames */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 069e0195e50a..eea317dcbe8c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -44,6 +44,7 @@
#include "dm_helpers.h"
#include "ddc_service_types.h"
+#include "clk_mgr.h"
static u32 edid_extract_panel_id(struct edid *edid)
{
@@ -1121,6 +1122,8 @@ bool dm_helpers_dp_handle_test_pattern_request(
struct pipe_ctx *pipe_ctx = NULL;
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_device *dev = aconnector->base.dev;
+ struct dc_state *dc_state = ctx->dc->current_state;
+ struct clk_mgr *clk_mgr = ctx->dc->clk_mgr;
int i;
for (i = 0; i < MAX_PIPES; i++) {
@@ -1221,6 +1224,16 @@ bool dm_helpers_dp_handle_test_pattern_request(
pipe_ctx->stream->test_pattern.type = test_pattern;
pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space;
+ /* Temp W/A for compliance test failure */
+ dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+ dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ?
+ clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz;
+ dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz;
+ ctx->dc->clk_mgr->funcs->update_clocks(
+ ctx->dc->clk_mgr,
+ dc_state,
+ false);
+
dc_link_dp_set_test_pattern(
(struct dc_link *) link,
test_pattern,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
index 11c904ae2958..c4c52173ef22 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
@@ -303,6 +303,7 @@ void build_unoptimized_policy_settings(enum dml_project_id project, struct dml_m
if (project == dml_project_dcn35 ||
project == dml_project_dcn351) {
policy->DCCProgrammingAssumesScanDirectionUnknownFinal = false;
+ policy->EnhancedPrefetchScheduleAccelerationFinal = 0;
policy->AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter_if_possible; /*new*/
policy->UseOnlyMaxPrefetchModes = 1;
}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 3cd52e7a9c77..95838c7ab054 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -841,6 +841,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
isPSRSUSupported = false;
else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
isPSRSUSupported = false;
+ else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x01)
+ isPSRSUSupported = false;
else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
isPSRSUSupported = true;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 9ad9cf7a9c98..80e60ea2d11e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1234,6 +1234,14 @@ static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
}
}
+static bool smu_is_workload_profile_available(struct smu_context *smu,
+ u32 profile)
+{
+ if (profile >= PP_SMC_POWER_PROFILE_COUNT)
+ return false;
+ return smu->workload_map && smu->workload_map[profile].valid_mapping;
+}
+
static int smu_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1265,7 +1273,8 @@ static int smu_sw_init(void *handle)
smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
- if (smu->is_apu)
+ if (smu->is_apu ||
+ !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
else
smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
index ee457a6f0813..c2fd0a4a13e5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
@@ -25,7 +25,7 @@
#define SMU14_DRIVER_IF_V14_0_H
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x18
+#define PPTABLE_VERSION 0x1B
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -145,7 +145,7 @@ typedef enum {
} FEATURE_BTC_e;
// Debug Overrides Bitmask
-#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001
+#define DEBUG_OVERRIDE_NOT_USE 0x00000001
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008
@@ -161,6 +161,7 @@ typedef enum {
#define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000
#define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000
#define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000
+#define DEBUG_OVERRIDE_DFLL_BTC_FCW_LOG 0x00010000
// VR Mapping Bit Defines
#define VR_MAPPING_VR_SELECT_MASK 0x01
@@ -391,6 +392,21 @@ typedef struct {
EccInfo_t EccInfo[24];
} EccInfoTable_t;
+#define EPCS_HIGH_POWER 600
+#define EPCS_NORMAL_POWER 450
+#define EPCS_LOW_POWER 300
+#define EPCS_SHORTED_POWER 150
+#define EPCS_NO_BOOTUP 0
+
+typedef enum{
+ EPCS_SHORTED_LIMIT,
+ EPCS_LOW_POWER_LIMIT,
+ EPCS_NORMAL_POWER_LIMIT,
+ EPCS_HIGH_POWER_LIMIT,
+ EPCS_NOT_CONFIGURED,
+ EPCS_STATUS_COUNT,
+} EPCS_STATUS_e;
+
//D3HOT sequences
typedef enum {
BACO_SEQUENCE,
@@ -662,7 +678,7 @@ typedef enum {
} PP_GRTAVFS_FW_SEP_FUSE_e;
#define PP_NUM_RTAVFS_PWL_ZONES 5
-
+#define PP_NUM_PSM_DIDT_PWL_ZONES 3
// VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3
// Slope Q1.7, Offset Q1.2
@@ -746,10 +762,10 @@ typedef struct {
uint16_t Padding;
//Frequency changes
- int16_t GfxclkFmin; // MHz
- int16_t GfxclkFmax; // MHz
- uint16_t UclkFmin; // MHz
- uint16_t UclkFmax; // MHz
+ int16_t GfxclkFoffset;
+ uint16_t Padding1;
+ uint16_t UclkFmin;
+ uint16_t UclkFmax;
uint16_t FclkFmin;
uint16_t FclkFmax;
@@ -770,19 +786,23 @@ typedef struct {
uint8_t MaxOpTemp;
uint8_t AdvancedOdModeEnabled;
- uint8_t Padding1[3];
+ uint8_t Padding2[3];
uint16_t GfxVoltageFullCtrlMode;
uint16_t SocVoltageFullCtrlMode;
uint16_t GfxclkFullCtrlMode;
uint16_t UclkFullCtrlMode;
uint16_t FclkFullCtrlMode;
- uint16_t Padding2;
+ uint16_t Padding3;
int16_t GfxEdc;
int16_t GfxPccLimitControl;
- uint32_t Spare[10];
+ uint16_t GfxclkFmaxVmax;
+ uint8_t GfxclkFmaxVmaxTemperature;
+ uint8_t Padding4[1];
+
+ uint32_t Spare[9];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
} OverDriveTable_t;
@@ -802,8 +822,8 @@ typedef struct {
uint16_t VddSocVmax;
//gfxclk
- int16_t GfxclkFmin; // MHz
- int16_t GfxclkFmax; // MHz
+ int16_t GfxclkFoffset;
+ uint16_t Padding;
//uclk
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
@@ -828,7 +848,7 @@ typedef struct {
uint8_t FanZeroRpmEnable;
//temperature
uint8_t MaxOpTemp;
- uint8_t Padding[2];
+ uint8_t Padding1[2];
//Full Ctrl
uint16_t GfxVoltageFullCtrlMode;
@@ -839,7 +859,7 @@ typedef struct {
//EDC
int16_t GfxEdc;
int16_t GfxPccLimitControl;
- int16_t Padding1;
+ int16_t Padding2;
uint32_t Spare[5];
} OverDriveLimits_t;
@@ -987,8 +1007,9 @@ typedef struct {
uint16_t BaseClockDc;
uint16_t GameClockDc;
uint16_t BoostClockDc;
-
- uint32_t Reserved[4];
+ uint16_t MaxReportedClock;
+ uint16_t Padding;
+ uint32_t Reserved[3];
} DriverReportedClocks_t;
typedef struct {
@@ -1132,7 +1153,7 @@ typedef struct {
uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz
uint16_t GfxclkAibFmax;
- uint16_t GfxclkFreqCap;
+ uint16_t GfxDpmPadding;
//GFX Idle Power Settings
uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in Mhz
@@ -1172,8 +1193,7 @@ typedef struct {
uint32_t DvoFmaxLowScaler; //Unitless float
// GFX DCS
- uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase
- uint16_t PaddingDcs;
+ uint32_t PaddingDcs;
uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase
uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch.
@@ -1205,8 +1225,7 @@ typedef struct {
uint16_t DalDcModeMaxUclkFreq;
uint8_t PaddingsMem[2];
//FCLK Section
- uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. Set to 0 use FW calculated value
- uint16_t PaddingFclk;
+ uint32_t PaddingFclk;
// Link DPM Settings
uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5
@@ -1215,12 +1234,19 @@ typedef struct {
// SECTION: VDD_GFX AVFS
uint8_t OverrideGfxAvfsFuses;
- uint8_t GfxAvfsPadding[3];
+ uint8_t GfxAvfsPadding[1];
+ uint16_t DroopGBStDev;
uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain
uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding
//uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
- uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
+
+ uint16_t PsmDidt_Vcross[PP_NUM_PSM_DIDT_PWL_ZONES-1];
+ uint32_t PsmDidt_StaticDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_StaticDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_DynDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_DynDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t spare_HwRtAvfsFuses[19];
uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
@@ -1246,11 +1272,7 @@ typedef struct {
uint32_t dGbV_dT_vmin;
uint32_t dGbV_dT_vmax;
- //Unused: PMFW-9370
- uint32_t V2F_vmin_range_low;
- uint32_t V2F_vmin_range_high;
- uint32_t V2F_vmax_range_low;
- uint32_t V2F_vmax_range_high;
+ uint32_t PaddingV2F[4];
AvfsDcBtcParams_t DcBtcGfxParams;
QuadraticInt_t SSCurve_GFX;
@@ -1327,18 +1349,18 @@ typedef struct {
uint16_t PsmDidtReleaseTimer;
uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog
// CAC EDC
- uint32_t Leakage_C0; // in IEEE float
- uint32_t Leakage_C1; // in IEEE float
- uint32_t Leakage_C2; // in IEEE float
- uint32_t Leakage_C3; // in IEEE float
- uint32_t Leakage_C4; // in IEEE float
- uint32_t Leakage_C5; // in IEEE float
- uint32_t GFX_CLK_SCALAR; // in IEEE float
- uint32_t GFX_CLK_INTERCEPT; // in IEEE float
- uint32_t GFX_CAC_M; // in IEEE float
- uint32_t GFX_CAC_B; // in IEEE float
- uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float
- uint32_t DynToTotalCacScalar; // in IEEE
+ uint32_t CacEdcCacLeakageC0;
+ uint32_t CacEdcCacLeakageC1;
+ uint32_t CacEdcCacLeakageC2;
+ uint32_t CacEdcCacLeakageC3;
+ uint32_t CacEdcCacLeakageC4;
+ uint32_t CacEdcCacLeakageC5;
+ uint32_t CacEdcGfxClkScalar;
+ uint32_t CacEdcGfxClkIntercept;
+ uint32_t CacEdcCac_m;
+ uint32_t CacEdcCac_b;
+ uint32_t CacEdcCurrLimitGuardband;
+ uint32_t CacEdcDynToTotalCacRatio;
// GFX EDC XVMIN
uint32_t XVmin_Gfx_EdcThreshScalar;
uint32_t XVmin_Gfx_EdcEnableFreq;
@@ -1467,7 +1489,7 @@ typedef struct {
uint8_t VddqOffEnabled;
uint8_t PaddingUmcFlags[2];
- uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
+ uint32_t Paddign1;
uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
uint8_t FuseWritePowerMuxPresent;
@@ -1530,7 +1552,7 @@ typedef struct {
int16_t FuzzyFan_ErrorSetDelta;
int16_t FuzzyFan_ErrorRateSetDelta;
int16_t FuzzyFan_PwmSetDelta;
- uint16_t FuzzyFan_Reserved;
+ uint16_t FanPadding2;
uint16_t FwCtfLimit[TEMP_COUNT];
@@ -1547,9 +1569,10 @@ typedef struct {
uint16_t FanSpare[1];
uint8_t FanIntakeSensorSupport;
uint8_t FanIntakePadding;
- uint32_t FanAmbientPerfBoostThreshold;
uint32_t FanSpare2[12];
+ uint32_t ODFeatureCtrlMask;
+
uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix
uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron
uint16_t TemperatureFwCtfLimit_Hynix;
@@ -1637,7 +1660,7 @@ typedef struct {
uint16_t AverageDclk0Frequency ;
uint16_t AverageVclk1Frequency ;
uint16_t AverageDclk1Frequency ;
- uint16_t PCIeBusy ;
+ uint16_t AveragePCIeBusy ;
uint16_t dGPU_W_MAX ;
uint16_t padding ;
@@ -1665,12 +1688,12 @@ typedef struct {
uint16_t AverageGfxActivity ;
uint16_t AverageUclkActivity ;
- uint16_t Vcn0ActivityPercentage ;
+ uint16_t AverageVcn0ActivityPercentage;
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
uint16_t AverageSocketPower;
- uint16_t MovingAverageTotalBoardPower;
+ uint16_t AverageTotalBoardPower;
uint16_t AvgTemperature[TEMP_COUNT];
uint16_t AvgTemperatureFanIntake;
@@ -1684,7 +1707,8 @@ typedef struct {
uint8_t ThrottlingPercentage[THROTTLER_COUNT];
- uint8_t padding1[3];
+ uint8_t VmaxThrottlingPercentage;
+ uint8_t padding1[2];
//metrics for D3hot entry/exit and driver ARM msgs
uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
@@ -1693,7 +1717,7 @@ typedef struct {
uint16_t ApuSTAPMSmartShiftLimit;
uint16_t ApuSTAPMLimit;
- uint16_t MovingAvgApuSocketPower;
+ uint16_t AvgApuSocketPower;
uint16_t AverageUclkActivity_MAX;
@@ -1823,6 +1847,17 @@ typedef struct {
#define TABLE_TRANSFER_FAILED 0xFF
#define TABLE_TRANSFER_PENDING 0xAB
+#define TABLE_PPT_FAILED 0x100
+#define TABLE_TDC_FAILED 0x200
+#define TABLE_TEMP_FAILED 0x400
+#define TABLE_FAN_TARGET_TEMP_FAILED 0x800
+#define TABLE_FAN_STOP_TEMP_FAILED 0x1000
+#define TABLE_FAN_START_TEMP_FAILED 0x2000
+#define TABLE_FAN_PWM_MIN_FAILED 0x4000
+#define TABLE_ACOUSTIC_TARGET_RPM_FAILED 0x8000
+#define TABLE_ACOUSTIC_LIMIT_RPM_FAILED 0x10000
+#define TABLE_MGPU_ACOUSTIC_TARGET_RPM_FAILED 0x20000
+
// Table types
#define TABLE_PPTABLE 0
#define TABLE_COMBO_PPTABLE 1
@@ -1849,5 +1884,6 @@ typedef struct {
#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+#define IH_INTERRUPT_CONTEXT_ID_DYNAMIC_TABLE 0xA
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index 46b456590a08..727d5b405435 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -28,7 +28,7 @@
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x26
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E
#define FEATURE_MASK(feature) (1ULL << feature)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 22737b11b1bf..1fe020f1f4db 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -242,7 +242,9 @@ static int vangogh_tables_init(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2));
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+ smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3));
+ smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4));
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index cb923e33fd6f..d53e162dcd8d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2485,7 +2485,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
- u32 workload_mask;
+ u32 workload_mask, selected_workload_mask;
smu->power_profile_mode = input[size];
@@ -2552,7 +2552,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
return -EINVAL;
- workload_mask = 1 << workload_type;
+ selected_workload_mask = workload_mask = 1 << workload_type;
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
@@ -2572,7 +2572,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
workload_mask,
NULL);
if (!ret)
- smu->workload_mask = workload_mask;
+ smu->workload_mask = selected_workload_mask;
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 5899d01fa73d..e83ea2bc7f9c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1077,12 +1077,9 @@ static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
switch (od_feature_bit) {
case PP_OD_FEATURE_GFXCLK_FMIN:
- od_min_setting = overdrive_lowerlimits->GfxclkFmin;
- od_max_setting = overdrive_upperlimits->GfxclkFmin;
- break;
case PP_OD_FEATURE_GFXCLK_FMAX:
- od_min_setting = overdrive_lowerlimits->GfxclkFmax;
- od_max_setting = overdrive_upperlimits->GfxclkFmax;
+ od_min_setting = overdrive_lowerlimits->GfxclkFoffset;
+ od_max_setting = overdrive_upperlimits->GfxclkFoffset;
break;
case PP_OD_FEATURE_UCLK_FMIN:
od_min_setting = overdrive_lowerlimits->UclkFmin;
@@ -1269,10 +1266,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_BIT))
break;
- size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
- size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
- od_table->OverDriveTable.GfxclkFmin,
- od_table->OverDriveTable.GfxclkFmax);
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+ const OverDriveLimits_t * const overdrive_lowerlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMin;
+
+ size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
+ size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n",
+ overdrive_lowerlimits->GfxclkFoffset,
+ overdrive_upperlimits->GfxclkFoffset);
break;
case SMU_OD_MCLK:
@@ -1414,7 +1417,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_FMAX,
NULL,
&max_value);
- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
min_value, max_value);
}
@@ -1796,7 +1799,7 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
-
+ uint32_t current_profile_mode = smu->power_profile_mode;
smu->power_profile_mode = input[size];
if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
@@ -1854,6 +1857,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
}
}
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
+ smu_v14_0_deep_sleep_control(smu, false);
+ else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
+ smu_v14_0_deep_sleep_control(smu, true);
+
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
@@ -2158,7 +2166,7 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
- gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
+ gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
@@ -2217,8 +2225,7 @@ static void smu_v14_0_2_dump_od_table(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
- dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
- od_table->OverDriveTable.GfxclkFmax);
+ dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od_table->OverDriveTable.GfxclkFoffset);
dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
od_table->OverDriveTable.UclkFmax);
}
@@ -2309,10 +2316,8 @@ static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
memcpy(user_od_table,
boot_od_table,
sizeof(OverDriveTableExternal_t));
- user_od_table->OverDriveTable.GfxclkFmin =
- user_od_table_bak.OverDriveTable.GfxclkFmin;
- user_od_table->OverDriveTable.GfxclkFmax =
- user_od_table_bak.OverDriveTable.GfxclkFmax;
+ user_od_table->OverDriveTable.GfxclkFoffset =
+ user_od_table_bak.OverDriveTable.GfxclkFoffset;
user_od_table->OverDriveTable.UclkFmin =
user_od_table_bak.OverDriveTable.UclkFmin;
user_od_table->OverDriveTable.UclkFmax =
@@ -2441,22 +2446,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
}
switch (input[i]) {
- case 0:
- smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMIN,
- &minimum,
- &maximum);
- if (input[i + 1] < minimum ||
- input[i + 1] > maximum) {
- dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n",
- input[i + 1], minimum, maximum);
- return -EINVAL;
- }
-
- od_table->OverDriveTable.GfxclkFmin = input[i + 1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
- break;
-
case 1:
smu_v14_0_2_get_od_setting_limits(smu,
PP_OD_FEATURE_GFXCLK_FMAX,
@@ -2469,7 +2458,7 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
return -EINVAL;
}
- od_table->OverDriveTable.GfxclkFmax = input[i + 1];
+ od_table->OverDriveTable.GfxclkFoffset = input[i + 1];
od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
break;
@@ -2480,13 +2469,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
}
}
- if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
- dev_err(adev->dev,
- "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
- (uint32_t)od_table->OverDriveTable.GfxclkFmin,
- (uint32_t)od_table->OverDriveTable.GfxclkFmax);
- return -EINVAL;
- }
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index b29980f95379..295e9d031e2d 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -58,9 +58,10 @@ int drm_aux_bridge_register(struct device *parent)
adev->id = ret;
adev->name = "aux_bridge";
adev->dev.parent = parent;
- adev->dev.of_node = of_node_get(parent->of_node);
adev->dev.release = drm_aux_bridge_release;
+ device_set_of_node_from_dev(&adev->dev, parent);
+
ret = auxiliary_device_init(adev);
if (ret) {
ida_free(&drm_aux_bridge_ida, adev->id);
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 290e2532fab1..f3afdab55c11 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -2391,6 +2391,7 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
if (tc->pre_emphasis[0] < 0 || tc->pre_emphasis[0] > 2 ||
tc->pre_emphasis[1] < 0 || tc->pre_emphasis[1] > 2) {
dev_err(dev, "Incorrect Pre-Emphasis setting, use either 0=0dB 1=3.5dB 2=6dB\n");
+ of_node_put(node);
return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index faa253b27664..14ac351fd76d 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -123,9 +123,8 @@ config DRM_I915_USERPTR
config DRM_I915_GVT_KVMGT
tristate "Enable KVM host support Intel GVT-g graphics virtualization"
depends on DRM_I915
- depends on X86
+ depends on KVM_X86
depends on 64BIT
- depends on KVM
depends on VFIO
select DRM_I915_GVT
select KVM_EXTERNAL_WRITE_TRACKING
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index 175b00e5a253..eb0e1233ad04 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -127,9 +127,8 @@ static void mtk_crtc_destroy(struct drm_crtc *crtc)
mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle);
-
if (mtk_crtc->cmdq_client.chan) {
+ cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle);
mbox_free_channel(mtk_crtc->cmdq_client.chan);
mtk_crtc->cmdq_client.chan = NULL;
}
@@ -913,6 +912,7 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
BIT(pipe),
mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes),
mtk_ddp_comp_supported_rotations(comp),
+ mtk_ddp_comp_get_blend_modes(comp),
mtk_ddp_comp_get_formats(comp),
mtk_ddp_comp_get_num_formats(comp), i);
if (ret)
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
index be66d94be361..edc6417639e6 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
@@ -363,6 +363,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = {
.layer_config = mtk_ovl_layer_config,
.bgclr_in_on = mtk_ovl_bgclr_in_on,
.bgclr_in_off = mtk_ovl_bgclr_in_off,
+ .get_blend_modes = mtk_ovl_get_blend_modes,
.get_formats = mtk_ovl_get_formats,
.get_num_formats = mtk_ovl_get_num_formats,
};
@@ -416,6 +417,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = {
.disconnect = mtk_ovl_adaptor_disconnect,
.add = mtk_ovl_adaptor_add_comp,
.remove = mtk_ovl_adaptor_remove_comp,
+ .get_blend_modes = mtk_ovl_adaptor_get_blend_modes,
.get_formats = mtk_ovl_adaptor_get_formats,
.get_num_formats = mtk_ovl_adaptor_get_num_formats,
.mode_valid = mtk_ovl_adaptor_mode_valid,
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
index ecf6dc283cd7..39720b27f4e9 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
@@ -80,6 +80,7 @@ struct mtk_ddp_comp_funcs {
void (*ctm_set)(struct device *dev,
struct drm_crtc_state *state);
struct device * (*dma_dev_get)(struct device *dev);
+ u32 (*get_blend_modes)(struct device *dev);
const u32 *(*get_formats)(struct device *dev);
size_t (*get_num_formats)(struct device *dev);
void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
@@ -267,6 +268,15 @@ static inline struct device *mtk_ddp_comp_dma_dev_get(struct mtk_ddp_comp *comp)
}
static inline
+u32 mtk_ddp_comp_get_blend_modes(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->get_blend_modes)
+ return comp->funcs->get_blend_modes(comp->dev);
+
+ return 0;
+}
+
+static inline
const u32 *mtk_ddp_comp_get_formats(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->get_formats)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
index 082ac18fe04a..04154db9085c 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -103,6 +103,7 @@ void mtk_ovl_register_vblank_cb(struct device *dev,
void mtk_ovl_unregister_vblank_cb(struct device *dev);
void mtk_ovl_enable_vblank(struct device *dev);
void mtk_ovl_disable_vblank(struct device *dev);
+u32 mtk_ovl_get_blend_modes(struct device *dev);
const u32 *mtk_ovl_get_formats(struct device *dev);
size_t mtk_ovl_get_num_formats(struct device *dev);
@@ -131,6 +132,7 @@ void mtk_ovl_adaptor_start(struct device *dev);
void mtk_ovl_adaptor_stop(struct device *dev);
unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev);
struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev);
+u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev);
const u32 *mtk_ovl_adaptor_get_formats(struct device *dev);
size_t mtk_ovl_adaptor_get_num_formats(struct device *dev);
enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 89b439dcf3a6..e0c0bb01f65a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -65,8 +65,8 @@
#define OVL_CON_CLRFMT_RGB (1 << 12)
#define OVL_CON_CLRFMT_ARGB8888 (2 << 12)
#define OVL_CON_CLRFMT_RGBA8888 (3 << 12)
-#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP)
-#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
+#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
+#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_UYVY (4 << 12)
#define OVL_CON_CLRFMT_YUYV (5 << 12)
#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
@@ -146,6 +146,7 @@ struct mtk_disp_ovl_data {
bool fmt_rgb565_is_0;
bool smi_id_en;
bool supports_afbc;
+ const u32 blend_modes;
const u32 *formats;
size_t num_formats;
bool supports_clrfmt_ext;
@@ -214,6 +215,13 @@ void mtk_ovl_disable_vblank(struct device *dev)
writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN);
}
+u32 mtk_ovl_get_blend_modes(struct device *dev)
+{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ return ovl->data->blend_modes;
+}
+
const u32 *mtk_ovl_get_formats(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
@@ -386,14 +394,27 @@ void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
DISP_REG_OVL_RDMA_CTRL(idx));
}
-static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt,
- unsigned int blend_mode)
+static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl,
+ struct mtk_plane_state *state)
{
- /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX"
- * is defined in mediatek HW data sheet.
- * The alphabet order in XXX is no relation to data
- * arrangement in memory.
+ unsigned int fmt = state->pending.format;
+ unsigned int blend_mode = DRM_MODE_BLEND_COVERAGE;
+
+ /*
+ * For the platforms where OVL_CON_CLRFMT_MAN is defined in the hardware data sheet
+ * and supports premultiplied color formats, such as OVL_CON_CLRFMT_PARGB8888.
+ *
+ * Check blend_modes in the driver data to see if premultiplied mode is supported.
+ * If not, use coverage mode instead to set it to the supported color formats.
+ *
+ * Current DRM assumption is that alpha is default premultiplied, so the bitmask of
+ * blend_modes must include BIT(DRM_MODE_BLEND_PREMULTI). Otherwise, mtk_plane_init()
+ * will get an error return from drm_plane_create_blend_mode_property() and
+ * state->base.pixel_blend_mode should not be used.
*/
+ if (ovl->data->blend_modes & BIT(DRM_MODE_BLEND_PREMULTI))
+ blend_mode = state->base.pixel_blend_mode;
+
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
@@ -471,20 +492,26 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
return;
}
- con = ovl_fmt_convert(ovl, fmt, blend_mode);
+ con = mtk_ovl_fmt_convert(ovl, state);
if (state->base.fb) {
- con |= OVL_CON_AEN;
con |= state->base.alpha & OVL_CON_ALPHA;
- }
- /* CONST_BLD must be enabled for XRGB formats although the alpha channel
- * can be ignored, or OVL will still read the value from memory.
- * For RGB888 related formats, whether CONST_BLD is enabled or not won't
- * affect the result. Therefore we use !has_alpha as the condition.
- */
- if ((state->base.fb && !state->base.fb->format->has_alpha) ||
- blend_mode == DRM_MODE_BLEND_PIXEL_NONE)
- ignore_pixel_alpha = OVL_CONST_BLEND;
+ /*
+ * For blend_modes supported SoCs, always enable alpha blending.
+ * For blend_modes unsupported SoCs, enable alpha blending when has_alpha is set.
+ */
+ if (blend_mode || state->base.fb->format->has_alpha)
+ con |= OVL_CON_AEN;
+
+ /*
+ * Although the alpha channel can be ignored, CONST_BLD must be enabled
+ * for XRGB format, otherwise OVL will still read the value from memory.
+ * For RGB888 related formats, whether CONST_BLD is enabled or not won't
+ * affect the result. Therefore we use !has_alpha as the condition.
+ */
+ if (blend_mode == DRM_MODE_BLEND_PIXEL_NONE || !state->base.fb->format->has_alpha)
+ ignore_pixel_alpha = OVL_CONST_BLEND;
+ }
if (pending->rotation & DRM_MODE_REFLECT_Y) {
con |= OVL_CON_VIRT_FLIP;
@@ -663,6 +690,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = {
.layer_nr = 4,
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
+ .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE),
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
@@ -673,6 +703,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = {
.layer_nr = 2,
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
+ .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE),
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
@@ -684,6 +717,9 @@ static const struct mtk_disp_ovl_data mt8195_ovl_driver_data = {
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
.supports_afbc = true,
+ .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE),
.formats = mt8195_formats,
.num_formats = ARRAY_SIZE(mt8195_formats),
.supports_clrfmt_ext = true,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index c6768210b08b..bf2546c4681a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -400,6 +400,13 @@ void mtk_ovl_adaptor_disable_vblank(struct device *dev)
mtk_ethdr_disable_vblank(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
}
+u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev)
+{
+ struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
+
+ return mtk_ethdr_get_blend_modes(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
+}
+
const u32 *mtk_ovl_adaptor_get_formats(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index d8796a904eca..f2bee617f063 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -145,6 +145,89 @@ struct mtk_dp_data {
u16 audio_m_div2_bit;
};
+static const struct mtk_dp_efuse_fmt mt8188_dp_efuse_fmt[MTK_DP_CAL_MAX] = {
+ [MTK_DP_CAL_GLB_BIAS_TRIM] = {
+ .idx = 0,
+ .shift = 10,
+ .mask = 0x1f,
+ .min_val = 1,
+ .max_val = 0x1e,
+ .default_val = 0xf,
+ },
+ [MTK_DP_CAL_CLKTX_IMPSE] = {
+ .idx = 0,
+ .shift = 15,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = {
+ .idx = 1,
+ .shift = 0,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = {
+ .idx = 1,
+ .shift = 8,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = {
+ .idx = 1,
+ .shift = 16,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = {
+ .idx = 1,
+ .shift = 24,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = {
+ .idx = 1,
+ .shift = 4,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = {
+ .idx = 1,
+ .shift = 12,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = {
+ .idx = 1,
+ .shift = 20,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = {
+ .idx = 1,
+ .shift = 28,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+};
+
static const struct mtk_dp_efuse_fmt mt8195_edp_efuse_fmt[MTK_DP_CAL_MAX] = {
[MTK_DP_CAL_GLB_BIAS_TRIM] = {
.idx = 3,
@@ -2771,7 +2854,7 @@ static SIMPLE_DEV_PM_OPS(mtk_dp_pm_ops, mtk_dp_suspend, mtk_dp_resume);
static const struct mtk_dp_data mt8188_dp_data = {
.bridge_type = DRM_MODE_CONNECTOR_DisplayPort,
.smc_cmd = MTK_DP_SIP_ATF_VIDEO_UNMUTE,
- .efuse_fmt = mt8195_dp_efuse_fmt,
+ .efuse_fmt = mt8188_dp_efuse_fmt,
.audio_supported = true,
.audio_pkt_in_hblank_area = true,
.audio_m_div2_bit = MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2,
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c
index d1d9cf8b10e1..0f22e7d337cb 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.c
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c
@@ -145,6 +145,13 @@ static irqreturn_t mtk_ethdr_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+u32 mtk_ethdr_get_blend_modes(struct device *dev)
+{
+ return BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE);
+}
+
void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt)
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.h b/drivers/gpu/drm/mediatek/mtk_ethdr.h
index 81af9edea3f7..a72aeee46829 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.h
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.h
@@ -13,6 +13,7 @@ void mtk_ethdr_clk_disable(struct device *dev);
void mtk_ethdr_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+u32 mtk_ethdr_get_blend_modes(struct device *dev);
void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt);
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 7d2cb4e0fafa..8a48b3b0a956 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -320,8 +320,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
- unsigned int supported_rotations, const u32 *formats,
- size_t num_formats, unsigned int plane_idx)
+ unsigned int supported_rotations, const u32 blend_modes,
+ const u32 *formats, size_t num_formats, unsigned int plane_idx)
{
int err;
@@ -366,12 +366,11 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (err)
DRM_ERROR("failed to create property: alpha\n");
- err = drm_plane_create_blend_mode_property(plane,
- BIT(DRM_MODE_BLEND_PREMULTI) |
- BIT(DRM_MODE_BLEND_COVERAGE) |
- BIT(DRM_MODE_BLEND_PIXEL_NONE));
- if (err)
- DRM_ERROR("failed to create property: blend_mode\n");
+ if (blend_modes) {
+ err = drm_plane_create_blend_mode_property(plane, blend_modes);
+ if (err)
+ DRM_ERROR("failed to create property: blend_mode\n");
+ }
drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h
index 5b177eac67b7..3b13b89989c7 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_plane.h
@@ -48,6 +48,6 @@ to_mtk_plane_state(struct drm_plane_state *state)
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
- unsigned int supported_rotations, const u32 *formats,
- size_t num_formats, unsigned int plane_idx);
+ unsigned int supported_rotations, const u32 blend_modes,
+ const u32 *formats, size_t num_formats, unsigned int plane_idx);
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index ef232c0c2049..4e2d3a02ea06 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -487,6 +487,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
struct panthor_fw_binary_iter *iter,
u32 ehdr)
{
+ ssize_t vm_pgsz = panthor_vm_page_size(ptdev->fw->vm);
struct panthor_fw_binary_section_entry_hdr hdr;
struct panthor_fw_section *section;
u32 section_size;
@@ -515,8 +516,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
return -EINVAL;
}
- if ((hdr.va.start & ~PAGE_MASK) != 0 ||
- (hdr.va.end & ~PAGE_MASK) != 0) {
+ if (!IS_ALIGNED(hdr.va.start, vm_pgsz) || !IS_ALIGNED(hdr.va.end, vm_pgsz)) {
drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n",
hdr.va.start, hdr.va.end);
return -EINVAL;
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 38f560864879..be97d56bc011 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -44,8 +44,7 @@ void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
goto out_free_bo;
- ret = panthor_vm_unmap_range(vm, bo->va_node.start,
- panthor_kernel_bo_size(bo));
+ ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size);
if (ret)
goto out_free_bo;
@@ -95,10 +94,16 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
}
bo = to_panthor_bo(&obj->base);
- size = obj->base.size;
kbo->obj = &obj->base;
bo->flags = bo_flags;
+ /* The system and GPU MMU page size might differ, which becomes a
+ * problem for FW sections that need to be mapped at explicit address
+ * since our PAGE_SIZE alignment might cover a VA range that's
+ * expected to be used for another section.
+ * Make sure we never map more than we need.
+ */
+ size = ALIGN(size, panthor_vm_page_size(vm));
ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
if (ret)
goto err_put_obj;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 3cd2bce59edc..5d5e25b1be95 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -826,6 +826,14 @@ void panthor_vm_idle(struct panthor_vm *vm)
mutex_unlock(&ptdev->mmu->as.slots_lock);
}
+u32 panthor_vm_page_size(struct panthor_vm *vm)
+{
+ const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
+ u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;
+
+ return 1u << pg_shift;
+}
+
static void panthor_vm_stop(struct panthor_vm *vm)
{
drm_sched_stop(&vm->sched, NULL);
@@ -1025,12 +1033,13 @@ int
panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
struct drm_mm_node *va_node)
{
+ ssize_t vm_pgsz = panthor_vm_page_size(vm);
int ret;
- if (!size || (size & ~PAGE_MASK))
+ if (!size || !IS_ALIGNED(size, vm_pgsz))
return -EINVAL;
- if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK))
+ if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
return -EINVAL;
mutex_lock(&vm->mm_lock);
@@ -2366,11 +2375,12 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
const struct drm_panthor_vm_bind_op *op,
struct panthor_vm_op_ctx *op_ctx)
{
+ ssize_t vm_pgsz = panthor_vm_page_size(vm);
struct drm_gem_object *gem;
int ret;
/* Aligned on page size. */
- if ((op->va | op->size) & ~PAGE_MASK)
+ if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
return -EINVAL;
switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
index 6788771071e3..8d21e83d8aba 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -30,6 +30,7 @@ panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset);
int panthor_vm_active(struct panthor_vm *vm);
void panthor_vm_idle(struct panthor_vm *vm);
+u32 panthor_vm_page_size(struct panthor_vm *vm);
int panthor_vm_as(struct panthor_vm *vm);
int panthor_vm_flush_all(struct panthor_vm *vm);
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index aee362abb710..9929e22f4d8d 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -589,10 +589,11 @@ struct panthor_group {
* @timedout: True when a timeout occurred on any of the queues owned by
* this group.
*
- * Timeouts can be reported by drm_sched or by the FW. In any case, any
- * timeout situation is unrecoverable, and the group becomes useless.
- * We simply wait for all references to be dropped so we can release the
- * group object.
+ * Timeouts can be reported by drm_sched or by the FW. If a reset is required,
+ * and the group can't be suspended, this also leads to a timeout. In any case,
+ * any timeout situation is unrecoverable, and the group becomes useless. We
+ * simply wait for all references to be dropped so we can release the group
+ * object.
*/
bool timedout;
@@ -2640,6 +2641,12 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
csgs_upd_ctx_init(&upd_ctx);
while (slot_mask) {
u32 csg_id = ffs(slot_mask) - 1;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+
+ /* We consider group suspension failures as fatal and flag the
+ * group as unusable by setting timedout=true.
+ */
+ csg_slot->group->timedout = true;
csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
CSG_STATE_TERMINATE,
@@ -3409,6 +3416,11 @@ panthor_job_create(struct panthor_file *pfile,
goto err_put_job;
}
+ if (!group_can_run(job->group)) {
+ ret = -EINVAL;
+ goto err_put_job;
+ }
+
if (job->queue_idx >= job->group->queue_count ||
!job->group->queues[job->queue_idx]) {
ret = -EINVAL;
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index eaef20f41786..e97c6c60bc96 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1276,10 +1276,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->own_submit_wq = false;
} else {
#ifdef CONFIG_LOCKDEP
- sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
+ sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name,
+ WQ_MEM_RECLAIM,
&drm_sched_lockdep_map);
#else
- sched->submit_wq = alloc_ordered_workqueue(name, 0);
+ sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
#endif
if (!sched->submit_wq)
return -ENOMEM;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index c9eb329665ec..34d22ba210b0 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1153,8 +1153,8 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (host1x_drm_wants_iommu(dev) && device_iommu_mapped(dma_dev)) {
tegra->domain = iommu_paging_domain_alloc(dma_dev);
- if (!tegra->domain) {
- err = -ENOMEM;
+ if (IS_ERR(tegra->domain)) {
+ err = PTR_ERR(tegra->domain);
goto free;
}
diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c
index 15e36a8db685..6bba97d0be88 100644
--- a/drivers/gpu/drm/tests/drm_connector_test.c
+++ b/drivers/gpu/drm/tests/drm_connector_test.c
@@ -996,7 +996,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1017,7 +1017,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1038,7 +1038,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc_vic_1(struct kunit *t
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
rate = drm_hdmi_compute_mode_clock(mode, 10, HDMI_COLORSPACE_RGB);
@@ -1056,7 +1056,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1077,7 +1077,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc_vic_1(struct kunit *t
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
rate = drm_hdmi_compute_mode_clock(mode, 12, HDMI_COLORSPACE_RGB);
@@ -1095,7 +1095,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_double(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 6);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 6);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_TRUE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1118,7 +1118,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_valid(struct kunit
unsigned long long rate;
unsigned int vic = *(unsigned int *)test->param_value;
- mode = drm_display_mode_from_cea_vic(drm, vic);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1155,7 +1155,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_10_bpc(struct kuni
drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0];
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, vic);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1180,7 +1180,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_12_bpc(struct kuni
drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0];
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, vic);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1203,7 +1203,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_8_bpc(struct kunit
struct drm_device *drm = &priv->drm;
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1225,7 +1225,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_10_bpc(struct kuni
struct drm_device *drm = &priv->drm;
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1247,7 +1247,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_12_bpc(struct kuni
struct drm_device *drm = &priv->drm;
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index 34ee95d41f29..294773342e71 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -441,7 +441,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
drm = &priv->drm;
@@ -555,7 +555,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
drm = &priv->drm;
@@ -671,7 +671,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
drm = &priv->drm;
@@ -1263,7 +1263,7 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
/*
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index aa62719dab0e..04a6b8cc62ac 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -3,6 +3,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_managed.h>
@@ -311,6 +312,47 @@ drm_kunit_helper_create_crtc(struct kunit *test,
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc);
+static void kunit_action_drm_mode_destroy(void *ptr)
+{
+ struct drm_display_mode *mode = ptr;
+
+ drm_mode_destroy(NULL, mode);
+}
+
+/**
+ * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
+ * @test: The test context object
+ * @dev: DRM device
+ * @video_code: CEA VIC of the mode
+ *
+ * Creates a new mode matching the specified CEA VIC for a KUnit test.
+ *
+ * Resources will be cleaned up automatically.
+ *
+ * Returns: A new drm_display_mode on success or NULL on failure
+ */
+struct drm_display_mode *
+drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev,
+ u8 video_code)
+{
+ struct drm_display_mode *mode;
+ int ret;
+
+ mode = drm_display_mode_from_cea_vic(dev, video_code);
+ if (!mode)
+ return NULL;
+
+ ret = kunit_add_action_or_reset(test,
+ kunit_action_drm_mode_destroy,
+ mode);
+ if (ret)
+ return NULL;
+
+ return mode;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_display_mode_from_cea_vic);
+
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_DESCRIPTION("KUnit test suite helper functions");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 75736faf2a80..c6e0c8d77a70 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -309,18 +309,7 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe)
}
/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
-void xe_display_pm_runtime_suspend(struct xe_device *xe)
-{
- if (!xe->info.probe_display)
- return;
-
- if (xe->d3cold.allowed)
- xe_display_pm_suspend(xe, true);
-
- intel_hpd_poll_enable(xe);
-}
-
-void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
+static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
{
struct intel_display *display = &xe->display;
bool s2idle = suspend_to_idle();
@@ -353,28 +342,38 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
intel_dmc_suspend(xe);
+
+ if (runtime && has_display(xe))
+ intel_hpd_poll_enable(xe);
}
-void xe_display_pm_suspend_late(struct xe_device *xe)
+void xe_display_pm_suspend(struct xe_device *xe)
+{
+ __xe_display_pm_suspend(xe, false);
+}
+
+void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
- bool s2idle = suspend_to_idle();
if (!xe->info.probe_display)
return;
- intel_power_domains_suspend(xe, s2idle);
+ if (xe->d3cold.allowed) {
+ __xe_display_pm_suspend(xe, true);
+ return;
+ }
- intel_display_power_suspend_late(xe);
+ intel_hpd_poll_enable(xe);
}
-void xe_display_pm_runtime_resume(struct xe_device *xe)
+void xe_display_pm_suspend_late(struct xe_device *xe)
{
+ bool s2idle = suspend_to_idle();
if (!xe->info.probe_display)
return;
- intel_hpd_poll_disable(xe);
+ intel_power_domains_suspend(xe, s2idle);
- if (xe->d3cold.allowed)
- xe_display_pm_resume(xe, true);
+ intel_display_power_suspend_late(xe);
}
void xe_display_pm_resume_early(struct xe_device *xe)
@@ -387,7 +386,7 @@ void xe_display_pm_resume_early(struct xe_device *xe)
intel_power_domains_resume(xe);
}
-void xe_display_pm_resume(struct xe_device *xe, bool runtime)
+static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
{
struct intel_display *display = &xe->display;
@@ -411,9 +410,11 @@ void xe_display_pm_resume(struct xe_device *xe, bool runtime)
intel_display_driver_resume(xe);
drm_kms_helper_poll_enable(&xe->drm);
intel_display_driver_enable_user_access(xe);
- intel_hpd_poll_disable(xe);
}
+ if (has_display(xe))
+ intel_hpd_poll_disable(xe);
+
intel_opregion_resume(display);
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
@@ -421,6 +422,26 @@ void xe_display_pm_resume(struct xe_device *xe, bool runtime)
intel_power_domains_enable(xe);
}
+void xe_display_pm_resume(struct xe_device *xe)
+{
+ __xe_display_pm_resume(xe, false);
+}
+
+void xe_display_pm_runtime_resume(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ if (xe->d3cold.allowed) {
+ __xe_display_pm_resume(xe, true);
+ return;
+ }
+
+ intel_hpd_init(xe);
+ intel_hpd_poll_disable(xe);
+}
+
static void display_device_remove(struct drm_device *dev, void *arg)
{
struct xe_device *xe = arg;
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 53d727fd792b..bed55fd26f30 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -34,10 +34,10 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir);
void xe_display_irq_reset(struct xe_device *xe);
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt);
-void xe_display_pm_suspend(struct xe_device *xe, bool runtime);
+void xe_display_pm_suspend(struct xe_device *xe);
void xe_display_pm_suspend_late(struct xe_device *xe);
void xe_display_pm_resume_early(struct xe_device *xe);
-void xe_display_pm_resume(struct xe_device *xe, bool runtime);
+void xe_display_pm_resume(struct xe_device *xe);
void xe_display_pm_runtime_suspend(struct xe_device *xe);
void xe_display_pm_runtime_resume(struct xe_device *xe);
@@ -65,10 +65,10 @@ static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
static inline void xe_display_irq_reset(struct xe_device *xe) {}
static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {}
-static inline void xe_display_pm_suspend(struct xe_device *xe, bool runtime) {}
+static inline void xe_display_pm_suspend(struct xe_device *xe) {}
static inline void xe_display_pm_suspend_late(struct xe_device *xe) {}
static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
-static inline void xe_display_pm_resume(struct xe_device *xe, bool runtime) {}
+static inline void xe_display_pm_resume(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {}
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 0a9ffc19e92f..10fd4601b9f2 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -890,7 +890,7 @@ void xe_device_l2_flush(struct xe_device *xe)
spin_lock(&gt->global_invl_lock);
xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
- if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
+ if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
xe_gt_err_once(gt, "Global invalidation timeout\n");
spin_unlock(&gt->global_invl_lock);
diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
index b263fff15273..7d9fc489dcb8 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.c
+++ b/drivers/gpu/drm/xe/xe_force_wake.c
@@ -115,9 +115,15 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain,
XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
&value, true);
if (ret)
- xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
- domain->id, str_wake_sleep(wake), ERR_PTR(ret),
- domain->reg_ack.addr, value);
+ xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
+ domain->id, str_wake_sleep(wake), ERR_PTR(ret),
+ domain->reg_ack.addr, value);
+ if (value == ~0) {
+ xe_gt_err(gt,
+ "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
+ domain->id, str_wake_sleep(wake));
+ ret = -EIO;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 2895f154654c..ff19eca5d358 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -397,6 +397,16 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
{
+ struct xe_device *xe = tile_to_xe(ggtt->tile);
+
+ /*
+ * XXX: Barrier for GGTT pages. It is unclear exactly why this is
+ * required, but without it LNL has issues with the GuC reading the
+ * scratch page instead of the correct GGTT page. This is not a
+ * particularly hot code path, so blindly do an MMIO read here, which
+ * results in the GuC reading the correct GGTT page.
+ */
+ xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG);
+
/* Each GT in a tile has its own TLB to cache GGTT lookups */
ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index d16eb9ab49fb..17986bfd8818 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -898,6 +898,24 @@ retry_same_fence:
ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
/*
+ * Occasionally it is seen that the G2H worker starts running after a delay of more than
+ * a second even after being queued and activated by the Linux workqueue subsystem. This
+ * leads to a G2H timeout error. The root cause of the issue lies with the scheduling
+ * latency of the Lunarlake Hybrid CPU. The issue disappears if we disable the Lunarlake
+ * Atom cores from the BIOS, but this is beyond the xe KMD.
+ *
+ * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
+ */
+ if (!ret) {
+ flush_work(&ct->g2h_worker);
+ if (g2h_fence.done) {
+ xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
+ g2h_fence.seqno, action[0]);
+ ret = 1;
+ }
+ }
+
+ /*
* Ensure we serialize with completion side to prevent UAF with fence going out of scope on
* the stack, since we have no clue if it will fire after the timeout before we can erase
* from the xa. Also we have some dependent loads and stores below for which we need the
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 8a9254e5af6e..f903b0772722 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -916,12 +916,22 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
{
struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q));
- u32 ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
- u32 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
+ u32 ctx_timestamp, ctx_job_timestamp;
u32 timeout_ms = q->sched_props.job_timeout_ms;
u32 diff;
u64 running_time_ms;
+ if (!xe_sched_job_started(job)) {
+ xe_gt_warn(gt, "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, not started",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id);
+
+ return xe_sched_invalidate_job(job, 2);
+ }
+
+ ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
+ ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
+
/*
* Counter wraps at ~223s at the usual 19.2MHz, be paranoid catch
* possible overflows with a high timeout.
@@ -1049,10 +1059,6 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
exec_queue_killed_or_banned_or_wedged(q) ||
exec_queue_destroyed(q);
- /* Job hasn't started, can't be timed out */
- if (!skip_timeout_check && !xe_sched_job_started(job))
- goto rearm;
-
/*
* XXX: Sampling timeout doesn't work in wedged mode as we have to
* modify scheduling state to read timestamp. We could read the
@@ -1726,8 +1732,13 @@ void xe_guc_submit_stop(struct xe_guc *guc)
mutex_lock(&guc->submission_state.lock);
- xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
guc_exec_queue_stop(guc, q);
+ }
mutex_unlock(&guc->submission_state.lock);
@@ -1765,8 +1776,13 @@ int xe_guc_submit_start(struct xe_guc *guc)
mutex_lock(&guc->submission_state.lock);
atomic_dec(&guc->submission_state.stopped);
- xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to start parallel queues */
+ if (q->guc->id != index)
+ continue;
+
guc_exec_queue_start(q);
+ }
mutex_unlock(&guc->submission_state.lock);
wake_up_all(&guc->ct.wq);
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 7cf2160fe040..33eb039053e4 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -123,7 +123,7 @@ int xe_pm_suspend(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
- xe_display_pm_suspend(xe, false);
+ xe_display_pm_suspend(xe);
/* FIXME: Super racey... */
err = xe_bo_evict_all(xe);
@@ -133,7 +133,7 @@ int xe_pm_suspend(struct xe_device *xe)
for_each_gt(gt, xe, id) {
err = xe_gt_suspend(gt);
if (err) {
- xe_display_pm_resume(xe, false);
+ xe_display_pm_resume(xe);
goto err;
}
}
@@ -187,7 +187,7 @@ int xe_pm_resume(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_resume(gt);
- xe_display_pm_resume(xe, false);
+ xe_display_pm_resume(xe);
err = xe_bo_restore_user(xe);
if (err)
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index c6cf227ead40..2e72c06fd40d 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -54,8 +54,9 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
{
struct xe_user_fence *ufence;
u64 __user *ptr = u64_to_user_ptr(addr);
+ u64 __maybe_unused prefetch_val;
- if (!access_ok(ptr, sizeof(*ptr)))
+ if (get_user(prefetch_val, ptr))
return ERR_PTR(-EFAULT);
ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index e936019d21fe..785743036647 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -31,6 +31,7 @@
* [1] https://gitlab.freedesktop.org/libevdev/hid-tools
*/
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
@@ -83,6 +84,13 @@ enum latency_mode {
HID_LATENCY_HIGH = 1,
};
+enum report_mode {
+ TOUCHPAD_REPORT_NONE = 0,
+ TOUCHPAD_REPORT_BUTTONS = BIT(0),
+ TOUCHPAD_REPORT_CONTACTS = BIT(1),
+ TOUCHPAD_REPORT_ALL = TOUCHPAD_REPORT_BUTTONS | TOUCHPAD_REPORT_CONTACTS,
+};
+
#define MT_IO_FLAGS_RUNNING 0
#define MT_IO_FLAGS_ACTIVE_SLOTS 1
#define MT_IO_FLAGS_PENDING_SLOTS 2
@@ -1493,8 +1501,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage,
enum latency_mode latency,
- bool surface_switch,
- bool button_switch,
+ enum report_mode report_mode,
bool *inputmode_found)
{
struct mt_device *td = hid_get_drvdata(hdev);
@@ -1549,11 +1556,11 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
return true;
case HID_DG_SURFACESWITCH:
- field->value[index] = surface_switch;
+ field->value[index] = !!(report_mode & TOUCHPAD_REPORT_CONTACTS);
return true;
case HID_DG_BUTTONSWITCH:
- field->value[index] = button_switch;
+ field->value[index] = !!(report_mode & TOUCHPAD_REPORT_BUTTONS);
return true;
}
@@ -1561,7 +1568,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
}
static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
- bool surface_switch, bool button_switch)
+ enum report_mode report_mode)
{
struct hid_report_enum *rep_enum;
struct hid_report *rep;
@@ -1586,8 +1593,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
rep->field[i],
usage,
latency,
- surface_switch,
- button_switch,
+ report_mode,
&inputmode_found))
update_report = true;
}
@@ -1830,7 +1836,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev_warn(&hdev->dev, "Cannot allocate sysfs group for %s\n",
hdev->name);
- mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true);
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL);
return 0;
}
@@ -1842,9 +1848,9 @@ static int mt_suspend(struct hid_device *hdev, pm_message_t state)
/* High latency is desirable for power savings during S3/S0ix */
if ((td->mtclass.quirks & MT_QUIRK_DISABLE_WAKEUP) ||
!hid_hw_may_wakeup(hdev))
- mt_set_modes(hdev, HID_LATENCY_HIGH, false, false);
+ mt_set_modes(hdev, HID_LATENCY_HIGH, TOUCHPAD_REPORT_NONE);
else
- mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+ mt_set_modes(hdev, HID_LATENCY_HIGH, TOUCHPAD_REPORT_ALL);
return 0;
}
@@ -1852,7 +1858,7 @@ static int mt_suspend(struct hid_device *hdev, pm_message_t state)
static int mt_reset_resume(struct hid_device *hdev)
{
mt_release_contacts(hdev);
- mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true);
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL);
return 0;
}
@@ -1864,7 +1870,7 @@ static int mt_resume(struct hid_device *hdev)
hid_hw_idle(hdev, 0, 0, HID_REQ_SET_IDLE);
- mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true);
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL);
return 0;
}
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index a5d91933f505..b79c48d46ccc 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -637,7 +637,7 @@ static int ad7124_write_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_SAMP_FREQ:
- if (val2 != 0) {
+ if (val2 != 0 || val == 0) {
ret = -EINVAL;
break;
}
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
index e8bddfb0d07d..fb728570debe 100644
--- a/drivers/iio/adc/ad7380.c
+++ b/drivers/iio/adc/ad7380.c
@@ -75,6 +75,7 @@
#define T_CONVERT_NS 190 /* conversion time */
#define T_CONVERT_0_NS 10 /* 1st conversion start time (oversampling) */
#define T_CONVERT_X_NS 500 /* xth conversion start time (oversampling) */
+#define T_POWERUP_US 5000 /* Power up */
struct ad7380_timing_specs {
const unsigned int t_csh_ns; /* CS minimum high time */
@@ -86,6 +87,9 @@ struct ad7380_chip_info {
unsigned int num_channels;
unsigned int num_simult_channels;
bool has_mux;
+ const char * const *supplies;
+ unsigned int num_supplies;
+ bool external_ref_only;
const char * const *vcm_supplies;
unsigned int num_vcm_supplies;
const unsigned long *available_scan_masks;
@@ -243,6 +247,10 @@ DEFINE_AD7380_8_CHANNEL(ad7386_4_channels, 16, 0, u);
DEFINE_AD7380_8_CHANNEL(ad7387_4_channels, 14, 0, u);
DEFINE_AD7380_8_CHANNEL(ad7388_4_channels, 12, 0, u);
+static const char * const ad7380_supplies[] = {
+ "vcc", "vlogic",
+};
+
static const char * const ad7380_2_channel_vcm_supplies[] = {
"aina", "ainb",
};
@@ -338,6 +346,8 @@ static const struct ad7380_chip_info ad7380_chip_info = {
.channels = ad7380_channels,
.num_channels = ARRAY_SIZE(ad7380_channels),
.num_simult_channels = 2,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
};
@@ -347,6 +357,8 @@ static const struct ad7380_chip_info ad7381_chip_info = {
.channels = ad7381_channels,
.num_channels = ARRAY_SIZE(ad7381_channels),
.num_simult_channels = 2,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
};
@@ -356,6 +368,8 @@ static const struct ad7380_chip_info ad7383_chip_info = {
.channels = ad7383_channels,
.num_channels = ARRAY_SIZE(ad7383_channels),
.num_simult_channels = 2,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
.vcm_supplies = ad7380_2_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
@@ -367,6 +381,8 @@ static const struct ad7380_chip_info ad7384_chip_info = {
.channels = ad7384_channels,
.num_channels = ARRAY_SIZE(ad7384_channels),
.num_simult_channels = 2,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
.vcm_supplies = ad7380_2_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
@@ -378,6 +394,8 @@ static const struct ad7380_chip_info ad7386_chip_info = {
.channels = ad7386_channels,
.num_channels = ARRAY_SIZE(ad7386_channels),
.num_simult_channels = 2,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
@@ -388,6 +406,8 @@ static const struct ad7380_chip_info ad7387_chip_info = {
.channels = ad7387_channels,
.num_channels = ARRAY_SIZE(ad7387_channels),
.num_simult_channels = 2,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
@@ -398,6 +418,8 @@ static const struct ad7380_chip_info ad7388_chip_info = {
.channels = ad7388_channels,
.num_channels = ARRAY_SIZE(ad7388_channels),
.num_simult_channels = 2,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
@@ -408,6 +430,9 @@ static const struct ad7380_chip_info ad7380_4_chip_info = {
.channels = ad7380_4_channels,
.num_channels = ARRAY_SIZE(ad7380_4_channels),
.num_simult_channels = 4,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
+ .external_ref_only = true,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
};
@@ -417,6 +442,8 @@ static const struct ad7380_chip_info ad7381_4_chip_info = {
.channels = ad7381_4_channels,
.num_channels = ARRAY_SIZE(ad7381_4_channels),
.num_simult_channels = 4,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
};
@@ -426,6 +453,8 @@ static const struct ad7380_chip_info ad7383_4_chip_info = {
.channels = ad7383_4_channels,
.num_channels = ARRAY_SIZE(ad7383_4_channels),
.num_simult_channels = 4,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
.vcm_supplies = ad7380_4_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
@@ -437,6 +466,8 @@ static const struct ad7380_chip_info ad7384_4_chip_info = {
.channels = ad7384_4_channels,
.num_channels = ARRAY_SIZE(ad7384_4_channels),
.num_simult_channels = 4,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
.vcm_supplies = ad7380_4_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
@@ -448,6 +479,8 @@ static const struct ad7380_chip_info ad7386_4_chip_info = {
.channels = ad7386_4_channels,
.num_channels = ARRAY_SIZE(ad7386_4_channels),
.num_simult_channels = 4,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
@@ -458,6 +491,8 @@ static const struct ad7380_chip_info ad7387_4_chip_info = {
.channels = ad7387_4_channels,
.num_channels = ARRAY_SIZE(ad7387_4_channels),
.num_simult_channels = 4,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
@@ -468,6 +503,8 @@ static const struct ad7380_chip_info ad7388_4_chip_info = {
.channels = ad7388_4_channels,
.num_channels = ARRAY_SIZE(ad7388_4_channels),
.num_simult_channels = 4,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
@@ -956,7 +993,7 @@ static const struct iio_info ad7380_info = {
.debugfs_reg_access = &ad7380_debugfs_reg_access,
};
-static int ad7380_init(struct ad7380_state *st, struct regulator *vref)
+static int ad7380_init(struct ad7380_state *st, bool external_ref_en)
{
int ret;
@@ -968,13 +1005,13 @@ static int ad7380_init(struct ad7380_state *st, struct regulator *vref)
if (ret < 0)
return ret;
- /* select internal or external reference voltage */
- ret = regmap_update_bits(st->regmap, AD7380_REG_ADDR_CONFIG1,
- AD7380_CONFIG1_REFSEL,
- FIELD_PREP(AD7380_CONFIG1_REFSEL,
- vref ? 1 : 0));
- if (ret < 0)
- return ret;
+ if (external_ref_en) {
+ /* select external reference voltage */
+ ret = regmap_set_bits(st->regmap, AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_REFSEL);
+ if (ret < 0)
+ return ret;
+ }
/* This is the default value after reset. */
st->oversampling_ratio = 1;
@@ -987,16 +1024,11 @@ static int ad7380_init(struct ad7380_state *st, struct regulator *vref)
FIELD_PREP(AD7380_CONFIG2_SDO, 1));
}
-static void ad7380_regulator_disable(void *p)
-{
- regulator_disable(p);
-}
-
static int ad7380_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct ad7380_state *st;
- struct regulator *vref;
+ bool external_ref_en;
int ret, i;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -1009,36 +1041,38 @@ static int ad7380_probe(struct spi_device *spi)
if (!st->chip_info)
return dev_err_probe(&spi->dev, -EINVAL, "missing match data\n");
- vref = devm_regulator_get_optional(&spi->dev, "refio");
- if (IS_ERR(vref)) {
- if (PTR_ERR(vref) != -ENODEV)
- return dev_err_probe(&spi->dev, PTR_ERR(vref),
- "Failed to get refio regulator\n");
-
- vref = NULL;
- }
+ ret = devm_regulator_bulk_get_enable(&spi->dev, st->chip_info->num_supplies,
+ st->chip_info->supplies);
- /*
- * If there is no REFIO supply, then it means that we are using
- * the internal 2.5V reference, otherwise REFIO is reference voltage.
- */
- if (vref) {
- ret = regulator_enable(vref);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev,
- ad7380_regulator_disable, vref);
- if (ret)
- return ret;
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "Failed to enable power supplies\n");
+ fsleep(T_POWERUP_US);
- ret = regulator_get_voltage(vref);
+ if (st->chip_info->external_ref_only) {
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev,
+ "refin");
if (ret < 0)
- return ret;
+ return dev_err_probe(&spi->dev, ret,
+ "Failed to get refin regulator\n");
st->vref_mv = ret / 1000;
+
+ /* these chips don't have a register bit for this */
+ external_ref_en = false;
} else {
- st->vref_mv = AD7380_INTERNAL_REF_MV;
+ /*
+ * If there is no REFIO supply, it means that we are using the
+ * internal reference; otherwise REFIO provides the reference voltage.
+ */
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev,
+ "refio");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(&spi->dev, ret,
+ "Failed to get refio regulator\n");
+
+ external_ref_en = ret != -ENODEV;
+ st->vref_mv = external_ref_en ? ret / 1000 : AD7380_INTERNAL_REF_MV;
}
if (st->chip_info->num_vcm_supplies > ARRAY_SIZE(st->vcm_mv))
@@ -1050,27 +1084,13 @@ static int ad7380_probe(struct spi_device *spi)
* input pin.
*/
for (i = 0; i < st->chip_info->num_vcm_supplies; i++) {
- struct regulator *vcm;
-
- vcm = devm_regulator_get(&spi->dev,
- st->chip_info->vcm_supplies[i]);
- if (IS_ERR(vcm))
- return dev_err_probe(&spi->dev, PTR_ERR(vcm),
- "Failed to get %s regulator\n",
- st->chip_info->vcm_supplies[i]);
+ const char *vcm = st->chip_info->vcm_supplies[i];
- ret = regulator_enable(vcm);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev,
- ad7380_regulator_disable, vcm);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(vcm);
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, vcm);
if (ret < 0)
- return ret;
+ return dev_err_probe(&spi->dev, ret,
+ "Failed to get %s regulator\n",
+ vcm);
st->vcm_mv[i] = ret / 1000;
}
@@ -1135,7 +1155,7 @@ static int ad7380_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = ad7380_init(st, vref);
+ ret = ad7380_init(st, external_ref_en);
if (ret)
return ret;
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 45e337c6d256..9f5d5ebb8653 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -380,7 +380,7 @@ config LTC2632
config LTC2664
tristate "Analog Devices LTC2664 and LTC2672 DAC SPI driver"
depends on SPI
- select REGMAP
+ select REGMAP_SPI
help
Say yes here to build support for Analog Devices
LTC2664 and LTC2672 converters (DAC).
diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
index 59d7615c0f56..5f131bc1a01e 100644
--- a/drivers/iio/industrialio-gts-helper.c
+++ b/drivers/iio/industrialio-gts-helper.c
@@ -307,13 +307,15 @@ static int iio_gts_build_avail_scale_table(struct iio_gts *gts)
if (ret)
goto err_free_out;
+ for (i = 0; i < gts->num_itime; i++)
+ kfree(per_time_gains[i]);
kfree(per_time_gains);
gts->per_time_avail_scale_tables = per_time_scales;
return 0;
err_free_out:
- for (i--; i; i--) {
+ for (i--; i >= 0; i--) {
kfree(per_time_scales[i]);
kfree(per_time_gains[i]);
}
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
index 9630de1c578e..621428885455 100644
--- a/drivers/iio/light/veml6030.c
+++ b/drivers/iio/light/veml6030.c
@@ -522,7 +522,7 @@ static int veml6030_read_raw(struct iio_dev *indio_dev,
}
if (mask == IIO_CHAN_INFO_PROCESSED) {
*val = (reg * data->cur_resolution) / 10000;
- *val2 = (reg * data->cur_resolution) % 10000;
+ *val2 = (reg * data->cur_resolution) % 10000 * 100;
return IIO_VAL_INT_PLUS_MICRO;
}
*val = reg;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 2ebcb2de962b..7ad83566ab0f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1532,9 +1532,11 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
u32 tbl_indx;
int rc;
+ spin_lock_bh(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
+ spin_unlock_bh(&rcfw->tbl_lock);
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_DESTROY_QP,
@@ -1545,8 +1547,10 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc) {
+ spin_lock_bh(&rcfw->tbl_lock);
rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
rcfw->qp_tbl[tbl_indx].qp_handle = qp;
+ spin_unlock_bh(&rcfw->tbl_lock);
return rc;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 7294221b3316..e82bd37158ad 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -290,7 +290,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_hwq *hwq;
u32 sw_prod, cmdq_prod;
struct pci_dev *pdev;
- unsigned long flags;
u16 cookie;
u8 *preq;
@@ -301,7 +300,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
/* Cmdq are in 16-byte units, each request can consume 1 or more
* cmdqe
*/
- spin_lock_irqsave(&hwq->lock, flags);
+ spin_lock_bh(&hwq->lock);
required_slots = bnxt_qplib_get_cmd_slots(msg->req);
free_slots = HWQ_FREE_SLOTS(hwq);
cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
@@ -311,7 +310,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
dev_info_ratelimited(&pdev->dev,
"CMDQ is full req/free %d/%d!",
required_slots, free_slots);
- spin_unlock_irqrestore(&hwq->lock, flags);
+ spin_unlock_bh(&hwq->lock);
return -EAGAIN;
}
if (msg->block)
@@ -367,7 +366,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
wmb();
writel(cmdq_prod, cmdq->cmdq_mbox.prod);
writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
- spin_unlock_irqrestore(&hwq->lock, flags);
+ spin_unlock_bh(&hwq->lock);
/* Return the CREQ response pointer */
return 0;
}
@@ -486,7 +485,6 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
{
struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp;
struct bnxt_qplib_crsqe *crsqe;
- unsigned long flags;
u16 cookie;
int rc;
u8 opcode;
@@ -512,12 +510,12 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
rc = __poll_for_resp(rcfw, cookie);
if (rc) {
- spin_lock_irqsave(&rcfw->cmdq.hwq.lock, flags);
+ spin_lock_bh(&rcfw->cmdq.hwq.lock);
crsqe = &rcfw->crsqe_tbl[cookie];
crsqe->is_waiter_alive = false;
if (rc == -ENODEV)
set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);
- spin_unlock_irqrestore(&rcfw->cmdq.hwq.lock, flags);
+ spin_unlock_bh(&rcfw->cmdq.hwq.lock);
return -ETIMEDOUT;
}
@@ -628,7 +626,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
u16 cookie, blocked = 0;
bool is_waiter_alive;
struct pci_dev *pdev;
- unsigned long flags;
u32 wait_cmds = 0;
int rc = 0;
@@ -637,17 +634,21 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
err_event = (struct creq_qp_error_notification *)qp_event;
qp_id = le32_to_cpu(err_event->xid);
+ spin_lock(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
qp = rcfw->qp_tbl[tbl_indx].qp_handle;
+ if (!qp) {
+ spin_unlock(&rcfw->tbl_lock);
+ break;
+ }
+ bnxt_qplib_mark_qp_error(qp);
+ rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
+ spin_unlock(&rcfw->tbl_lock);
dev_dbg(&pdev->dev, "Received QP error notification\n");
dev_dbg(&pdev->dev,
"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
qp_id, err_event->req_err_state_reason,
err_event->res_err_state_reason);
- if (!qp)
- break;
- bnxt_qplib_mark_qp_error(qp);
- rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
break;
default:
/*
@@ -659,8 +660,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
*
*/
- spin_lock_irqsave_nested(&hwq->lock, flags,
- SINGLE_DEPTH_NESTING);
+ spin_lock_nested(&hwq->lock, SINGLE_DEPTH_NESTING);
cookie = le16_to_cpu(qp_event->cookie);
blocked = cookie & RCFW_CMD_IS_BLOCKING;
cookie &= RCFW_MAX_COOKIE_VALUE;
@@ -672,7 +672,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
dev_info(&pdev->dev,
"rcfw timedout: cookie = %#x, free_slots = %d",
cookie, crsqe->free_slots);
- spin_unlock_irqrestore(&hwq->lock, flags);
+ spin_unlock(&hwq->lock);
return rc;
}
@@ -720,7 +720,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
__destroy_timedout_ah(rcfw,
(struct creq_create_ah_resp *)
qp_event);
- spin_unlock_irqrestore(&hwq->lock, flags);
+ spin_unlock(&hwq->lock);
}
*num_wait += wait_cmds;
return rc;
@@ -734,12 +734,11 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
struct bnxt_qplib_hwq *hwq = &creq->hwq;
struct creq_base *creqe;
- unsigned long flags;
u32 num_wakeup = 0;
u32 hw_polled = 0;
/* Service the CREQ until budget is over */
- spin_lock_irqsave(&hwq->lock, flags);
+ spin_lock_bh(&hwq->lock);
while (budget > 0) {
creqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
if (!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))
@@ -782,7 +781,7 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
if (hw_polled)
bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
rcfw->res->cctx, true);
- spin_unlock_irqrestore(&hwq->lock, flags);
+ spin_unlock_bh(&hwq->lock);
if (num_wakeup)
wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
}
@@ -978,6 +977,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
GFP_KERNEL);
if (!rcfw->qp_tbl)
goto fail;
+ spin_lock_init(&rcfw->tbl_lock);
rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 45996e60a0d0..07779aeb7575 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -224,6 +224,8 @@ struct bnxt_qplib_rcfw {
struct bnxt_qplib_crsqe *crsqe_tbl;
int qp_tbl_size;
struct bnxt_qplib_qp_node *qp_tbl;
+ /* To synchronize the qp-handle hash table */
+ spinlock_t tbl_lock;
u64 oos_prev;
u32 init_oos_stats;
u32 cmdq_depth;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 10a4c738b59f..e059f92d90fd 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -473,6 +473,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
.fill_res_cq_entry = c4iw_fill_res_cq_entry,
.fill_res_cm_id_entry = c4iw_fill_res_cm_id_entry,
.fill_res_mr_entry = c4iw_fill_res_mr_entry,
+ .fill_res_qp_entry = c4iw_fill_res_qp_entry,
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = c4iw_get_dma_mr,
.get_hw_stats = c4iw_get_mib,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index e39b1a101e97..10ce3b44f645 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4268,14 +4268,14 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic)
- MLX5_SET(qpc, qpc, log_sra_max, ilog2(attr->max_rd_atomic));
+ MLX5_SET(qpc, qpc, log_sra_max, fls(attr->max_rd_atomic - 1));
if (attr_mask & IB_QP_SQ_PSN)
MLX5_SET(qpc, qpc, next_send_psn, attr->sq_psn);
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic)
MLX5_SET(qpc, qpc, log_rra_max,
- ilog2(attr->max_dest_rd_atomic));
+ fls(attr->max_dest_rd_atomic - 1));
if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 47fac29cf7c3..c51858f1cdc5 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -119,12 +119,12 @@ static void input_pass_values(struct input_dev *dev,
handle = rcu_dereference(dev->grab);
if (handle) {
- count = handle->handler->events(handle, vals, count);
+ count = handle->handle_events(handle, vals, count);
} else {
list_for_each_entry_rcu(handle, &dev->h_list, d_node)
if (handle->open) {
- count = handle->handler->events(handle, vals,
- count);
+ count = handle->handle_events(handle, vals,
+ count);
if (!count)
break;
}
@@ -2534,57 +2534,6 @@ static int input_handler_check_methods(const struct input_handler *handler)
return 0;
}
-/*
- * An implementation of input_handler's events() method that simply
- * invokes handler->event() method for each event one by one.
- */
-static unsigned int input_handler_events_default(struct input_handle *handle,
- struct input_value *vals,
- unsigned int count)
-{
- struct input_handler *handler = handle->handler;
- struct input_value *v;
-
- for (v = vals; v != vals + count; v++)
- handler->event(handle, v->type, v->code, v->value);
-
- return count;
-}
-
-/*
- * An implementation of input_handler's events() method that invokes
- * handler->filter() method for each event one by one and removes events
- * that were filtered out from the "vals" array.
- */
-static unsigned int input_handler_events_filter(struct input_handle *handle,
- struct input_value *vals,
- unsigned int count)
-{
- struct input_handler *handler = handle->handler;
- struct input_value *end = vals;
- struct input_value *v;
-
- for (v = vals; v != vals + count; v++) {
- if (handler->filter(handle, v->type, v->code, v->value))
- continue;
- if (end != v)
- *end = *v;
- end++;
- }
-
- return end - vals;
-}
-
-/*
- * An implementation of input_handler's events() method that does nothing.
- */
-static unsigned int input_handler_events_null(struct input_handle *handle,
- struct input_value *vals,
- unsigned int count)
-{
- return count;
-}
-
/**
* input_register_handler - register a new input handler
* @handler: handler to be registered
@@ -2604,13 +2553,6 @@ int input_register_handler(struct input_handler *handler)
INIT_LIST_HEAD(&handler->h_list);
- if (handler->filter)
- handler->events = input_handler_events_filter;
- else if (handler->event)
- handler->events = input_handler_events_default;
- else if (!handler->events)
- handler->events = input_handler_events_null;
-
error = mutex_lock_interruptible(&input_mutex);
if (error)
return error;
@@ -2684,6 +2626,75 @@ int input_handler_for_each_handle(struct input_handler *handler, void *data,
}
EXPORT_SYMBOL(input_handler_for_each_handle);
+/*
+ * An implementation of input_handle's handle_events() method that simply
+ * invokes handler->event() method for each event one by one.
+ */
+static unsigned int input_handle_events_default(struct input_handle *handle,
+ struct input_value *vals,
+ unsigned int count)
+{
+ struct input_handler *handler = handle->handler;
+ struct input_value *v;
+
+ for (v = vals; v != vals + count; v++)
+ handler->event(handle, v->type, v->code, v->value);
+
+ return count;
+}
+
+/*
+ * An implementation of input_handle's handle_events() method that invokes
+ * handler->filter() method for each event one by one and removes events
+ * that were filtered out from the "vals" array.
+ */
+static unsigned int input_handle_events_filter(struct input_handle *handle,
+ struct input_value *vals,
+ unsigned int count)
+{
+ struct input_handler *handler = handle->handler;
+ struct input_value *end = vals;
+ struct input_value *v;
+
+ for (v = vals; v != vals + count; v++) {
+ if (handler->filter(handle, v->type, v->code, v->value))
+ continue;
+ if (end != v)
+ *end = *v;
+ end++;
+ }
+
+ return end - vals;
+}
+
+/*
+ * An implementation of input_handle's handle_events() method that does nothing.
+ */
+static unsigned int input_handle_events_null(struct input_handle *handle,
+ struct input_value *vals,
+ unsigned int count)
+{
+ return count;
+}
+
+/*
+ * Sets up the appropriate handle->handle_events method based on the input_handler
+ * associated with the handle.
+ */
+static void input_handle_setup_event_handler(struct input_handle *handle)
+{
+ struct input_handler *handler = handle->handler;
+
+ if (handler->filter)
+ handle->handle_events = input_handle_events_filter;
+ else if (handler->event)
+ handle->handle_events = input_handle_events_default;
+ else if (handler->events)
+ handle->handle_events = handler->events;
+ else
+ handle->handle_events = input_handle_events_null;
+}
+
/**
* input_register_handle - register a new input handle
* @handle: handle to register
@@ -2701,6 +2712,7 @@ int input_register_handle(struct input_handle *handle)
struct input_dev *dev = handle->dev;
int error;
+ input_handle_setup_event_handler(handle);
/*
* We take dev->mutex here to prevent race with
* input_release_device().
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index d25d63a807f2..dc734974ce06 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -822,7 +822,8 @@ static int adp5588_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- disable_irq(client->irq);
+ if (client->irq)
+ disable_irq(client->irq);
return 0;
}
@@ -831,7 +832,8 @@ static int adp5588_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- enable_irq(client->irq);
+ if (client->irq)
+ enable_irq(client->irq);
return 0;
}
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index fda49b2fe088..85c6d8ce003f 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1121,6 +1121,14 @@ static void edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
}
}
+static void edt_ft5x06_exit_regmap(void *arg)
+{
+ struct edt_ft5x06_ts_data *data = arg;
+
+ if (!IS_ERR_OR_NULL(data->regmap))
+ regmap_exit(data->regmap);
+}
+
static void edt_ft5x06_disable_regulators(void *arg)
{
struct edt_ft5x06_ts_data *data = arg;
@@ -1154,6 +1162,16 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client)
return PTR_ERR(tsdata->regmap);
}
+ /*
+ * We are not using devm_regmap_init_i2c() and instead install a
+ * custom action because we may replace the regmap with an M06-specific
+ * one, and we need to make sure that it will not be released too early.
+ */
+ error = devm_add_action_or_reset(&client->dev, edt_ft5x06_exit_regmap,
+ tsdata);
+ if (error)
+ return error;
+
chip_data = device_get_match_data(&client->dev);
if (!chip_data)
chip_data = (const struct edt_i2c_chip_data *)id->driver_data;
@@ -1347,7 +1365,6 @@ static void edt_ft5x06_ts_remove(struct i2c_client *client)
struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
edt_ft5x06_ts_teardown_debugfs(tsdata);
- regmap_exit(tsdata->regmap);
}
static int edt_ft5x06_ts_suspend(struct device *dev)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ab597e74ba08..52f625e07658 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3810,8 +3810,18 @@ static int its_vpe_set_affinity(struct irq_data *d,
* Check if we're racing against a VPE being destroyed, for
* which we don't want to allow a VMOVP.
*/
- if (!atomic_read(&vpe->vmapp_count))
- return -EINVAL;
+ if (!atomic_read(&vpe->vmapp_count)) {
+ if (gic_requires_eager_mapping())
+ return -EINVAL;
+
+ /*
+ * If we lazily map the VPEs, this isn't an error and
+ * we can exit cleanly.
+ */
+ cpu = cpumask_first(mask_val);
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ return IRQ_SET_MASK_OK_DONE;
+ }
/*
* Changing affinity is mega expensive, so let's be as lazy as
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 179ee4afe937..67108c397c5a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -546,6 +546,26 @@ static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev, int opener_n
return 0;
}
+/*
+ * The only difference from bio_chain_endio() is that the current
+ * bi_status of bio does not affect the bi_status of parent.
+ */
+static void md_end_flush(struct bio *bio)
+{
+ struct bio *parent = bio->bi_private;
+
+ /*
+ * If any flush I/O fails before a power failure,
+ * disk data may be lost.
+ */
+ if (bio->bi_status)
+ pr_err("md: %pg flush io error %d\n", bio->bi_bdev,
+ blk_status_to_errno(bio->bi_status));
+
+ bio_put(bio);
+ bio_endio(parent);
+}
+
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
struct md_rdev *rdev;
@@ -565,7 +585,9 @@ bool md_flush_request(struct mddev *mddev, struct bio *bio)
new = bio_alloc_bioset(rdev->bdev, 0,
REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO,
&mddev->bio_set);
- bio_chain(new, bio);
+ new->bi_private = bio;
+ new->bi_end_io = md_end_flush;
+ bio_inc_remaining(bio);
submit_bio(new);
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f3bf1116794a..862b1fb71d86 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4061,9 +4061,12 @@ static int raid10_run(struct mddev *mddev)
}
if (!mddev_is_dm(conf->mddev)) {
- ret = raid10_set_queue_limits(mddev);
- if (ret)
+ int err = raid10_set_queue_limits(mddev);
+
+ if (err) {
+ ret = err;
goto out_free_conf;
+ }
}
/* need to check that every block has at least one working mirror */
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 9d090fa07516..be011cef12e5 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -321,7 +321,7 @@ void mei_io_cb_free(struct mei_cl_cb *cb)
return;
list_del(&cb->list);
- kfree(cb->buf.data);
+ kvfree(cb->buf.data);
kfree(cb->ext_hdr);
kfree(cb);
}
@@ -497,7 +497,7 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
if (length == 0)
return cb;
- cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
+ cb->buf.data = kvmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
if (!cb->buf.data) {
mei_io_cb_free(cb);
return NULL;
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 0f81586a19df..68ce4920e01e 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -892,28 +892,40 @@ static void gl9767_disable_ssc_pll(struct pci_dev *pdev)
gl9767_vhs_read(pdev);
}
+static void gl9767_set_low_power_negotiation(struct pci_dev *pdev, bool enable)
+{
+ u32 value;
+
+ gl9767_vhs_write(pdev);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
+ if (enable)
+ value &= ~PCIE_GLI_9767_CFG_LOW_PWR_OFF;
+ else
+ value |= PCIE_GLI_9767_CFG_LOW_PWR_OFF;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
+
+ gl9767_vhs_read(pdev);
+}
+
static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct mmc_ios *ios = &host->mmc->ios;
struct pci_dev *pdev;
- u32 value;
u16 clk;
pdev = slot->chip->pdev;
host->mmc->actual_clock = 0;
- gl9767_vhs_write(pdev);
-
- pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
- value |= PCIE_GLI_9767_CFG_LOW_PWR_OFF;
- pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
-
+ gl9767_set_low_power_negotiation(pdev, false);
gl9767_disable_ssc_pll(pdev);
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
- if (clock == 0)
+ if (clock == 0) {
+ gl9767_set_low_power_negotiation(pdev, true);
return;
+ }
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) {
@@ -922,12 +934,7 @@ static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
}
sdhci_enable_clk(host, clk);
-
- pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
- value &= ~PCIE_GLI_9767_CFG_LOW_PWR_OFF;
- pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
-
- gl9767_vhs_read(pdev);
+ gl9767_set_low_power_negotiation(pdev, true);
}
static void gli_set_9767(struct sdhci_host *host)
@@ -1061,6 +1068,9 @@ static int gl9767_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
sdhci_writew(host, value, SDHCI_CLOCK_CONTROL);
}
+ pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
+ value &= ~PCIE_GLI_9767_CFG_LOW_PWR_OFF;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
gl9767_vhs_read(pdev);
return 0;
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 4e8710c7cb7b..5290f5ad98f3 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2733,26 +2733,27 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
return MICREL_KSZ8_P1_ERRATA;
break;
case KSZ8567_CHIP_ID:
+ /* KSZ8567R Errata DS80000752C Module 4 */
+ case KSZ8765_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8795_CHIP_ID:
+ /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */
case KSZ9477_CHIP_ID:
+ /* KSZ9477S Errata DS80000754A Module 4 */
case KSZ9567_CHIP_ID:
+ /* KSZ9567S Errata DS80000756A Module 4 */
case KSZ9896_CHIP_ID:
+ /* KSZ9896C Errata DS80000757A Module 3 */
case KSZ9897_CHIP_ID:
- /* KSZ9477 Errata DS80000754C
- *
- * Module 4: Energy Efficient Ethernet (EEE) feature select must
- * be manually disabled
+ /* KSZ9897R Errata DS80000758C Module 4 */
+ /* Energy Efficient Ethernet (EEE) feature select must be manually disabled
* The EEE feature is enabled by default, but it is not fully
* operational. It must be manually disabled through register
* controls. If not disabled, the PHY ports can auto-negotiate
* to enable EEE, and this feature can cause link drops when
* linked to another device supporting EEE.
*
- * The same item appears in the errata for the KSZ9567, KSZ9896,
- * and KSZ9897.
- *
- * A similar item appears in the errata for the KSZ8567, but
- * provides an alternative workaround. For now, use the simple
- * workaround of disabling the EEE feature for this device too.
+ * The same item appears in the errata for all switches above.
*/
return MICREL_NO_EEE;
}
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index c34caf9815c5..a54682240839 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -206,6 +206,7 @@ struct mv88e6xxx_gpio_ops;
struct mv88e6xxx_avb_ops;
struct mv88e6xxx_ptp_ops;
struct mv88e6xxx_pcs_ops;
+struct mv88e6xxx_cc_coeffs;
struct mv88e6xxx_irq {
u16 masked;
@@ -408,6 +409,7 @@ struct mv88e6xxx_chip {
struct cyclecounter tstamp_cc;
struct timecounter tstamp_tc;
struct delayed_work overflow_work;
+ const struct mv88e6xxx_cc_coeffs *cc_coeffs;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
@@ -731,10 +733,6 @@ struct mv88e6xxx_ptp_ops {
int arr1_sts_reg;
int dep_sts_reg;
u32 rx_filters;
- u32 cc_shift;
- u32 cc_mult;
- u32 cc_mult_num;
- u32 cc_mult_dem;
};
struct mv88e6xxx_pcs_ops {
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 5394a8cf7bf1..04053fdc6489 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -1713,6 +1713,7 @@ int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port,
ptr = shift / 8;
shift %= 8;
mask >>= ptr * 8;
+ ptr <<= 8;
err = mv88e6393x_port_policy_read(chip, port, ptr, &reg);
if (err)
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index 56391e09b325..aed4a4b07f34 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -18,6 +18,13 @@
#define MV88E6XXX_MAX_ADJ_PPB 1000000
+struct mv88e6xxx_cc_coeffs {
+ u32 cc_shift;
+ u32 cc_mult;
+ u32 cc_mult_num;
+ u32 cc_mult_dem;
+};
+
/* Family MV88E6250:
* Raw timestamps are in units of 10-ns clock periods.
*
@@ -25,22 +32,43 @@
* simplifies to
* clkadj = scaled_ppm * 2^7 / 5^5
*/
-#define MV88E6250_CC_SHIFT 28
-#define MV88E6250_CC_MULT (10 << MV88E6250_CC_SHIFT)
-#define MV88E6250_CC_MULT_NUM (1 << 7)
-#define MV88E6250_CC_MULT_DEM 3125ULL
+#define MV88E6XXX_CC_10NS_SHIFT 28
+static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_10ns_coeffs = {
+ .cc_shift = MV88E6XXX_CC_10NS_SHIFT,
+ .cc_mult = 10 << MV88E6XXX_CC_10NS_SHIFT,
+ .cc_mult_num = 1 << 7,
+ .cc_mult_dem = 3125ULL,
+};
-/* Other families:
+/* Other families except MV88E6393X in internal clock mode:
* Raw timestamps are in units of 8-ns clock periods.
*
* clkadj = scaled_ppm * 8*2^28 / (10^6 * 2^16)
* simplifies to
* clkadj = scaled_ppm * 2^9 / 5^6
*/
-#define MV88E6XXX_CC_SHIFT 28
-#define MV88E6XXX_CC_MULT (8 << MV88E6XXX_CC_SHIFT)
-#define MV88E6XXX_CC_MULT_NUM (1 << 9)
-#define MV88E6XXX_CC_MULT_DEM 15625ULL
+#define MV88E6XXX_CC_8NS_SHIFT 28
+static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_8ns_coeffs = {
+ .cc_shift = MV88E6XXX_CC_8NS_SHIFT,
+ .cc_mult = 8 << MV88E6XXX_CC_8NS_SHIFT,
+ .cc_mult_num = 1 << 9,
+ .cc_mult_dem = 15625ULL
+};
+
+/* Family MV88E6393X using internal clock:
+ * Raw timestamps are in units of 4-ns clock periods.
+ *
+ * clkadj = scaled_ppm * 4*2^28 / (10^6 * 2^16)
+ * simplifies to
+ * clkadj = scaled_ppm * 2^8 / 5^6
+ */
+#define MV88E6XXX_CC_4NS_SHIFT 28
+static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_4ns_coeffs = {
+ .cc_shift = MV88E6XXX_CC_4NS_SHIFT,
+ .cc_mult = 4 << MV88E6XXX_CC_4NS_SHIFT,
+ .cc_mult_num = 1 << 8,
+ .cc_mult_dem = 15625ULL
+};
#define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100)
@@ -83,6 +111,33 @@ static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
return chip->info->ops->gpio_ops->set_pctl(chip, pin, func);
}
+static const struct mv88e6xxx_cc_coeffs *
+mv88e6xxx_cc_coeff_get(struct mv88e6xxx_chip *chip)
+{
+ u16 period_ps;
+ int err;
+
+ err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_CLOCK_PERIOD, &period_ps, 1);
+ if (err) {
+ dev_err(chip->dev, "failed to read cycle counter period: %d\n",
+ err);
+ return ERR_PTR(err);
+ }
+
+ switch (period_ps) {
+ case 4000:
+ return &mv88e6xxx_cc_4ns_coeffs;
+ case 8000:
+ return &mv88e6xxx_cc_8ns_coeffs;
+ case 10000:
+ return &mv88e6xxx_cc_10ns_coeffs;
+ default:
+ dev_err(chip->dev, "unexpected cycle counter period of %u ps\n",
+ period_ps);
+ return ERR_PTR(-ENODEV);
+ }
+}
+
static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
@@ -204,7 +259,6 @@ out:
static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
- const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
int neg_adj = 0;
u32 diff, mult;
u64 adj;
@@ -214,10 +268,10 @@ static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
scaled_ppm = -scaled_ppm;
}
- mult = ptp_ops->cc_mult;
- adj = ptp_ops->cc_mult_num;
+ mult = chip->cc_coeffs->cc_mult;
+ adj = chip->cc_coeffs->cc_mult_num;
adj *= scaled_ppm;
- diff = div_u64(adj, ptp_ops->cc_mult_dem);
+ diff = div_u64(adj, chip->cc_coeffs->cc_mult_dem);
mv88e6xxx_reg_lock(chip);
@@ -364,10 +418,6 @@ const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
- .cc_shift = MV88E6XXX_CC_SHIFT,
- .cc_mult = MV88E6XXX_CC_MULT,
- .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
- .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
@@ -391,10 +441,6 @@ const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
- .cc_shift = MV88E6250_CC_SHIFT,
- .cc_mult = MV88E6250_CC_MULT,
- .cc_mult_num = MV88E6250_CC_MULT_NUM,
- .cc_mult_dem = MV88E6250_CC_MULT_DEM,
};
const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
@@ -418,10 +464,6 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
- .cc_shift = MV88E6XXX_CC_SHIFT,
- .cc_mult = MV88E6XXX_CC_MULT,
- .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
- .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
@@ -446,10 +488,6 @@ const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
- .cc_shift = MV88E6XXX_CC_SHIFT,
- .cc_mult = MV88E6XXX_CC_MULT,
- .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
- .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
@@ -462,10 +500,10 @@ static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
return 0;
}
-/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3
+/* With a 250MHz input clock, the 32-bit timestamp counter overflows in ~17.2
* seconds; this task forces periodic reads so that we don't miss any.
*/
-#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 16)
+#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 8)
static void mv88e6xxx_ptp_overflow_check(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
@@ -484,11 +522,15 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
int i;
/* Set up the cycle counter */
+ chip->cc_coeffs = mv88e6xxx_cc_coeff_get(chip);
+ if (IS_ERR(chip->cc_coeffs))
+ return PTR_ERR(chip->cc_coeffs);
+
memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc));
chip->tstamp_cc.read = mv88e6xxx_ptp_clock_read;
chip->tstamp_cc.mask = CYCLECOUNTER_MASK(32);
- chip->tstamp_cc.mult = ptp_ops->cc_mult;
- chip->tstamp_cc.shift = ptp_ops->cc_shift;
+ chip->tstamp_cc.mult = chip->cc_coeffs->cc_mult;
+ chip->tstamp_cc.shift = chip->cc_coeffs->cc_shift;
timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc,
ktime_to_ns(ktime_get_real()));
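
The coefficient tables follow directly from the per-period derivations in the comments above; as a worked check of the new 4 ns entry and of how mv88e6xxx_ptp_adjfine() consumes it (numbers only, not driver code):

	/* 4 ns periods: clkadj = scaled_ppm * 4 * 2^28 / (10^6 * 2^16)
	 *                      = scaled_ppm * 2^8 / 5^6
	 * i.e. cc_mult_num = 1 << 8 and cc_mult_dem = 15625, as in the table.
	 *
	 * Example: scaled_ppm = 65536 (+1 ppm in 16.16 fixed point)
	 *   diff = 65536 * 256 / 15625 ~= 1073
	 *   cc_mult = 4 << 28 ~= 1.07e9, so cc_mult +/- diff shifts the
	 *   synthesized clock by almost exactly one part per million.
	 */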
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 6e422e24750a..99d025b69079 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2254,10 +2254,11 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
- spin_lock_bh(&ptp->ptp_lock);
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
memset(skb_hwtstamps(skb), 0,
sizeof(*skb_hwtstamps(skb)));
skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
@@ -2757,17 +2758,18 @@ static int bnxt_async_event_process(struct bnxt *bp,
case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
if (BNXT_PTP_USE_RTC(bp)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
u64 ns;
if (!ptp)
goto async_event_process_exit;
- spin_lock_bh(&ptp->ptp_lock);
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
bnxt_ptp_update_current_time(bp);
ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
BNXT_PHC_BITS) | ptp->current_time);
bnxt_ptp_rtc_timecounter_init(ptp, ns);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
}
break;
}
@@ -13494,9 +13496,11 @@ static void bnxt_force_fw_reset(struct bnxt *bp)
return;
if (ptp) {
- spin_lock_bh(&ptp->ptp_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
}
@@ -13561,9 +13565,11 @@ void bnxt_fw_reset(struct bnxt *bp)
int n = 0, tmo;
if (ptp) {
- spin_lock_bh(&ptp->ptp_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 37d42423459c..fa514be87650 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -62,13 +62,14 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
u64 ns = timespec64_to_ns(ts);
+ unsigned long flags;
if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_cfg_settime(ptp->bp, ns);
- spin_lock_bh(&ptp->ptp_lock);
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
timecounter_init(&ptp->tc, &ptp->cc, ns);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
@@ -100,13 +101,14 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
static void bnxt_ptp_get_current_time(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
if (!ptp)
return;
- spin_lock_bh(&ptp->ptp_lock);
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
WRITE_ONCE(ptp->old_time, ptp->current_time);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
}
static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
@@ -149,17 +151,18 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
+ unsigned long flags;
u64 ns, cycles;
int rc;
- spin_lock_bh(&ptp->ptp_lock);
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
rc = bnxt_refclk_read(ptp->bp, sts, &cycles);
if (rc) {
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
return rc;
}
ns = timecounter_cyc2time(&ptp->tc, cycles);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
*ts = ns_to_timespec64(ns);
return 0;
@@ -177,6 +180,7 @@ void bnxt_ptp_update_current_time(struct bnxt *bp)
static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
{
struct hwrm_port_mac_cfg_input *req;
+ unsigned long flags;
int rc;
rc = hwrm_req_init(ptp->bp, req, HWRM_PORT_MAC_CFG);
@@ -190,9 +194,9 @@ static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
if (rc) {
netdev_err(ptp->bp->dev, "ptp adjphc failed. rc = %x\n", rc);
} else {
- spin_lock_bh(&ptp->ptp_lock);
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
bnxt_ptp_update_current_time(ptp->bp);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
}
return rc;
@@ -202,13 +206,14 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
+ unsigned long flags;
if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_adjphc(ptp, delta);
- spin_lock_bh(&ptp->ptp_lock);
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
timecounter_adjtime(&ptp->tc, delta);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
@@ -236,14 +241,15 @@ static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
struct bnxt *bp = ptp->bp;
+ unsigned long flags;
if (!BNXT_MH(bp))
return bnxt_ptp_adjfine_rtc(bp, scaled_ppm);
- spin_lock_bh(&ptp->ptp_lock);
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
timecounter_read(&ptp->tc);
ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
@@ -251,12 +257,13 @@ void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct ptp_clock_event event;
+ unsigned long flags;
u64 ns, pps_ts;
pps_ts = EVENT_PPS_TS(data2, data1);
- spin_lock_bh(&ptp->ptp_lock);
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
ns = timecounter_cyc2time(&ptp->tc, pps_ts);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) {
case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL:
@@ -393,16 +400,17 @@ static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns,
{
u64 cycles_now;
u64 nsec_now, nsec_delta;
+ unsigned long flags;
int rc;
- spin_lock_bh(&ptp->ptp_lock);
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now);
if (rc) {
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
return rc;
}
nsec_now = timecounter_cyc2time(&ptp->tc, cycles_now);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
nsec_delta = target_ns - nsec_now;
*cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult);
@@ -689,6 +697,7 @@ static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot)
struct skb_shared_hwtstamps timestamp;
struct bnxt_ptp_tx_req *txts_req;
unsigned long now = jiffies;
+ unsigned long flags;
u64 ts = 0, ns = 0;
u32 tmo = 0;
int rc;
@@ -702,9 +711,9 @@ static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot)
tmo, slot);
if (!rc) {
memset(&timestamp, 0, sizeof(timestamp));
- spin_lock_bh(&ptp->ptp_lock);
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(txts_req->tx_skb, &timestamp);
ptp->stats.ts_pkts++;
@@ -730,6 +739,7 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
unsigned long now = jiffies;
struct bnxt *bp = ptp->bp;
u16 cons = ptp->txts_cons;
+ unsigned long flags;
u32 num_requests;
int rc = 0;
@@ -757,9 +767,9 @@ next_slot:
bnxt_ptp_get_current_time(bp);
ptp->next_period = now + HZ;
if (time_after_eq(now, ptp->next_overflow_check)) {
- spin_lock_bh(&ptp->ptp_lock);
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
timecounter_read(&ptp->tc);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
}
if (rc == -EAGAIN)
@@ -819,6 +829,7 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
u32 opaque = tscmp->tx_ts_cmp_opaque;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
+ unsigned long flags;
u64 ts, ns;
u16 cons;
@@ -833,9 +844,9 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
le32_to_cpu(tscmp->tx_ts_cmp_flags_type),
le32_to_cpu(tscmp->tx_ts_cmp_errors_v));
} else {
- spin_lock_bh(&ptp->ptp_lock);
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(tx_buf->skb, &timestamp);
}
@@ -975,6 +986,7 @@ void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns)
int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
{
struct timespec64 tsp;
+ unsigned long flags;
u64 ns;
int rc;
@@ -993,9 +1005,9 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
if (rc)
return rc;
}
- spin_lock_bh(&bp->ptp_cfg->ptp_lock);
+ spin_lock_irqsave(&bp->ptp_cfg->ptp_lock, flags);
bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns);
- spin_unlock_bh(&bp->ptp_cfg->ptp_lock);
+ spin_unlock_irqrestore(&bp->ptp_cfg->ptp_lock, flags);
return 0;
}
@@ -1063,10 +1075,12 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
atomic64_set(&ptp->stats.ts_err, 0);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- spin_lock_bh(&ptp->ptp_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
WRITE_ONCE(ptp->old_time, ptp->current_time);
- spin_unlock_bh(&ptp->ptp_lock);
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
ptp_schedule_worker(ptp->ptp_clock, 0);
}
ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index a9a2f9a18c9c..f322466ecad3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -146,11 +146,13 @@ struct bnxt_ptp_cfg {
};
#if BITS_PER_LONG == 32
-#define BNXT_READ_TIME64(ptp, dst, src) \
-do { \
- spin_lock_bh(&(ptp)->ptp_lock); \
- (dst) = (src); \
- spin_unlock_bh(&(ptp)->ptp_lock); \
+#define BNXT_READ_TIME64(ptp, dst, src) \
+do { \
+ unsigned long flags; \
+ \
+ spin_lock_irqsave(&(ptp)->ptp_lock, flags); \
+ (dst) = (src); \
+ spin_unlock_irqrestore(&(ptp)->ptp_lock, flags); \
} while (0)
#else
#define BNXT_READ_TIME64(ptp, dst, src) \
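
The bnxt changes in this series switch ptp_lock from BH-disabling to IRQ-saving locking because the lock can now also be taken from hard-IRQ context; BH-only exclusion would deadlock if an interrupt arrived while the lock is held. A generic sketch of the pattern (not bnxt-specific code):

	unsigned long flags;

	spin_lock_irqsave(&ptp->ptp_lock, flags);	/* IRQs off, previous state saved */
	ns = timecounter_cyc2time(&ptp->tc, ts);	/* short, non-sleeping critical section */
	spin_unlock_irqrestore(&ptp->ptp_lock, flags);	/* restore the saved IRQ state */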
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a8596ebcdfd6..875fe379eea2 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1381,10 +1381,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
- if (unlikely(!wrb_cnt)) {
- dev_kfree_skb_any(skb);
- goto drop;
- }
+ if (unlikely(!wrb_cnt))
+ goto drop_skb;
/* if os2bmc is enabled and if the pkt is destined to bmc,
* enqueue the pkt a 2nd time with mgmt bit set.
@@ -1393,7 +1391,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
if (unlikely(!wrb_cnt))
- goto drop;
+ goto drop_skb;
else
skb_get(skb);
}
@@ -1407,6 +1405,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
be_xmit_flush(adapter, txo);
return NETDEV_TX_OK;
+drop_skb:
+ dev_kfree_skb_any(skb);
drop:
tx_stats(txo)->tx_drv_drops++;
/* Flush the already enqueued tx requests */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 9767586b4eb3..11da139082e1 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -197,55 +197,67 @@ static int mac_probe(struct platform_device *_of_dev)
err = -EINVAL;
goto _return_of_node_put;
}
+ mac_dev->fman_dev = &of_dev->dev;
/* Get the FMan cell-index */
err = of_property_read_u32(dev_node, "cell-index", &val);
if (err) {
dev_err(dev, "failed to read cell-index for %pOF\n", dev_node);
err = -EINVAL;
- goto _return_of_node_put;
+ goto _return_dev_put;
}
/* cell-index 0 => FMan id 1 */
fman_id = (u8)(val + 1);
- priv->fman = fman_bind(&of_dev->dev);
+ priv->fman = fman_bind(mac_dev->fman_dev);
if (!priv->fman) {
dev_err(dev, "fman_bind(%pOF) failed\n", dev_node);
err = -ENODEV;
- goto _return_of_node_put;
+ goto _return_dev_put;
}
+ /* Two references have been taken in of_find_device_by_node()
+ * and fman_bind(). Release one of them here. The second one
+ * will be released in mac_remove().
+ */
+ put_device(mac_dev->fman_dev);
of_node_put(dev_node);
+ dev_node = NULL;
/* Get the address of the memory mapped registers */
mac_dev->res = platform_get_mem_or_io(_of_dev, 0);
if (!mac_dev->res) {
dev_err(dev, "could not get registers\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto _return_dev_put;
}
err = devm_request_resource(dev, fman_get_mem_region(priv->fman),
mac_dev->res);
if (err) {
dev_err_probe(dev, err, "could not request resource\n");
- return err;
+ goto _return_dev_put;
}
mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
resource_size(mac_dev->res));
if (!mac_dev->vaddr) {
dev_err(dev, "devm_ioremap() failed\n");
- return -EIO;
+ err = -EIO;
+ goto _return_dev_put;
}
- if (!of_device_is_available(mac_node))
- return -ENODEV;
+ if (!of_device_is_available(mac_node)) {
+ err = -ENODEV;
+ goto _return_dev_put;
+ }
/* Get the cell-index */
err = of_property_read_u32(mac_node, "cell-index", &val);
if (err) {
dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
- return -EINVAL;
+ err = -EINVAL;
+ goto _return_dev_put;
}
priv->cell_index = (u8)val;
@@ -259,22 +271,26 @@ static int mac_probe(struct platform_device *_of_dev)
if (unlikely(nph < 0)) {
dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
mac_node);
- return nph;
+ err = nph;
+ goto _return_dev_put;
}
if (nph != ARRAY_SIZE(mac_dev->port)) {
dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
mac_node);
- return -EINVAL;
+ err = -EINVAL;
+ goto _return_dev_put;
}
- for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ /* PORT_NUM determines the size of the port array */
+ for (i = 0; i < PORT_NUM; i++) {
/* Find the port node */
dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
if (!dev_node) {
dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n",
mac_node);
- return -EINVAL;
+ err = -EINVAL;
+ goto _return_dev_arr_put;
}
of_dev = of_find_device_by_node(dev_node);
@@ -282,17 +298,24 @@ static int mac_probe(struct platform_device *_of_dev)
dev_err(dev, "of_find_device_by_node(%pOF) failed\n",
dev_node);
err = -EINVAL;
- goto _return_of_node_put;
+ goto _return_dev_arr_put;
}
+ mac_dev->fman_port_devs[i] = &of_dev->dev;
- mac_dev->port[i] = fman_port_bind(&of_dev->dev);
+ mac_dev->port[i] = fman_port_bind(mac_dev->fman_port_devs[i]);
if (!mac_dev->port[i]) {
dev_err(dev, "dev_get_drvdata(%pOF) failed\n",
dev_node);
err = -EINVAL;
- goto _return_of_node_put;
+ goto _return_dev_arr_put;
}
+ /* Two references have been taken in of_find_device_by_node()
+ * and fman_port_bind(). Release one of them here. The second
+ * one will be released in mac_remove().
+ */
+ put_device(mac_dev->fman_port_devs[i]);
of_node_put(dev_node);
+ dev_node = NULL;
}
/* Get the PHY connection type */
@@ -312,7 +335,7 @@ static int mac_probe(struct platform_device *_of_dev)
err = init(mac_dev, mac_node, &params);
if (err < 0)
- return err;
+ goto _return_dev_arr_put;
if (!is_zero_ether_addr(mac_dev->addr))
dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
@@ -327,6 +350,12 @@ static int mac_probe(struct platform_device *_of_dev)
return err;
+_return_dev_arr_put:
+ /* mac_dev is kzalloc'ed */
+ for (i = 0; i < PORT_NUM; i++)
+ put_device(mac_dev->fman_port_devs[i]);
+_return_dev_put:
+ put_device(mac_dev->fman_dev);
_return_of_node_put:
of_node_put(dev_node);
return err;
@@ -335,6 +364,11 @@ _return_of_node_put:
static void mac_remove(struct platform_device *pdev)
{
struct mac_device *mac_dev = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < PORT_NUM; i++)
+ put_device(mac_dev->fman_port_devs[i]);
+ put_device(mac_dev->fman_dev);
platform_device_unregister(mac_dev->priv->eth_dev);
}
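
The probe/remove pairing above keeps exactly one long-lived reference per bound FMan device: per the comments in the patch, of_find_device_by_node() and fman_bind()/fman_port_bind() each take a reference, one is dropped immediately and the survivor is dropped in mac_remove(). A sketch of that balance (names from the patch):

	/* probe: two references taken, one released immediately */
	of_dev = of_find_device_by_node(dev_node);		/* +1 */
	mac_dev->fman_dev = &of_dev->dev;
	priv->fman = fman_bind(mac_dev->fman_dev);		/* +1 (per the patch comment) */
	put_device(mac_dev->fman_dev);				/* -1, keep one reference */

	/* remove: drop the reference kept since probe */
	put_device(mac_dev->fman_dev);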
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index fe747915cc73..8b5b43d50f8e 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -19,12 +19,13 @@
struct fman_mac;
struct mac_priv_s;
+#define PORT_NUM 2
struct mac_device {
void __iomem *vaddr;
struct device *dev;
struct resource *res;
u8 addr[ETH_ALEN];
- struct fman_port *port[2];
+ struct fman_port *port[PORT_NUM];
struct phylink *phylink;
struct phylink_config phylink_config;
phy_interface_t phy_if;
@@ -52,6 +53,9 @@ struct mac_device {
struct fman_mac *fman_mac;
struct mac_priv_s *priv;
+
+ struct device *fman_dev;
+ struct device *fman_port_devs[PORT_NUM];
};
static inline struct mac_device
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 807eb3bbb11c..841e5af7b2be 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1293,8 +1293,10 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
/* save the buffer addr until the last read operation */
*save_buf = read_buf;
+ }
- /* get data ready for the first time to read */
+ /* get data ready for the first time to read */
+ if (!*ppos) {
ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
read_buf, hns3_dbg_cmd[index].buf_len);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 4cbc4d069a1f..b09f0cca34dc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
@@ -380,6 +381,24 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
#define HNS3_INVALID_PTYPE \
ARRAY_SIZE(hns3_rx_ptype_tbl)
+static void hns3_dma_map_sync(struct device *dev, unsigned long iova)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iommu_iotlb_gather iotlb_gather;
+ size_t granule;
+
+ if (!domain || !iommu_is_dma_domain(domain))
+ return;
+
+ granule = 1 << __ffs(domain->pgsize_bitmap);
+ iova = ALIGN_DOWN(iova, granule);
+ iotlb_gather.start = iova;
+ iotlb_gather.end = iova + granule - 1;
+ iotlb_gather.pgsize = granule;
+
+ iommu_iotlb_sync(domain, &iotlb_gather);
+}
+
static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
struct hns3_enet_tqp_vector *tqp_vector = vector;
@@ -1032,6 +1051,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_tx_spare *tx_spare;
struct page *page;
dma_addr_t dma;
@@ -1073,6 +1094,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
tx_spare->buf = page_address(page);
tx_spare->len = PAGE_SIZE << order;
ring->tx_spare = tx_spare;
+ ring->tx_copybreak = priv->tx_copybreak;
return;
dma_mapping_error:
@@ -1724,7 +1746,9 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
unsigned int type)
{
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct hnae3_handle *handle = ring->tqp->handle;
struct device *dev = ring_to_dev(ring);
+ struct hnae3_ae_dev *ae_dev;
unsigned int size;
dma_addr_t dma;
@@ -1756,6 +1780,13 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
return -ENOMEM;
}
+ /* Add a SYNC command to sync the io-pgtable to avoid errors in pgtable
+ * prefetch
+ */
+ ae_dev = hns3_get_ae_dev(handle);
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ hns3_dma_map_sync(dev, dma);
+
desc_cb->priv = priv;
desc_cb->length = size;
desc_cb->dma = dma;
@@ -2452,7 +2483,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
return ret;
}
- netdev->features = features;
return 0;
}
@@ -4868,6 +4898,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
devm_kfree(&pdev->dev, priv->tqp_vector);
}
+static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
+{
+#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
+#define HNS3_MAX_PACKET_SIZE (64 * 1024)
+
+ struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
+ struct hnae3_handle *handle = priv->ae_handle;
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+ return;
+
+ if (!(domain && iommu_is_dma_domain(domain)))
+ return;
+
+ priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
+ priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
+
+ if (priv->tx_copybreak < priv->min_tx_copybreak)
+ priv->tx_copybreak = priv->min_tx_copybreak;
+ if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
+ handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
+}
+
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
unsigned int ring_type)
{
@@ -5101,6 +5155,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
int i, j;
int ret;
+ hns3_update_tx_spare_buf_config(priv);
for (i = 0; i < ring_num; i++) {
ret = hns3_alloc_ring_memory(&priv->ring[i]);
if (ret) {
@@ -5305,6 +5360,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->ae_handle = handle;
priv->tx_timeout_count = 0;
priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
+ priv->min_tx_copybreak = 0;
+ priv->min_tx_spare_buf_size = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
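
hns3_dma_map_sync() narrows the IOTLB sync to the one IO page-table granule covering the newly mapped address. The alignment arithmetic, worked through with illustrative values (not taken from the patch):

	/* pgsize_bitmap = 0x40201000 (4K/2M/1G supported):
	 *   granule = 1 << __ffs(0x40201000) = 1 << 12 = 4096
	 * For dma = 0x12345a78:
	 *   iova  = ALIGN_DOWN(0x12345a78, 4096) = 0x12345000
	 *   start = 0x12345000, end = 0x12345fff, pgsize = 4096
	 * so iommu_iotlb_sync() flushes exactly one 4K entry rather than
	 * the whole domain.
	 */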
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index d36c4ed16d8d..caf7a4df8585 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -596,6 +596,8 @@ struct hns3_nic_priv {
struct hns3_enet_coalesce rx_coal;
u32 tx_copybreak;
u32 rx_copybreak;
+ u32 min_tx_copybreak;
+ u32 min_tx_spare_buf_size;
};
union l3_hdr_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index b1e988347347..97eaeec1952b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1933,6 +1933,31 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
return ret;
}
+static int hns3_check_tx_copybreak(struct net_device *netdev, u32 copybreak)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (copybreak < priv->min_tx_copybreak) {
+ netdev_err(netdev, "tx copybreak %u should be no less than %u!\n",
+ copybreak, priv->min_tx_copybreak);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int hns3_check_tx_spare_buf_size(struct net_device *netdev, u32 buf_size)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (buf_size < priv->min_tx_spare_buf_size) {
+ netdev_err(netdev,
+ "tx spare buf size %u should be no less than %u!\n",
+ buf_size, priv->min_tx_spare_buf_size);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int hns3_set_tunable(struct net_device *netdev,
const struct ethtool_tunable *tuna,
const void *data)
@@ -1949,6 +1974,10 @@ static int hns3_set_tunable(struct net_device *netdev,
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
+ ret = hns3_check_tx_copybreak(netdev, *(u32 *)data);
+ if (ret)
+ return ret;
+
priv->tx_copybreak = *(u32 *)data;
for (i = 0; i < h->kinfo.num_tqps; i++)
@@ -1963,6 +1992,10 @@ static int hns3_set_tunable(struct net_device *netdev,
break;
case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ ret = hns3_check_tx_spare_buf_size(netdev, *(u32 *)data);
+ if (ret)
+ return ret;
+
old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
new_tx_spare_buf_size = *(u32 *)data;
netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index bd86efd92a5a..728f4777e51f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -6,6 +6,7 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -3584,6 +3585,17 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
return ret;
}
+static void hclge_set_reset_pending(struct hclge_dev *hdev,
+ enum hnae3_reset_type reset_type)
+{
+ /* When an incorrect reset type is executed, the get_reset_level
+ * function generates the HNAE3_NONE_RESET flag. As a result, this
+ * type does not need to be set as pending.
+ */
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &hdev->reset_pending);
+}
+
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
@@ -3604,7 +3616,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
@@ -3614,7 +3626,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
- set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
@@ -3769,7 +3781,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
- 0, hdev->misc_vector.name, hdev);
+ IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -4062,7 +4074,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF reset requested\n");
/* schedule again to check later */
- set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
hclge_reset_task_schedule(hdev);
break;
default:
@@ -4096,6 +4108,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
clear_bit(HNAE3_FLR_RESET, addr);
}
+ clear_bit(HNAE3_NONE_RESET, addr);
+
if (hdev->reset_type != HNAE3_NONE_RESET &&
rst_level < hdev->reset_type)
return HNAE3_NONE_RESET;
@@ -4237,7 +4251,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
return false;
} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
hdev->rst_stats.reset_fail_cnt++;
- set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, hdev->reset_type);
dev_info(&hdev->pdev->dev,
"re-schedule reset task(%u)\n",
hdev->rst_stats.reset_fail_cnt);
@@ -4480,8 +4494,20 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
+#define HCLGE_SUPPORT_RESET_TYPE \
+ (BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
+ BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
+
struct hclge_dev *hdev = ae_dev->priv;
+ if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
+ /* To prevent reset triggered by hclge_reset_event */
+ set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+ dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
+ rst_type);
+ return;
+ }
+
set_bit(rst_type, &hdev->default_reset_request);
}
@@ -11891,9 +11917,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_init_rxd_adv_layout(hdev);
- /* Enable MISC vector(vector0) */
- hclge_enable_vector(&hdev->misc_vector, true);
-
ret = hclge_init_wol(hdev);
if (ret)
dev_warn(&pdev->dev,
@@ -11906,6 +11929,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
+ /* Enable MISC vector(vector0) */
+ enable_irq(hdev->misc_vector.vector_irq);
+ hclge_enable_vector(&hdev->misc_vector, true);
+
dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -12311,7 +12338,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
- synchronize_irq(hdev->misc_vector.vector_irq);
+ disable_irq(hdev->misc_vector.vector_irq);
/* Disable all hw interrupts */
hclge_config_mac_tnl_int(hdev, false);
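
The misc-vector rework requests the IRQ disabled and enables it only once initialization has progressed far enough, with the symmetric disable on teardown. The intended ordering, sketched (assuming the IRQF_NO_AUTOEN request_irq flag, which exists for exactly this purpose):

	/* init: register the handler but leave the line masked */
	ret = request_irq(irq, hclge_misc_irq_handle, IRQF_NO_AUTOEN, name, hdev);
	/* ... finish state/reset bookkeeping ... */
	enable_irq(irq);
	hclge_enable_vector(&hdev->misc_vector, true);

	/* teardown: mask first so no handler runs against freed state */
	hclge_enable_vector(&hdev->misc_vector, false);
	disable_irq(irq);	/* also waits for an in-flight handler to finish */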
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 5505caea88e9..bab16c2191b2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -58,6 +58,9 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
struct hclge_dev *hdev = vport->back;
struct hclge_ptp *ptp = hdev->ptp;
+ if (!ptp)
+ return false;
+
if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
ptp->tx_skipped++;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
index 43c1c18fa81f..8c057192aae6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
@@ -510,9 +510,9 @@ out:
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
struct hnae3_knic_private_info *kinfo)
{
-#define HCLGE_RING_REG_OFFSET 0x200
#define HCLGE_RING_INT_REG_OFFSET 0x4
+ struct hnae3_queue *tqp;
int i, j, reg_num;
int data_num_sum;
u32 *reg = data;
@@ -533,10 +533,11 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < kinfo->num_tqps; j++) {
reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
+ tqp = kinfo->tqp[j];
for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGE_RING_REG_OFFSET * j);
+ *reg++ = readl_relaxed(tqp->io_base -
+ HCLGE_TQP_REG_OFFSET +
+ ring_reg_addr_list[i]);
}
data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 094a7c7b5592..896f1eb172d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1395,6 +1395,17 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
return ret;
}
+static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
+ enum hnae3_reset_type reset_type)
+{
+ /* When an incorrect reset type is executed, the get_reset_level
+ * function generates the HNAE3_NONE_RESET flag. As a result, this
+ * type does not need to be set as pending.
+ */
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &hdev->reset_pending);
+}
+
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1544,7 +1555,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
hdev->rst_stats.rst_fail_cnt);
if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
- set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, hdev->reset_type);
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
@@ -1664,6 +1675,8 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
clear_bit(HNAE3_FLR_RESET, addr);
}
+ clear_bit(HNAE3_NONE_RESET, addr);
+
return rst_level;
}
@@ -1673,14 +1686,15 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
struct hclgevf_dev *hdev = ae_dev->priv;
- dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
-
if (hdev->default_reset_request)
hdev->reset_level =
hclgevf_get_reset_level(&hdev->default_reset_request);
else
hdev->reset_level = HNAE3_VF_FUNC_RESET;
+ dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
+ hdev->reset_level);
+
/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
@@ -1691,8 +1705,20 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
+#define HCLGEVF_SUPPORT_RESET_TYPE \
+ (BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
+ BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
+ BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
+
struct hclgevf_dev *hdev = ae_dev->priv;
+ if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
+ /* To prevent reset triggered by hclge_reset_event */
+ set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+ dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
+ rst_type);
+ return;
+ }
set_bit(rst_type, &hdev->default_reset_request);
}
@@ -1849,14 +1875,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
*/
if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */
- set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;
- set_bit(hdev->reset_level, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, hdev->reset_level);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
hclgevf_reset_task_schedule(hdev);
@@ -1979,7 +2005,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
- set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
@@ -2289,6 +2315,7 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
+ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
mutex_init(&hdev->mbx_resp.mbx_mutex);
sema_init(&hdev->reset_sem, 1);
@@ -2988,7 +3015,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
HCLGEVF_DRIVER_NAME);
hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
- timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 6db415d8b917..7d9d9dbc7560 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
void *data)
{
-#define HCLGEVF_RING_REG_OFFSET 0x200
#define HCLGEVF_RING_INT_REG_OFFSET 0x4
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_queue *tqp;
int i, j, reg_um;
u32 *reg = data;
@@ -147,10 +147,11 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
reg_um = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
+ tqp = &hdev->htqp[j].q;
for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGEVF_RING_REG_OFFSET * j);
+ *reg++ = readl_relaxed(tqp->io_base -
+ HCLGEVF_TQP_REG_OFFSET +
+ ring_reg_addr_list[i]);
}
reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index f2d4669c81cf..58a3d28d938c 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -1012,6 +1012,7 @@ sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
if(skb->len > XMIT_BUFF_SIZE)
{
printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
index 928c8bdb6649..c6779d9dffff 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
@@ -989,5 +989,11 @@ ice_devlink_port_new(struct devlink *devlink,
if (err)
return err;
+ if (!ice_is_eswitch_mode_switchdev(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "SF ports are only supported in eswitch switchdev mode");
+ return -EOPNOTSUPP;
+ }
+
return ice_alloc_dynamic_port(pf, new_attr, extack, devlink_port);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 74c0e7319a4c..d5ad6d84007c 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -10,6 +10,7 @@
#define ICE_DPLL_PIN_IDX_INVALID 0xff
#define ICE_DPLL_RCLK_NUM_PER_PF 1
#define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25
+#define ICE_DPLL_PIN_GEN_RCLK_FREQ 1953125
/**
* enum ice_dpll_pin_type - enumerate ice pin types:
@@ -2064,6 +2065,73 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
}
/**
+ * ice_dpll_init_info_pins_generic - initializes generic pins info
+ * @pf: board private structure
+ * @input: if input pins initialized
+ *
+ * Init information for generic pins, cache them in PF's pins structures.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input)
+{
+ struct ice_dpll *de = &pf->dplls.eec, *dp = &pf->dplls.pps;
+ static const char labels[][sizeof("99")] = {
+ "0", "1", "2", "3", "4", "5", "6", "7", "8",
+ "9", "10", "11", "12", "13", "14", "15" };
+ u32 cap = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ enum ice_dpll_pin_type pin_type;
+ int i, pin_num, ret = -EINVAL;
+ struct ice_dpll_pin *pins;
+ u32 phase_adj_max;
+
+ if (input) {
+ pin_num = pf->dplls.num_inputs;
+ pins = pf->dplls.inputs;
+ phase_adj_max = pf->dplls.input_phase_adj_max;
+ pin_type = ICE_DPLL_PIN_TYPE_INPUT;
+ cap |= DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE;
+ } else {
+ pin_num = pf->dplls.num_outputs;
+ pins = pf->dplls.outputs;
+ phase_adj_max = pf->dplls.output_phase_adj_max;
+ pin_type = ICE_DPLL_PIN_TYPE_OUTPUT;
+ }
+ if (pin_num > ARRAY_SIZE(labels))
+ return ret;
+
+ for (i = 0; i < pin_num; i++) {
+ pins[i].idx = i;
+ pins[i].prop.board_label = labels[i];
+ pins[i].prop.phase_range.min = phase_adj_max;
+ pins[i].prop.phase_range.max = -phase_adj_max;
+ pins[i].prop.capabilities = cap;
+ pins[i].pf = pf;
+ ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
+ if (ret)
+ break;
+ if (input && pins[i].freq == ICE_DPLL_PIN_GEN_RCLK_FREQ)
+ pins[i].prop.type = DPLL_PIN_TYPE_MUX;
+ else
+ pins[i].prop.type = DPLL_PIN_TYPE_EXT;
+ if (!input)
+ continue;
+ ret = ice_aq_get_cgu_ref_prio(&pf->hw, de->dpll_idx, i,
+ &de->input_prio[i]);
+ if (ret)
+ break;
+ ret = ice_aq_get_cgu_ref_prio(&pf->hw, dp->dpll_idx, i,
+ &dp->input_prio[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
* ice_dpll_init_info_direct_pins - initializes direct pins info
* @pf: board private structure
* @pin_type: type of pins being initialized
@@ -2101,6 +2169,8 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
default:
return -EINVAL;
}
+ if (num_pins != ice_cgu_get_num_pins(hw, input))
+ return ice_dpll_init_info_pins_generic(pf, input);
for (i = 0; i < num_pins; i++) {
caps = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 3a33e6b9b313..ec8db830ac73 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -34,7 +34,6 @@ static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = {
ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
{ "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
- { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, },
};
static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = {
@@ -52,7 +51,6 @@ static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = {
ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
{ "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
- { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, },
};
static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = {
@@ -5965,6 +5963,25 @@ ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size)
}
/**
+ * ice_cgu_get_num_pins - get pin description array size
+ * @hw: pointer to the hw struct
+ * @input: if request is done against input or output pins
+ *
+ * Return: size of pin description array for given hw.
+ */
+int ice_cgu_get_num_pins(struct ice_hw *hw, bool input)
+{
+ const struct ice_cgu_pin_desc *t;
+ int size;
+
+ t = ice_cgu_get_pin_desc(hw, input, &size);
+ if (t)
+ return size;
+
+ return 0;
+}
+
+/**
* ice_cgu_get_pin_type - get pin's type
* @hw: pointer to the hw struct
* @pin: pin index
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 0852a34ade91..6cedc1a906af 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -404,6 +404,7 @@ int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_is_pca9575_present(struct ice_hw *hw);
+int ice_cgu_get_num_pins(struct ice_hw *hw, bool input);
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
struct dpll_pin_frequency *
ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f1d088168723..b83df5f94b1f 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -907,7 +907,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
int i, err = 0, vector = 0, free_vector = 0;
err = request_irq(adapter->msix_entries[vector].vector,
- igb_msix_other, 0, netdev->name, adapter);
+ igb_msix_other, IRQF_NO_THREAD, netdev->name, adapter);
if (err)
goto err_out;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 4746a6b258f0..8af75cb37c3e 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -337,6 +337,51 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
}
/**
+ * octep_oq_next_pkt() - Move to the next packet in Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @buff_info: Current packet buffer info.
+ * @read_idx: Current packet index in the ring.
+ * @desc_used: Current packet descriptor number.
+ *
+ * Free the resources associated with a packet.
+ * Increment packet index in the ring and packet descriptor number.
+ */
+static void octep_oq_next_pkt(struct octep_oq *oq,
+ struct octep_rx_buffer *buff_info,
+ u32 *read_idx, u32 *desc_used)
+{
+ dma_unmap_page(oq->dev, oq->desc_ring[*read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info->page = NULL;
+ (*read_idx)++;
+ (*desc_used)++;
+ if (*read_idx == oq->max_count)
+ *read_idx = 0;
+}
+
+/**
+ * octep_oq_drop_rx() - Free the resources associated with a packet.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @buff_info: Current packet buffer info.
+ * @read_idx: Current packet index in the ring.
+ * @desc_used: Current packet descriptor number.
+ *
+ */
+static void octep_oq_drop_rx(struct octep_oq *oq,
+ struct octep_rx_buffer *buff_info,
+ u32 *read_idx, u32 *desc_used)
+{
+ int data_len = buff_info->len - oq->max_single_buffer_size;
+
+ while (data_len > 0) {
+ octep_oq_next_pkt(oq, buff_info, read_idx, desc_used);
+ data_len -= oq->buffer_size;
+ }
+}
+
+/**
* __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
*
* @oct: Octeon device private data structure.
@@ -367,10 +412,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
desc_used = 0;
for (pkt = 0; pkt < pkts_to_process; pkt++) {
buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx];
- dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
- PAGE_SIZE, DMA_FROM_DEVICE);
resp_hw = page_address(buff_info->page);
- buff_info->page = NULL;
/* Swap the length field that is in Big-Endian to CPU */
buff_info->len = be64_to_cpu(resp_hw->length);
@@ -394,36 +436,33 @@ static int __octep_oq_process_rx(struct octep_device *oct,
data_offset = OCTEP_OQ_RESP_HW_SIZE;
rx_ol_flags = 0;
}
+
+ octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
+
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ if (!skb) {
+ octep_oq_drop_rx(oq, buff_info,
+ &read_idx, &desc_used);
+ oq->stats.alloc_failures++;
+ continue;
+ }
+ skb_reserve(skb, data_offset);
+
rx_bytes += buff_info->len;
if (buff_info->len <= oq->max_single_buffer_size) {
- skb = build_skb((void *)resp_hw, PAGE_SIZE);
- skb_reserve(skb, data_offset);
skb_put(skb, buff_info->len);
- read_idx++;
- desc_used++;
- if (read_idx == oq->max_count)
- read_idx = 0;
} else {
struct skb_shared_info *shinfo;
u16 data_len;
- skb = build_skb((void *)resp_hw, PAGE_SIZE);
- skb_reserve(skb, data_offset);
/* Head fragment includes response header(s);
* subsequent fragments contains only data.
*/
skb_put(skb, oq->max_single_buffer_size);
- read_idx++;
- desc_used++;
- if (read_idx == oq->max_count)
- read_idx = 0;
-
shinfo = skb_shinfo(skb);
data_len = buff_info->len - oq->max_single_buffer_size;
while (data_len) {
- dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
- PAGE_SIZE, DMA_FROM_DEVICE);
buff_info = (struct octep_rx_buffer *)
&oq->buff_info[read_idx];
if (data_len < oq->buffer_size) {
@@ -438,11 +477,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
buff_info->page, 0,
buff_info->len,
buff_info->len);
- buff_info->page = NULL;
- read_idx++;
- desc_used++;
- if (read_idx == oq->max_count)
- read_idx = 0;
+
+ octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
}
}
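
With octep_oq_next_pkt() factored out, the new error path can unwind a multi-buffer frame the same way the fast path consumes it, descriptor by descriptor, when build_skb() fails. A sketch of that walk (names from the patch, illustrative only):

	/* head buffer already consumed by octep_oq_next_pkt(); drop the
	 * remaining fragments without building an skb
	 */
	data_len = buff_info->len - oq->max_single_buffer_size;
	while (data_len > 0) {
		octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
		data_len -= oq->buffer_size;
	}
	oq->stats.alloc_failures++;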
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
index 87a67fa3868d..c01b1e8428f6 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -91,8 +91,8 @@ enum mtk_wed_dummy_cr_idx {
#define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
#define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin"
#define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin"
-#define MT7988_FIRMWARE_WO0 "mediatek/mt7988_wo_0.bin"
-#define MT7988_FIRMWARE_WO1 "mediatek/mt7988_wo_1.bin"
+#define MT7988_FIRMWARE_WO0 "mediatek/mt7988/mt7988_wo_0.bin"
+#define MT7988_FIRMWARE_WO1 "mediatek/mt7988/mt7988_wo_1.bin"
#define MTK_WO_MCU_CFG_LS_BASE 0
#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 060e5b939211..d6f37456fb31 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -389,15 +389,27 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}
-static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
+static struct sk_buff *mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
+ struct page *pages[],
u16 byte_count)
{
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
unsigned int linear_data_size;
+ struct page_pool *page_pool;
struct sk_buff *skb;
int page_index = 0;
bool linear_only;
void *data;
+ linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
+ linear_data_size = linear_only ? byte_count :
+ PAGE_SIZE -
+ MLXSW_PCI_RX_BUF_SW_OVERHEAD;
+
+ page_pool = cq->u.cq.page_pool;
+ page_pool_dma_sync_for_cpu(page_pool, pages[page_index],
+ MLXSW_PCI_SKB_HEADROOM, linear_data_size);
+
data = page_address(pages[page_index]);
net_prefetch(data);
@@ -405,11 +417,6 @@ static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
if (unlikely(!skb))
return ERR_PTR(-ENOMEM);
- linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
- linear_data_size = linear_only ? byte_count :
- PAGE_SIZE -
- MLXSW_PCI_RX_BUF_SW_OVERHEAD;
-
skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
skb_put(skb, linear_data_size);
@@ -425,6 +432,7 @@ static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
page = pages[page_index];
frag_size = min(byte_count, PAGE_SIZE);
+ page_pool_dma_sync_for_cpu(page_pool, page, 0, frag_size);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, 0, frag_size, PAGE_SIZE);
byte_count -= frag_size;
@@ -760,7 +768,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
if (err)
goto out;
- skb = mlxsw_pci_rdq_build_skb(pages, byte_count);
+ skb = mlxsw_pci_rdq_build_skb(q, pages, byte_count);
if (IS_ERR(skb)) {
dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n");
mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);
@@ -988,12 +996,13 @@ static int mlxsw_pci_cq_page_pool_init(struct mlxsw_pci_queue *q,
if (cq_type != MLXSW_PCI_CQ_RDQ)
return 0;
- pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = MLXSW_PCI_WQE_COUNT * mlxsw_pci->num_sg_entries;
pp_params.nid = dev_to_node(&mlxsw_pci->pdev->dev);
pp_params.dev = &mlxsw_pci->pdev->dev;
pp_params.napi = &q->u.cq.napi;
pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = PAGE_SIZE;
page_pool = page_pool_create(&pp_params);
if (IS_ERR(page_pool))
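
In the mlxsw change above, the page pool takes over DMA mapping and the device-direction sync (PP_FLAG_DMA_SYNC_DEV with max_len spanning the page), leaving the driver to sync each fragment for the CPU before parsing it. A kernel-style sketch of the setup follows; it is not a standalone program, and the pool_size value is illustrative:

/* Let the page pool map pages and sync them back toward the device when
 * they are recycled; the driver only syncs the region it is about to read. */
struct page_pool_params pp_params = {
	.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
	.pool_size = 256,                    /* illustrative */
	.dev       = &pdev->dev,
	.dma_dir   = DMA_FROM_DEVICE,
	.max_len   = PAGE_SIZE,              /* range synced on recycle */
};
struct page_pool *pool = page_pool_create(&pp_params);

/* Before the CPU parses a received fragment: */
page_pool_dma_sync_for_cpu(pool, page, offset, len);
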
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index d761a1235994..7ea798a4949e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -481,11 +481,33 @@ mlxsw_sp_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct netlink_ext_ack *extack)
{
+ u32 new_kvdl_index, old_kvdl_index = ipip_entry->dip_kvdl_index;
+ struct in6_addr old_addr6 = ipip_entry->parms.daddr.addr6;
struct mlxsw_sp_ipip_parms new_parms;
+ int err;
new_parms = mlxsw_sp_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev);
- return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
- &new_parms, extack);
+
+ err = mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp,
+ &new_parms.daddr.addr6,
+ &new_kvdl_index);
+ if (err)
+ return err;
+ ipip_entry->dip_kvdl_index = new_kvdl_index;
+
+ err = mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
+ &new_parms, extack);
+ if (err)
+ goto err_change_gre;
+
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &old_addr6);
+
+ return 0;
+
+err_change_gre:
+ ipip_entry->dip_kvdl_index = old_kvdl_index;
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &new_parms.daddr.addr6);
+ return err;
}
static int
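
The spectrum_ipip.c hunk above is a make-before-break update: obtain the KVDL index for the new address, attempt the switch-over, and only then drop the reference to the old address; on failure, restore the old index and release the new one. A compact user-space sketch of that ordering, with placeholder acquire/commit/release helpers standing in for the mlxsw calls:

#include <stdio.h>

static int acquire(int id)  { printf("acquire %d\n", id); return 0; }
static void release(int id) { printf("release %d\n", id); }
static int commit(int id)   { return id == 13 ? -1 : 0; /* pretend 13 fails */ }

static int change_resource(int *current_id, int new_id)
{
	int old_id = *current_id;
	int err;

	err = acquire(new_id);
	if (err)
		return err;
	*current_id = new_id;

	err = commit(new_id);
	if (err)
		goto err_commit;

	release(old_id);	/* break only after the switch-over succeeded */
	return 0;

err_commit:
	*current_id = old_id;	/* roll back to the previous resource */
	release(new_id);
	return err;
}

int main(void)
{
	int cur = 1;

	change_resource(&cur, 2);
	change_resource(&cur, 13);
	return 0;
}

The point of the ordering is that the old resource stays valid until the new one has been committed, so a failed change leaves the entry exactly as it was.
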
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 5b174cb95eb8..d94081c7658e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -16,6 +16,7 @@
#include "spectrum.h"
#include "spectrum_ptp.h"
#include "core.h"
+#include "txheader.h"
#define MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT 29
#define MLXSW_SP1_PTP_CLOCK_FREQ_KHZ 156257 /* 6.4nSec */
@@ -1684,6 +1685,12 @@ int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info)
{
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+
mlxsw_sp_txhdr_construct(skb, tx_info);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 800dfb64ec83..7d6d859cef3f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3197,7 +3197,6 @@ mlxsw_sp_nexthop_sh_counter_get(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
struct mlxsw_sp_nexthop_counter *nhct;
- void *ptr;
int err;
nhct = xa_load(&nh_grp->nhgi->nexthop_counters, nh->id);
@@ -3210,12 +3209,10 @@ mlxsw_sp_nexthop_sh_counter_get(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(nhct))
return nhct;
- ptr = xa_store(&nh_grp->nhgi->nexthop_counters, nh->id, nhct,
- GFP_KERNEL);
- if (IS_ERR(ptr)) {
- err = PTR_ERR(ptr);
+ err = xa_err(xa_store(&nh_grp->nhgi->nexthop_counters, nh->id, nhct,
+ GFP_KERNEL));
+ if (err)
goto err_store;
- }
return nhct;
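
xa_store() returns the entry that previously occupied the index on success and encodes failures as internal xarray entries rather than ERR_PTR() values, so the removed IS_ERR()/PTR_ERR() handling was unreliable; xa_err() folds the return value into 0 or a negative errno. A short kernel-style fragment of the corrected pattern (not standalone; xa, index and entry are placeholders):

/* xa_store() returns the old entry on success or an error-encoded entry on
 * failure; xa_err() extracts 0 or a negative errno from either case. */
err = xa_err(xa_store(&xa, index, entry, GFP_KERNEL));
if (err)
	return err;
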
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0cc9baaecb1b..713a89bb21e9 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4682,7 +4682,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask))
return IRQ_NONE;
- if (unlikely(status & SYSErr)) {
+ /* At least RTL8168fp may unexpectedly set the SYSErr bit */
+ if (unlikely(status & SYSErr &&
+ tp->mac_version <= RTL_GIGA_MAC_VER_06)) {
rtl8169_pcierr_interrupt(tp->dev);
goto out;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index e0165358c4ac..77b35abc6f6f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -203,8 +203,12 @@ static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv,
readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, channel));
reg_space[DMA_CHAN_RX_CONTROL(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_TX_BASE_ADDR_HI(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_TX_BASE_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_RX_BASE_ADDR_HI(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_RX_BASE_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_TX_END_ADDR(default_addrs, channel) / 4] =
@@ -225,8 +229,12 @@ static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv,
readl(ioaddr + DMA_CHAN_CUR_TX_DESC(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_RX_DESC(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_RX_DESC(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_CUR_TX_BUF_ADDR_HI(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_CUR_RX_BUF_ADDR_HI(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_STATUS(default_addrs, channel) / 4] =
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 17d9120db5fe..4f980dcd3958 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -127,7 +127,9 @@ static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define DMA_CHAN_SLOT_CTRL_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x3c)
#define DMA_CHAN_CUR_TX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x44)
#define DMA_CHAN_CUR_RX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x4c)
+#define DMA_CHAN_CUR_TX_BUF_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x50)
#define DMA_CHAN_CUR_TX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x54)
+#define DMA_CHAN_CUR_RX_BUF_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x58)
#define DMA_CHAN_CUR_RX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x5c)
#define DMA_CHAN_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x60)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d3895d7eecfc..208dbc68aaf9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4304,11 +4304,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- tx_q->tx_skbuff_dma[first_entry].buf = des;
- tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
- tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
- tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
-
if (priv->dma_cap.addr64 <= 32) {
first->des0 = cpu_to_le32(des);
@@ -4327,6 +4322,23 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
+	/* In case two or more DMA transmit descriptors are allocated for this
+	 * non-paged SKB data, the DMA buffer address should be saved to
+	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
+	 * and the other tx_q->tx_skbuff_dma[].buf entries should be left NULL,
+	 * to guarantee that stmmac_tx_clean() does not unmap the whole DMA
+	 * buffer too early, since the tail of the buffer may still be read by
+	 * the DMA engine.
+	 * With the address saved on the last descriptor, stmmac_tx_clean()
+	 * unmaps the buffer only after the DMA engine has completely finished
+	 * transmitting the full buffer.
+	 */
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
+
/* Prepare fragments */
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
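
The stmmac comment above explains why the unmap cookie for a head buffer that spans several descriptors is stored only on the last descriptor. A tiny user-space sketch of that ownership idea follows; free() stands in for dma_unmap_single() and the names are illustrative:

#include <stdlib.h>

struct desc_info {
	void *buf;	/* set only on the descriptor that owns the buffer */
};

/* Completion path: a buffer is released only when the entry that owns it is
 * reached, i.e. after every descriptor covering it has completed. */
static void clean(struct desc_info *ring, int count)
{
	for (int i = 0; i < count; i++) {
		if (ring[i].buf) {
			free(ring[i].buf);
			ring[i].buf = NULL;
		}
	}
}

int main(void)
{
	struct desc_info ring[4] = { { 0 } };
	void *head = malloc(4096);

	/* descriptors 0..2 cover one head buffer; only the last records it */
	ring[2].buf = head;
	clean(ring, 4);
	return 0;
}
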
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index a60bfb1abb7f..70f981887518 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1702,20 +1702,24 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
return -EINVAL;
if (data[IFLA_GTP_FD0]) {
- u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
+ int fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
- sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
- if (IS_ERR(sk0))
- return PTR_ERR(sk0);
+ if (fd0 >= 0) {
+ sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
+ if (IS_ERR(sk0))
+ return PTR_ERR(sk0);
+ }
}
if (data[IFLA_GTP_FD1]) {
- u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
+ int fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
- sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
- if (IS_ERR(sk1u)) {
- gtp_encap_disable_sock(sk0);
- return PTR_ERR(sk1u);
+ if (fd1 >= 0) {
+ sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
+ if (IS_ERR(sk1u)) {
+ gtp_encap_disable_sock(sk0);
+ return PTR_ERR(sk1u);
+ }
}
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 153b97f8ec0d..23180f7b67b6 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2798,6 +2798,31 @@ static struct hv_driver netvsc_drv = {
},
};
+/* Set the VF's namespace to the same namespace as the synthetic NIC */
+static void netvsc_event_set_vf_ns(struct net_device *ndev)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct net_device *vf_netdev;
+ int ret;
+
+ vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+ if (!vf_netdev)
+ return;
+
+ if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
+ ret = dev_change_net_namespace(vf_netdev, dev_net(ndev),
+ "eth%d");
+ if (ret)
+ netdev_err(vf_netdev,
+ "Cannot move to same namespace as %s: %d\n",
+ ndev->name, ret);
+ else
+ netdev_info(vf_netdev,
+ "Moved VF to namespace with: %s\n",
+ ndev->name);
+ }
+}
+
/*
* On Hyper-V, every VF interface is matched with a corresponding
* synthetic interface. The synthetic interface is presented first
@@ -2810,6 +2835,11 @@ static int netvsc_netdev_event(struct notifier_block *this,
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
int ret = 0;
+ if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) {
+ netvsc_event_set_vf_ns(event_dev);
+ return NOTIFY_DONE;
+ }
+
ret = check_dev_is_matching_vf(event_dev);
if (ret != 0)
return NOTIFY_DONE;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 26034f80d4a4..ee2159282573 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3798,8 +3798,7 @@ static void macsec_free_netdev(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
- if (macsec->secy.tx_sc.md_dst)
- metadata_dst_free(macsec->secy.tx_sc.md_dst);
+ dst_release(&macsec->secy.tx_sc.md_dst->dst);
free_percpu(macsec->stats);
free_percpu(macsec->secy.tx_sc.stats);
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index 4dc057c121f5..e70fb6687994 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -588,6 +588,9 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
if (len > MCTP_I2C_MAXMTU)
return -EMSGSIZE;
+ if (!daddr || !saddr)
+ return -EINVAL;
+
lldst = *((u8 *)daddr);
llsrc = *((u8 *)saddr);
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 41e80f78b316..16c382c42227 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -1377,10 +1377,12 @@ static ssize_t nsim_nexthop_bucket_activity_write(struct file *file,
if (pos != 0)
return -EINVAL;
- if (size > sizeof(buf))
+ if (size > sizeof(buf) - 1)
return -EINVAL;
if (copy_from_user(buf, user_buf, size))
return -EFAULT;
+ buf[size] = 0;
+
if (sscanf(buf, "%u %hu", &nhid, &bucket_index) != 2)
return -EINVAL;
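
The netdevsim fix above applies the usual rule for text copied in from user space: cap the copy at sizeof(buf) - 1, then NUL-terminate before handing the buffer to sscanf(). A small user-space analogue, using read() in place of copy_from_user():

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[128];
	unsigned int nhid;
	unsigned short bucket;
	ssize_t n;

	n = read(STDIN_FILENO, buf, sizeof(buf) - 1);	/* leave room for '\0' */
	if (n < 0)
		return 1;
	buf[n] = '\0';					/* sscanf() needs a string */

	if (sscanf(buf, "%u %hu", &nhid, &bucket) != 2)
		return 1;

	printf("nhid=%u bucket=%hu\n", nhid, bucket);
	return 0;
}
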
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index fc247f479257..3ab64e04a01c 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -45,8 +45,8 @@
/* Control Register 2 bits */
#define DP83822_FX_ENABLE BIT(14)
-#define DP83822_HW_RESET BIT(15)
-#define DP83822_SW_RESET BIT(14)
+#define DP83822_SW_RESET BIT(15)
+#define DP83822_DIG_RESTART BIT(14)
/* PHY STS bits */
#define DP83822_PHYSTS_DUPLEX BIT(2)
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index e39bfaefe8c5..d81163bc910a 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -815,7 +815,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
return HS_TIMEOUT;
}
}
- break;
+ fallthrough;
case PLIP_PK_LENGTH_LSB:
if (plip_send(nibble_timeout, dev,
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index f8e6854781e6..2906ce173f66 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -113,7 +113,7 @@ static void pse_release_pis(struct pse_controller_dev *pcdev)
{
int i;
- for (i = 0; i <= pcdev->nr_lines; i++) {
+ for (i = 0; i < pcdev->nr_lines; i++) {
of_node_put(pcdev->pi[i].pairset[0].np);
of_node_put(pcdev->pi[i].pairset[1].np);
of_node_put(pcdev->pi[i].np);
@@ -647,7 +647,7 @@ static int of_pse_match_pi(struct pse_controller_dev *pcdev,
{
int i;
- for (i = 0; i <= pcdev->nr_lines; i++) {
+ for (i = 0; i < pcdev->nr_lines; i++) {
if (pcdev->pi[i].np == np)
return i;
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 4823dbdf5465..0c011d8f5d4d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1076,6 +1076,7 @@ static const struct usb_device_id products[] = {
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0122)}, /* Quectel RG650V */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
@@ -1426,6 +1427,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x030e, 4)}, /* Quectel EM05GV2 */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
+ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0112, 0)}, /* Fibocom FG132 */
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index a5612c799f5e..468c73974046 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -10069,6 +10069,7 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3069) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3082) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x3098) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x7205) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) },
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ee1b5fd7b491..44179f4e807f 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1767,7 +1767,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
// can rename the link if it knows better.
if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
- (net->dev_addr [0] & 0x02) == 0))
+	     /* somebody touched it */
+ !is_zero_ether_addr(net->dev_addr)))
strscpy(net->name, "eth%d", sizeof(net->name));
/* WLAN devices should always be named "wlan%d" */
if ((dev->driver_info->flags & FLAG_WLAN) != 0)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f8131f92a392..792e9eadbfc3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -4155,7 +4155,7 @@ struct virtnet_stats_ctx {
u32 desc_num[3];
/* The actual supported stat types. */
- u32 bitmap[3];
+ u64 bitmap[3];
/* Used to calculate the reply buffer size. */
u32 size[3];
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index dbaf26d6a7a6..16d07d619b4d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3043,9 +3043,14 @@ ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
struct sk_buff *msdu)
{
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
struct ath10k_wmi *wmi = &ar->wmi;
- idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
+ spin_lock_bh(&ar->data_lock);
+ pkt_addr = idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
+ spin_unlock_bh(&ar->data_lock);
+
+ kfree(pkt_addr);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 4861179b2217..5e061f7525a6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2441,6 +2441,7 @@ wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
dma_unmap_single(ar->dev, pkt_addr->paddr,
msdu->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
+ kfree(pkt_addr);
if (param->status) {
info->flags &= ~IEEE80211_TX_STAT_ACK;
@@ -9612,6 +9613,7 @@ static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
dma_unmap_single(ar->dev, pkt_addr->paddr,
msdu->len, DMA_TO_DEVICE);
ieee80211_free_txskb(ar->hw, msdu);
+ kfree(pkt_addr);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index c087d8a0f5b2..40088e62572e 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -5291,8 +5291,11 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
hal_status == HAL_TLV_STATUS_PPDU_DONE) {
rx_mon_stats->status_ppdu_done++;
pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
- ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
- pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ if (!ab->hw_params.full_monitor_mode) {
+ ath11k_dp_rx_mon_dest_process(ar, mac_id,
+ budget, napi);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ }
}
if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index f29ac6de7139..19702b6f09c3 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -306,7 +306,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
struct sk_buff *skb)
{
struct wil6210_rtap {
- struct ieee80211_radiotap_header rthdr;
+ struct ieee80211_radiotap_header_fixed rthdr;
/* fields should be in the order of bits in rthdr.it_present */
/* flags */
u8 flags;
diff --git a/drivers/net/wireless/broadcom/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig
index 3a1a35b5672f..19d0c003f626 100644
--- a/drivers/net/wireless/broadcom/brcm80211/Kconfig
+++ b/drivers/net/wireless/broadcom/brcm80211/Kconfig
@@ -27,6 +27,7 @@ source "drivers/net/wireless/broadcom/brcm80211/brcmfmac/Kconfig"
config BRCM_TRACING
bool "Broadcom device tracing"
depends on BRCMSMAC || BRCMFMAC
+ depends on TRACING
help
If you say Y here, the Broadcom wireless drivers will register
with ftrace to dump event information into the trace ringbuffer.
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index b6636002c7d2..fe75941c584d 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -2518,7 +2518,7 @@ static void isr_rx_monitor(struct ipw2100_priv *priv, int i,
* to build this manually element by element, we can write it much
* more efficiently than we can parse it. ORDER MATTERS HERE */
struct ipw_rt_hdr {
- struct ieee80211_radiotap_header rt_hdr;
+ struct ieee80211_radiotap_header_fixed rt_hdr;
s8 rt_dbmsignal; /* signal in dbM, kluged to signed */
} *ipw_rt;
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
index 8ebf09121e17..226286cb7eb8 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
@@ -1143,7 +1143,7 @@ struct ipw_prom_priv {
* structure is provided regardless of any bits unset.
*/
struct ipw_rt_hdr {
- struct ieee80211_radiotap_header rt_hdr;
+ struct ieee80211_radiotap_header_fixed rt_hdr;
u64 rt_tsf; /* TSF */ /* XXX */
u8 rt_flags; /* radiotap packet flags */
u8 rt_rate; /* rate in 500kb/s */
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 9d33a66a49b5..958dd4f9bc69 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -3122,6 +3122,7 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
struct il_cmd_meta *out_meta;
dma_addr_t phys_addr;
unsigned long flags;
+ u8 *out_payload;
u32 idx;
u16 fix_size;
@@ -3157,6 +3158,16 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
out_cmd = txq->cmd[idx];
out_meta = &txq->meta[idx];
+ /* The payload is in the same place in regular and huge
+ * command buffers, but we need to let the compiler know when
+ * we're using a larger payload buffer to avoid "field-
+ * spanning write" warnings at run-time for huge commands.
+ */
+ if (cmd->flags & CMD_SIZE_HUGE)
+ out_payload = ((struct il_device_cmd_huge *)out_cmd)->cmd.payload;
+ else
+ out_payload = out_cmd->cmd.payload;
+
if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
spin_unlock_irqrestore(&il->hcmd_lock, flags);
return -ENOSPC;
@@ -3170,7 +3181,7 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
out_meta->callback = cmd->callback;
out_cmd->hdr.cmd = cmd->id;
- memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+ memcpy(out_payload, cmd->data, cmd->len);
/* At this point, the out_cmd now has all of the incoming cmd
* information */
@@ -4962,6 +4973,8 @@ il_pci_resume(struct device *device)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+ _il_wr(il, CSR_INT, 0xffffffff);
+ _il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
il_enable_interrupts(il);
if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 2147781b5fff..725c2a88ddb7 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -560,6 +560,18 @@ struct il_device_cmd {
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd))
+/**
+ * struct il_device_cmd_huge
+ *
+ * For use when sending huge commands.
+ */
+struct il_device_cmd_huge {
+ struct il_cmd_header hdr; /* uCode API */
+ union {
+ u8 payload[IL_MAX_CMD_SIZE - sizeof(struct il_cmd_header)];
+ } __packed cmd;
+} __packed;
+
struct il_host_cmd {
const void *data;
unsigned long reply_page;
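
The il_device_cmd_huge addition above lets the compiler see the real destination size when a command carries an oversized payload, which is what fortified memcpy() ("field-spanning write" warnings) checks. A user-space sketch of the same idea with illustrative struct layouts:

#include <string.h>
#include <stdint.h>
#include <stdio.h>

struct hdr { uint8_t cmd; uint8_t len; };

struct small_cmd { struct hdr hdr; uint8_t payload[16]; };
struct huge_cmd  { struct hdr hdr; uint8_t payload[512]; };

/* Copy through a pointer taken from the layout that matches the real buffer
 * size, so a fortified memcpy() sees a large enough destination field. */
static void fill_payload(void *slot, const void *data, size_t len, int huge)
{
	uint8_t *dst = huge ? ((struct huge_cmd *)slot)->payload
			    : ((struct small_cmd *)slot)->payload;

	memcpy(dst, data, len);
}

int main(void)
{
	struct huge_cmd cmd;
	uint8_t data[100] = { 0 };

	fill_payload(&cmd, data, sizeof(data), 1);
	printf("copied %zu bytes\n", sizeof(data));
	return 0;
}
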
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index a7cea0a55b35..0bc32291815e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -429,38 +429,28 @@ out_free:
return ret;
}
-static int iwl_acpi_sar_set_profile(union acpi_object *table,
- struct iwl_sar_profile *profile,
- bool enabled, u8 num_chains,
- u8 num_sub_bands)
+static int
+iwl_acpi_parse_chains_table(union acpi_object *table,
+ struct iwl_sar_profile_chain *chains,
+ u8 num_chains, u8 num_sub_bands)
{
- int i, j, idx = 0;
-
- /*
- * The table from ACPI is flat, but we store it in a
- * structured array.
- */
- for (i = 0; i < BIOS_SAR_MAX_CHAINS_PER_PROFILE; i++) {
- for (j = 0; j < BIOS_SAR_MAX_SUB_BANDS_NUM; j++) {
+ for (u8 chain = 0; chain < num_chains; chain++) {
+ for (u8 subband = 0; subband < BIOS_SAR_MAX_SUB_BANDS_NUM;
+ subband++) {
/* if we don't have the values, use the default */
- if (i >= num_chains || j >= num_sub_bands) {
- profile->chains[i].subbands[j] = 0;
+ if (subband >= num_sub_bands) {
+ chains[chain].subbands[subband] = 0;
+ } else if (table->type != ACPI_TYPE_INTEGER ||
+ table->integer.value > U8_MAX) {
+ return -EINVAL;
} else {
- if (table[idx].type != ACPI_TYPE_INTEGER ||
- table[idx].integer.value > U8_MAX)
- return -EINVAL;
-
- profile->chains[i].subbands[j] =
- table[idx].integer.value;
-
- idx++;
+ chains[chain].subbands[subband] =
+ table->integer.value;
+ table++;
}
}
}
- /* Only if all values were valid can the profile be enabled */
- profile->enabled = enabled;
-
return 0;
}
@@ -543,9 +533,11 @@ read_table:
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
- ret = iwl_acpi_sar_set_profile(table, &fwrt->sar_profiles[0],
- flags & IWL_SAR_ENABLE_MSK,
- num_chains, num_sub_bands);
+ ret = iwl_acpi_parse_chains_table(table, fwrt->sar_profiles[0].chains,
+ num_chains, num_sub_bands);
+ if (!ret && flags & IWL_SAR_ENABLE_MSK)
+ fwrt->sar_profiles[0].enabled = true;
+
out_free:
kfree(data);
return ret;
@@ -557,7 +549,7 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
bool enabled;
int i, n_profiles, tbl_rev, pos;
int ret = 0;
- u8 num_chains, num_sub_bands;
+ u8 num_sub_bands;
data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
if (IS_ERR(data))
@@ -573,7 +565,6 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
goto out_free;
}
- num_chains = ACPI_SAR_NUM_CHAINS_REV2;
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2;
goto read_table;
@@ -589,7 +580,6 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
goto out_free;
}
- num_chains = ACPI_SAR_NUM_CHAINS_REV1;
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1;
goto read_table;
@@ -605,7 +595,6 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
goto out_free;
}
- num_chains = ACPI_SAR_NUM_CHAINS_REV0;
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0;
goto read_table;
@@ -637,23 +626,54 @@ read_table:
/* the tables start at element 3 */
pos = 3;
+ BUILD_BUG_ON(ACPI_SAR_NUM_CHAINS_REV0 != ACPI_SAR_NUM_CHAINS_REV1);
+ BUILD_BUG_ON(ACPI_SAR_NUM_CHAINS_REV2 != 2 * ACPI_SAR_NUM_CHAINS_REV0);
+
+ /* parse non-cdb chains for all profiles */
for (i = 0; i < n_profiles; i++) {
union acpi_object *table = &wifi_pkg->package.elements[pos];
+
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
*/
- ret = iwl_acpi_sar_set_profile(table,
- &fwrt->sar_profiles[i + 1],
- enabled, num_chains,
- num_sub_bands);
+ ret = iwl_acpi_parse_chains_table(table,
+ fwrt->sar_profiles[i + 1].chains,
+ ACPI_SAR_NUM_CHAINS_REV0,
+ num_sub_bands);
if (ret < 0)
- break;
+ goto out_free;
/* go to the next table */
- pos += num_chains * num_sub_bands;
+ pos += ACPI_SAR_NUM_CHAINS_REV0 * num_sub_bands;
}
+ /* non-cdb table revisions */
+ if (tbl_rev < 2)
+ goto set_enabled;
+
+ /* parse cdb chains for all profiles */
+ for (i = 0; i < n_profiles; i++) {
+ struct iwl_sar_profile_chain *chains;
+ union acpi_object *table;
+
+ table = &wifi_pkg->package.elements[pos];
+ chains = &fwrt->sar_profiles[i + 1].chains[ACPI_SAR_NUM_CHAINS_REV0];
+ ret = iwl_acpi_parse_chains_table(table,
+ chains,
+ ACPI_SAR_NUM_CHAINS_REV0,
+ num_sub_bands);
+ if (ret < 0)
+ goto out_free;
+
+ /* go to the next table */
+ pos += ACPI_SAR_NUM_CHAINS_REV0 * num_sub_bands;
+ }
+
+set_enabled:
+ for (i = 0; i < n_profiles; i++)
+ fwrt->sar_profiles[i + 1].enabled = enabled;
+
out_free:
kfree(data);
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index d8b083be5b6b..de87e0e3e072 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -39,10 +39,12 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
+/* Assumes the appropriate lock is held by the caller */
void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt)
{
iwl_fw_suspend_timestamp(fwrt);
- iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START, NULL);
+ iwl_dbg_tlv_time_point_sync(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START,
+ NULL);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 2abfc986701f..c620911a1193 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1413,25 +1413,35 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
const struct iwl_op_mode_ops *ops = op->ops;
struct dentry *dbgfs_dir = NULL;
struct iwl_op_mode *op_mode = NULL;
+ int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
/* also protects start/stop from racing against each other */
lockdep_assert_held(&iwlwifi_opmode_table_mtx);
+ for (retry = 0; retry <= max_retry; retry++) {
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
- drv->dbgfs_op_mode = debugfs_create_dir(op->name,
- drv->dbgfs_drv);
- dbgfs_dir = drv->dbgfs_op_mode;
+ drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+ drv->dbgfs_drv);
+ dbgfs_dir = drv->dbgfs_op_mode;
#endif
- op_mode = ops->start(drv->trans, drv->trans->cfg,
- &drv->fw, dbgfs_dir);
- if (op_mode)
- return op_mode;
+ op_mode = ops->start(drv->trans, drv->trans->cfg,
+ &drv->fw, dbgfs_dir);
+
+ if (op_mode)
+ return op_mode;
+
+ if (test_bit(STATUS_TRANS_DEAD, &drv->trans->status))
+ break;
+
+ IWL_ERR(drv, "retry init count %d\n", retry);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- debugfs_remove_recursive(drv->dbgfs_op_mode);
- drv->dbgfs_op_mode = NULL;
+ debugfs_remove_recursive(drv->dbgfs_op_mode);
+ drv->dbgfs_op_mode = NULL;
#endif
+ }
return NULL;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 1549ff429549..6a1d31892417 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -98,6 +98,9 @@ void iwl_drv_stop(struct iwl_drv *drv);
#define VISIBLE_IF_IWLWIFI_KUNIT static
#endif
+/* max retry for init flow */
+#define IWL_MAX_INIT_RETRY 2
+
#define FW_NAME_PRE_BUFSIZE 64
struct iwl_trans;
const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 49a6aff42376..244ca8cab9d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1398,7 +1398,9 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
iwl_mvm_pause_tcm(mvm, true);
+ mutex_lock(&mvm->mutex);
iwl_fw_runtime_suspend(&mvm->fwrt);
+ mutex_unlock(&mvm->mutex);
return __iwl_mvm_suspend(hw, wowlan, false);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 08546e673cf5..f30b0fc8eca9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1307,8 +1307,8 @@ static void iwl_mvm_disconnect_iterator(void *data, u8 *mac,
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
{
u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
+ u32 status = 0;
int ret;
- u32 resp;
struct iwl_fw_error_recovery_cmd recovery_cmd = {
.flags = cpu_to_le32(flags),
@@ -1316,7 +1316,6 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
};
struct iwl_host_cmd host_cmd = {
.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
- .flags = CMD_WANT_SKB,
.data = {&recovery_cmd, },
.len = {sizeof(recovery_cmd), },
};
@@ -1336,7 +1335,7 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
recovery_cmd.buf_size = cpu_to_le32(error_log_size);
}
- ret = iwl_mvm_send_cmd(mvm, &host_cmd);
+ ret = iwl_mvm_send_cmd_status(mvm, &host_cmd, &status);
kfree(mvm->error_recovery_buf);
mvm->error_recovery_buf = NULL;
@@ -1347,11 +1346,10 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
 /* skb response is only relevant in ERROR_RECOVERY_UPDATE_DB */
if (flags & ERROR_RECOVERY_UPDATE_DB) {
- resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
- if (resp) {
+ if (status) {
IWL_ERR(mvm,
"Failed to send recovery cmd blob was invalid %d\n",
- resp);
+ status);
ieee80211_iterate_interfaces(mvm->hw, 0,
iwl_mvm_disconnect_iterator,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index a327893c6dce..80b9a115245f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1293,12 +1293,14 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
+ int retry, max_retry = 0;
mutex_lock(&mvm->mutex);
/* we are starting the mac not in error flow, and restart is enabled */
if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
iwlwifi_mod_params.fw_restart) {
+ max_retry = IWL_MAX_INIT_RETRY;
/*
* This will prevent mac80211 recovery flows to trigger during
* init failures
@@ -1306,7 +1308,13 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
}
- ret = __iwl_mvm_mac_start(mvm);
+ for (retry = 0; retry <= max_retry; retry++) {
+ ret = __iwl_mvm_mac_start(mvm);
+ if (!ret)
+ break;
+
+ IWL_ERR(mvm, "mac start retry %d\n", retry);
+ }
clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
mutex_unlock(&mvm->mutex);
@@ -1970,7 +1978,6 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvm->p2p_device_vif = NULL;
}
- iwl_mvm_unset_link_mapping(mvm, vif, &vif->bss_conf);
iwl_mvm_mac_ctxt_remove(mvm, vif);
RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);
@@ -1979,6 +1986,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvm->monitor_on = false;
out:
+ iwl_mvm_unset_link_mapping(mvm, vif, &vif->bss_conf);
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC) {
iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.mcast_sta);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index f2378e0fb2fb..e252f0dcea20 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -41,8 +41,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
/* reset deflink MLO parameters */
mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
mvmvif->deflink.active = 0;
- /* the first link always points to the default one */
- mvmvif->link[0] = &mvmvif->deflink;
ret = iwl_mvm_mld_mac_ctxt_add(mvm, vif);
if (ret)
@@ -60,9 +58,19 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
- ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
- if (ret)
- goto out_free_bf;
+	/* We want link[0] to point to the default link, unless we have MLO,
+	 * in which case this will be modified later by .change_vif_links().
+	 * If we are in the restart flow with an MLD connection, we wait for
+	 * .change_vif_links() to set up the links.
+	 */
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+ !ieee80211_vif_is_mld(vif)) {
+ mvmvif->link[0] = &mvmvif->deflink;
+
+ ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+ if (ret)
+ goto out_free_bf;
+ }
/* Save a pointer to p2p device vif, so it can later be used to
* update the p2p device MAC when a GO is started/stopped
@@ -350,11 +358,6 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
rcu_read_unlock();
}
- if (vif->type == NL80211_IFTYPE_STATION)
- iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
- link_conf,
- false);
-
/* then activate */
ret = iwl_mvm_link_changed(mvm, vif, link_conf,
LINK_CONTEXT_MODIFY_ACTIVE |
@@ -363,6 +366,11 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
if (ret)
goto out;
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf,
+ false);
+
/*
* Power state must be updated before quotas,
* otherwise fw will complain.
@@ -1194,7 +1202,11 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- if (old_links == 0) {
+ /* If we're in RESTART flow, the default link wasn't added in
+ * drv_add_interface(), and link[0] doesn't point to it.
+ */
+ if (old_links == 0 && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status)) {
err = iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
if (err)
goto out_err;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 3ce9150213a7..ddcbd80a49fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1774,7 +1774,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
&cp->channel_config[ch_cnt];
u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
- u8 j, k, n_s_ssids = 0, n_bssids = 0;
+ u8 k, n_s_ssids = 0, n_bssids = 0;
u8 max_s_ssids, max_bssids;
bool force_passive = false, found = false, allow_passive = true,
unsolicited_probe_on_chan = false, psc_no_listen = false;
@@ -1799,7 +1799,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
cfg->v5.iter_count = 1;
cfg->v5.iter_interval = 0;
- for (j = 0; j < params->n_6ghz_params; j++) {
+ for (u32 j = 0; j < params->n_6ghz_params; j++) {
s8 tmp_psd_20;
if (!(scan_6ghz_params[j].channel_idx == i))
@@ -1873,7 +1873,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
* SSID.
* TODO: improve this logic
*/
- for (j = 0; j < params->n_6ghz_params; j++) {
+ for (u32 j = 0; j < params->n_6ghz_params; j++) {
if (!(scan_6ghz_params[j].channel_idx == i))
continue;
diff --git a/drivers/net/wireless/marvell/libertas/radiotap.h b/drivers/net/wireless/marvell/libertas/radiotap.h
index 1ed5608d353f..d543bfe739dc 100644
--- a/drivers/net/wireless/marvell/libertas/radiotap.h
+++ b/drivers/net/wireless/marvell/libertas/radiotap.h
@@ -2,7 +2,7 @@
#include <net/ieee80211_radiotap.h>
struct tx_radiotap_hdr {
- struct ieee80211_radiotap_header hdr;
+ struct ieee80211_radiotap_header_fixed hdr;
u8 rate;
u8 txpower;
u8 rts_retries;
@@ -31,7 +31,7 @@ struct tx_radiotap_hdr {
#define IEEE80211_FC_DSTODS 0x0300
struct rx_radiotap_hdr {
- struct ieee80211_radiotap_header hdr;
+ struct ieee80211_radiotap_header_fixed hdr;
u8 flags;
u8 rate;
u8 antsignal;
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
index 98da82b74094..3353012e8542 100644
--- a/drivers/net/wireless/mediatek/mt76/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -84,13 +84,16 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
mutex_lock(&dev->mcu.mutex);
if (dev->mcu_ops->mcu_skb_prepare_msg) {
+ orig_skb = skb;
ret = dev->mcu_ops->mcu_skb_prepare_msg(dev, skb, cmd, &seq);
if (ret < 0)
goto out;
}
retry:
- orig_skb = skb_get(skb);
+ /* orig skb might be needed for retry, mcu_skb_send_msg consumes it */
+ if (orig_skb)
+ skb_get(orig_skb);
ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, &seq);
if (ret < 0)
goto out;
@@ -105,7 +108,7 @@ retry:
do {
skb = mt76_mcu_get_response(dev, expires);
if (!skb && !test_bit(MT76_MCU_RESET, &dev->phy.state) &&
- retry++ < dev->mcu_ops->max_retry) {
+ orig_skb && retry++ < dev->mcu_ops->max_retry) {
dev_err(dev->dev, "Retry message %08x (seq %d)\n",
cmd, seq);
skb = orig_skb;
diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c
index 03b7229a0ff5..c3d27aaec297 100644
--- a/drivers/net/wireless/microchip/wilc1000/mon.c
+++ b/drivers/net/wireless/microchip/wilc1000/mon.c
@@ -7,12 +7,12 @@
#include "cfg80211.h"
struct wilc_wfi_radiotap_hdr {
- struct ieee80211_radiotap_header hdr;
+ struct ieee80211_radiotap_header_fixed hdr;
u8 rate;
} __packed;
struct wilc_wfi_radiotap_cb_hdr {
- struct ieee80211_radiotap_header hdr;
+ struct ieee80211_radiotap_header_fixed hdr;
u8 rate;
u8 dump;
u16 tx_flags;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c
index d069a81ac617..cc699efa9c79 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c
@@ -352,7 +352,6 @@ static const struct usb_device_id rtl8192d_usb_ids[] = {
{RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8194, rtl92du_hal_cfg)},
{RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8111, rtl92du_hal_cfg)},
{RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x0193, rtl92du_hal_cfg)},
- {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8171, rtl92du_hal_cfg)},
{RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0xe194, rtl92du_hal_cfg)},
{RTL_USB_DEVICE(0x2019, 0xab2c, rtl92du_hal_cfg)},
{RTL_USB_DEVICE(0x2019, 0xab2d, rtl92du_hal_cfg)},
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index e83ab6fb83f5..b17a429bcd29 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -771,7 +771,6 @@ static void rtw_usb_dynamic_rx_agg_v1(struct rtw_dev *rtwdev, bool enable)
u8 size, timeout;
u16 val16;
- rtw_write32_set(rtwdev, REG_RXDMA_AGG_PG_TH, BIT_EN_PRE_CALC);
rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);
rtw_write8_clr(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7));
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index df51b29142aa..8d27374db83c 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -6445,6 +6445,8 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
/* todo DBCC related event */
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] wl_info phy_now=%d\n", phy_now);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC] rlink cnt_2g=%d cnt_5g=%d\n", cnt_2g, cnt_5g);
if (wl_rinfo->dbcc_en != rtwdev->dbcc_en) {
wl_rinfo->dbcc_chg = 1;
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 02afeb3acce4..5aef7fa37878 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -3026,24 +3026,54 @@ static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
pci_disable_device(pdev);
}
-static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev)
+static bool rtw89_pci_chip_is_manual_dac(struct rtw89_dev *rtwdev)
{
- struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_chip_info *chip = rtwdev->chip;
- if (!rtwpci->enable_dac)
- return;
-
switch (chip->chip_id) {
case RTL8852A:
case RTL8852B:
case RTL8851B:
case RTL8852BT:
- break;
+ return true;
default:
- return;
+ return false;
+ }
+}
+
+static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *bridge = pci_upstream_bridge(rtwpci->pdev);
+
+ if (!rtw89_pci_chip_is_manual_dac(rtwdev))
+ return true;
+
+ if (!bridge)
+ return false;
+
+ switch (bridge->vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ return true;
+ case PCI_VENDOR_ID_ASMEDIA:
+ if (bridge->device == 0x2806)
+ return true;
+ break;
}
+ return false;
+}
+
+static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ if (!rtwpci->enable_dac)
+ return;
+
+ if (!rtw89_pci_chip_is_manual_dac(rtwdev))
+ return;
+
rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS);
}
@@ -3061,6 +3091,9 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
goto err;
}
+ if (!rtw89_pci_is_dac_compatible_bridge(rtwdev))
+ goto no_dac;
+
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
if (!ret) {
rtwpci->enable_dac = true;
@@ -3073,6 +3106,7 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
goto err_release_regions;
}
}
+no_dac:
resource_len = pci_resource_len(pdev, bar_id);
rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index f0e528abb1b4..3f424f14de4e 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -763,7 +763,7 @@ static const struct rhashtable_params hwsim_rht_params = {
};
struct hwsim_radiotap_hdr {
- struct ieee80211_radiotap_header hdr;
+ struct ieee80211_radiotap_header_fixed hdr;
__le64 rt_tsft;
u8 rt_flags;
u8 rt_rate;
@@ -772,7 +772,7 @@ struct hwsim_radiotap_hdr {
} __packed;
struct hwsim_radiotap_ack_hdr {
- struct ieee80211_radiotap_header hdr;
+ struct ieee80211_radiotap_header_fixed hdr;
u8 rt_flags;
u8 pad;
__le16 rt_channel;
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 17431f1b1a0c..65a7ed4d6766 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -1038,7 +1038,7 @@ static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = {
static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = {
.kind = "wwan",
- .maxtype = __IFLA_WWAN_MAX,
+ .maxtype = IFLA_WWAN_MAX,
.alloc = wwan_rtnl_alloc,
.validate = wwan_rtnl_validate,
.newlink = wwan_rtnl_newlink,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 84cb859a911d..b149b638453f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -92,6 +92,17 @@ MODULE_PARM_DESC(apst_secondary_latency_tol_us,
"secondary APST latency tolerance in us");
/*
+ * Older kernels didn't enable protection information if it was at an offset.
+ * Newer kernels do, which breaks reads after an upgrade if such a format was
+ * used by a prior kernel, since the metadata that was written does not
+ * contain a valid checksum.
+ */
+static bool disable_pi_offsets = false;
+module_param(disable_pi_offsets, bool, 0444);
+MODULE_PARM_DESC(disable_pi_offsets,
+ "disable protection information if it has an offset");
+
+/*
* nvme_wq - hosts nvme related works that are not reset or delete
* nvme_reset_wq - hosts nvme reset works
* nvme_delete_wq - hosts nvme delete works
@@ -1390,17 +1401,30 @@ static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
nvme_start_keep_alive(ctrl);
}
-/*
- * In NVMe 1.0 the CNS field was just a binary controller or namespace
- * flag, thus sending any new CNS opcodes has a big chance of not working.
- * Qemu unfortunately had that bug after reporting a 1.1 version compliance
- * (but not for any later version).
- */
-static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
+static bool nvme_id_cns_ok(struct nvme_ctrl *ctrl, u8 cns)
{
- if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
- return ctrl->vs < NVME_VS(1, 2, 0);
- return ctrl->vs < NVME_VS(1, 1, 0);
+ /*
+ * The CNS field occupies a full byte starting with NVMe 1.2
+ */
+ if (ctrl->vs >= NVME_VS(1, 2, 0))
+ return true;
+
+ /*
+ * NVMe 1.1 expanded the CNS value to two bits, which means values
+ * larger than that could get truncated and treated as an incorrect
+ * value.
+ *
+ * Qemu implemented 1.0 behavior for controllers claiming 1.1
+ * compliance, so they need to be quirked here.
+ */
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+ !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS))
+ return cns <= 3;
+
+ /*
+ * NVMe 1.0 used a single bit for the CNS value.
+ */
+ return cns <= 1;
}
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
@@ -1913,8 +1937,12 @@ static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
if (head->pi_size && head->ms >= head->pi_size)
head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
- if (!(id->dps & NVME_NS_DPS_PI_FIRST))
- info->pi_offset = head->ms - head->pi_size;
+ if (!(id->dps & NVME_NS_DPS_PI_FIRST)) {
+ if (disable_pi_offsets)
+ head->pi_type = 0;
+ else
+ info->pi_offset = head->ms - head->pi_size;
+ }
if (ctrl->ops->flags & NVME_F_FABRICS) {
/*
@@ -3104,7 +3132,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
ctrl->max_zeroes_sectors = 0;
if (ctrl->subsys->subtype != NVME_NQN_NVME ||
- nvme_ctrl_limited_cns(ctrl) ||
+ !nvme_id_cns_ok(ctrl, NVME_ID_CNS_CS_CTRL) ||
test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
return 0;
@@ -4200,7 +4228,7 @@ static void nvme_scan_work(struct work_struct *work)
}
mutex_lock(&ctrl->scan_lock);
- if (nvme_ctrl_limited_cns(ctrl)) {
+ if (!nvme_id_cns_ok(ctrl, NVME_ID_CNS_NS_ACTIVE_LIST)) {
nvme_scan_ns_sequential(ctrl);
} else {
/*
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index b9b79ccfabf8..a96976b22fa7 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -421,10 +421,13 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
struct io_uring_cmd *ioucmd = req->end_io_data;
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
pdu->status = -EINTR;
- else
+ } else {
pdu->status = nvme_req(req)->status;
+ if (!pdu->status)
+ pdu->status = blk_status_to_errno(err);
+ }
pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
/*
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 29f8639cfe7f..b47d675232d2 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -115,6 +115,7 @@ int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
pr_debug("%s: ctrl %d failed to generate private key, err %d\n",
__func__, ctrl->cntlid, ret);
kfree_sensitive(ctrl->dh_key);
+ ctrl->dh_key = NULL;
return ret;
}
ctrl->dh_keysize = crypto_kpp_maxsize(ctrl->dh_tfm);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7d85c04fbba2..225a6cd2e9ca 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1067,8 +1067,15 @@ static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
static void pci_enable_acs(struct pci_dev *dev)
{
struct pci_acs caps;
+ bool enable_acs = false;
int pos;
+ /* If an iommu is present we start with kernel default caps */
+ if (pci_acs_enable) {
+ if (pci_dev_specific_enable_acs(dev))
+ enable_acs = true;
+ }
+
pos = dev->acs_cap;
if (!pos)
return;
@@ -1077,11 +1084,8 @@ static void pci_enable_acs(struct pci_dev *dev)
pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
caps.fw_ctrl = caps.ctrl;
- /* If an iommu is present we start with kernel default caps */
- if (pci_acs_enable) {
- if (pci_dev_specific_enable_acs(dev))
- pci_std_enable_acs(dev, &caps);
- }
+ if (enable_acs)
+ pci_std_enable_acs(dev, &caps);
/*
* Always apply caps from the command line, even if there is no iommu.
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 4f68414c3086..f1615805f5b0 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -3105,7 +3105,9 @@ int pci_host_probe(struct pci_host_bridge *bridge)
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
+ pci_lock_rescan_remove();
pci_bus_add_devices(bus);
+ pci_unlock_rescan_remove();
return 0;
}
EXPORT_SYMBOL_GPL(pci_host_probe);
diff --git a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
index a23a4312574b..0e6bd47671c2 100644
--- a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
+++ b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
@@ -6,9 +6,9 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/pci-pwrctl.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/pwrseq/consumer.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -18,6 +18,40 @@ struct pci_pwrctl_pwrseq_data {
struct pwrseq_desc *pwrseq;
};
+struct pci_pwrctl_pwrseq_pdata {
+ const char *target;
+ /*
+ * Called before doing anything else to perform device-specific
+	 * verification before requesting the power sequencing handle.
+ */
+ int (*validate_device)(struct device *dev);
+};
+
+static int pci_pwrctl_pwrseq_qcm_wcn_validate_device(struct device *dev)
+{
+ /*
+ * Old device trees for some platforms already define wifi nodes for
+ * the WCN family of chips since before power sequencing was added
+ * upstream.
+ *
+ * These nodes don't consume the regulator outputs from the PMU, and
+	 * if we allow this driver to bind to one of these "incomplete" nodes,
+	 * we'll see a kernel log error about an indefinite probe deferral.
+ *
+ * Check the existence of the regulator supply that exists on all
+ * WCN models before moving forward.
+ */
+ if (!device_property_present(dev, "vddaon-supply"))
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct pci_pwrctl_pwrseq_pdata pci_pwrctl_pwrseq_qcom_wcn_pdata = {
+ .target = "wlan",
+ .validate_device = pci_pwrctl_pwrseq_qcm_wcn_validate_device,
+};
+
static void devm_pci_pwrctl_pwrseq_power_off(void *data)
{
struct pwrseq_desc *pwrseq = data;
@@ -27,15 +61,26 @@ static void devm_pci_pwrctl_pwrseq_power_off(void *data)
static int pci_pwrctl_pwrseq_probe(struct platform_device *pdev)
{
+ const struct pci_pwrctl_pwrseq_pdata *pdata;
struct pci_pwrctl_pwrseq_data *data;
struct device *dev = &pdev->dev;
int ret;
+ pdata = device_get_match_data(dev);
+ if (!pdata || !pdata->target)
+ return -EINVAL;
+
+ if (pdata->validate_device) {
+ ret = pdata->validate_device(dev);
+ if (ret)
+ return ret;
+ }
+
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->pwrseq = devm_pwrseq_get(dev, of_device_get_match_data(dev));
+ data->pwrseq = devm_pwrseq_get(dev, pdata->target);
if (IS_ERR(data->pwrseq))
return dev_err_probe(dev, PTR_ERR(data->pwrseq),
"Failed to get the power sequencer\n");
@@ -64,17 +109,17 @@ static const struct of_device_id pci_pwrctl_pwrseq_of_match[] = {
{
/* ATH11K in QCA6390 package. */
.compatible = "pci17cb,1101",
- .data = "wlan",
+ .data = &pci_pwrctl_pwrseq_qcom_wcn_pdata,
},
{
/* ATH11K in WCN6855 package. */
.compatible = "pci17cb,1103",
- .data = "wlan",
+ .data = &pci_pwrctl_pwrseq_qcom_wcn_pdata,
},
{
/* ATH12K in WCN7850 package. */
.compatible = "pci17cb,1107",
- .data = "wlan",
+ .data = &pci_pwrctl_pwrseq_qcom_wcn_pdata,
},
{ }
};
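
The conversion above replaces a bare string in .data with a per-device struct that can also carry an optional validation callback. A stripped-down sketch of the same device_get_match_data() pattern, using hypothetical names (my_pdata, my_probe and the compatible string are illustrative only):

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct my_pdata {
	const char *target;
	int (*validate_device)(struct device *dev);
};

static const struct my_pdata my_wlan_pdata = {
	.target = "wlan",
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_pdata *pdata;
	int ret;

	/* Returns the .data pointer of the matching of_device_id entry. */
	pdata = device_get_match_data(&pdev->dev);
	if (!pdata || !pdata->target)
		return -EINVAL;

	if (pdata->validate_device) {
		ret = pdata->validate_device(&pdev->dev);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,example-device", .data = &my_wlan_pdata },
	{ }
};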
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
index 4c10cafded4e..950b7ae1d1a8 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
@@ -153,7 +153,9 @@ static void xhci_soft_reset(struct brcm_usb_init_params *params,
} else {
USB_CTRL_SET(ctrl, USB_PM, XHC_SOFT_RESETB);
/* Required for COMMONONN to be set */
- USB_XHCI_GBL_UNSET(xhci_gbl, GUSB2PHYCFG, U2_FREECLK_EXISTS);
+ if (params->supported_port_modes != USB_CTLR_MODE_DRD)
+ USB_XHCI_GBL_UNSET(xhci_gbl, GUSB2PHYCFG,
+ U2_FREECLK_EXISTS);
}
}
@@ -328,8 +330,12 @@ static void usb_init_common_7216(struct brcm_usb_init_params *params)
/* 1 millisecond - for USB clocks to settle down */
usleep_range(1000, 2000);
- /* Disable PHY when port is suspended */
- USB_CTRL_SET(ctrl, P0_U2PHY_CFG1, COMMONONN);
+ /*
+ * Disable PHY when port is suspended
+ * Does not work in DRD mode
+ */
+ if (params->supported_port_modes != USB_CTLR_MODE_DRD)
+ USB_CTRL_SET(ctrl, P0_U2PHY_CFG1, COMMONONN);
usb_wake_enable_7216(params, false);
usb_init_common(params);
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 39536b6d96a9..5ebb3a616115 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -220,6 +220,8 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
0, /* USB_CTRL_SETUP_SCB2_EN_MASK */
0, /* USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK */
0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK */
+ 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK */
0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */
0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index aeec6eb6be23..dfc4f55d112e 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -174,8 +174,9 @@
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151
#define SIERRA_DEQ_TAU_CTRL3_PREG 0x152
-#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x158
+#define SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG 0x158
#define SIERRA_DEQ_CONCUR_EPIOFFSET_MODE_PREG 0x159
+#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x15C
#define SIERRA_DEQ_PICTRL_PREG 0x161
#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170
#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171
@@ -1733,7 +1734,7 @@ static const struct cdns_reg_pairs ml_pcie_100_no_ssc_ln_regs[] = {
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
- {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
@@ -1797,7 +1798,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_no_ssc_ln_regs[] = {
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
- {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
@@ -1874,7 +1875,7 @@ static const struct cdns_reg_pairs ml_pcie_100_int_ssc_ln_regs[] = {
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
- {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
@@ -1941,7 +1942,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_int_ssc_ln_regs[] = {
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
- {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
@@ -2012,7 +2013,7 @@ static const struct cdns_reg_pairs ml_pcie_100_ext_ssc_ln_regs[] = {
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
- {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
@@ -2079,7 +2080,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_ext_ssc_ln_regs[] = {
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
- {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
@@ -2140,7 +2141,7 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_no_ssc[] = {
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
- {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
@@ -2215,7 +2216,7 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_int_ssc[] = {
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
- {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
@@ -2284,7 +2285,7 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
{0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
{0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
{0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
- {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG},
{0x002B, SIERRA_CPI_TRIM_PREG},
{0x0003, SIERRA_EPI_CTRL_PREG},
{0x803F, SIERRA_SDFILT_H2L_A_PREG},
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index 11fcb1867118..e98361dcdead 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -141,11 +141,6 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
IMX8MM_GPR_PCIE_REF_CLK_PLL);
usleep_range(100, 200);
- /* Do the PHY common block reset */
- regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
- IMX8MM_GPR_PCIE_CMN_RST,
- IMX8MM_GPR_PCIE_CMN_RST);
-
switch (imx8_phy->drvdata->variant) {
case IMX8MP:
reset_control_deassert(imx8_phy->perst);
@@ -156,6 +151,11 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
break;
}
+ /* Do the PHY common block reset */
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_CMN_RST,
+ IMX8MM_GPR_PCIE_CMN_RST);
+
/* Polling to check the phy is ready or not. */
ret = readl_poll_timeout(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG075,
val, val == ANA_PLL_DONE, 10, 20000);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index a8adc3214bfe..643045c9024e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -3673,6 +3673,7 @@ static int qmp_combo_probe(struct platform_device *pdev)
return -ENOMEM;
qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
qmp->orientation = TYPEC_ORIENTATION_NORMAL;
@@ -3749,8 +3750,6 @@ static int qmp_combo_probe(struct platform_device *pdev)
phy_set_drvdata(qmp->dp_phy, qmp);
- dev_set_drvdata(dev, qmp);
-
if (usb_np == dev->of_node)
phy_provider = devm_of_phy_provider_register(dev, qmp_combo_phy_xlate);
else
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index f71787fb4d7e..36aaac34e6c6 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -3661,8 +3661,8 @@ static const struct qmp_phy_cfg x1e80100_qmp_gen4x2_pciephy_cfg = {
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
- .vreg_list = sm8550_qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = pciephy_v6_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
@@ -3695,8 +3695,8 @@ static const struct qmp_phy_cfg x1e80100_qmp_gen4x4_pciephy_cfg = {
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
- .vreg_list = sm8550_qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = pciephy_v6_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c
index 6d0ba39c1943..8bf951b0490c 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c
@@ -1248,6 +1248,7 @@ static int qmp_usb_legacy_probe(struct platform_device *pdev)
return -ENOMEM;
qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
qmp->cfg = of_device_get_match_data(dev);
if (!qmp->cfg)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 2fd49355aa37..1246d3bc8b92 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -2179,6 +2179,7 @@ static int qmp_usb_probe(struct platform_device *pdev)
return -ENOMEM;
qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
qmp->cfg = of_device_get_match_data(dev);
if (!qmp->cfg)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
index d4fa1063ea61..cf12a6f12134 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
@@ -1050,6 +1050,7 @@ static int qmp_usbc_probe(struct platform_device *pdev)
return -ENOMEM;
qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
qmp->orientation = TYPEC_ORIENTATION_NORMAL;
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index 490263375057..2f7a05f21dc5 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -86,6 +86,7 @@ config PHY_ROCKCHIP_PCIE
config PHY_ROCKCHIP_SAMSUNG_HDPTX
tristate "Rockchip Samsung HDMI/eDP Combo PHY driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ depends on COMMON_CLK
depends on HAS_IOMEM
select GENERIC_PHY
select MFD_SYSCON
diff --git a/drivers/phy/starfive/phy-jh7110-usb.c b/drivers/phy/starfive/phy-jh7110-usb.c
index 633912f8a05d..cb5454fbe2c8 100644
--- a/drivers/phy/starfive/phy-jh7110-usb.c
+++ b/drivers/phy/starfive/phy-jh7110-usb.c
@@ -10,18 +10,24 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/usb/of.h>
#define USB_125M_CLK_RATE 125000000
#define USB_LS_KEEPALIVE_OFF 0x4
#define USB_LS_KEEPALIVE_ENABLE BIT(4)
+#define USB_PDRSTN_SPLIT BIT(17)
+#define SYSCON_USB_SPLIT_OFFSET 0x18
+
struct jh7110_usb2_phy {
struct phy *phy;
void __iomem *regs;
+ struct regmap *sys_syscon;
struct clk *usb_125m_clk;
struct clk *app_125m;
enum phy_mode mode;
@@ -61,6 +67,10 @@ static int usb2_phy_set_mode(struct phy *_phy,
usb2_set_ls_keepalive(phy, (mode != PHY_MODE_USB_DEVICE));
}
+ /* Connect usb 2.0 phy mode */
+ regmap_update_bits(phy->sys_syscon, SYSCON_USB_SPLIT_OFFSET,
+ USB_PDRSTN_SPLIT, USB_PDRSTN_SPLIT);
+
return 0;
}
@@ -129,6 +139,12 @@ static int jh7110_usb_phy_probe(struct platform_device *pdev)
phy_set_drvdata(phy->phy, phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ phy->sys_syscon =
+ syscon_regmap_lookup_by_compatible("starfive,jh7110-sys-syscon");
+ if (IS_ERR(phy->sys_syscon))
+ return dev_err_probe(dev, PTR_ERR(phy->sys_syscon),
+ "Failed to get sys-syscon\n");
+
return PTR_ERR_OR_ZERO(phy_provider);
}
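
The probe change above fetches the system-controller regmap by compatible, and the mode hook then flips a single bit in it. A minimal sketch of that syscon pattern, with the compatible string, offset and bit taken from the hunk and the wrapper name (my_set_split) being illustrative:

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#define SYSCON_USB_SPLIT_OFFSET	0x18
#define USB_PDRSTN_SPLIT	BIT(17)

static int my_set_split(void)
{
	struct regmap *sys;

	sys = syscon_regmap_lookup_by_compatible("starfive,jh7110-sys-syscon");
	if (IS_ERR(sys))
		return PTR_ERR(sys);

	/* Read-modify-write only the split-mode bit. */
	return regmap_update_bits(sys, SYSCON_USB_SPLIT_OFFSET,
				  USB_PDRSTN_SPLIT, USB_PDRSTN_SPLIT);
}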
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index cfdb54b6070a..342f5ccf611d 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -699,6 +699,8 @@ static int tegra_xusb_setup_usb_role_switch(struct tegra_xusb_port *port)
return -ENOMEM;
lane = tegra_xusb_find_lane(port->padctl, "usb2", port->index);
+ if (IS_ERR(lane))
+ return PTR_ERR(lane);
/*
* Assign phy dev to usb-phy dev. Host/device drivers can use phy
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index a6c0c5607ffd..c6e846d385d2 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -450,8 +450,8 @@ static int wiz_mode_select(struct wiz *wiz)
} else if (wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
ret = regmap_field_write(wiz->p0_mac_src_sel[i], 0x3);
ret = regmap_field_write(wiz->p0_rxfclk_sel[i], 0x3);
- ret = regmap_field_write(wiz->p0_refclk_sel[i], 0x3);
- mode = LANE_MODE_GEN1;
+ ret = regmap_field_write(wiz->p0_refclk_sel[i], 0x2);
+ mode = LANE_MODE_GEN2;
} else {
continue;
}
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 7a48220b4f5a..abdca3f05c5c 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -3908,6 +3908,16 @@ static int platform_profile_setup(struct asus_wmi *asus)
if (!asus->throttle_thermal_policy_dev)
return 0;
+ /*
+ * We need to set the default thermal profile during probe or otherwise
+ * the system will often remain in silent mode, causing low performance.
+ */
+ err = throttle_thermal_policy_set_default(asus);
+ if (err < 0) {
+ pr_warn("Failed to set default thermal profile\n");
+ return err;
+ }
+
dev_info(dev, "Using throttle_thermal_policy for platform_profile support\n");
asus->platform_profile_handler.profile_get = asus_wmi_platform_profile_get;
diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c
index 502783a7adb1..24fd7ffadda9 100644
--- a/drivers/platform/x86/dell/dell-wmi-base.c
+++ b/drivers/platform/x86/dell/dell-wmi-base.c
@@ -264,6 +264,15 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = {
/*Speaker Mute*/
{ KE_KEY, 0x109, { KEY_MUTE} },
+ /* S2Idle screen off */
+ { KE_IGNORE, 0x120, { KEY_RESERVED }},
+
+ /* Leaving S4 or S2Idle suspend */
+ { KE_IGNORE, 0x130, { KEY_RESERVED }},
+
+ /* Entering S2Idle suspend */
+ { KE_IGNORE, 0x140, { KEY_RESERVED }},
+
/* Mic mute */
{ KE_KEY, 0x150, { KEY_MICMUTE } },
diff --git a/drivers/platform/x86/intel/pmc/adl.c b/drivers/platform/x86/intel/pmc/adl.c
index 9d9c07f44ff6..e7878558fd90 100644
--- a/drivers/platform/x86/intel/pmc/adl.c
+++ b/drivers/platform/x86/intel/pmc/adl.c
@@ -295,8 +295,6 @@ const struct pmc_reg_map adl_reg_map = {
.ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
- .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET,
- .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE,
.ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED,
.lpm_num_modes = ADL_LPM_NUM_MODES,
.lpm_num_maps = ADL_LPM_NUM_MAPS,
diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c
index 513c02670c5a..dd72974bf71e 100644
--- a/drivers/platform/x86/intel/pmc/cnp.c
+++ b/drivers/platform/x86/intel/pmc/cnp.c
@@ -200,8 +200,6 @@ const struct pmc_reg_map cnp_reg_map = {
.ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
- .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET,
- .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE,
.ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
.etr3_offset = ETR3_OFFSET,
};
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index ecb47f8b4f83..4e9c8c96c8cc 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -11,7 +11,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/acpi_pmtmr.h>
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
@@ -1258,39 +1257,6 @@ static bool pmc_core_is_pson_residency_enabled(struct pmc_dev *pmcdev)
return val == 1;
}
-/*
- * Enable or disable ACPI PM Timer
- *
- * This function is intended to be a callback for ACPI PM suspend/resume event.
- * The ACPI PM Timer is enabled on resume only if it was enabled during suspend.
- */
-static void pmc_core_acpi_pm_timer_suspend_resume(void *data, bool suspend)
-{
- struct pmc_dev *pmcdev = data;
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
- const struct pmc_reg_map *map = pmc->map;
- bool enabled;
- u32 reg;
-
- if (!map->acpi_pm_tmr_ctl_offset)
- return;
-
- guard(mutex)(&pmcdev->lock);
-
- if (!suspend && !pmcdev->enable_acpi_pm_timer_on_resume)
- return;
-
- reg = pmc_core_reg_read(pmc, map->acpi_pm_tmr_ctl_offset);
- enabled = !(reg & map->acpi_pm_tmr_disable_bit);
- if (suspend)
- reg |= map->acpi_pm_tmr_disable_bit;
- else
- reg &= ~map->acpi_pm_tmr_disable_bit;
- pmc_core_reg_write(pmc, map->acpi_pm_tmr_ctl_offset, reg);
-
- pmcdev->enable_acpi_pm_timer_on_resume = suspend && enabled;
-}
-
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
debugfs_remove_recursive(pmcdev->dbgfs_dir);
@@ -1486,7 +1452,6 @@ static int pmc_core_probe(struct platform_device *pdev)
struct pmc_dev *pmcdev;
const struct x86_cpu_id *cpu_id;
int (*core_init)(struct pmc_dev *pmcdev);
- const struct pmc_reg_map *map;
struct pmc *primary_pmc;
int ret;
@@ -1545,11 +1510,6 @@ static int pmc_core_probe(struct platform_device *pdev)
pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) *
pmc_core_adjust_slp_s0_step(primary_pmc, 1));
- map = primary_pmc->map;
- if (map->acpi_pm_tmr_ctl_offset)
- acpi_pmtmr_register_suspend_resume_callback(pmc_core_acpi_pm_timer_suspend_resume,
- pmcdev);
-
device_initialized = true;
dev_info(&pdev->dev, " initialized\n");
@@ -1559,12 +1519,6 @@ static int pmc_core_probe(struct platform_device *pdev)
static void pmc_core_remove(struct platform_device *pdev)
{
struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
- const struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
- const struct pmc_reg_map *map = pmc->map;
-
- if (map->acpi_pm_tmr_ctl_offset)
- acpi_pmtmr_unregister_suspend_resume_callback();
-
pmc_core_dbgfs_unregister(pmcdev);
pmc_core_clean_structure(pdev);
}
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index 75fd593a7b0f..b9d3291d0bf2 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -68,8 +68,6 @@ struct telem_endpoint;
#define SPT_PMC_LTR_SCC 0x3A0
#define SPT_PMC_LTR_ISH 0x3A4
-#define SPT_PMC_ACPI_PM_TMR_CTL_OFFSET 0x18FC
-
/* Sunrise Point: PGD PFET Enable Ack Status Registers */
enum ppfear_regs {
SPT_PMC_XRAM_PPFEAR0A = 0x590,
@@ -150,8 +148,6 @@ enum ppfear_regs {
#define SPT_PMC_VRIC1_SLPS0LVEN BIT(13)
#define SPT_PMC_VRIC1_XTALSDQDIS BIT(22)
-#define SPT_PMC_BIT_ACPI_PM_TMR_DISABLE BIT(1)
-
/* Cannonlake Power Management Controller register offsets */
#define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4
#define CNP_PMC_PM_CFG_OFFSET 0x1818
@@ -355,8 +351,6 @@ struct pmc_reg_map {
const u8 *lpm_reg_index;
const u32 pson_residency_offset;
const u32 pson_residency_counter_step;
- const u32 acpi_pm_tmr_ctl_offset;
- const u32 acpi_pm_tmr_disable_bit;
};
/**
@@ -432,8 +426,6 @@ struct pmc_dev {
u32 die_c6_offset;
struct telem_endpoint *punit_ep;
struct pmc_info *regmap_list;
-
- bool enable_acpi_pm_timer_on_resume;
};
enum pmc_index {
diff --git a/drivers/platform/x86/intel/pmc/core_ssram.c b/drivers/platform/x86/intel/pmc/core_ssram.c
index c259c96b7dfd..8504154b649f 100644
--- a/drivers/platform/x86/intel/pmc/core_ssram.c
+++ b/drivers/platform/x86/intel/pmc/core_ssram.c
@@ -29,7 +29,7 @@
#define LPM_REG_COUNT 28
#define LPM_MODE_OFFSET 1
-DEFINE_FREE(pmc_core_iounmap, void __iomem *, iounmap(_T));
+DEFINE_FREE(pmc_core_iounmap, void __iomem *, if (_T) iounmap(_T))
static u32 pmc_core_find_guid(struct pmc_info *list, const struct pmc_reg_map *map)
{
@@ -262,6 +262,8 @@ pmc_core_ssram_get_pmc(struct pmc_dev *pmcdev, int pmc_idx, u32 offset)
ssram_base = ssram_pcidev->resource[0].start;
tmp_ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
+ if (!tmp_ssram)
+ return -ENOMEM;
if (pmc_idx != PMC_IDX_MAIN) {
/*
diff --git a/drivers/platform/x86/intel/pmc/icl.c b/drivers/platform/x86/intel/pmc/icl.c
index cbbd44054468..71b0fd6cb7d8 100644
--- a/drivers/platform/x86/intel/pmc/icl.c
+++ b/drivers/platform/x86/intel/pmc/icl.c
@@ -46,8 +46,6 @@ const struct pmc_reg_map icl_reg_map = {
.ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
- .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET,
- .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE,
.ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
.etr3_offset = ETR3_OFFSET,
};
diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c
index 91f2fa728f5c..c7d15d864039 100644
--- a/drivers/platform/x86/intel/pmc/mtl.c
+++ b/drivers/platform/x86/intel/pmc/mtl.c
@@ -462,8 +462,6 @@ const struct pmc_reg_map mtl_socm_reg_map = {
.ppfear_buckets = MTL_SOCM_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
- .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET,
- .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE,
.lpm_num_maps = ADL_LPM_NUM_MAPS,
.ltr_ignore_max = MTL_SOCM_NUM_IP_IGN_ALLOWED,
.lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c
index 371b4e30f142..e0580de18077 100644
--- a/drivers/platform/x86/intel/pmc/tgl.c
+++ b/drivers/platform/x86/intel/pmc/tgl.c
@@ -197,8 +197,6 @@ const struct pmc_reg_map tgl_reg_map = {
.ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
- .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET,
- .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE,
.ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
.lpm_num_maps = TGL_LPM_NUM_MAPS,
.lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c
index f40bce8176df..d1dff6ccab12 100644
--- a/drivers/powercap/dtpm_devfreq.c
+++ b/drivers/powercap/dtpm_devfreq.c
@@ -178,7 +178,7 @@ static int __dtpm_devfreq_setup(struct devfreq *devfreq, struct dtpm *parent)
ret = dev_pm_qos_add_request(dev, &dtpm_devfreq->qos_req,
DEV_PM_QOS_MAX_FREQUENCY,
PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
- if (ret) {
+ if (ret < 0) {
pr_err("Failed to add QoS request: %d\n", ret);
goto out_dtpm_unregister;
}
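
The one-liner above matters because dev_pm_qos_add_request() can legitimately return a positive value on success (it reports whether the aggregate constraint changed), so only negative values are errors. A short sketch of the check under that assumption (the wrapper name is illustrative):

#include <linux/device.h>
#include <linux/pm_qos.h>

static int my_add_max_freq_request(struct device *dev,
				   struct dev_pm_qos_request *req)
{
	int ret;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MAX_FREQUENCY,
				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
	/* 0 and 1 are both success; only negative values are errors. */
	if (ret < 0)
		return ret;

	return 0;
}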
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 0b2f29006908..d3af1dfa3c7d 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -1440,14 +1440,18 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
goto unlock;
ret = wait_event_timeout(channel->intent_req_wq,
- READ_ONCE(channel->intent_req_result) >= 0 &&
- READ_ONCE(channel->intent_received),
+ READ_ONCE(channel->intent_req_result) == 0 ||
+ (READ_ONCE(channel->intent_req_result) > 0 &&
+ READ_ONCE(channel->intent_received)) ||
+ glink->abort_tx,
10 * HZ);
if (!ret) {
dev_err(glink->dev, "intent request timed out\n");
ret = -ETIMEDOUT;
+ } else if (glink->abort_tx) {
+ ret = -ECANCELED;
} else {
- ret = READ_ONCE(channel->intent_req_result) ? 0 : -ECANCELED;
+ ret = READ_ONCE(channel->intent_req_result) ? 0 : -EAGAIN;
}
unlock:
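
The rework above leans on wait_event_timeout() semantics: it returns 0 on timeout and a positive number of remaining jiffies once the condition evaluates true, so the distinct wake-up reasons have to be told apart after the wait. A generic sketch of that shape (the flags and error codes are illustrative):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static int my_wait_for_reply(wait_queue_head_t *wq, bool *got_reply,
			     bool *aborted)
{
	long ret;

	ret = wait_event_timeout(*wq,
				 READ_ONCE(*got_reply) || READ_ONCE(*aborted),
				 10 * HZ);
	if (!ret)		/* timed out, condition never became true */
		return -ETIMEDOUT;
	if (READ_ONCE(*aborted))	/* woke up because of an abort */
		return -ECANCELED;

	return 0;		/* woke up with a real reply */
}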
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index de15fc0df104..b52513eeeafa 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -3651,7 +3651,7 @@ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
enum dma_data_direction dir;
struct scsi_data_buffer *sdb = &scp->sdb;
u8 *fsp;
- int i;
+ int i, total = 0;
/*
* Even though reads are inherently atomic (in this driver), we expect
@@ -3688,18 +3688,16 @@ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
fsp + (block * sdebug_sector_size),
sdebug_sector_size, sg_skip, do_write);
sdeb_data_sector_unlock(sip, do_write);
- if (ret != sdebug_sector_size) {
- ret += (i * sdebug_sector_size);
+ total += ret;
+ if (ret != sdebug_sector_size)
break;
- }
sg_skip += sdebug_sector_size;
if (++block >= sdebug_store_sectors)
block = 0;
}
- ret = num * sdebug_sector_size;
sdeb_data_unlock(sip, atomic);
- return ret;
+ return total;
}
/* Returns number of bytes copied or -1 if error. */
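
The fix above makes do_device_access() report how many bytes were actually moved instead of assuming the full request succeeded: each per-sector copy is added to a running total and the loop stops on the first short copy. A tiny generic sketch of that accumulation pattern (all names are illustrative):

/* Copy 'num' fixed-size blocks, returning the bytes actually copied. */
static int my_copy_blocks(unsigned int num, int block_size,
			  int (*copy_one)(unsigned int idx))
{
	unsigned int i;
	int ret, total = 0;

	for (i = 0; i < num; i++) {
		ret = copy_one(i);	/* returns bytes copied for block i */
		total += ret;
		if (ret != block_size)	/* short copy: stop, report partial */
			break;
	}

	return total;
}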
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 8fa4ffd3a9b5..28bcc65e91be 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -139,6 +139,7 @@ struct qcom_llcc_config {
int size;
bool need_llcc_cfg;
bool no_edac;
+ bool irq_configured;
};
struct qcom_sct_config {
@@ -718,6 +719,7 @@ static const struct qcom_llcc_config x1e80100_cfg[] = {
.need_llcc_cfg = true,
.reg_offset = llcc_v2_1_reg_offset,
.edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ .irq_configured = true,
},
};
@@ -1345,6 +1347,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
drv_data->cfg = llcc_cfg;
drv_data->cfg_size = sz;
drv_data->edac_reg_offset = cfg->edac_reg_offset;
+ drv_data->ecc_irq_configured = cfg->irq_configured;
mutex_init(&drv_data->lock);
platform_set_drvdata(pdev, drv_data);
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index 9606222993fd..baa4ac6704a9 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -4,6 +4,7 @@
* Copyright (c) 2022, Linaro Ltd
*/
#include <linux/auxiliary_bus.h>
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -13,6 +14,8 @@
#include <linux/soc/qcom/pmic_glink.h>
#include <linux/spinlock.h>
+#define PMIC_GLINK_SEND_TIMEOUT (5 * HZ)
+
enum {
PMIC_GLINK_CLIENT_BATT = 0,
PMIC_GLINK_CLIENT_ALTMODE,
@@ -112,13 +115,29 @@ EXPORT_SYMBOL_GPL(pmic_glink_client_register);
int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len)
{
struct pmic_glink *pg = client->pg;
+ bool timeout_reached = false;
+ unsigned long start;
int ret;
mutex_lock(&pg->state_lock);
- if (!pg->ept)
+ if (!pg->ept) {
ret = -ECONNRESET;
- else
- ret = rpmsg_send(pg->ept, data, len);
+ } else {
+ start = jiffies;
+ for (;;) {
+ ret = rpmsg_send(pg->ept, data, len);
+ if (ret != -EAGAIN)
+ break;
+
+ if (timeout_reached) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ usleep_range(1000, 5000);
+ timeout_reached = time_after(jiffies, start + PMIC_GLINK_SEND_TIMEOUT);
+ }
+ }
mutex_unlock(&pg->state_lock);
return ret;
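
The send path above now retries rpmsg_send() while it keeps returning -EAGAIN, sleeping briefly between attempts and giving up after a fixed window. A condensed sketch of that bounded-retry shape, assuming a caller-supplied send callback (my_send_bounded and its arguments are illustrative):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define MY_SEND_TIMEOUT	(5 * HZ)

static int my_send_bounded(int (*do_send)(void *data, size_t len),
			   void *data, size_t len)
{
	unsigned long start = jiffies;
	bool timeout_reached = false;
	int ret;

	for (;;) {
		ret = do_send(data, len);
		if (ret != -EAGAIN)	/* success or a hard error */
			break;

		if (timeout_reached) {
			ret = -ETIMEDOUT;
			break;
		}

		/* Back off a little before the next attempt. */
		usleep_range(1000, 5000);
		timeout_reached = time_after(jiffies, start + MY_SEND_TIMEOUT);
	}

	return ret;
}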
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 64fc4f41da77..ecfd3da9d5e8 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -786,10 +786,16 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u",
SOCINFO_MAJOR(le32_to_cpu(info->ver)),
SOCINFO_MINOR(le32_to_cpu(info->ver)));
- if (offsetof(struct socinfo, serial_num) <= item_size)
+ if (!qs->attr.soc_id || !qs->attr.revision)
+ return -ENOMEM;
+
+ if (offsetof(struct socinfo, serial_num) <= item_size) {
qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%u",
le32_to_cpu(info->serial_num));
+ if (!qs->attr.serial_number)
+ return -ENOMEM;
+ }
qs->soc_dev = soc_device_register(&qs->attr);
if (IS_ERR(qs->soc_dev))
diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c
index fff312c6968d..4f3dd70d6a1a 100644
--- a/drivers/soundwire/intel_ace2x.c
+++ b/drivers/soundwire/intel_ace2x.c
@@ -376,11 +376,12 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
static int intel_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_cdns_dai_runtime *dai_runtime;
+ struct snd_pcm_hw_params *hw_params;
int ch, dir;
- int ret = 0;
dai_runtime = cdns->dai_runtime_array[dai->id];
if (!dai_runtime) {
@@ -389,12 +390,8 @@ static int intel_prepare(struct snd_pcm_substream *substream,
return -EIO;
}
+ hw_params = &rtd->dpcm[substream->stream].hw_params;
if (dai_runtime->suspended) {
- struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
- struct snd_pcm_hw_params *hw_params;
-
- hw_params = &rtd->dpcm[substream->stream].hw_params;
-
dai_runtime->suspended = false;
/*
@@ -415,15 +412,11 @@ static int intel_prepare(struct snd_pcm_substream *substream,
/* the SHIM will be configured in the callback functions */
sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
-
- /* Inform DSP about PDI stream number */
- ret = intel_params_stream(sdw, substream, dai,
- hw_params,
- sdw->instance,
- dai_runtime->pdi->intel_alh_id);
}
- return ret;
+ /* Inform DSP about PDI stream number */
+ return intel_params_stream(sdw, substream, dai, hw_params, sdw->instance,
+ dai_runtime->pdi->intel_alh_id);
}
static int
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 191de1917f83..3fa990fb59c7 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1003,6 +1003,7 @@ static int dspi_setup(struct spi_device *spi)
u32 cs_sck_delay = 0, sck_cs_delay = 0;
struct fsl_dspi_platform_data *pdata;
unsigned char pasc = 0, asc = 0;
+ struct gpio_desc *gpio_cs;
struct chip_data *chip;
unsigned long clkrate;
bool cs = true;
@@ -1077,7 +1078,10 @@ static int dspi_setup(struct spi_device *spi)
chip->ctar_val |= SPI_CTAR_LSBFE;
}
- gpiod_direction_output(spi_get_csgpiod(spi, 0), false);
+ gpio_cs = spi_get_csgpiod(spi, 0);
+ if (gpio_cs)
+ gpiod_direction_output(gpio_cs, false);
+
dspi_deassert_cs(spi, &cs);
spi_set_ctldata(spi, chip);
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index f6e40f90418f..768d7482102a 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -1116,6 +1116,11 @@ static int spi_geni_probe(struct platform_device *pdev)
init_completion(&mas->tx_reset_done);
init_completion(&mas->rx_reset_done);
spin_lock_init(&mas->lock);
+
+ ret = geni_icc_get(&mas->se, NULL);
+ if (ret)
+ return ret;
+
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
ret = devm_pm_runtime_enable(dev);
@@ -1125,9 +1130,6 @@ static int spi_geni_probe(struct platform_device *pdev)
if (device_property_read_bool(&pdev->dev, "spi-slave"))
spi->target = true;
- ret = geni_icc_get(&mas->se, NULL);
- if (ret)
- return ret;
/* Set the bus quota to a reasonable value for register access */
mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
index ddd98ddb7913..c5677fd94e5e 100644
--- a/drivers/spi/spi-mtk-snfi.c
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -1187,7 +1187,7 @@ cleanup:
/**
* mtk_snand_is_page_ops() - check if the op is a controller supported page op.
- * @op spi-mem op to check
+ * @op: spi-mem op to check
*
* Check whether op can be executed with read_from_cache or program_load
* mode in the controller.
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 4c4ff074e3f6..fc72a89fb3a7 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -2044,6 +2044,7 @@ static const struct stm32_spi_cfg stm32mp25_spi_cfg = {
.baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX,
.has_fifo = true,
.prevent_dma_burst = true,
+ .has_device_mode = true,
};
static const struct of_device_id stm32_spi_of_match[] = {
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 6c390c4eb26d..492612e8f8ba 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -129,12 +129,15 @@ static unsigned long ad9832_calc_freqreg(unsigned long mclk, unsigned long fout)
static int ad9832_write_frequency(struct ad9832_state *st,
unsigned int addr, unsigned long fout)
{
+ unsigned long clk_freq;
unsigned long regval;
- if (fout > (clk_get_rate(st->mclk) / 2))
+ clk_freq = clk_get_rate(st->mclk);
+
+ if (!clk_freq || fout > (clk_freq / 2))
return -EINVAL;
- regval = ad9832_calc_freqreg(clk_get_rate(st->mclk), fout);
+ regval = ad9832_calc_freqreg(clk_freq, fout);
st->freq_data[0] = cpu_to_be16((AD9832_CMD_FRE8BITSW << CMD_SHIFT) |
(addr << ADD_SHIFT) |
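
The change above caches clk_get_rate() once and rejects a zero rate before it is used as a divisor, since clk_get_rate() can return 0 when the rate is unknown. A minimal sketch of the guard (the wrapper name is illustrative; the real code goes on to compute the frequency register from clk_freq):

#include <linux/clk.h>
#include <linux/errno.h>

static int my_check_fout(struct clk *mclk, unsigned long fout)
{
	unsigned long clk_freq = clk_get_rate(mclk);

	/* A zero rate would make the fout limit check and the later
	 * frequency-register computation meaningless. */
	if (!clk_freq || fout > clk_freq / 2)
		return -EINVAL;

	return 0;
}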
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 721319329afa..7db9869a9f3f 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -516,7 +516,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
*/
tb_retimer_set_inbound_sbtx(port);
- for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
+ for (max = 1, i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
/*
* Last retimer is true only for the last on-board
* retimer (the one connected directly to the Type-C
@@ -527,9 +527,10 @@ int tb_retimer_scan(struct tb_port *port, bool add)
last_idx = i;
else if (ret < 0)
break;
+
+ max = i;
}
- max = i;
ret = 0;
/* Add retimers if they do not exist already */
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 10e719dd837c..4f777788e917 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -288,6 +288,24 @@ static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}
+static int tb_switch_tmu_hifi_uni_required(struct device *dev, void *not_used)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ if (sw && tb_switch_tmu_is_enabled(sw) &&
+ tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_HIFI_UNI))
+ return 1;
+
+ return device_for_each_child(dev, NULL,
+ tb_switch_tmu_hifi_uni_required);
+}
+
+static bool tb_tmu_hifi_uni_required(struct tb *tb)
+{
+ return device_for_each_child(&tb->dev, NULL,
+ tb_switch_tmu_hifi_uni_required) == 1;
+}
+
static int tb_enable_tmu(struct tb_switch *sw)
{
int ret;
@@ -302,12 +320,30 @@ static int tb_enable_tmu(struct tb_switch *sw)
ret = tb_switch_tmu_configure(sw,
TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
if (ret == -EOPNOTSUPP) {
- if (tb_switch_clx_is_enabled(sw, TB_CL1))
- ret = tb_switch_tmu_configure(sw,
- TB_SWITCH_TMU_MODE_LOWRES);
- else
- ret = tb_switch_tmu_configure(sw,
- TB_SWITCH_TMU_MODE_HIFI_BI);
+ if (tb_switch_clx_is_enabled(sw, TB_CL1)) {
+ /*
+ * Figure out uni-directional HiFi TMU requirements
+ * currently in the domain. If there are no
+ * uni-directional HiFi requirements we can put the TMU
+ * into LowRes mode.
+ *
+ * Deliberately skip bi-directional HiFi links
+ * as these work independently of other links
+ * (and they do not allow any CL states anyway).
+ */
+ if (tb_tmu_hifi_uni_required(sw->tb))
+ ret = tb_switch_tmu_configure(sw,
+ TB_SWITCH_TMU_MODE_HIFI_UNI);
+ else
+ ret = tb_switch_tmu_configure(sw,
+ TB_SWITCH_TMU_MODE_LOWRES);
+ } else {
+ ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
+ }
+
+ /* If not supported, fallback to bi-directional HiFi */
+ if (ret == -EOPNOTSUPP)
+ ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
}
if (ret)
return ret;
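
The new helpers above walk the whole device hierarchy below the domain: device_for_each_child() stops and propagates the first non-zero return from the callback, and the callback recurses into its own children, so a single match anywhere in the tree bubbles up as 1. A generic sketch of that recursive-walk pattern (my_match is an illustrative predicate):

#include <linux/device.h>

static bool my_match(struct device *dev)
{
	/* Illustrative predicate; the real code checks the TMU mode. */
	return false;
}

static int my_visit(struct device *dev, void *data)
{
	if (my_match(dev))
		return 1;	/* non-zero return stops the iteration */

	/* Recurse; a hit anywhere below also returns 1 up the stack. */
	return device_for_each_child(dev, data, my_visit);
}

static bool my_tree_has_match(struct device *root)
{
	return device_for_each_child(root, NULL, my_visit) == 1;
}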
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index a63dcf48e59d..f5846598d80e 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -8219,7 +8219,7 @@ static void ufshcd_update_rtc(struct ufs_hba *hba)
err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
0, 0, &val);
- ufshcd_rpm_put_sync(hba);
+ ufshcd_rpm_put(hba);
if (err)
dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 21585ed89ef8..03c22114214b 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -170,11 +170,11 @@ static int usb_acpi_add_usb4_devlink(struct usb_device *udev)
struct fwnode_handle *nhi_fwnode __free(fwnode_handle) =
fwnode_find_reference(dev_fwnode(&port_dev->dev), "usb4-host-interface", 0);
- if (IS_ERR(nhi_fwnode))
+ if (IS_ERR(nhi_fwnode) || !nhi_fwnode->dev)
return 0;
link = device_link_add(&port_dev->child->dev, nhi_fwnode->dev,
- DL_FLAG_AUTOREMOVE_CONSUMER |
+ DL_FLAG_STATELESS |
DL_FLAG_RPM_ACTIVE |
DL_FLAG_PM_RUNTIME);
if (!link) {
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 68226defdc60..4d73fae80b12 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -23,7 +23,6 @@ static void dwc2_set_bcm_params(struct dwc2_hsotg *hsotg)
p->max_transfer_size = 65535;
p->max_packet_count = 511;
p->ahbcfg = 0x10;
- p->no_clock_gating = true;
}
static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7e538194a0a4..cb07cee9ed0c 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -640,7 +640,7 @@ int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id)
pm_runtime_put_noidle(&dev->dev);
if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0)
- pm_runtime_forbid(&dev->dev);
+ pm_runtime_get(&dev->dev);
else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
pm_runtime_allow(&dev->dev);
@@ -683,7 +683,9 @@ void xhci_pci_remove(struct pci_dev *dev)
xhci->xhc_state |= XHCI_STATE_REMOVING;
- if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0)
+ pm_runtime_put(&dev->dev);
+ else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
pm_runtime_forbid(&dev->dev);
if (xhci->shared_hcd) {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index b6eb928e260f..928b93ad1ee8 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1718,6 +1718,14 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);
+ cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
+
+ /* If CMD ring stopped we own the trbs between enqueue and dequeue */
+ if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
+ complete_all(&xhci->cmd_ring_stop_completion);
+ return;
+ }
+
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
cmd_trb);
/*
@@ -1734,14 +1742,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cancel_delayed_work(&xhci->cmd_timer);
- cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
-
- /* If CMD ring stopped we own the trbs between enqueue and dequeue */
- if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
- complete_all(&xhci->cmd_ring_stop_completion);
- return;
- }
-
if (cmd->command_trb != xhci->cmd_ring->dequeue) {
xhci_err(xhci,
"Command completion event does not match command\n");
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 06e0fb23566c..06f789097989 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -628,7 +628,7 @@ void devm_usb_put_phy(struct device *dev, struct usb_phy *phy)
{
int r;
- r = devres_destroy(dev, devm_usb_phy_release, devm_usb_phy_match, phy);
+ r = devres_release(dev, devm_usb_phy_release, devm_usb_phy_match, phy);
dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_usb_put_phy);
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index d61b4c74648d..58f40156de56 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -2293,7 +2293,7 @@ void typec_port_register_altmodes(struct typec_port *port,
const struct typec_altmode_ops *ops, void *drvdata,
struct typec_altmode **altmodes, size_t n)
{
- struct fwnode_handle *altmodes_node, *child;
+ struct fwnode_handle *child;
struct typec_altmode_desc desc;
struct typec_altmode *alt;
size_t index = 0;
@@ -2301,7 +2301,9 @@ void typec_port_register_altmodes(struct typec_port *port,
u32 vdo;
int ret;
- altmodes_node = device_get_named_child_node(&port->dev, "altmodes");
+ struct fwnode_handle *altmodes_node __free(fwnode_handle) =
+ device_get_named_child_node(&port->dev, "altmodes");
+
if (!altmodes_node)
return; /* No altmodes specified */
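
The altmodes rework above switches to the scoped cleanup helper: tagging the local with __free(fwnode_handle) arranges for fwnode_handle_put() to run automatically when the variable goes out of scope, so every early return drops the reference. A small sketch of the pattern (the function and the walk body are illustrative):

#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/property.h>

static int my_parse_child(struct device *dev)
{
	struct fwnode_handle *node __free(fwnode_handle) =
		device_get_named_child_node(dev, "altmodes");

	if (!node)
		return 0;	/* nothing found, and put(NULL) is harmless */

	/* ... walk the child node here ... */

	return 0;		/* fwnode_handle_put(node) runs automatically */
}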
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
index 501eddb294e4..b80eb2d78d88 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
@@ -93,8 +93,10 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
return -EINVAL;
bridge_dev = devm_drm_dp_hpd_bridge_alloc(tcpm->dev, to_of_node(tcpm->tcpc.fwnode));
- if (IS_ERR(bridge_dev))
- return PTR_ERR(bridge_dev);
+ if (IS_ERR(bridge_dev)) {
+ ret = PTR_ERR(bridge_dev);
+ goto fwnode_remove;
+ }
tcpm->tcpm_port = tcpm_register_port(tcpm->dev, &tcpm->tcpc);
if (IS_ERR(tcpm->tcpm_port)) {
@@ -123,7 +125,7 @@ port_stop:
port_unregister:
tcpm_unregister_port(tcpm->tcpm_port);
fwnode_remove:
- fwnode_remove_software_node(tcpm->tcpc.fwnode);
+ fwnode_handle_put(tcpm->tcpc.fwnode);
return ret;
}
@@ -135,7 +137,7 @@ static void qcom_pmic_typec_remove(struct platform_device *pdev)
tcpm->pdphy_stop(tcpm);
tcpm->port_stop(tcpm);
tcpm_unregister_port(tcpm->tcpm_port);
- fwnode_remove_software_node(tcpm->tcpc.fwnode);
+ fwnode_handle_put(tcpm->tcpc.fwnode);
}
static const struct pmic_typec_resources pm8150b_typec_res = {
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index fc619478200f..7ae341a40342 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -4515,7 +4515,8 @@ static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
return ERROR_RECOVERY;
if (port->pwr_role == TYPEC_SOURCE)
return SRC_UNATTACHED;
- if (port->state == SNK_WAIT_CAPABILITIES_TIMEOUT)
+ if (port->state == SNK_WAIT_CAPABILITIES ||
+ port->state == SNK_WAIT_CAPABILITIES_TIMEOUT)
return SNK_READY;
return SNK_UNATTACHED;
}
@@ -5043,8 +5044,11 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, SNK_SOFT_RESET,
PD_T_SINK_WAIT_CAP);
} else {
- tcpm_set_state(port, SNK_WAIT_CAPABILITIES_TIMEOUT,
- PD_T_SINK_WAIT_CAP);
+ if (!port->self_powered)
+ upcoming_state = SNK_WAIT_CAPABILITIES_TIMEOUT;
+ else
+ upcoming_state = hard_reset_state(port);
+ tcpm_set_state(port, upcoming_state, PD_T_SINK_WAIT_CAP);
}
break;
case SNK_WAIT_CAPABILITIES_TIMEOUT:
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index ea36c6956bf3..de035071fedb 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1236,7 +1236,6 @@ config FB_3DFX_I2C
config FB_VOODOO1
tristate "3Dfx Voodoo Graphics (sst1) support"
depends on FB && PCI
- depends on FB_DEVICE
select FB_IOMEM_HELPERS
help
Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
@@ -1374,6 +1373,7 @@ config FB_VT8500
config FB_WM8505
bool "Wondermedia WM8xxx-series frame buffer support"
depends on (FB = y) && HAS_IOMEM && (ARCH_VT8500 || COMPILE_TEST)
+ select FB_IOMEM_FOPS
select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
select FB_SYS_IMAGEBLIT
@@ -1660,19 +1660,6 @@ config FB_SH7760
and 8, 15 or 16 bpp color; 90 degrees clockwise display rotation for
panels <= 320 pixel horizontal resolution.
-config FB_DA8XX
- tristate "DA8xx/OMAP-L1xx/AM335x Framebuffer support"
- depends on FB && HAVE_CLK && HAS_IOMEM
- depends on ARCH_DAVINCI_DA8XX || SOC_AM33XX || COMPILE_TEST
- select FB_CFB_REV_PIXELS_IN_BYTE
- select FB_IOMEM_HELPERS
- select FB_MODE_HELPERS
- select VIDEOMODE_HELPERS
- help
- This is the frame buffer device driver for the TI LCD controller
- found on DA8xx/OMAP-L1xx/AM335x SoCs.
- If unsure, say N.
-
config FB_VIRTUAL
tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
depends on FB
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 3eecd51267fa..b3d12f977c06 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -121,7 +121,6 @@ obj-$(CONFIG_FB_VESA) += vesafb.o
obj-$(CONFIG_FB_EFI) += efifb.o
obj-$(CONFIG_FB_VGA16) += vga16fb.o
obj-$(CONFIG_FB_OF) += offb.o
-obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o
obj-$(CONFIG_FB_SIMPLE) += simplefb.o
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index 4a64940e0c00..e757462af0a6 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -147,7 +147,7 @@ bw2_blank(int blank, struct fb_info *info)
return 0;
}
-static struct sbus_mmap_map bw2_mmap_map[] = {
+static const struct sbus_mmap_map bw2_mmap_map[] = {
{
.size = SBUS_MMAP_FBSIZE(1)
},
diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c
index 430e1a7b352b..5389f8f07346 100644
--- a/drivers/video/fbdev/cg14.c
+++ b/drivers/video/fbdev/cg14.c
@@ -360,7 +360,7 @@ static void cg14_init_fix(struct fb_info *info, int linebytes,
info->fix.accel = FB_ACCEL_SUN_CG14;
}
-static struct sbus_mmap_map __cg14_mmap_map[CG14_MMAP_ENTRIES] = {
+static const struct sbus_mmap_map __cg14_mmap_map[CG14_MMAP_ENTRIES] = {
{
.voff = CG14_REGS,
.poff = 0x80000000,
diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c
index e4c53c6632ba..a58a483014e6 100644
--- a/drivers/video/fbdev/cg3.c
+++ b/drivers/video/fbdev/cg3.c
@@ -209,7 +209,7 @@ static int cg3_blank(int blank, struct fb_info *info)
return 0;
}
-static struct sbus_mmap_map cg3_mmap_map[] = {
+static const struct sbus_mmap_map cg3_mmap_map[] = {
{
.voff = CG3_MMAP_OFFSET,
.poff = CG3_RAM_OFFSET,
diff --git a/drivers/video/fbdev/cg6.c b/drivers/video/fbdev/cg6.c
index 0b60df51e7bc..56d74468040a 100644
--- a/drivers/video/fbdev/cg6.c
+++ b/drivers/video/fbdev/cg6.c
@@ -545,7 +545,7 @@ static int cg6_blank(int blank, struct fb_info *info)
return 0;
}
-static struct sbus_mmap_map cg6_mmap_map[] = {
+static const struct sbus_mmap_map cg6_mmap_map[] = {
{
.voff = CG6_FBC,
.poff = CG6_FBC_OFFSET,
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
deleted file mode 100644
index fad1e13c6332..000000000000
--- a/drivers/video/fbdev/da8xx-fb.c
+++ /dev/null
@@ -1,1665 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2008-2009 MontaVista Software Inc.
- * Copyright (C) 2008-2009 Texas Instruments Inc
- *
- * Based on the LCD driver for TI Avalanche processors written by
- * Ajay Singh and Shalom Hai.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/fb.h>
-#include <linux/dma-mapping.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/uaccess.h>
-#include <linux/pm_runtime.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
-#include <linux/console.h>
-#include <linux/regulator/consumer.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/lcm.h>
-#include <video/da8xx-fb.h>
-#include <asm/div64.h>
-
-#define DRIVER_NAME "da8xx_lcdc"
-
-#define LCD_VERSION_1 1
-#define LCD_VERSION_2 2
-
-/* LCD Status Register */
-#define LCD_END_OF_FRAME1 BIT(9)
-#define LCD_END_OF_FRAME0 BIT(8)
-#define LCD_PL_LOAD_DONE BIT(6)
-#define LCD_FIFO_UNDERFLOW BIT(5)
-#define LCD_SYNC_LOST BIT(2)
-#define LCD_FRAME_DONE BIT(0)
-
-/* LCD DMA Control Register */
-#define LCD_DMA_BURST_SIZE(x) ((x) << 4)
-#define LCD_DMA_BURST_1 0x0
-#define LCD_DMA_BURST_2 0x1
-#define LCD_DMA_BURST_4 0x2
-#define LCD_DMA_BURST_8 0x3
-#define LCD_DMA_BURST_16 0x4
-#define LCD_V1_END_OF_FRAME_INT_ENA BIT(2)
-#define LCD_V2_END_OF_FRAME0_INT_ENA BIT(8)
-#define LCD_V2_END_OF_FRAME1_INT_ENA BIT(9)
-#define LCD_DUAL_FRAME_BUFFER_ENABLE BIT(0)
-
-/* LCD Control Register */
-#define LCD_CLK_DIVISOR(x) ((x) << 8)
-#define LCD_RASTER_MODE 0x01
-
-/* LCD Raster Control Register */
-#define LCD_PALETTE_LOAD_MODE(x) ((x) << 20)
-#define PALETTE_AND_DATA 0x00
-#define PALETTE_ONLY 0x01
-#define DATA_ONLY 0x02
-
-#define LCD_MONO_8BIT_MODE BIT(9)
-#define LCD_RASTER_ORDER BIT(8)
-#define LCD_TFT_MODE BIT(7)
-#define LCD_V1_UNDERFLOW_INT_ENA BIT(6)
-#define LCD_V2_UNDERFLOW_INT_ENA BIT(5)
-#define LCD_V1_PL_INT_ENA BIT(4)
-#define LCD_V2_PL_INT_ENA BIT(6)
-#define LCD_MONOCHROME_MODE BIT(1)
-#define LCD_RASTER_ENABLE BIT(0)
-#define LCD_TFT_ALT_ENABLE BIT(23)
-#define LCD_STN_565_ENABLE BIT(24)
-#define LCD_V2_DMA_CLK_EN BIT(2)
-#define LCD_V2_LIDD_CLK_EN BIT(1)
-#define LCD_V2_CORE_CLK_EN BIT(0)
-#define LCD_V2_LPP_B10 26
-#define LCD_V2_TFT_24BPP_MODE BIT(25)
-#define LCD_V2_TFT_24BPP_UNPACK BIT(26)
-
-/* LCD Raster Timing 2 Register */
-#define LCD_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16)
-#define LCD_AC_BIAS_FREQUENCY(x) ((x) << 8)
-#define LCD_SYNC_CTRL BIT(25)
-#define LCD_SYNC_EDGE BIT(24)
-#define LCD_INVERT_PIXEL_CLOCK BIT(22)
-#define LCD_INVERT_LINE_CLOCK BIT(21)
-#define LCD_INVERT_FRAME_CLOCK BIT(20)
-
-/* LCD Block */
-#define LCD_PID_REG 0x0
-#define LCD_CTRL_REG 0x4
-#define LCD_STAT_REG 0x8
-#define LCD_RASTER_CTRL_REG 0x28
-#define LCD_RASTER_TIMING_0_REG 0x2C
-#define LCD_RASTER_TIMING_1_REG 0x30
-#define LCD_RASTER_TIMING_2_REG 0x34
-#define LCD_DMA_CTRL_REG 0x40
-#define LCD_DMA_FRM_BUF_BASE_ADDR_0_REG 0x44
-#define LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG 0x48
-#define LCD_DMA_FRM_BUF_BASE_ADDR_1_REG 0x4C
-#define LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG 0x50
-
-/* Interrupt Registers available only in Version 2 */
-#define LCD_RAW_STAT_REG 0x58
-#define LCD_MASKED_STAT_REG 0x5c
-#define LCD_INT_ENABLE_SET_REG 0x60
-#define LCD_INT_ENABLE_CLR_REG 0x64
-#define LCD_END_OF_INT_IND_REG 0x68
-
-/* Clock registers available only on Version 2 */
-#define LCD_CLK_ENABLE_REG 0x6c
-#define LCD_CLK_RESET_REG 0x70
-#define LCD_CLK_MAIN_RESET BIT(3)
-
-#define LCD_NUM_BUFFERS 2
-
-#define PALETTE_SIZE 256
-
-#define CLK_MIN_DIV 2
-#define CLK_MAX_DIV 255
-
-static void __iomem *da8xx_fb_reg_base;
-static unsigned int lcd_revision;
-static irq_handler_t lcdc_irq_handler;
-static wait_queue_head_t frame_done_wq;
-static int frame_done_flag;
-
-static unsigned int lcdc_read(unsigned int addr)
-{
- return (unsigned int)__raw_readl(da8xx_fb_reg_base + (addr));
-}
-
-static void lcdc_write(unsigned int val, unsigned int addr)
-{
- __raw_writel(val, da8xx_fb_reg_base + (addr));
-}
-
-struct da8xx_fb_par {
- struct device *dev;
- dma_addr_t p_palette_base;
- unsigned char *v_palette_base;
- dma_addr_t vram_phys;
- unsigned long vram_size;
- void *vram_virt;
- unsigned int dma_start;
- unsigned int dma_end;
- struct clk *lcdc_clk;
- int irq;
- unsigned int palette_sz;
- int blank;
- wait_queue_head_t vsync_wait;
- int vsync_flag;
- int vsync_timeout;
- spinlock_t lock_for_chan_update;
-
- /*
- * LCDC has 2 ping pong DMA channels, channel 0
- * and channel 1.
- */
- unsigned int which_dma_channel_done;
-#ifdef CONFIG_CPU_FREQ
- struct notifier_block freq_transition;
-#endif
- unsigned int lcdc_clk_rate;
- struct regulator *lcd_supply;
- u32 pseudo_palette[16];
- struct fb_videomode mode;
- struct lcd_ctrl_config cfg;
-};
-
-static struct fb_var_screeninfo da8xx_fb_var;
-
-static struct fb_fix_screeninfo da8xx_fb_fix = {
- .id = "DA8xx FB Drv",
- .type = FB_TYPE_PACKED_PIXELS,
- .type_aux = 0,
- .visual = FB_VISUAL_PSEUDOCOLOR,
- .xpanstep = 0,
- .ypanstep = 1,
- .ywrapstep = 0,
- .accel = FB_ACCEL_NONE
-};
-
-static struct fb_videomode known_lcd_panels[] = {
- /* Sharp LCD035Q3DG01 */
- [0] = {
- .name = "Sharp_LCD035Q3DG01",
- .xres = 320,
- .yres = 240,
- .pixclock = KHZ2PICOS(4607),
- .left_margin = 6,
- .right_margin = 8,
- .upper_margin = 2,
- .lower_margin = 2,
- .hsync_len = 0,
- .vsync_len = 0,
- .sync = FB_SYNC_CLK_INVERT,
- },
- /* Sharp LK043T1DG01 */
- [1] = {
- .name = "Sharp_LK043T1DG01",
- .xres = 480,
- .yres = 272,
- .pixclock = KHZ2PICOS(7833),
- .left_margin = 2,
- .right_margin = 2,
- .upper_margin = 2,
- .lower_margin = 2,
- .hsync_len = 41,
- .vsync_len = 10,
- .sync = 0,
- .flag = 0,
- },
- [2] = {
- /* Hitachi SP10Q010 */
- .name = "SP10Q010",
- .xres = 320,
- .yres = 240,
- .pixclock = KHZ2PICOS(7833),
- .left_margin = 10,
- .right_margin = 10,
- .upper_margin = 10,
- .lower_margin = 10,
- .hsync_len = 10,
- .vsync_len = 10,
- .sync = 0,
- .flag = 0,
- },
- [3] = {
- /* Densitron 84-0023-001T */
- .name = "Densitron_84-0023-001T",
- .xres = 320,
- .yres = 240,
- .pixclock = KHZ2PICOS(6400),
- .left_margin = 0,
- .right_margin = 0,
- .upper_margin = 0,
- .lower_margin = 0,
- .hsync_len = 30,
- .vsync_len = 3,
- .sync = 0,
- },
-};
-
-static bool da8xx_fb_is_raster_enabled(void)
-{
- return !!(lcdc_read(LCD_RASTER_CTRL_REG) & LCD_RASTER_ENABLE);
-}
-
-/* Enable the Raster Engine of the LCD Controller */
-static void lcd_enable_raster(void)
-{
- u32 reg;
-
- /* Put LCDC in reset for several cycles */
- if (lcd_revision == LCD_VERSION_2)
- /* Write 1 to reset LCDC */
- lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
- mdelay(1);
-
- /* Bring LCDC out of reset */
- if (lcd_revision == LCD_VERSION_2)
- lcdc_write(0, LCD_CLK_RESET_REG);
- mdelay(1);
-
- /* Above reset sequence doesnot reset register context */
- reg = lcdc_read(LCD_RASTER_CTRL_REG);
- if (!(reg & LCD_RASTER_ENABLE))
- lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
-}
-
-/* Disable the Raster Engine of the LCD Controller */
-static void lcd_disable_raster(enum da8xx_frame_complete wait_for_frame_done)
-{
- u32 reg;
- int ret;
-
- reg = lcdc_read(LCD_RASTER_CTRL_REG);
- if (reg & LCD_RASTER_ENABLE)
- lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
- else
- /* return if already disabled */
- return;
-
- if ((wait_for_frame_done == DA8XX_FRAME_WAIT) &&
- (lcd_revision == LCD_VERSION_2)) {
- frame_done_flag = 0;
- ret = wait_event_interruptible_timeout(frame_done_wq,
- frame_done_flag != 0,
- msecs_to_jiffies(50));
- if (ret == 0)
- pr_err("LCD Controller timed out\n");
- }
-}
-
-static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
-{
- u32 start;
- u32 end;
- u32 reg_ras;
- u32 reg_dma;
- u32 reg_int;
-
- /* init reg to clear PLM (loading mode) fields */
- reg_ras = lcdc_read(LCD_RASTER_CTRL_REG);
- reg_ras &= ~(3 << 20);
-
- reg_dma = lcdc_read(LCD_DMA_CTRL_REG);
-
- if (load_mode == LOAD_DATA) {
- start = par->dma_start;
- end = par->dma_end;
-
- reg_ras |= LCD_PALETTE_LOAD_MODE(DATA_ONLY);
- if (lcd_revision == LCD_VERSION_1) {
- reg_dma |= LCD_V1_END_OF_FRAME_INT_ENA;
- } else {
- reg_int = lcdc_read(LCD_INT_ENABLE_SET_REG) |
- LCD_V2_END_OF_FRAME0_INT_ENA |
- LCD_V2_END_OF_FRAME1_INT_ENA |
- LCD_FRAME_DONE | LCD_SYNC_LOST;
- lcdc_write(reg_int, LCD_INT_ENABLE_SET_REG);
- }
- reg_dma |= LCD_DUAL_FRAME_BUFFER_ENABLE;
-
- lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
- lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
- lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
- lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
- } else if (load_mode == LOAD_PALETTE) {
- start = par->p_palette_base;
- end = start + par->palette_sz - 1;
-
- reg_ras |= LCD_PALETTE_LOAD_MODE(PALETTE_ONLY);
-
- if (lcd_revision == LCD_VERSION_1) {
- reg_ras |= LCD_V1_PL_INT_ENA;
- } else {
- reg_int = lcdc_read(LCD_INT_ENABLE_SET_REG) |
- LCD_V2_PL_INT_ENA;
- lcdc_write(reg_int, LCD_INT_ENABLE_SET_REG);
- }
-
- lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
- lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
- }
-
- lcdc_write(reg_dma, LCD_DMA_CTRL_REG);
- lcdc_write(reg_ras, LCD_RASTER_CTRL_REG);
-
- /*
- * The Raster enable bit must be set after all other control fields are
- * set.
- */
- lcd_enable_raster();
-}
-
-/* Configure the Burst Size and fifo threhold of DMA */
-static int lcd_cfg_dma(int burst_size, int fifo_th)
-{
- u32 reg;
-
- reg = lcdc_read(LCD_DMA_CTRL_REG) & 0x00000001;
- switch (burst_size) {
- case 1:
- reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_1);
- break;
- case 2:
- reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_2);
- break;
- case 4:
- reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_4);
- break;
- case 8:
- reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_8);
- break;
- case 16:
- default:
- reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_16);
- break;
- }
-
- reg |= (fifo_th << 8);
-
- lcdc_write(reg, LCD_DMA_CTRL_REG);
-
- return 0;
-}
-
-static void lcd_cfg_ac_bias(int period, int transitions_per_int)
-{
- u32 reg;
-
-	/* Set the AC Bias Period and Number of Transitions per Interrupt */
- reg = lcdc_read(LCD_RASTER_TIMING_2_REG) & 0xFFF00000;
- reg |= LCD_AC_BIAS_FREQUENCY(period) |
- LCD_AC_BIAS_TRANSITIONS_PER_INT(transitions_per_int);
- lcdc_write(reg, LCD_RASTER_TIMING_2_REG);
-}
-
-static void lcd_cfg_horizontal_sync(int back_porch, int pulse_width,
- int front_porch)
-{
- u32 reg;
-
- reg = lcdc_read(LCD_RASTER_TIMING_0_REG) & 0x3ff;
- reg |= (((back_porch-1) & 0xff) << 24)
- | (((front_porch-1) & 0xff) << 16)
- | (((pulse_width-1) & 0x3f) << 10);
- lcdc_write(reg, LCD_RASTER_TIMING_0_REG);
-
- /*
- * LCDC Version 2 adds some extra bits that increase the allowable
- * size of the horizontal timing registers.
-	 * Remember that the registers use 0 to represent 1, so all values
-	 * written to the registers must be decremented by 1.
- */
- if (lcd_revision == LCD_VERSION_2) {
- /* Mask off the bits we want to change */
- reg = lcdc_read(LCD_RASTER_TIMING_2_REG) & ~0x780000ff;
- reg |= ((front_porch-1) & 0x300) >> 8;
- reg |= ((back_porch-1) & 0x300) >> 4;
- reg |= ((pulse_width-1) & 0x3c0) << 21;
- lcdc_write(reg, LCD_RASTER_TIMING_2_REG);
- }
-}
-
-static void lcd_cfg_vertical_sync(int back_porch, int pulse_width,
- int front_porch)
-{
- u32 reg;
-
- reg = lcdc_read(LCD_RASTER_TIMING_1_REG) & 0x3ff;
- reg |= ((back_porch & 0xff) << 24)
- | ((front_porch & 0xff) << 16)
- | (((pulse_width-1) & 0x3f) << 10);
- lcdc_write(reg, LCD_RASTER_TIMING_1_REG);
-}
-
-static int lcd_cfg_display(const struct lcd_ctrl_config *cfg,
- struct fb_videomode *panel)
-{
- u32 reg;
- u32 reg_int;
-
- reg = lcdc_read(LCD_RASTER_CTRL_REG) & ~(LCD_TFT_MODE |
- LCD_MONO_8BIT_MODE |
- LCD_MONOCHROME_MODE);
-
- switch (cfg->panel_shade) {
- case MONOCHROME:
- reg |= LCD_MONOCHROME_MODE;
- if (cfg->mono_8bit_mode)
- reg |= LCD_MONO_8BIT_MODE;
- break;
- case COLOR_ACTIVE:
- reg |= LCD_TFT_MODE;
- if (cfg->tft_alt_mode)
- reg |= LCD_TFT_ALT_ENABLE;
- break;
-
- case COLOR_PASSIVE:
-		/* AC bias is applicable only for passive panels */
- lcd_cfg_ac_bias(cfg->ac_bias, cfg->ac_bias_intrpt);
- if (cfg->bpp == 12 && cfg->stn_565_mode)
- reg |= LCD_STN_565_ENABLE;
- break;
-
- default:
- return -EINVAL;
- }
-
- /* enable additional interrupts here */
- if (lcd_revision == LCD_VERSION_1) {
- reg |= LCD_V1_UNDERFLOW_INT_ENA;
- } else {
- reg_int = lcdc_read(LCD_INT_ENABLE_SET_REG) |
- LCD_V2_UNDERFLOW_INT_ENA;
- lcdc_write(reg_int, LCD_INT_ENABLE_SET_REG);
- }
-
- lcdc_write(reg, LCD_RASTER_CTRL_REG);
-
- reg = lcdc_read(LCD_RASTER_TIMING_2_REG);
-
- reg |= LCD_SYNC_CTRL;
-
- if (cfg->sync_edge)
- reg |= LCD_SYNC_EDGE;
- else
- reg &= ~LCD_SYNC_EDGE;
-
- if ((panel->sync & FB_SYNC_HOR_HIGH_ACT) == 0)
- reg |= LCD_INVERT_LINE_CLOCK;
- else
- reg &= ~LCD_INVERT_LINE_CLOCK;
-
- if ((panel->sync & FB_SYNC_VERT_HIGH_ACT) == 0)
- reg |= LCD_INVERT_FRAME_CLOCK;
- else
- reg &= ~LCD_INVERT_FRAME_CLOCK;
-
- lcdc_write(reg, LCD_RASTER_TIMING_2_REG);
-
- return 0;
-}
-
-static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
- u32 bpp, u32 raster_order)
-{
- u32 reg;
-
- if (bpp > 16 && lcd_revision == LCD_VERSION_1)
- return -EINVAL;
-
- /* Set the Panel Width */
- /* Pixels per line = (PPL + 1)*16 */
- if (lcd_revision == LCD_VERSION_1) {
- /*
- * 0x3F in bits 4..9 gives max horizontal resolution = 1024
- * pixels.
- */
- width &= 0x3f0;
- } else {
- /*
- * 0x7F in bits 4..10 gives max horizontal resolution = 2048
- * pixels.
- */
- width &= 0x7f0;
- }
-
- reg = lcdc_read(LCD_RASTER_TIMING_0_REG);
- reg &= 0xfffffc00;
- if (lcd_revision == LCD_VERSION_1) {
- reg |= ((width >> 4) - 1) << 4;
- } else {
- width = (width >> 4) - 1;
- reg |= ((width & 0x3f) << 4) | ((width & 0x40) >> 3);
- }
- lcdc_write(reg, LCD_RASTER_TIMING_0_REG);
-
- /* Set the Panel Height */
-	/* Set bits 9:0 of Lines Per Panel */
- reg = lcdc_read(LCD_RASTER_TIMING_1_REG);
- reg = ((height - 1) & 0x3ff) | (reg & 0xfffffc00);
- lcdc_write(reg, LCD_RASTER_TIMING_1_REG);
-
-	/* Set bit 10 of Lines Per Panel */
- if (lcd_revision == LCD_VERSION_2) {
- reg = lcdc_read(LCD_RASTER_TIMING_2_REG);
- reg |= ((height - 1) & 0x400) << 16;
- lcdc_write(reg, LCD_RASTER_TIMING_2_REG);
- }
-
- /* Set the Raster Order of the Frame Buffer */
- reg = lcdc_read(LCD_RASTER_CTRL_REG) & ~(1 << 8);
- if (raster_order)
- reg |= LCD_RASTER_ORDER;
-
- par->palette_sz = 16 * 2;
-
- switch (bpp) {
- case 1:
- case 2:
- case 4:
- case 16:
- break;
- case 24:
- reg |= LCD_V2_TFT_24BPP_MODE;
- break;
- case 32:
- reg |= LCD_V2_TFT_24BPP_MODE;
- reg |= LCD_V2_TFT_24BPP_UNPACK;
- break;
- case 8:
- par->palette_sz = 256 * 2;
- break;
-
- default:
- return -EINVAL;
- }
-
- lcdc_write(reg, LCD_RASTER_CTRL_REG);
-
- return 0;
-}
-
-#define CNVT_TOHW(val, width) ((((val) << (width)) + 0x7FFF - (val)) >> 16)
-static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info)
-{
- struct da8xx_fb_par *par = info->par;
- unsigned short *palette = (unsigned short *) par->v_palette_base;
- u_short pal;
- int update_hw = 0;
-
- if (regno > 255)
- return 1;
-
- if (info->fix.visual == FB_VISUAL_DIRECTCOLOR)
- return 1;
-
- if (info->var.bits_per_pixel > 16 && lcd_revision == LCD_VERSION_1)
- return -EINVAL;
-
- switch (info->fix.visual) {
- case FB_VISUAL_TRUECOLOR:
- red = CNVT_TOHW(red, info->var.red.length);
- green = CNVT_TOHW(green, info->var.green.length);
- blue = CNVT_TOHW(blue, info->var.blue.length);
- break;
- case FB_VISUAL_PSEUDOCOLOR:
- switch (info->var.bits_per_pixel) {
- case 4:
- if (regno > 15)
- return -EINVAL;
-
- if (info->var.grayscale) {
- pal = regno;
- } else {
- red >>= 4;
- green >>= 8;
- blue >>= 12;
-
- pal = red & 0x0f00;
- pal |= green & 0x00f0;
- pal |= blue & 0x000f;
- }
- if (regno == 0)
- pal |= 0x2000;
- palette[regno] = pal;
- break;
-
- case 8:
- red >>= 4;
- green >>= 8;
- blue >>= 12;
-
- pal = (red & 0x0f00);
- pal |= (green & 0x00f0);
- pal |= (blue & 0x000f);
-
- if (palette[regno] != pal) {
- update_hw = 1;
- palette[regno] = pal;
- }
- break;
- }
- break;
- }
-
- /* Truecolor has hardware independent palette */
- if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
- u32 v;
-
- if (regno > 15)
- return -EINVAL;
-
- v = (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset);
-
- ((u32 *) (info->pseudo_palette))[regno] = v;
- if (palette[0] != 0x4000) {
- update_hw = 1;
- palette[0] = 0x4000;
- }
- }
-
- /* Update the palette in the h/w as needed. */
- if (update_hw)
- lcd_blit(LOAD_PALETTE, par);
-
- return 0;
-}
-#undef CNVT_TOHW
-
-static void da8xx_fb_lcd_reset(void)
-{
- /* DMA has to be disabled */
- lcdc_write(0, LCD_DMA_CTRL_REG);
- lcdc_write(0, LCD_RASTER_CTRL_REG);
-
- if (lcd_revision == LCD_VERSION_2) {
- lcdc_write(0, LCD_INT_ENABLE_SET_REG);
- /* Write 1 to reset */
- lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
- lcdc_write(0, LCD_CLK_RESET_REG);
- }
-}
-
-static int da8xx_fb_config_clk_divider(struct da8xx_fb_par *par,
- unsigned lcdc_clk_div,
- unsigned lcdc_clk_rate)
-{
- int ret;
-
- if (par->lcdc_clk_rate != lcdc_clk_rate) {
- ret = clk_set_rate(par->lcdc_clk, lcdc_clk_rate);
- if (ret) {
- dev_err(par->dev,
- "unable to set clock rate at %u\n",
- lcdc_clk_rate);
- return ret;
- }
- par->lcdc_clk_rate = clk_get_rate(par->lcdc_clk);
- }
-
- /* Configure the LCD clock divisor. */
- lcdc_write(LCD_CLK_DIVISOR(lcdc_clk_div) |
- (LCD_RASTER_MODE & 0x1), LCD_CTRL_REG);
-
- if (lcd_revision == LCD_VERSION_2)
- lcdc_write(LCD_V2_DMA_CLK_EN | LCD_V2_LIDD_CLK_EN |
- LCD_V2_CORE_CLK_EN, LCD_CLK_ENABLE_REG);
-
- return 0;
-}
-
-static unsigned int da8xx_fb_calc_clk_divider(struct da8xx_fb_par *par,
- unsigned pixclock,
- unsigned *lcdc_clk_rate)
-{
- unsigned lcdc_clk_div;
-
- pixclock = PICOS2KHZ(pixclock) * 1000;
-
- *lcdc_clk_rate = par->lcdc_clk_rate;
-
- if (pixclock < (*lcdc_clk_rate / CLK_MAX_DIV)) {
- *lcdc_clk_rate = clk_round_rate(par->lcdc_clk,
- pixclock * CLK_MAX_DIV);
- lcdc_clk_div = CLK_MAX_DIV;
- } else if (pixclock > (*lcdc_clk_rate / CLK_MIN_DIV)) {
- *lcdc_clk_rate = clk_round_rate(par->lcdc_clk,
- pixclock * CLK_MIN_DIV);
- lcdc_clk_div = CLK_MIN_DIV;
- } else {
- lcdc_clk_div = *lcdc_clk_rate / pixclock;
- }
-
- return lcdc_clk_div;
-}
-
-static int da8xx_fb_calc_config_clk_divider(struct da8xx_fb_par *par,
- struct fb_videomode *mode)
-{
- unsigned lcdc_clk_rate;
- unsigned lcdc_clk_div = da8xx_fb_calc_clk_divider(par, mode->pixclock,
- &lcdc_clk_rate);
-
- return da8xx_fb_config_clk_divider(par, lcdc_clk_div, lcdc_clk_rate);
-}
-
-static unsigned da8xx_fb_round_clk(struct da8xx_fb_par *par,
- unsigned pixclock)
-{
- unsigned lcdc_clk_div, lcdc_clk_rate;
-
- lcdc_clk_div = da8xx_fb_calc_clk_divider(par, pixclock, &lcdc_clk_rate);
- return KHZ2PICOS(lcdc_clk_rate / (1000 * lcdc_clk_div));
-}
-
-static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
- struct fb_videomode *panel)
-{
- u32 bpp;
- int ret = 0;
-
- ret = da8xx_fb_calc_config_clk_divider(par, panel);
- if (ret) {
- dev_err(par->dev, "unable to configure clock\n");
- return ret;
- }
-
- if (panel->sync & FB_SYNC_CLK_INVERT)
- lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) |
- LCD_INVERT_PIXEL_CLOCK), LCD_RASTER_TIMING_2_REG);
- else
- lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) &
- ~LCD_INVERT_PIXEL_CLOCK), LCD_RASTER_TIMING_2_REG);
-
- /* Configure the DMA burst size and fifo threshold. */
- ret = lcd_cfg_dma(cfg->dma_burst_sz, cfg->fifo_th);
- if (ret < 0)
- return ret;
-
- /* Configure the vertical and horizontal sync properties. */
- lcd_cfg_vertical_sync(panel->upper_margin, panel->vsync_len,
- panel->lower_margin);
- lcd_cfg_horizontal_sync(panel->left_margin, panel->hsync_len,
- panel->right_margin);
-
-	/* Configure the display */
- ret = lcd_cfg_display(cfg, panel);
- if (ret < 0)
- return ret;
-
- bpp = cfg->bpp;
-
- if (bpp == 12)
- bpp = 16;
- ret = lcd_cfg_frame_buffer(par, (unsigned int)panel->xres,
- (unsigned int)panel->yres, bpp,
- cfg->raster_order);
- if (ret < 0)
- return ret;
-
- /* Configure FDD */
- lcdc_write((lcdc_read(LCD_RASTER_CTRL_REG) & 0xfff00fff) |
- (cfg->fdd << 12), LCD_RASTER_CTRL_REG);
-
- return 0;
-}
-
-/* IRQ handler for version 2 of LCDC */
-static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
-{
- struct da8xx_fb_par *par = arg;
- u32 stat = lcdc_read(LCD_MASKED_STAT_REG);
-
- if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
- lcd_disable_raster(DA8XX_FRAME_NOWAIT);
- lcdc_write(stat, LCD_MASKED_STAT_REG);
- lcd_enable_raster();
- } else if (stat & LCD_PL_LOAD_DONE) {
-		/*
-		 * Raster must be disabled before changing the state of any
-		 * control bit, and also before clearing the PL loading
-		 * interrupt via the following write to the status register;
-		 * otherwise multiple PL done interrupts are generated.
-		 */
- lcd_disable_raster(DA8XX_FRAME_NOWAIT);
-
- lcdc_write(stat, LCD_MASKED_STAT_REG);
-
- /* Disable PL completion interrupt */
- lcdc_write(LCD_V2_PL_INT_ENA, LCD_INT_ENABLE_CLR_REG);
-
- /* Setup and start data loading mode */
- lcd_blit(LOAD_DATA, par);
- } else {
- lcdc_write(stat, LCD_MASKED_STAT_REG);
-
- if (stat & LCD_END_OF_FRAME0) {
- par->which_dma_channel_done = 0;
- lcdc_write(par->dma_start,
- LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
- lcdc_write(par->dma_end,
- LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
- par->vsync_flag = 1;
- wake_up_interruptible(&par->vsync_wait);
- }
-
- if (stat & LCD_END_OF_FRAME1) {
- par->which_dma_channel_done = 1;
- lcdc_write(par->dma_start,
- LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
- lcdc_write(par->dma_end,
- LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
- par->vsync_flag = 1;
- wake_up_interruptible(&par->vsync_wait);
- }
-
-		/* The frame done bit is set only when the controller is
-		 * disabled and the active frame has completed
-		 */
- if (stat & BIT(0)) {
- frame_done_flag = 1;
- wake_up_interruptible(&frame_done_wq);
- }
- }
-
- lcdc_write(0, LCD_END_OF_INT_IND_REG);
- return IRQ_HANDLED;
-}
-
-/* IRQ handler for version 1 LCDC */
-static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg)
-{
- struct da8xx_fb_par *par = arg;
- u32 stat = lcdc_read(LCD_STAT_REG);
- u32 reg_ras;
-
- if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
- lcd_disable_raster(DA8XX_FRAME_NOWAIT);
- lcdc_write(stat, LCD_STAT_REG);
- lcd_enable_raster();
- } else if (stat & LCD_PL_LOAD_DONE) {
-		/*
-		 * Raster must be disabled before changing the state of any
-		 * control bit, and also before clearing the PL loading
-		 * interrupt via the following write to the status register;
-		 * otherwise multiple PL done interrupts are generated.
-		 */
- lcd_disable_raster(DA8XX_FRAME_NOWAIT);
-
- lcdc_write(stat, LCD_STAT_REG);
-
-		/* Disable PL completion interrupt */
- reg_ras = lcdc_read(LCD_RASTER_CTRL_REG);
- reg_ras &= ~LCD_V1_PL_INT_ENA;
- lcdc_write(reg_ras, LCD_RASTER_CTRL_REG);
-
- /* Setup and start data loading mode */
- lcd_blit(LOAD_DATA, par);
- } else {
- lcdc_write(stat, LCD_STAT_REG);
-
- if (stat & LCD_END_OF_FRAME0) {
- par->which_dma_channel_done = 0;
- lcdc_write(par->dma_start,
- LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
- lcdc_write(par->dma_end,
- LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
- par->vsync_flag = 1;
- wake_up_interruptible(&par->vsync_wait);
- }
-
- if (stat & LCD_END_OF_FRAME1) {
- par->which_dma_channel_done = 1;
- lcdc_write(par->dma_start,
- LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
- lcdc_write(par->dma_end,
- LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
- par->vsync_flag = 1;
- wake_up_interruptible(&par->vsync_wait);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static int fb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- int err = 0;
- struct da8xx_fb_par *par = info->par;
- int bpp = var->bits_per_pixel >> 3;
- unsigned long line_size = var->xres_virtual * bpp;
-
- if (var->bits_per_pixel > 16 && lcd_revision == LCD_VERSION_1)
- return -EINVAL;
-
- switch (var->bits_per_pixel) {
- case 1:
- case 8:
- var->red.offset = 0;
- var->red.length = 8;
- var->green.offset = 0;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 0;
- var->transp.length = 0;
- var->nonstd = 0;
- break;
- case 4:
- var->red.offset = 0;
- var->red.length = 4;
- var->green.offset = 0;
- var->green.length = 4;
- var->blue.offset = 0;
- var->blue.length = 4;
- var->transp.offset = 0;
- var->transp.length = 0;
- var->nonstd = FB_NONSTD_REV_PIX_IN_B;
- break;
- case 16: /* RGB 565 */
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.offset = 0;
- var->transp.length = 0;
- var->nonstd = 0;
- break;
- case 24:
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->nonstd = 0;
- break;
- case 32:
- var->transp.offset = 24;
- var->transp.length = 8;
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->nonstd = 0;
- break;
- default:
- err = -EINVAL;
- }
-
- var->red.msb_right = 0;
- var->green.msb_right = 0;
- var->blue.msb_right = 0;
- var->transp.msb_right = 0;
-
- if (line_size * var->yres_virtual > par->vram_size)
- var->yres_virtual = par->vram_size / line_size;
-
- if (var->yres > var->yres_virtual)
- var->yres = var->yres_virtual;
-
- if (var->xres > var->xres_virtual)
- var->xres = var->xres_virtual;
-
- if (var->xres + var->xoffset > var->xres_virtual)
- var->xoffset = var->xres_virtual - var->xres;
- if (var->yres + var->yoffset > var->yres_virtual)
- var->yoffset = var->yres_virtual - var->yres;
-
- var->pixclock = da8xx_fb_round_clk(par, var->pixclock);
-
- return err;
-}
-
-#ifdef CONFIG_CPU_FREQ
-static int lcd_da8xx_cpufreq_transition(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct da8xx_fb_par *par;
-
- par = container_of(nb, struct da8xx_fb_par, freq_transition);
- if (val == CPUFREQ_POSTCHANGE) {
- if (par->lcdc_clk_rate != clk_get_rate(par->lcdc_clk)) {
- par->lcdc_clk_rate = clk_get_rate(par->lcdc_clk);
- lcd_disable_raster(DA8XX_FRAME_WAIT);
- da8xx_fb_calc_config_clk_divider(par, &par->mode);
- if (par->blank == FB_BLANK_UNBLANK)
- lcd_enable_raster();
- }
- }
-
- return 0;
-}
-
-static int lcd_da8xx_cpufreq_register(struct da8xx_fb_par *par)
-{
- par->freq_transition.notifier_call = lcd_da8xx_cpufreq_transition;
-
- return cpufreq_register_notifier(&par->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-static void lcd_da8xx_cpufreq_deregister(struct da8xx_fb_par *par)
-{
- cpufreq_unregister_notifier(&par->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-#endif
-
-static void fb_remove(struct platform_device *dev)
-{
- struct fb_info *info = platform_get_drvdata(dev);
- struct da8xx_fb_par *par = info->par;
- int ret;
-
-#ifdef CONFIG_CPU_FREQ
- lcd_da8xx_cpufreq_deregister(par);
-#endif
- if (par->lcd_supply) {
- ret = regulator_disable(par->lcd_supply);
- if (ret)
- dev_warn(&dev->dev, "Failed to disable regulator (%pe)\n",
- ERR_PTR(ret));
- }
-
- lcd_disable_raster(DA8XX_FRAME_WAIT);
- lcdc_write(0, LCD_RASTER_CTRL_REG);
-
- /* disable DMA */
- lcdc_write(0, LCD_DMA_CTRL_REG);
-
- unregister_framebuffer(info);
- fb_dealloc_cmap(&info->cmap);
- pm_runtime_put_sync(&dev->dev);
- pm_runtime_disable(&dev->dev);
- framebuffer_release(info);
-}
-
-/*
- * Function to wait for vertical sync which for this LCD peripheral
- * translates into waiting for the current raster frame to complete.
- */
-static int fb_wait_for_vsync(struct fb_info *info)
-{
- struct da8xx_fb_par *par = info->par;
- int ret;
-
-	/*
-	 * Set the flag to 0 and wait for the ISR to set it to 1. There is a
-	 * race condition here where the ISR could fire just before or just
-	 * after this assignment, but since we are only coarsely waiting for
-	 * a frame to complete, that is fine. If the frame completed just
-	 * before this code executed, we have to wait another full frame
-	 * time and there is no way to avoid that. If it completed just
-	 * after, we hardly need to wait at all. Either way we are guaranteed
-	 * to return to the user immediately after a frame completion, which
-	 * is all that is required.
-	 */
- par->vsync_flag = 0;
- ret = wait_event_interruptible_timeout(par->vsync_wait,
- par->vsync_flag != 0,
- par->vsync_timeout);
- if (ret < 0)
- return ret;
- if (ret == 0)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int fb_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
-{
- struct lcd_sync_arg sync_arg;
-
- switch (cmd) {
- case FBIOGET_CONTRAST:
- case FBIOPUT_CONTRAST:
- case FBIGET_BRIGHTNESS:
- case FBIPUT_BRIGHTNESS:
- case FBIGET_COLOR:
- case FBIPUT_COLOR:
- return -ENOTTY;
- case FBIPUT_HSYNC:
- if (copy_from_user(&sync_arg, (char *)arg,
- sizeof(struct lcd_sync_arg)))
- return -EFAULT;
- lcd_cfg_horizontal_sync(sync_arg.back_porch,
- sync_arg.pulse_width,
- sync_arg.front_porch);
- break;
- case FBIPUT_VSYNC:
- if (copy_from_user(&sync_arg, (char *)arg,
- sizeof(struct lcd_sync_arg)))
- return -EFAULT;
- lcd_cfg_vertical_sync(sync_arg.back_porch,
- sync_arg.pulse_width,
- sync_arg.front_porch);
- break;
- case FBIO_WAITFORVSYNC:
- return fb_wait_for_vsync(info);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int cfb_blank(int blank, struct fb_info *info)
-{
- struct da8xx_fb_par *par = info->par;
- int ret = 0;
-
- if (par->blank == blank)
- return 0;
-
- par->blank = blank;
- switch (blank) {
- case FB_BLANK_UNBLANK:
- lcd_enable_raster();
-
- if (par->lcd_supply) {
- ret = regulator_enable(par->lcd_supply);
- if (ret)
- return ret;
- }
- break;
- case FB_BLANK_NORMAL:
- case FB_BLANK_VSYNC_SUSPEND:
- case FB_BLANK_HSYNC_SUSPEND:
- case FB_BLANK_POWERDOWN:
- if (par->lcd_supply) {
- ret = regulator_disable(par->lcd_supply);
- if (ret)
- return ret;
- }
-
- lcd_disable_raster(DA8XX_FRAME_WAIT);
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-/*
- * Set new x,y offsets in the virtual display for the visible area and switch
- * to the new mode.
- */
-static int da8xx_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *fbi)
-{
- int ret = 0;
- struct fb_var_screeninfo new_var;
- struct da8xx_fb_par *par = fbi->par;
- struct fb_fix_screeninfo *fix = &fbi->fix;
- unsigned int end;
- unsigned int start;
- unsigned long irq_flags;
-
- if (var->xoffset != fbi->var.xoffset ||
- var->yoffset != fbi->var.yoffset) {
- memcpy(&new_var, &fbi->var, sizeof(new_var));
- new_var.xoffset = var->xoffset;
- new_var.yoffset = var->yoffset;
- if (fb_check_var(&new_var, fbi))
- ret = -EINVAL;
- else {
- memcpy(&fbi->var, &new_var, sizeof(new_var));
-
- start = fix->smem_start +
- new_var.yoffset * fix->line_length +
- new_var.xoffset * fbi->var.bits_per_pixel / 8;
- end = start + fbi->var.yres * fix->line_length - 1;
- par->dma_start = start;
- par->dma_end = end;
- spin_lock_irqsave(&par->lock_for_chan_update,
- irq_flags);
- if (par->which_dma_channel_done == 0) {
- lcdc_write(par->dma_start,
- LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
- lcdc_write(par->dma_end,
- LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
- } else if (par->which_dma_channel_done == 1) {
- lcdc_write(par->dma_start,
- LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
- lcdc_write(par->dma_end,
- LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
- }
- spin_unlock_irqrestore(&par->lock_for_chan_update,
- irq_flags);
- }
- }
-
- return ret;
-}
-
-static int da8xxfb_set_par(struct fb_info *info)
-{
- struct da8xx_fb_par *par = info->par;
- int ret;
- bool raster = da8xx_fb_is_raster_enabled();
-
- if (raster)
- lcd_disable_raster(DA8XX_FRAME_WAIT);
-
- fb_var_to_videomode(&par->mode, &info->var);
-
- par->cfg.bpp = info->var.bits_per_pixel;
-
- info->fix.visual = (par->cfg.bpp <= 8) ?
- FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
- info->fix.line_length = (par->mode.xres * par->cfg.bpp) / 8;
-
- ret = lcd_init(par, &par->cfg, &par->mode);
- if (ret < 0) {
- dev_err(par->dev, "lcd init failed\n");
- return ret;
- }
-
- par->dma_start = info->fix.smem_start +
- info->var.yoffset * info->fix.line_length +
- info->var.xoffset * info->var.bits_per_pixel / 8;
- par->dma_end = par->dma_start +
- info->var.yres * info->fix.line_length - 1;
-
- lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
- lcdc_write(par->dma_end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
- lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
- lcdc_write(par->dma_end, LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
-
- if (raster)
- lcd_enable_raster();
-
- return 0;
-}
-
-static const struct fb_ops da8xx_fb_ops = {
- .owner = THIS_MODULE,
- FB_DEFAULT_IOMEM_OPS,
- .fb_check_var = fb_check_var,
- .fb_set_par = da8xxfb_set_par,
- .fb_setcolreg = fb_setcolreg,
- .fb_pan_display = da8xx_pan_display,
- .fb_ioctl = fb_ioctl,
- .fb_blank = cfb_blank,
-};
-
-static struct fb_videomode *da8xx_fb_get_videomode(struct platform_device *dev)
-{
- struct da8xx_lcdc_platform_data *fb_pdata = dev_get_platdata(&dev->dev);
- struct fb_videomode *lcdc_info;
- int i;
-
- for (i = 0, lcdc_info = known_lcd_panels;
- i < ARRAY_SIZE(known_lcd_panels); i++, lcdc_info++) {
- if (strcmp(fb_pdata->type, lcdc_info->name) == 0)
- break;
- }
-
- if (i == ARRAY_SIZE(known_lcd_panels)) {
- dev_err(&dev->dev, "no panel found\n");
- return NULL;
- }
- dev_info(&dev->dev, "found %s panel\n", lcdc_info->name);
-
- return lcdc_info;
-}
-
-static int fb_probe(struct platform_device *device)
-{
- struct da8xx_lcdc_platform_data *fb_pdata =
- dev_get_platdata(&device->dev);
- struct lcd_ctrl_config *lcd_cfg;
- struct fb_videomode *lcdc_info;
- struct fb_info *da8xx_fb_info;
- struct da8xx_fb_par *par;
- struct clk *tmp_lcdc_clk;
- int ret;
- unsigned long ulcm;
-
- if (fb_pdata == NULL) {
- dev_err(&device->dev, "Can not get platform data\n");
- return -ENOENT;
- }
-
- lcdc_info = da8xx_fb_get_videomode(device);
- if (lcdc_info == NULL)
- return -ENODEV;
-
- da8xx_fb_reg_base = devm_platform_ioremap_resource(device, 0);
- if (IS_ERR(da8xx_fb_reg_base))
- return PTR_ERR(da8xx_fb_reg_base);
-
- tmp_lcdc_clk = devm_clk_get(&device->dev, "fck");
- if (IS_ERR(tmp_lcdc_clk))
- return dev_err_probe(&device->dev, PTR_ERR(tmp_lcdc_clk),
- "Can not get device clock\n");
-
- pm_runtime_enable(&device->dev);
- pm_runtime_get_sync(&device->dev);
-
- /* Determine LCD IP Version */
- switch (lcdc_read(LCD_PID_REG)) {
- case 0x4C100102:
- lcd_revision = LCD_VERSION_1;
- break;
- case 0x4F200800:
- case 0x4F201000:
- lcd_revision = LCD_VERSION_2;
- break;
- default:
- dev_warn(&device->dev, "Unknown PID Reg value 0x%x, "
- "defaulting to LCD revision 1\n",
- lcdc_read(LCD_PID_REG));
- lcd_revision = LCD_VERSION_1;
- break;
- }
-
- lcd_cfg = (struct lcd_ctrl_config *)fb_pdata->controller_data;
-
- if (!lcd_cfg) {
- ret = -EINVAL;
- goto err_pm_runtime_disable;
- }
-
- da8xx_fb_info = framebuffer_alloc(sizeof(struct da8xx_fb_par),
- &device->dev);
- if (!da8xx_fb_info) {
- ret = -ENOMEM;
- goto err_pm_runtime_disable;
- }
-
- par = da8xx_fb_info->par;
- par->dev = &device->dev;
- par->lcdc_clk = tmp_lcdc_clk;
- par->lcdc_clk_rate = clk_get_rate(par->lcdc_clk);
-
- par->lcd_supply = devm_regulator_get_optional(&device->dev, "lcd");
- if (IS_ERR(par->lcd_supply)) {
- if (PTR_ERR(par->lcd_supply) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto err_release_fb;
- }
-
- par->lcd_supply = NULL;
- } else {
- ret = regulator_enable(par->lcd_supply);
- if (ret)
- goto err_release_fb;
- }
-
- fb_videomode_to_var(&da8xx_fb_var, lcdc_info);
- par->cfg = *lcd_cfg;
-
- da8xx_fb_lcd_reset();
-
- /* allocate frame buffer */
- par->vram_size = lcdc_info->xres * lcdc_info->yres * lcd_cfg->bpp;
- ulcm = lcm((lcdc_info->xres * lcd_cfg->bpp)/8, PAGE_SIZE);
- par->vram_size = roundup(par->vram_size/8, ulcm);
- par->vram_size = par->vram_size * LCD_NUM_BUFFERS;
-
- par->vram_virt = dmam_alloc_coherent(par->dev,
- par->vram_size,
- &par->vram_phys,
- GFP_KERNEL | GFP_DMA);
- if (!par->vram_virt) {
- dev_err(&device->dev,
- "GLCD: kmalloc for frame buffer failed\n");
- ret = -EINVAL;
- goto err_disable_reg;
- }
-
- da8xx_fb_info->screen_base = (char __iomem *) par->vram_virt;
- da8xx_fb_fix.smem_start = par->vram_phys;
- da8xx_fb_fix.smem_len = par->vram_size;
- da8xx_fb_fix.line_length = (lcdc_info->xres * lcd_cfg->bpp) / 8;
-
- par->dma_start = par->vram_phys;
- par->dma_end = par->dma_start + lcdc_info->yres *
- da8xx_fb_fix.line_length - 1;
-
- /* allocate palette buffer */
- par->v_palette_base = dmam_alloc_coherent(par->dev, PALETTE_SIZE,
- &par->p_palette_base,
- GFP_KERNEL | GFP_DMA);
- if (!par->v_palette_base) {
- dev_err(&device->dev,
- "GLCD: kmalloc for palette buffer failed\n");
- ret = -EINVAL;
- goto err_release_fb;
- }
-
- par->irq = platform_get_irq(device, 0);
- if (par->irq < 0) {
- ret = -ENOENT;
- goto err_release_fb;
- }
-
- da8xx_fb_var.grayscale =
- lcd_cfg->panel_shade == MONOCHROME ? 1 : 0;
- da8xx_fb_var.bits_per_pixel = lcd_cfg->bpp;
-
- /* Initialize fbinfo */
- da8xx_fb_info->fix = da8xx_fb_fix;
- da8xx_fb_info->var = da8xx_fb_var;
- da8xx_fb_info->fbops = &da8xx_fb_ops;
- da8xx_fb_info->pseudo_palette = par->pseudo_palette;
- da8xx_fb_info->fix.visual = (da8xx_fb_info->var.bits_per_pixel <= 8) ?
- FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
-
- ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0);
- if (ret)
- goto err_disable_reg;
- da8xx_fb_info->cmap.len = par->palette_sz;
-
- /* initialize var_screeninfo */
- da8xx_fb_var.activate = FB_ACTIVATE_FORCE;
- fb_set_var(da8xx_fb_info, &da8xx_fb_var);
-
- platform_set_drvdata(device, da8xx_fb_info);
-
- /* initialize the vsync wait queue */
- init_waitqueue_head(&par->vsync_wait);
- par->vsync_timeout = HZ / 5;
- par->which_dma_channel_done = -1;
- spin_lock_init(&par->lock_for_chan_update);
-
- /* Register the Frame Buffer */
- if (register_framebuffer(da8xx_fb_info) < 0) {
- dev_err(&device->dev,
- "GLCD: Frame Buffer Registration Failed!\n");
- ret = -EINVAL;
- goto err_dealloc_cmap;
- }
-
-#ifdef CONFIG_CPU_FREQ
- ret = lcd_da8xx_cpufreq_register(par);
- if (ret) {
- dev_err(&device->dev, "failed to register cpufreq\n");
- goto err_cpu_freq;
- }
-#endif
-
- if (lcd_revision == LCD_VERSION_1)
- lcdc_irq_handler = lcdc_irq_handler_rev01;
- else {
- init_waitqueue_head(&frame_done_wq);
- lcdc_irq_handler = lcdc_irq_handler_rev02;
- }
-
- ret = devm_request_irq(&device->dev, par->irq, lcdc_irq_handler, 0,
- DRIVER_NAME, par);
- if (ret)
- goto irq_freq;
- return 0;
-
-irq_freq:
-#ifdef CONFIG_CPU_FREQ
- lcd_da8xx_cpufreq_deregister(par);
-err_cpu_freq:
-#endif
- unregister_framebuffer(da8xx_fb_info);
-
-err_dealloc_cmap:
- fb_dealloc_cmap(&da8xx_fb_info->cmap);
-
-err_disable_reg:
- if (par->lcd_supply)
- regulator_disable(par->lcd_supply);
-err_release_fb:
- framebuffer_release(da8xx_fb_info);
-
-err_pm_runtime_disable:
- pm_runtime_put_sync(&device->dev);
- pm_runtime_disable(&device->dev);
-
- return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static struct lcdc_context {
- u32 clk_enable;
- u32 ctrl;
- u32 dma_ctrl;
- u32 raster_timing_0;
- u32 raster_timing_1;
- u32 raster_timing_2;
- u32 int_enable_set;
- u32 dma_frm_buf_base_addr_0;
- u32 dma_frm_buf_ceiling_addr_0;
- u32 dma_frm_buf_base_addr_1;
- u32 dma_frm_buf_ceiling_addr_1;
- u32 raster_ctrl;
-} reg_context;
-
-static void lcd_context_save(void)
-{
- if (lcd_revision == LCD_VERSION_2) {
- reg_context.clk_enable = lcdc_read(LCD_CLK_ENABLE_REG);
- reg_context.int_enable_set = lcdc_read(LCD_INT_ENABLE_SET_REG);
- }
-
- reg_context.ctrl = lcdc_read(LCD_CTRL_REG);
- reg_context.dma_ctrl = lcdc_read(LCD_DMA_CTRL_REG);
- reg_context.raster_timing_0 = lcdc_read(LCD_RASTER_TIMING_0_REG);
- reg_context.raster_timing_1 = lcdc_read(LCD_RASTER_TIMING_1_REG);
- reg_context.raster_timing_2 = lcdc_read(LCD_RASTER_TIMING_2_REG);
- reg_context.dma_frm_buf_base_addr_0 =
- lcdc_read(LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
- reg_context.dma_frm_buf_ceiling_addr_0 =
- lcdc_read(LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
- reg_context.dma_frm_buf_base_addr_1 =
- lcdc_read(LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
- reg_context.dma_frm_buf_ceiling_addr_1 =
- lcdc_read(LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
- reg_context.raster_ctrl = lcdc_read(LCD_RASTER_CTRL_REG);
- return;
-}
-
-static void lcd_context_restore(void)
-{
- if (lcd_revision == LCD_VERSION_2) {
- lcdc_write(reg_context.clk_enable, LCD_CLK_ENABLE_REG);
- lcdc_write(reg_context.int_enable_set, LCD_INT_ENABLE_SET_REG);
- }
-
- lcdc_write(reg_context.ctrl, LCD_CTRL_REG);
- lcdc_write(reg_context.dma_ctrl, LCD_DMA_CTRL_REG);
- lcdc_write(reg_context.raster_timing_0, LCD_RASTER_TIMING_0_REG);
- lcdc_write(reg_context.raster_timing_1, LCD_RASTER_TIMING_1_REG);
- lcdc_write(reg_context.raster_timing_2, LCD_RASTER_TIMING_2_REG);
- lcdc_write(reg_context.dma_frm_buf_base_addr_0,
- LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
- lcdc_write(reg_context.dma_frm_buf_ceiling_addr_0,
- LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
- lcdc_write(reg_context.dma_frm_buf_base_addr_1,
- LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
- lcdc_write(reg_context.dma_frm_buf_ceiling_addr_1,
- LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
- lcdc_write(reg_context.raster_ctrl, LCD_RASTER_CTRL_REG);
- return;
-}
-
-static int fb_suspend(struct device *dev)
-{
- struct fb_info *info = dev_get_drvdata(dev);
- struct da8xx_fb_par *par = info->par;
- int ret;
-
- console_lock();
- if (par->lcd_supply) {
- ret = regulator_disable(par->lcd_supply);
- if (ret)
- return ret;
- }
-
- fb_set_suspend(info, 1);
- lcd_disable_raster(DA8XX_FRAME_WAIT);
- lcd_context_save();
- pm_runtime_put_sync(dev);
- console_unlock();
-
- return 0;
-}
-static int fb_resume(struct device *dev)
-{
- struct fb_info *info = dev_get_drvdata(dev);
- struct da8xx_fb_par *par = info->par;
- int ret;
-
- console_lock();
- pm_runtime_get_sync(dev);
- lcd_context_restore();
- if (par->blank == FB_BLANK_UNBLANK) {
- lcd_enable_raster();
-
- if (par->lcd_supply) {
- ret = regulator_enable(par->lcd_supply);
- if (ret)
- return ret;
- }
- }
-
- fb_set_suspend(info, 0);
- console_unlock();
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(fb_pm_ops, fb_suspend, fb_resume);
-
-static struct platform_driver da8xx_fb_driver = {
- .probe = fb_probe,
- .remove = fb_remove,
- .driver = {
- .name = DRIVER_NAME,
- .pm = &fb_pm_ops,
- },
-};
-module_platform_driver(da8xx_fb_driver);
-
-MODULE_DESCRIPTION("Framebuffer driver for TI da8xx/omap-l1xx");
-MODULE_AUTHOR("Texas Instruments");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index 0b7e7b38c05a..34b6abff9493 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -710,7 +710,7 @@ static int ffb_blank(int blank, struct fb_info *info)
return 0;
}
-static struct sbus_mmap_map ffb_mmap_map[] = {
+static const struct sbus_mmap_map ffb_mmap_map[] = {
{
.voff = FFB_SFB8R_VOFF,
.poff = FFB_SFB8R_POFF,
diff --git a/drivers/video/fbdev/leo.c b/drivers/video/fbdev/leo.c
index 271e2e8c6a84..b9fb059df2c7 100644
--- a/drivers/video/fbdev/leo.c
+++ b/drivers/video/fbdev/leo.c
@@ -338,7 +338,7 @@ static int leo_blank(int blank, struct fb_info *info)
return 0;
}
-static struct sbus_mmap_map leo_mmap_map[] = {
+static const struct sbus_mmap_map leo_mmap_map[] = {
{
.voff = LEO_SS0_MAP,
.poff = LEO_OFF_SS0,
diff --git a/drivers/video/fbdev/nvidia/nv_hw.c b/drivers/video/fbdev/nvidia/nv_hw.c
index 9b0a324bb1b4..75afaa07e7eb 100644
--- a/drivers/video/fbdev/nvidia/nv_hw.c
+++ b/drivers/video/fbdev/nvidia/nv_hw.c
@@ -1509,10 +1509,10 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
NV_WR32(par->PFIFO, 0x0495 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0140 * 4, 0x00000001);
- if (!state) {
- par->CurrentState = NULL;
- return;
- }
+ if (!state) {
+ par->CurrentState = NULL;
+ return;
+ }
if (par->Architecture >= NV_ARCH_10) {
if (par->twoHeads) {
diff --git a/drivers/video/fbdev/p9100.c b/drivers/video/fbdev/p9100.c
index 124468f0e9ef..0bc0f78fe4b9 100644
--- a/drivers/video/fbdev/p9100.c
+++ b/drivers/video/fbdev/p9100.c
@@ -206,7 +206,7 @@ p9100_blank(int blank, struct fb_info *info)
return 0;
}
-static struct sbus_mmap_map p9100_mmap_map[] = {
+static const struct sbus_mmap_map p9100_mmap_map[] = {
{ CG3_MMAP_OFFSET, 0, SBUS_MMAP_FBSIZE(1) },
{ 0, 0, 0 }
};
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index 634e3d159452..4c79654bda30 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -38,7 +38,7 @@ static unsigned long sbusfb_mmapsize(long size, unsigned long fbsize)
return fbsize * (-size);
}
-int sbusfb_mmap_helper(struct sbus_mmap_map *map,
+int sbusfb_mmap_helper(const struct sbus_mmap_map *map,
unsigned long physbase,
unsigned long fbsize,
unsigned long iospace,
diff --git a/drivers/video/fbdev/sbuslib.h b/drivers/video/fbdev/sbuslib.h
index 6466b4cbcd7b..e9af2dc93f94 100644
--- a/drivers/video/fbdev/sbuslib.h
+++ b/drivers/video/fbdev/sbuslib.h
@@ -19,7 +19,7 @@ struct sbus_mmap_map {
extern void sbusfb_fill_var(struct fb_var_screeninfo *var,
struct device_node *dp, int bpp);
-extern int sbusfb_mmap_helper(struct sbus_mmap_map *map,
+extern int sbusfb_mmap_helper(const struct sbus_mmap_map *map,
unsigned long physbase, unsigned long fbsize,
unsigned long iospace,
struct vm_area_struct *vma);
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index f8ae54ca0cc3..2ea947f57efb 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -716,6 +716,7 @@ static void sstfb_setvgapass( struct fb_info *info, int enable )
pci_write_config_dword(sst_dev, PCI_INIT_ENABLE, tmp);
}
+#ifdef CONFIG_FB_DEVICE
static ssize_t store_vgapass(struct device *device, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -739,7 +740,8 @@ static ssize_t show_vgapass(struct device *device, struct device_attribute *attr
static struct device_attribute device_attrs[] = {
__ATTR(vgapass, S_IRUGO|S_IWUSR, show_vgapass, store_vgapass)
- };
+};
+#endif
static int sstfb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
@@ -1436,9 +1438,10 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sstfb_clear_screen(info);
+#ifdef CONFIG_FB_DEVICE
if (device_create_file(info->dev, &device_attrs[0]))
printk(KERN_WARNING "sstfb: can't create sysfs entry.\n");
-
+#endif
fb_info(info, "%s frame buffer device at 0x%p\n",
fix->id, info->screen_base);
@@ -1468,7 +1471,9 @@ static void sstfb_remove(struct pci_dev *pdev)
info = pci_get_drvdata(pdev);
par = info->par;
+#ifdef CONFIG_FB_DEVICE
device_remove_file(info->dev, &device_attrs[0]);
+#endif
sst_shutdown(info);
iounmap(info->screen_base);
iounmap(par->mmio_vbase);
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index 6eb8bb2e3501..f9a0085ad72b 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -236,7 +236,7 @@ tcx_blank(int blank, struct fb_info *info)
return 0;
}
-static struct sbus_mmap_map __tcx_mmap_map[TCX_MMAP_ENTRIES] = {
+static const struct sbus_mmap_map __tcx_mmap_map[TCX_MMAP_ENTRIES] = {
{
.voff = TCX_RAM8BIT,
.size = SBUS_MMAP_FBSIZE(1)