author    Jakub Kicinski <kuba@kernel.org>    2023-03-09 22:18:59 -0800
committer Jakub Kicinski <kuba@kernel.org>    2023-03-09 22:22:11 -0800
commit    d0ddf5065ffef45f8fce4001abe0206081c7ff10 (patch)
tree      ea83817cbe9fc25261eae87b85afd9fe086f479e /drivers
parent    db47fa2e4cbf180a39d8e6d6170962bd7d82e52d (diff)
parent    44889ba56cbb3d51154660ccd15818bc77276696 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Documentation/bpf/bpf_devel_QA.rst
  b7abcd9c656b ("bpf, doc: Link to submitting-patches.rst for general patch submission info")
  d56b0c461d19 ("bpf, docs: Fix link to netdev-FAQ target")
https://lore.kernel.org/all/20230307095812.236eb1be@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/resource.c | 7
-rw-r--r-- drivers/acpi/x86/s2idle.c | 24
-rw-r--r-- drivers/acpi/x86/utils.c | 37
-rw-r--r-- drivers/ata/ahci.c | 1
-rw-r--r-- drivers/auxdisplay/hd44780.c | 2
-rw-r--r-- drivers/base/bus.c | 2
-rw-r--r-- drivers/base/core.c | 12
-rw-r--r-- drivers/base/platform-msi.c | 1
-rw-r--r-- drivers/block/loop.c | 8
-rw-r--r-- drivers/block/rbd.c | 20
-rw-r--r-- drivers/block/ublk_drv.c | 3
-rw-r--r-- drivers/char/random.c | 2
-rw-r--r-- drivers/cpufreq/amd-pstate.c | 3
-rw-r--r-- drivers/cpufreq/apple-soc-cpufreq.c | 4
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 2
-rw-r--r-- drivers/crypto/caam/caamalg.c | 26
-rw-r--r-- drivers/crypto/caam/caamalg_qi.c | 40
-rw-r--r-- drivers/crypto/caam/qi.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 83
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v8_10.c | 202
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 15
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 25
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 1
-rw-r--r-- drivers/gpu/drm/drm_gem_shmem_helper.c | 2
-rw-r--r-- drivers/gpu/drm/i915/Kconfig | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_quirks.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_mcr.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gvt/debugfs.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gvt/firmware.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fbdev.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/atombios_encoders.c | 5
-rw-r--r-- drivers/hid/hid-core.c | 32
-rw-r--r-- drivers/hid/hid-cp2112.c | 1
-rw-r--r-- drivers/hid/hid-logitech-hidpp.c | 2
-rw-r--r-- drivers/hid/intel-ish-hid/ipc/ipc.c | 9
-rw-r--r-- drivers/hid/uhid.c | 1
-rw-r--r-- drivers/i2c/busses/Kconfig | 1
-rw-r--r-- drivers/i2c/busses/i2c-gxp.c | 21
-rw-r--r-- drivers/i3c/master.c | 1
-rw-r--r-- drivers/i3c/master/dw-i3c-master.c | 5
-rw-r--r-- drivers/mtd/ubi/block.c | 109
-rw-r--r-- drivers/mtd/ubi/build.c | 32
-rw-r--r-- drivers/mtd/ubi/debug.c | 19
-rw-r--r-- drivers/mtd/ubi/eba.c | 2
-rw-r--r-- drivers/mtd/ubi/fastmap-wl.c | 12
-rw-r--r-- drivers/mtd/ubi/fastmap.c | 2
-rw-r--r-- drivers/mtd/ubi/kapi.c | 1
-rw-r--r-- drivers/mtd/ubi/misc.c | 2
-rw-r--r-- drivers/mtd/ubi/vmt.c | 18
-rw-r--r-- drivers/mtd/ubi/wl.c | 27
-rw-r--r-- drivers/net/dsa/mt7530.c | 35
-rw-r--r-- drivers/net/ethernet/Kconfig | 10
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 25
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 2
-rw-r--r-- drivers/net/ethernet/fealnx.c | 1953
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 17
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_tc_lib.c | 8
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 5
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 7
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 16
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 58
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 3
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 1
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_police.c | 2
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c | 32
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfd3/dp.c | 7
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c | 25
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfdk/dp.c | 6
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c | 8
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1
-rw-r--r-- drivers/net/ieee802154/ca8210.c | 2
-rw-r--r-- drivers/net/phy/microchip.c | 32
-rw-r--r-- drivers/net/phy/phy_device.c | 10
-rw-r--r-- drivers/net/phy/smsc.c | 14
-rw-r--r-- drivers/net/usb/cdc_mbim.c | 5
-rw-r--r-- drivers/net/usb/lan78xx.c | 27
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 1
-rw-r--r-- drivers/net/wireguard/queueing.h | 2
-rw-r--r-- drivers/nfc/fdp/i2c.c | 4
-rw-r--r-- drivers/nvme/host/auth.c | 2
-rw-r--r-- drivers/nvme/host/core.c | 37
-rw-r--r-- drivers/nvme/host/fabrics.h | 3
-rw-r--r-- drivers/nvme/host/tcp.c | 6
-rw-r--r-- drivers/pci/msi/api.c | 4
-rw-r--r-- drivers/perf/riscv_pmu_sbi.c | 7
-rw-r--r-- drivers/platform/mellanox/Kconfig | 9
-rw-r--r-- drivers/platform/x86/Kconfig | 3
-rw-r--r-- drivers/platform/x86/amd/pmc.c | 30
-rw-r--r-- drivers/platform/x86/dell/dell-wmi-ddv.c | 12
-rw-r--r-- drivers/platform/x86/intel/int3472/tps68470_board_data.c | 5
-rw-r--r-- drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 5
-rw-r--r-- drivers/platform/x86/intel/speed_select_if/isst_if_common.h | 1
-rw-r--r-- drivers/platform/x86/intel/tpmi.c | 14
-rw-r--r-- drivers/platform/x86/mlx-platform.c | 2
-rw-r--r-- drivers/power/supply/power_supply_core.c | 6
-rw-r--r-- drivers/power/supply/qcom_battmgr.c | 3
-rw-r--r-- drivers/powercap/intel_rapl_msr.c | 2
-rw-r--r-- drivers/powercap/powercap_sys.c | 1
-rw-r--r-- drivers/pwm/pwm-ab8500.c | 112
-rw-r--r-- drivers/pwm/pwm-dwc.c | 38
-rw-r--r-- drivers/pwm/pwm-iqs620a.c | 4
-rw-r--r-- drivers/pwm/pwm-lp3943.c | 1
-rw-r--r-- drivers/pwm/pwm-sifive.c | 8
-rw-r--r-- drivers/pwm/pwm-stm32-lp.c | 2
-rw-r--r-- drivers/regulator/core.c | 6
-rw-r--r-- drivers/regulator/max597x-regulator.c | 2
-rw-r--r-- drivers/rtc/Kconfig | 14
-rw-r--r-- drivers/rtc/Makefile | 1
-rw-r--r-- drivers/rtc/interface.c | 2
-rw-r--r-- drivers/rtc/rtc-ab-eoz9.c | 7
-rw-r--r-- drivers/rtc/rtc-abx80x.c | 77
-rw-r--r-- drivers/rtc/rtc-brcmstb-waketimer.c | 152
-rw-r--r-- drivers/rtc/rtc-ds1307.c | 6
-rw-r--r-- drivers/rtc/rtc-efi.c | 2
-rw-r--r-- drivers/rtc/rtc-hym8563.c | 7
-rw-r--r-- drivers/rtc/rtc-isl12022.c | 93
-rw-r--r-- drivers/rtc/rtc-jz4740.c | 94
-rw-r--r-- drivers/rtc/rtc-m41t80.c | 7
-rw-r--r-- drivers/rtc/rtc-max8907.c | 1
-rw-r--r-- drivers/rtc/rtc-moxart.c | 89
-rw-r--r-- drivers/rtc/rtc-nxp-bbnsm.c | 226
-rw-r--r-- drivers/rtc/rtc-pcf2123.c | 7
-rw-r--r-- drivers/rtc/rtc-pcf85063.c | 7
-rw-r--r-- drivers/rtc/rtc-pcf8523.c | 7
-rw-r--r-- drivers/rtc/rtc-pcf85363.c | 44
-rw-r--r-- drivers/rtc/rtc-pcf8563.c | 7
-rw-r--r-- drivers/rtc/rtc-pm8xxx.c | 533
-rw-r--r-- drivers/rtc/rtc-rv3028.c | 7
-rw-r--r-- drivers/rtc/rtc-rv3029c2.c | 7
-rw-r--r-- drivers/rtc/rtc-rv3032.c | 14
-rw-r--r-- drivers/rtc/rtc-rv8803.c | 52
-rw-r--r-- drivers/rtc/rtc-rx6110.c | 1
-rw-r--r-- drivers/rtc/rtc-rx8010.c | 8
-rw-r--r-- drivers/rtc/rtc-sun6i.c | 16
-rw-r--r-- drivers/s390/crypto/ap_queue.c | 2
-rw-r--r-- drivers/s390/crypto/vfio_ap_ops.c | 4
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 46
-rw-r--r-- drivers/s390/scsi/zfcp_def.h | 6
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 5
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 22
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.h | 2
-rw-r--r-- drivers/s390/scsi/zfcp_reqlist.h | 26
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 2
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.h | 1
-rw-r--r-- drivers/scsi/hosts.c | 4
-rw-r--r-- drivers/scsi/ipr.c | 54
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 10
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 18
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 2
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr.h | 11
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_app.c | 28
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_fw.c | 75
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_os.c | 4
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_transport.c | 2
-rw-r--r-- drivers/scsi/qedi/qedi_dbg.h | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 2
-rw-r--r-- drivers/scsi/scsi_lib.c | 1
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 10
-rw-r--r-- drivers/scsi/sd.c | 29
-rw-r--r-- drivers/scsi/sd_dif.c | 10
-rw-r--r-- drivers/scsi/ses.c | 64
-rw-r--r-- drivers/sh/clk/core.c | 2
-rw-r--r-- drivers/spi/spi-cadence-quadspi.c | 2
-rw-r--r-- drivers/spi/spi-sn-f-ospi.c | 2
-rw-r--r-- drivers/spi/spi-tegra210-quad.c | 15
-rw-r--r-- drivers/thermal/intel/Kconfig | 3
-rw-r--r-- drivers/thermal/intel/intel_quark_dts_thermal.c | 12
-rw-r--r-- drivers/tty/vt/vc_screen.c | 11
-rw-r--r-- drivers/ufs/core/ufshcd.c | 92
-rw-r--r-- drivers/ufs/host/Kconfig | 2
-rw-r--r-- drivers/ufs/host/ufs-mediatek.c | 2
-rw-r--r-- drivers/virt/coco/sev-guest/sev-guest.c | 20
-rw-r--r-- drivers/watchdog/Kconfig | 4
-rw-r--r-- drivers/watchdog/apple_wdt.c | 18
-rw-r--r-- drivers/watchdog/armada_37xx_wdt.c | 15
-rw-r--r-- drivers/watchdog/aspeed_wdt.c | 1
-rw-r--r-- drivers/watchdog/at91rm9200_wdt.c | 2
-rw-r--r-- drivers/watchdog/at91sam9_wdt.c | 7
-rw-r--r-- drivers/watchdog/bcm7038_wdt.c | 15
-rw-r--r-- drivers/watchdog/cadence_wdt.c | 17
-rw-r--r-- drivers/watchdog/da9062_wdt.c | 15
-rw-r--r-- drivers/watchdog/da9063_wdt.c | 15
-rw-r--r-- drivers/watchdog/davinci_wdt.c | 18
-rw-r--r-- drivers/watchdog/dw_wdt.c | 1
-rw-r--r-- drivers/watchdog/iTCO_wdt.c | 4
-rw-r--r-- drivers/watchdog/imgpdc_wdt.c | 31
-rw-r--r-- drivers/watchdog/imx2_wdt.c | 55
-rw-r--r-- drivers/watchdog/imx7ulp_wdt.c | 15
-rw-r--r-- drivers/watchdog/lpc18xx_wdt.c | 30
-rw-r--r-- drivers/watchdog/meson_gxbb_wdt.c | 16
-rw-r--r-- drivers/watchdog/mt7621_wdt.c | 122
-rw-r--r-- drivers/watchdog/mtk_wdt.c | 7
-rw-r--r-- drivers/watchdog/of_xilinx_wdt.c | 16
-rw-r--r-- drivers/watchdog/pcwd_usb.c | 6
-rw-r--r-- drivers/watchdog/pic32-dmt.c | 15
-rw-r--r-- drivers/watchdog/pic32-wdt.c | 17
-rw-r--r-- drivers/watchdog/pnx4008_wdt.c | 15
-rw-r--r-- drivers/watchdog/qcom-wdt.c | 16
-rw-r--r-- drivers/watchdog/realtek_otto_wdt.c | 17
-rw-r--r-- drivers/watchdog/rtd119x_wdt.c | 16
-rw-r--r-- drivers/watchdog/rzg2l_wdt.c | 45
-rw-r--r-- drivers/watchdog/rzn1_wdt.c | 18
-rw-r--r-- drivers/watchdog/sbsa_gwdt.c | 1
-rw-r--r-- drivers/watchdog/visconti_wdt.c | 17
-rw-r--r-- drivers/watchdog/watchdog_dev.c | 23
-rw-r--r-- drivers/watchdog/wdat_wdt.c | 6
-rw-r--r-- drivers/watchdog/ziirave_wdt.c | 5
254 files changed, 4739 insertions, 1760 deletions
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index a222bda7e15b..7c9125df5a65 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -440,6 +440,13 @@ static const struct dmi_system_id asus_laptop[] = {
},
},
{
+ .ident = "Asus ExpertBook B2402FBA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B2402FBA"),
+ },
+ },
+ {
.ident = "Asus ExpertBook B2502",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index c7afce465a07..e499c60c4579 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -384,29 +384,6 @@ static const struct acpi_device_id amd_hid_ids[] = {
{}
};
-static int lps0_prefer_amd(const struct dmi_system_id *id)
-{
- pr_debug("Using AMD GUID w/ _REV 2.\n");
- rev_id = 2;
- return 0;
-}
-static const struct dmi_system_id s2idle_dmi_table[] __initconst = {
- {
- /*
- * AMD Rembrandt based HP EliteBook 835/845/865 G9
- * Contains specialized AML in AMD/_REV 2 path to avoid
- * triggering a bug in Qualcomm WLAN firmware. This may be
- * removed in the future if that firmware is fixed.
- */
- .callback = lps0_prefer_amd,
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
- DMI_MATCH(DMI_BOARD_NAME, "8990"),
- },
- },
- {}
-};
-
static int lps0_device_attach(struct acpi_device *adev,
const struct acpi_device_id *not_used)
{
@@ -586,7 +563,6 @@ static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
void __init acpi_s2idle_setup(void)
{
- dmi_check_system(s2idle_dmi_table);
acpi_scan_add_handler(&lps0_handler);
s2idle_set_ops(&acpi_s2idle_ops_lps0);
}
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 4e816bb402f6..e45285d4e62a 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -200,39 +200,28 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
* a hardcoded allowlist for D3 support, which was used for these platforms.
*
* This allows quirking on Linux in a similar fashion.
+ *
+ * Cezanne systems shouldn't *normally* need this as the BIOS includes
+ * StorageD3Enable. But for two reasons we have added it.
+ * 1) The BIOS on a number of Dell systems have ambiguity
+ * between the same value used for _ADR on ACPI nodes GPP1.DEV0 and GPP1.NVME.
+ * GPP1.NVME is needed to get StorageD3Enable node set properly.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216440
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216773
+ * https://bugzilla.kernel.org/show_bug.cgi?id=217003
+ * 2) On at least one HP system StorageD3Enable is missing on the second NVME
+ * disk in the system.
*/
static const struct x86_cpu_id storage_d3_cpu_ids[] = {
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
- {}
-};
-
-static const struct dmi_system_id force_storage_d3_dmi[] = {
- {
- /*
- * _ADR is ambiguous between GPP1.DEV0 and GPP1.NVME
- * but .NVME is needed to get StorageD3Enable node
- * https://bugzilla.kernel.org/show_bug.cgi?id=216440
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14 7425 2-in-1"),
- }
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 16 5625"),
- }
- },
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */
{}
};
bool force_storage_d3(void)
{
- const struct dmi_system_id *dmi_id = dmi_first_match(force_storage_d3_dmi);
-
- return dmi_id || x86_match_cpu(storage_d3_cpu_ids);
+ return x86_match_cpu(storage_d3_cpu_ids);
}
/*
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 3bb9bb483fe3..14a1c0d14916 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -421,7 +421,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
- { PCI_VDEVICE(INTEL, 0xa0d3), board_ahci_low_power }, /* Tiger Lake UP{3,4} AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 8b2a0eb3f32a..d56a5d508ccd 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -322,8 +322,10 @@ fail1:
static int hd44780_remove(struct platform_device *pdev)
{
struct charlcd *lcd = platform_get_drvdata(pdev);
+ struct hd44780_common *hdc = lcd->drvdata;
charlcd_unregister(lcd);
+ kfree(hdc->hd44780);
kfree(lcd->drvdata);
kfree(lcd);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index cfe8615d5106..dd4b82d7510f 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -62,7 +62,7 @@ static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
struct subsys_private *sp = NULL;
struct kobject *kobj;
- if (!bus)
+ if (!bus || !bus_kset)
return NULL;
spin_lock(&bus_kset->list_lock);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index e54a10b5dbd7..6878dfcbf0d6 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -98,7 +98,7 @@ static int __fwnode_link_add(struct fwnode_handle *con,
list_add(&link->s_hook, &sup->consumers);
list_add(&link->c_hook, &con->suppliers);
- pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
+ pr_debug("%pfwf Linked as a fwnode consumer to %pfwf\n",
con, sup);
return 0;
@@ -122,7 +122,7 @@ int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
*/
static void __fwnode_link_del(struct fwnode_link *link)
{
- pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",
+ pr_debug("%pfwf Dropping the fwnode link to %pfwf\n",
link->consumer, link->supplier);
list_del(&link->s_hook);
list_del(&link->c_hook);
@@ -1062,7 +1062,7 @@ int device_links_check_suppliers(struct device *dev)
if (!dev_is_best_effort(dev)) {
fwnode_ret = -EPROBE_DEFER;
dev_err_probe(dev, -EPROBE_DEFER,
- "wait for supplier %pfwP\n", sup_fw);
+ "wait for supplier %pfwf\n", sup_fw);
} else {
fwnode_ret = -EAGAIN;
}
@@ -2046,9 +2046,9 @@ static int fw_devlink_create_devlink(struct device *con,
goto out;
}
- if (!device_link_add(con, sup_dev, flags)) {
- dev_err(con, "Failed to create device link with %s\n",
- dev_name(sup_dev));
+ if (con != sup_dev && !device_link_add(con, sup_dev, flags)) {
+ dev_err(con, "Failed to create device link (0x%x) with %s\n",
+ flags, dev_name(sup_dev));
ret = -EINVAL;
}
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 5883e7634a2b..f37ad34c80ec 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -324,6 +324,7 @@ void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int vir
struct platform_msi_priv_data *data = domain->host_data;
msi_lock_descs(data->dev);
+ msi_domain_depopulate_descs(data->dev, virq, nr_irqs);
irq_domain_free_irqs_common(domain, virq, nr_irqs);
msi_free_msi_descs_range(data->dev, virq, virq + nr_irqs - 1);
msi_unlock_descs(data->dev);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 5f04235e4ff7..839373451c2b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -977,13 +977,13 @@ loop_set_status_from_info(struct loop_device *lo,
return -EINVAL;
}
+ /* Avoid assigning overflow values */
+ if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX)
+ return -EOVERFLOW;
+
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
- /* loff_t vars have been assigned __u64 */
- if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
- return -EOVERFLOW;
-
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
lo->lo_flags = info->lo_flags;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 1faca7e07a4d..5cb008b9700a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5291,8 +5291,7 @@ static void rbd_dev_release(struct device *dev)
module_put(THIS_MODULE);
}
-static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
- struct rbd_spec *spec)
+static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
{
struct rbd_device *rbd_dev;
@@ -5337,9 +5336,6 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
rbd_dev->dev.parent = &rbd_root_dev;
device_initialize(&rbd_dev->dev);
- rbd_dev->rbd_client = rbdc;
- rbd_dev->spec = spec;
-
return rbd_dev;
}
@@ -5352,12 +5348,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
{
struct rbd_device *rbd_dev;
- rbd_dev = __rbd_dev_create(rbdc, spec);
+ rbd_dev = __rbd_dev_create(spec);
if (!rbd_dev)
return NULL;
- rbd_dev->opts = opts;
-
/* get an id and fill in device name */
rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
minor_to_rbd_dev_id(1 << MINORBITS),
@@ -5374,6 +5368,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
/* we have a ref from do_rbd_add() */
__module_get(THIS_MODULE);
+ rbd_dev->rbd_client = rbdc;
+ rbd_dev->spec = spec;
+ rbd_dev->opts = opts;
+
dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
return rbd_dev;
@@ -6735,7 +6733,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
goto out_err;
}
- parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
+ parent = __rbd_dev_create(rbd_dev->parent_spec);
if (!parent) {
ret = -ENOMEM;
goto out_err;
@@ -6745,8 +6743,8 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
* Images related by parent/child relationships always share
* rbd_client and spec/parent_spec, so bump their refcounts.
*/
- __rbd_get_client(rbd_dev->rbd_client);
- rbd_spec_get(rbd_dev->parent_spec);
+ parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
+ parent->spec = rbd_spec_get(rbd_dev->parent_spec);
__set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index b9c759cef00e..d1d1c8d606c8 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1271,9 +1271,6 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
ub_cmd->result);
- if (!(issue_flags & IO_URING_F_SQE128))
- goto out;
-
if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
goto out;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index ce3ccd172cc8..253f2ddb8913 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1311,7 +1311,7 @@ static void __cold try_to_generate_entropy(void)
/* Basic CPU round-robin, which avoids the current CPU. */
do {
cpu = cpumask_next(cpu, &timer_cpus);
- if (cpu == nr_cpumask_bits)
+ if (cpu >= nr_cpu_ids)
cpu = cpumask_first(&timer_cpus);
} while (cpu == smp_processor_id() && num_cpus > 1);
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 45c88894fd8e..73c7643b2697 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1263,7 +1263,7 @@ static int __init amd_pstate_init(void)
* with amd_pstate=passive or other modes in kernel command line
*/
if (cppc_state == AMD_PSTATE_DISABLE) {
- pr_debug("driver load is disabled, boot with specific mode to enable this\n");
+ pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}
@@ -1353,4 +1353,3 @@ early_param("amd_pstate", amd_pstate_param);
MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index c11d22fd84c3..021f423705e1 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -189,8 +189,8 @@ static int apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy,
*info = match->data;
*reg_base = of_iomap(args.np, 0);
- if (IS_ERR(*reg_base))
- return PTR_ERR(*reg_base);
+ if (!*reg_base)
+ return -ENOMEM;
return 0;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cb4beec27555..48a4613cef1e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -3358,6 +3358,7 @@ static const struct x86_cpu_id intel_epp_balance_perf[] = {
* AlderLake Mobile CPUs.
*/
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
+ X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 32),
{}
};
@@ -3516,4 +3517,3 @@ early_param("intel_pstate", intel_pstate_setup);
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
-MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4a9b998a8d26..12b1c8346243 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -60,7 +60,11 @@
#include <crypto/xts.h>
#include <asm/unaligned.h>
#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
/*
* crypto alg
@@ -1000,6 +1004,13 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
crypto_finalize_aead_request(jrp->engine, req, ecode);
}
+static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
+{
+
+ return PTR_ALIGN((u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+ dma_get_cache_alignment());
+}
+
static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
@@ -1027,8 +1038,7 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
* This is used e.g. by the CTS mode.
*/
if (ivsize && !ecode) {
- memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
- ivsize);
+ memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1683,18 +1693,19 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
/*
* allocate space for base edesc and hw desc commands, link tables, IV
*/
- aligned_size = ALIGN(ivsize, __alignof__(*edesc));
- aligned_size += sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
+ aligned_size = sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
aligned_size = ALIGN(aligned_size, dma_get_cache_alignment());
- iv = kzalloc(aligned_size, flags);
- if (!iv) {
+ aligned_size += ~(ARCH_KMALLOC_MINALIGN - 1) &
+ (dma_get_cache_alignment() - 1);
+ aligned_size += ALIGN(ivsize, dma_get_cache_alignment());
+ edesc = kzalloc(aligned_size, flags);
+ if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
0, 0, 0);
return ERR_PTR(-ENOMEM);
}
- edesc = (void *)(iv + ALIGN(ivsize, __alignof__(*edesc)));
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->mapped_src_nents = mapped_src_nents;
@@ -1706,6 +1717,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
/* Make sure IV is located in a DMAable area */
if (ivsize) {
+ iv = skcipher_edesc_iv(edesc);
memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 5e218bf20d5b..743ce50c14f2 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -20,8 +20,11 @@
#include "caamalg_desc.h"
#include <crypto/xts.h>
#include <asm/unaligned.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
+#include <linux/string.h>
/*
* crypto alg
@@ -1204,6 +1207,12 @@ static int ipsec_gcm_decrypt(struct aead_request *req)
false);
}
+static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
+{
+ return PTR_ALIGN((u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+ dma_get_cache_alignment());
+}
+
static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
struct skcipher_edesc *edesc;
@@ -1236,8 +1245,7 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
* This is used e.g. by the CTS mode.
*/
if (!ecode)
- memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
- ivsize);
+ memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
@@ -1259,6 +1267,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
+ unsigned int len;
drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (IS_ERR(drv_ctx))
@@ -1319,9 +1328,12 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(ALIGN(ivsize, __alignof__(*edesc)) +
- offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes >
- CAAM_QI_MEMCACHE_SIZE)) {
+
+ len = offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes;
+ len = ALIGN(len, dma_get_cache_alignment());
+ len += ivsize;
+
+ if (unlikely(len > CAAM_QI_MEMCACHE_SIZE)) {
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
@@ -1330,18 +1342,24 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
}
/* allocate space for base edesc, link tables and IV */
- iv = qi_cache_alloc(flags);
- if (unlikely(!iv)) {
+ edesc = qi_cache_alloc(flags);
+ if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
- edesc = (void *)(iv + ALIGN(ivsize, __alignof__(*edesc)));
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+ edesc->drv_req.app_ctx = req;
+ edesc->drv_req.cbk = skcipher_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
/* Make sure IV is located in a DMAable area */
sg_table = &edesc->sgt[0];
+ iv = skcipher_edesc_iv(edesc);
memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
@@ -1353,13 +1371,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
return ERR_PTR(-ENOMEM);
}
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
- edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->drv_req.app_ctx = req;
- edesc->drv_req.cbk = skcipher_done;
- edesc->drv_req.drv_ctx = drv_ctx;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 4c52c9365558..2ad2c1035856 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -8,7 +8,13 @@
*/
#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include <soc/fsl/qman.h>
#include "debugfs.h"
@@ -755,8 +761,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
napi_enable(irqtask);
}
- qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
- 0, NULL);
+ qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE,
+ dma_get_cache_alignment(), 0, NULL);
if (!qi_cache) {
dev_err(qidev, "Can't allocate CAAM cache\n");
free_rsp_fqs();
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 5341b6b242c3..a82d36ea88e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -6,6 +6,7 @@ config DRM_AMDGPU
select FW_LOADER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
+ select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_SCHED
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 458362e4ea01..d4196fcb85a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1073,6 +1073,9 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
(pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
return false;
+ if (adev->asic_type < CHIP_RAVEN)
+ return false;
+
/*
* If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally
* risky to do any special firmware-related preparations for entering
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 86fbb4138285..f5ffca24def4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -107,9 +107,12 @@
* - 3.50.0 - Update AMDGPU_INFO_DEV_INFO IOCTL for minimum engine and memory clock
* Update AMDGPU_INFO_SENSOR IOCTL for PEAK_PSTATE engine and memory clock
* 3.51.0 - Return the PCIe gen and lanes from the INFO ioctl
+ * 3.52.0 - Add AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD, add device_info fields:
+ * tcp_cache_size, num_sqc_per_wgp, sqc_data_cache_size, sqc_inst_cache_size,
+ * gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 51
+#define KMS_DRIVER_MINOR 52
#define KMS_DRIVER_PATCHLEVEL 0
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -921,7 +924,7 @@ module_param_named(reset_method, amdgpu_reset_method, int, 0444);
* result in the GPU entering bad status when the number of total
* faulty pages by ECC exceeds the threshold value.
*/
-MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement, -2 = ignore bad page threshold)");
+MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = ignore threshold (default value), 0 = disable bad page retirement, -2 = driver sets threshold)");
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
@@ -2414,8 +2417,10 @@ static int amdgpu_pmops_suspend(struct device *dev)
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
- else
+ else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
+ if (!adev->in_s0ix && !adev->in_s3)
+ return 0;
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2436,6 +2441,9 @@ static int amdgpu_pmops_resume(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
+ if (!adev->in_s0ix && !adev->in_s3)
+ return 0;
+
/* Avoids registers access if device is physically gone */
if (!pci_device_is_present(adev->pdev))
adev->no_hw_access = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 86ec9d0d12c8..de9e7a00bb15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -178,6 +178,8 @@ struct amdgpu_gfx_config {
uint32_t num_sc_per_sh;
uint32_t num_packer_per_sc;
uint32_t pa_sc_tile_steering_override;
+ /* Whether texture coordinate truncation is conformant. */
+ bool ta_cntl2_truncate_coord_mode;
uint64_t tcc_disabled_mask;
uint32_t gc_num_tcp_per_sa;
uint32_t gc_num_sdp_interface;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 94f10ac0eef7..12a6826caef4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -552,6 +552,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
/* VANGOGH */
case IP_VERSION(10, 3, 1):
/* YELLOW_CARP*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index ca945055e683..0efb38539d70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -808,6 +808,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
if (amdgpu_is_tmz(adev))
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
+ if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
+ dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_SIZE;
@@ -865,6 +867,15 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
+ dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
+ dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
+ dev_info->sqc_data_cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
+ dev_info->sqc_inst_cache_size = adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
+ dev_info->gl1c_cache_size = adev->gfx.config.gc_gl1c_size_per_instance *
+ adev->gfx.config.gc_gl1c_per_sa;
+ dev_info->gl2c_cache_size = adev->gfx.config.gc_gl2c_per_gpu;
+ dev_info->mall_size = adev->gmc.mall_size;
+
ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 981010de0a28..e3e1ed4314dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -139,7 +139,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
- else
+ else if (adev->gmc.real_vram_size != adev->gmc.visible_vram_size)
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 15e601f09648..28fe6d941054 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1683,7 +1683,7 @@ static int psp_hdcp_initialize(struct psp_context *psp)
psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
- if (!psp->hdcp_context.context.initialized) {
+ if (!psp->hdcp_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
if (ret)
return ret;
@@ -1750,7 +1750,7 @@ static int psp_dtm_initialize(struct psp_context *psp)
psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
- if (!psp->dtm_context.context.initialized) {
+ if (!psp->dtm_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
if (ret)
return ret;
@@ -1818,7 +1818,7 @@ static int psp_rap_initialize(struct psp_context *psp)
psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
- if (!psp->rap_context.context.initialized) {
+ if (!psp->rap_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 6e543558386d..63dfcc98152d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -176,7 +176,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
err_data.err_addr_cnt);
- amdgpu_ras_save_bad_pages(adev);
+ amdgpu_ras_save_bad_pages(adev, NULL);
}
dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
@@ -2084,22 +2084,32 @@ out:
/*
* write error record array to eeprom, the function should be
* protected by recovery_lock
+ * new_cnt: new added UE count, excluding reserved bad pages, can be NULL
*/
-int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
+int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
+ unsigned long *new_cnt)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
struct amdgpu_ras_eeprom_control *control;
int save_count;
- if (!con || !con->eh_data)
+ if (!con || !con->eh_data) {
+ if (new_cnt)
+ *new_cnt = 0;
+
return 0;
+ }
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
save_count = data->count - control->ras_num_recs;
mutex_unlock(&con->recovery_lock);
+
+ if (new_cnt)
+ *new_cnt = save_count / adev->umc.retire_unit;
+
/* only new entries are saved */
if (save_count > 0) {
if (amdgpu_ras_eeprom_append(control,
@@ -2186,11 +2196,12 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
/*
* Justification of value bad_page_cnt_threshold in ras structure
*
- * Generally, -1 <= amdgpu_bad_page_threshold <= max record length
- * in eeprom, and introduce two scenarios accordingly.
+ * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
+ * in eeprom or amdgpu_bad_page_threshold == -2, introduce two
+ * scenarios accordingly.
*
* Bad page retirement enablement:
- * - If amdgpu_bad_page_threshold = -1,
+ * - If amdgpu_bad_page_threshold = -2,
* bad_page_cnt_threshold = typical value by formula.
*
* - When the value from user is 0 < amdgpu_bad_page_threshold <
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index f2ad999993f6..ef38f4c93df0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -547,7 +547,8 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
struct eeprom_table_record *bps, int pages);
-int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev);
+int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
+ unsigned long *new_cnt);
static inline enum ta_ras_block
amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 2d9f3f4cd79e..2e08fce87521 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -417,7 +417,8 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- if (!__is_ras_eeprom_supported(adev))
+ if (!__is_ras_eeprom_supported(adev) ||
+ !amdgpu_bad_page_threshold)
return false;
/* skip check eeprom table for VEGA20 Gaming */
@@ -428,10 +429,18 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
return false;
if (con->eeprom_control.tbl_hdr.header == RAS_TABLE_HDR_BAD) {
- dev_warn(adev->dev, "This GPU is in BAD status.");
- dev_warn(adev->dev, "Please retire it or set a larger "
- "threshold value when reloading driver.\n");
- return true;
+ if (amdgpu_bad_page_threshold == -1) {
+ dev_warn(adev->dev, "RAS records:%d exceed threshold:%d",
+ con->eeprom_control.ras_num_recs, con->bad_page_cnt_threshold);
+ dev_warn(adev->dev,
+ "But GPU can be operated due to bad_page_threshold = -1.\n");
+ return false;
+ } else {
+ dev_warn(adev->dev, "This GPU is in BAD status.");
+ dev_warn(adev->dev, "Please retire it or set a larger "
+ "threshold value when reloading driver.\n");
+ return true;
+ }
}
return false;
@@ -1191,8 +1200,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
} else {
dev_err(adev->dev, "RAS records:%d exceed threshold:%d",
control->ras_num_recs, ras->bad_page_cnt_threshold);
- if (amdgpu_bad_page_threshold == -2) {
- dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -2.");
+ if (amdgpu_bad_page_threshold == -1) {
+ dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -1.");
res = 0;
} else {
*exceed_err_limit = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 1c7fcb4f2380..1b8574bc4463 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -68,7 +68,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
err_data.err_addr_cnt);
- amdgpu_ras_save_bad_pages(adev);
+ amdgpu_ras_save_bad_pages(adev, NULL);
}
out:
@@ -147,7 +147,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
err_data->err_addr_cnt) {
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
err_data->err_addr_cnt);
- amdgpu_ras_save_bad_pages(adev);
+ amdgpu_ras_save_bad_pages(adev, &(err_data->ue_count));
amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index a6951160f13a..f2bf979af588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -74,6 +74,8 @@ struct amdgpu_umc {
/* UMC regiser per channel offset */
uint32_t channel_offs;
+ /* how many pages are retired in one UE */
+ uint32_t retire_unit;
/* channel index table of interleaved memory */
const uint32_t *channel_idx_tbl;
struct ras_common_if *ras_if;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 9fa1d814508a..43d6a9d6a538 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -453,7 +453,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
/* Limit maximum size to 2GiB due to SG table limitations */
size = min(remaining_size, 2ULL << 30);
- if (size >= (u64)pages_per_block << PAGE_SHIFT)
+ if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
+ !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
min_block_size = (u64)pages_per_block << PAGE_SHIFT;
cur_size = size;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 8ad8a0bffcac..3bf697a80cf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1503,44 +1503,70 @@ static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
}
-static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
+static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
{
- u32 data, mask;
+ u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
+
+ gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
+ gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
+ CC_GC_SA_UNIT_DISABLE,
+ SA_DISABLE);
+ gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
+ gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
+ GC_USER_SA_UNIT_DISABLE,
+ SA_DISABLE);
+ sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
+ adev->gfx.config.max_shader_engines);
- data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
- data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
+ return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
+}
- data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
- data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
+{
+ u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
+ u32 rb_mask;
- mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
- adev->gfx.config.max_sh_per_se);
+ gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
+ gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
+ CC_RB_BACKEND_DISABLE,
+ BACKEND_DISABLE);
+ gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
+ gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
+ GC_USER_RB_BACKEND_DISABLE,
+ BACKEND_DISABLE);
+ rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines);
- return (~data) & mask;
+ return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
{
- int i, j;
- u32 data;
- u32 active_rbs = 0;
- u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
- adev->gfx.config.max_sh_per_se;
+ u32 rb_bitmap_width_per_sa;
+ u32 max_sa;
+ u32 active_sa_bitmap;
+ u32 global_active_rb_bitmap;
+ u32 active_rb_bitmap = 0;
+ u32 i;
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
- for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff);
- data = gfx_v11_0_get_rb_active_bitmap(adev);
- active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
- rb_bitmap_width_per_sh);
- }
+ /* query sa bitmap from SA_UNIT_DISABLE registers */
+ active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
+ /* query rb bitmap from RB_BACKEND_DISABLE registers */
+ global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);
+
+ /* generate active rb bitmap according to active sa bitmap */
+ max_sa = adev->gfx.config.max_shader_engines *
+ adev->gfx.config.max_sh_per_se;
+ rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
+ adev->gfx.config.max_sh_per_se;
+ for (i = 0; i < max_sa; i++) {
+ if (active_sa_bitmap & (1 << i))
+ active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
}
- gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- adev->gfx.config.backend_enable_mask = active_rbs;
- adev->gfx.config.num_rbs = hweight32(active_rbs);
+ active_rb_bitmap |= global_active_rb_bitmap;
+ adev->gfx.config.backend_enable_mask = active_rb_bitmap;
+ adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
#define DEFAULT_SH_MEM_BASES (0x6000)
@@ -1633,6 +1659,11 @@ static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
gfx_v11_0_get_tcc_info(adev);
adev->gfx.config.pa_sc_tile_steering_override = 0;
+ /* Set whether texture coordinate truncation is conformant. */
+ tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
+ adev->gfx.config.ta_cntl2_truncate_coord_mode =
+ REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);
+
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
mutex_lock(&adev->srbm_mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 7db1f1a7e33c..ab2556ca984e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -692,6 +692,7 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
+ adev->umc.retire_unit = 1;
adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
adev->umc.ras = &umc_v8_7_ras;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 0a31a341aa43..85e0afc3d4f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -570,6 +570,7 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.node_inst_num = adev->gmc.num_umc;
adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
+ adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
if (adev->umc.node_inst_num == 4)
adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index d65c6cea3445..b06170c00dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1288,6 +1288,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
+ adev->umc.retire_unit = 1;
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.ras = &umc_v6_1_ras;
break;
@@ -1296,6 +1297,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
+ adev->umc.retire_unit = 1;
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.ras = &umc_v6_1_ras;
break;
@@ -1305,6 +1307,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
+ adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
if (!adev->gmc.xgmi.connected_to_cpu)
adev->umc.ras = &umc_v6_7_ras;
if (1 & adev->smuio.funcs->get_die_id(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index 31776b12e4c4..4b0d563c6522 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -382,6 +382,11 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
break;
+ case IP_VERSION(7, 5, 1):
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
+ data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
+ WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
+ fallthrough;
default:
def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index da394bc06bba..fb55e8cb9967 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -209,6 +209,45 @@ static int umc_v8_10_swizzle_mode_na_to_pa(struct amdgpu_device *adev,
return 0;
}
+static void umc_v8_10_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst,
+ uint32_t node_inst, uint64_t mc_umc_status)
+{
+ uint64_t na_err_addr_base;
+ uint64_t na_err_addr, retired_page_addr;
+ uint32_t channel_index, addr_lsb, col = 0;
+ int ret = 0;
+
+ channel_index =
+ adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
+ adev->umc.channel_inst_num +
+ umc_inst * adev->umc.channel_inst_num +
+ ch_inst];
+
+ /* the lowest lsb bits should be ignored */
+ addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
+ err_addr &= ~((0x1ULL << addr_lsb) - 1);
+ na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
+
+ /* loop for all possibilities of [C6 C5] in normal address. */
+ for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
+ na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
+
+ /* Mapping normal error address to retired soc physical address. */
+ ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
+ na_err_addr, &retired_page_addr);
+ if (ret) {
+ dev_err(adev->dev, "Failed to map pa from umc na.\n");
+ break;
+ }
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
+ retired_page_addr);
+ amdgpu_umc_fill_error_record(err_data, na_err_addr,
+ retired_page_addr, channel_index, umc_inst);
+ }
+}
+
static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t umc_reg_offset,
@@ -218,10 +257,7 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
{
uint64_t mc_umc_status_addr;
uint64_t mc_umc_status, err_addr;
- uint64_t mc_umc_addrt0, na_err_addr_base;
- uint64_t na_err_addr, retired_page_addr;
- uint32_t channel_index, addr_lsb, col = 0;
- int ret = 0;
+ uint64_t mc_umc_addrt0;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -236,12 +272,6 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
return;
}
- channel_index =
- adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
- adev->umc.channel_inst_num +
- umc_inst * adev->umc.channel_inst_num +
- ch_inst];
-
/* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
@@ -251,27 +281,8 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* the lowest lsb bits should be ignored */
- addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
- err_addr &= ~((0x1ULL << addr_lsb) - 1);
- na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
-
- /* loop for all possibilities of [C6 C5] in normal address. */
- for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
- na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
-
- /* Mapping normal error address to retired soc physical address. */
- ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
- na_err_addr, &retired_page_addr);
- if (ret) {
- dev_err(adev->dev, "Failed to map pa from umc na.\n");
- break;
- }
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
- retired_page_addr);
- amdgpu_umc_fill_error_record(err_data, na_err_addr,
- retired_page_addr, channel_index, umc_inst);
- }
+ umc_v8_10_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst, node_inst, mc_umc_status);
}
/* clear umc status */
@@ -349,6 +360,133 @@ static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
return true;
}
+static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst,
+ unsigned long *error_count)
+{
+ uint64_t mc_umc_status;
+ uint32_t eccinfo_table_idx;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
+ adev->umc.channel_inst_num +
+ umc_inst * adev->umc.channel_inst_num +
+ ch_inst;
+
+ /* check the MCUMC_STATUS */
+ mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {
+ *error_count += 1;
+ }
+}
+
+static void umc_v8_10_ecc_info_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst,
+ unsigned long *error_count)
+{
+ uint64_t mc_umc_status;
+ uint32_t eccinfo_table_idx;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
+ adev->umc.channel_inst_num +
+ umc_inst * adev->umc.channel_inst_num +
+ ch_inst;
+
+ /* check the MCUMC_STATUS */
+ mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
+ if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
+ *error_count += 1;
+ }
+}
+
+static void umc_v8_10_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t node_inst = 0;
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+
+ /* TODO: driver needs to toggle DF Cstate to ensure
+ * safe access of UMC registers. Will add the protection
+ */
+ LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
+ umc_v8_10_ecc_info_query_correctable_error_count(adev,
+ node_inst, umc_inst, ch_inst,
+ &(err_data->ce_count));
+ umc_v8_10_ecc_info_query_uncorrectable_error_count(adev,
+ node_inst, umc_inst, ch_inst,
+ &(err_data->ue_count));
+ }
+}
+
+static void umc_v8_10_ecc_info_query_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ uint32_t ch_inst,
+ uint32_t umc_inst,
+ uint32_t node_inst)
+{
+ uint32_t eccinfo_table_idx;
+ uint64_t mc_umc_status, err_addr;
+
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
+ adev->umc.channel_inst_num +
+ umc_inst * adev->umc.channel_inst_num +
+ ch_inst;
+
+ mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
+
+ if (mc_umc_status == 0)
+ return;
+
+ if (!err_data->err_addr)
+ return;
+
+ /* calculate error address if ue error is detected */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1)) {
+
+ err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
+ err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+ umc_v8_10_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst, node_inst, mc_umc_status);
+ }
+}
+
+static void umc_v8_10_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t node_inst = 0;
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+
+ /* TODO: driver needs to toggle DF Cstate to ensure
+ * safe access of UMC registers. Will add the protection
+ * when firmware interface is ready
+ */
+ LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
+ umc_v8_10_ecc_info_query_error_address(adev,
+ err_data,
+ ch_inst,
+ umc_inst,
+ node_inst);
+ }
+}
+
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
.query_ras_error_count = umc_v8_10_query_ras_error_count,
.query_ras_error_address = umc_v8_10_query_ras_error_address,
@@ -360,4 +498,6 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
},
.err_cnt_init = umc_v8_10_err_cnt_init,
.query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
+ .ecc_info_query_ras_error_count = umc_v8_10_ecc_info_query_ras_error_count,
+ .ecc_info_query_ras_error_address = umc_v8_10_ecc_info_query_ras_error_address,
};
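Annotation: the new umc_v8_10_convert_error_address() helper factors out the address fan-out now shared by the register path and the ecc_info path: one reported error address expands into every [C6 C5] column combination before each candidate is swizzled to a physical page and logged. Below is a minimal, runnable user-space sketch of just that expansion; C5_BIT and the count of four combinations are assumed stand-ins for UMC_V8_10_NA_C5_BIT and UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM, and the swizzle/retire step is omitted.

#include <stdint.h>
#include <stdio.h>

#define C5_BIT 14 /* assumed stand-in for UMC_V8_10_NA_C5_BIT */

int main(void)
{
	uint64_t err_addr = 0x123456789aULL;     /* example MCA-reported address */
	unsigned int addr_lsb = 6;               /* low bits flagged as invalid */
	uint64_t base, na;
	unsigned int col;

	err_addr &= ~((0x1ULL << addr_lsb) - 1); /* drop the ignored LSBs */
	base = err_addr & ~(0x3ULL << C5_BIT);   /* clear column bits C6:C5 */

	for (col = 0; col < 4; col++) {          /* 4 = 2^2 column combinations */
		na = base | ((uint64_t)col << C5_BIT);
		printf("candidate normal address: 0x%llx\n",
		       (unsigned long long)na);
	}
	return 0;
}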
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 22a41766a8c7..43d587404c3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -78,9 +78,17 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
+ adev->vcn.harvest_config |= 1 << i;
+ dev_info(adev->dev, "VCN%d is disabled by hypervisor\n", i);
+ }
+ }
+ }
/* re-use enc ring as unified ring */
adev->vcn.num_enc_rings = 1;
@@ -238,16 +246,11 @@ static int vcn_v4_0_hw_init(void *handle)
continue;
ring = &adev->vcn.inst[i].ring_enc[0];
- if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
- ring->sched.ready = false;
- ring->no_scheduler = true;
- dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
- } else {
- ring->wptr = 0;
- ring->wptr_old = 0;
- vcn_v4_0_unified_ring_set_wptr(ring);
- ring->sched.ready = true;
- }
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v4_0_unified_ring_set_wptr(ring);
+ ring->sched.ready = true;
+
}
} else {
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
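Annotation: with this change the SR-IOV path decides at early_init time which VCN instances the hypervisor has disabled and records them as bits in harvest_config, so hw_init no longer needs its per-ring disable branch. A toy user-space model of that bitmask handling follows; is_disabled_by_hypervisor() is a made-up stand-in for amdgpu_vcn_is_disabled_vcn() and the instance count is arbitrary.

#include <stdbool.h>
#include <stdio.h>

static bool is_disabled_by_hypervisor(int inst)
{
	return inst == 1; /* pretend the hypervisor disabled instance 1 */
}

int main(void)
{
	unsigned int harvest_config = 0;
	int num_inst = 2, i;

	for (i = 0; i < num_inst; ++i)
		if (is_disabled_by_hypervisor(i))
			harvest_config |= 1u << i;   /* mark instance harvested */

	for (i = 0; i < num_inst; ++i) {
		if (harvest_config & (1u << i))
			continue;                    /* skip harvested instance */
		printf("initialising ring for VCN%d\n", i);
	}
	return 0;
}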
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c06ada0844ba..7a95698d83f7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2373,7 +2373,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
if (init_mqd_managers(dqm))
goto out_free;
- if (allocate_hiq_sdma_mqd(dqm)) {
+ if (!dev->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
goto out_free;
}
@@ -2397,7 +2397,8 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
dqm->ops.uninitialize(dqm);
- deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
+ if (!dqm->dev->shared_resources.enable_mes)
+ deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
kfree(dqm);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index dd0436bf349a..c894cf8f7c50 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -778,16 +778,13 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
struct kfd_event_waiter *event_waiters;
uint32_t i;
- event_waiters = kmalloc_array(num_events,
- sizeof(struct kfd_event_waiter),
- GFP_KERNEL);
+ event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter),
+ GFP_KERNEL);
if (!event_waiters)
return NULL;
- for (i = 0; (event_waiters) && (i < num_events) ; i++) {
+ for (i = 0; i < num_events; i++)
init_wait(&event_waiters[i].wait);
- event_waiters[i].activated = false;
- }
return event_waiters;
}
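Annotation: the alloc_event_waiters() change swaps kmalloc_array() plus a manual "activated = false" pass for kcalloc(), which hands back zeroed memory, so only the per-entry init_wait() setup remains. The user-space analogue below shows the same idea with calloc(); struct waiter and its fields are invented placeholders, not the real kfd_event_waiter layout.

#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

struct waiter {
	bool activated;
	int wait_token;   /* stands in for the wait queue entry */
};

int main(void)
{
	unsigned int num_events = 4, i;
	struct waiter *w = calloc(num_events, sizeof(*w)); /* zeroed allocation */

	if (!w)
		return 1;
	for (i = 0; i < num_events; i++)
		w[i].wait_token = (int)i;                  /* init_wait() equivalent */

	printf("waiter[0].activated = %d\n", w[0].activated); /* already 0 */
	free(w);
	return 0;
}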
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 4f6390f3236e..4a9af800b1f1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -308,11 +308,16 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct queue_properties *q)
{
struct v11_sdma_mqd *m;
+ int size;
m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr;
- memset(m, 0, sizeof(struct v11_sdma_mqd));
+ if (mm->dev->shared_resources.enable_mes)
+ size = PAGE_SIZE;
+ else
+ size = sizeof(struct v11_sdma_mqd);
+ memset(m, 0, size);
*mqd = m;
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
@@ -443,6 +448,14 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
+ /*
+ * To allocate SDMA MQDs by generic functions
+ * when MES is enabled.
+ */
+ if (dev->shared_resources.enable_mes) {
+ mqd->allocate_mqd = allocate_mqd;
+ mqd->free_mqd = kfd_free_mqd_cp;
+ }
pr_debug("%s@%i\n", __func__, __LINE__);
break;
default:
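Annotation: when MES is enabled the SDMA MQD is allocated through the generic CP helpers and occupies a full page, so init_mqd_sdma() has to clear PAGE_SIZE bytes rather than just sizeof(struct v11_sdma_mqd). The sketch below models only that size selection; struct sdma_mqd is a placeholder for v11_sdma_mqd and the hook reassignment (allocate_mqd / kfd_free_mqd_cp) is not shown.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

struct sdma_mqd { char data[128]; }; /* placeholder for struct v11_sdma_mqd */

int main(void)
{
	bool enable_mes = true;          /* assumed: MES scheduling active */
	char backing[PAGE_SIZE];
	size_t size = enable_mes ? PAGE_SIZE : sizeof(struct sdma_mqd);

	/* MES expects the whole backing page cleared, not just the struct */
	memset(backing, 0, size);
	printf("cleared %zu bytes for the SDMA MQD\n", size);
	return 0;
}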
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 2efe93f74f84..0c9bd0a53e60 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -28,7 +28,6 @@ config DRM_AMD_DC_DCN
config DRM_AMD_DC_HDCP
bool "Enable HDCP support in DC"
depends on DRM_AMD_DC
- select DRM_DISPLAY_HDCP_HELPER
help
Choose this option if you want to support HDCP authentication.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c420bce47acb..009ef917dad4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -41,6 +41,8 @@
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
+#include "link/protocols/link_dp_capability.h"
+#include "link/protocols/link_ddc.h"
#include "vid.h"
#include "amdgpu.h"
@@ -2302,6 +2304,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
if (suspend) {
drm_dp_mst_topology_mgr_suspend(mgr);
} else {
+ /* if extended timeout is supported in hardware,
+ * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
+ * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
+ */
+ try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+ if (!dp_is_lttpr_present(aconnector->dc_link))
+ try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+
ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) {
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
@@ -4265,6 +4275,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/* Update the actual used number of crtc */
adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+ amdgpu_dm_set_irq_funcs(adev);
+
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
DRM_ERROR("DM: Failed to initialize mode config\n");
@@ -4757,8 +4769,6 @@ static int dm_early_init(void *handle)
break;
}
- amdgpu_dm_set_irq_funcs(adev);
-
if (adev->mode_info.funcs == NULL)
adev->mode_info.funcs = &dm_display_funcs;
@@ -7235,7 +7245,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
/* This defaults to the max in the range, but we want 8bpc for non-edp. */
- aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
+ aconnector->base.state->max_bpc = 16;
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
if (connector_type == DRM_MODE_CONNECTOR_eDP &&
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 6fdc2027c2b4..1583157da355 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -1149,6 +1149,8 @@ static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)
switch (branch_dev_id) {
case DP_BRANCH_DEVICE_ID_0060AD:
+ case DP_BRANCH_DEVICE_ID_00E04C:
+ case DP_BRANCH_DEVICE_ID_90CC24:
ret_val = true;
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index fb3fd5b7c78b..0d4d3d586166 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -779,10 +779,8 @@ void dce110_edp_wait_for_hpd_ready(
dal_gpio_destroy_irq(&hpd);
- if (false == edp_hpd_high) {
- DC_LOG_WARNING(
- "%s: wait timed out!\n", __func__);
- }
+ /* ensure that the panel is detected */
+ ASSERT(edp_hpd_high);
}
void dce110_edp_power_control(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index df787fcf8e86..3b4d4d68359b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -998,5 +998,8 @@ void dcn30_prepare_bandwidth(struct dc *dc,
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
dcn20_prepare_bandwidth(dc, context);
+
+ dc_dmub_srv_p_state_delegate(dc,
+ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 379729b02847..c3d75e56410c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -1802,7 +1802,10 @@ static unsigned int CalculateVMAndRowBytes(
}
if (SurfaceTiling == dm_sw_linear) {
- *dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
+ if (PTEBufferSizeInRequests == 0)
+ *dpte_row_height = 1;
+ else
+ *dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
*dpte_row_width_ub = (dml_ceil(((double) SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
} else if (ScanDirection != dm_vert) {
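Annotation: the VBA fix guards the linear-tiling branch against PTEBufferSizeInRequests being zero, which would otherwise feed log2(0) into the row-height calculation. A condensed, runnable version of that calculation follows; dml_min/dml_floor/dml_log2 are replaced by libm equivalents, the numbers in main() are arbitrary, and the program needs to be linked with -lm.

#include <math.h>
#include <stdio.h>

static double min_d(double a, double b) { return a < b ? a : b; }

static unsigned int dpte_row_height(unsigned int pte_buf_reqs,
				    double req_width, double pitch)
{
	if (pte_buf_reqs == 0)
		return 1;   /* new guard: avoid log2(0) -> -inf */
	return (unsigned int)min_d(128,
		1u << (unsigned int)floor(log2(pte_buf_reqs * req_width / pitch)));
}

int main(void)
{
	printf("height(0)  = %u\n", dpte_row_height(0, 64.0, 1024.0));  /* 1 */
	printf("height(84) = %u\n", dpte_row_height(84, 64.0, 1024.0)); /* 4 */
	return 0;
}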
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
index 86e9d2e886d6..aaa5064408ba 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
@@ -33,6 +33,7 @@
#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40
#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1
#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/
+#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
#define EDID_SEGMENT_SIZE 256
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 4874d1bf1dcb..d4370856f164 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -60,8 +60,6 @@
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
-#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
-
struct dp_lt_fallback_entry {
enum dc_lane_count lane_count;
enum dc_link_rate link_rate;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 834d146c4991..0652b001ad54 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1202,10 +1202,17 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
- ret = smu_setup_pptable(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to setup pptable!\n");
- return ret;
+ /*
+ * It is assumed the pptable used before runpm is same as
+ * the one used afterwards. Thus, we can reuse the stored
+ * copy and do not need to resetup the pptable again.
+ */
+ if (!adev->in_runpm) {
+ ret = smu_setup_pptable(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to setup pptable!\n");
+ return ret;
+ }
}
/* smu_dump_pptable(smu); */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 6492d69e2e60..e1ef88ee1ed3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -256,7 +256,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
* to be backward compatible.
* 2. New fw usually brings some optimizations. But that's visible
* only on the paired driver.
- * Considering above, we just leave user a warning message instead
+ * Considering above, we just leave user a verbal message instead
* of halt driver loading.
*/
if (if_version != smu->smc_driver_if_version) {
@@ -264,7 +264,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
"smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
- dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
+ dev_info(smu->adev->dev, "SMU driver if version not matched\n");
}
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 56a02bc60cee..c788aa7a99a9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -93,7 +93,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
* to be backward compatible.
* 2. New fw usually brings some optimizations. But that's visible
* only on the paired driver.
- * Considering above, we just leave user a warning message instead
+ * Considering above, we just leave user a verbal message instead
* of halt driver loading.
*/
if (if_version != smu->smc_driver_if_version) {
@@ -101,7 +101,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
- dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
+ dev_info(smu->adev->dev, "SMU driver if version not matched\n");
}
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 78945e79dbee..a52ed0580fd7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -311,7 +311,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
* to be backward compatible.
* 2. New fw usually brings some optimizations. But that's visible
* only on the paired driver.
- * Considering above, we just leave user a warning message instead
+ * Considering above, we just leave user a verbal message instead
* of halt driver loading.
*/
if (if_version != smu->smc_driver_if_version) {
@@ -319,7 +319,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
- dev_warn(adev->dev, "SMU driver if version not matched\n");
+ dev_info(adev->dev, "SMU driver if version not matched\n");
}
return ret;
@@ -2229,10 +2229,23 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
enum smu_baco_seq baco_seq)
{
- return smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_ArmD3,
- baco_seq,
- NULL);
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ArmD3,
+ baco_seq,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (baco_seq == BACO_SEQ_BAMACO ||
+ baco_seq == BACO_SEQ_BACO)
+ smu_baco->state = SMU_BACO_STATE_ENTER;
+ else
+ smu_baco->state = SMU_BACO_STATE_EXIT;
+
+ return 0;
}
bool smu_v13_0_baco_is_support(struct smu_context *smu)
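Annotation: beyond sending SMU_MSG_ArmD3, the helper now records whether the requested sequence enters or exits BACO so later suspend/resume logic can consult smu_baco->state. The sketch below is a reduced model of just that bookkeeping: the message send is faked, and only the sequence values the branch cares about are declared (the driver's enums contain more members).

#include <stdio.h>

enum baco_seq { BACO_SEQ_BACO, BACO_SEQ_BAMACO, BACO_SEQ_ULPS };
enum baco_state { SMU_BACO_STATE_ENTER, SMU_BACO_STATE_EXIT };

static int send_arm_d3(enum baco_seq seq) { (void)seq; return 0; /* pretend success */ }

static int set_armd3_sequence(enum baco_seq seq, enum baco_state *state)
{
	int ret = send_arm_d3(seq);

	if (ret)
		return ret;   /* state left untouched on failure */
	if (seq == BACO_SEQ_BAMACO || seq == BACO_SEQ_BACO)
		*state = SMU_BACO_STATE_ENTER;
	else
		*state = SMU_BACO_STATE_EXIT;
	return 0;
}

int main(void)
{
	enum baco_state state = SMU_BACO_STATE_EXIT;

	set_armd3_sequence(BACO_SEQ_BAMACO, &state);
	printf("baco state: %s\n",
	       state == SMU_BACO_STATE_ENTER ? "ENTER" : "EXIT");
	return 0;
}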
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 7c906ab3ddd2..923a9fb3c887 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -147,6 +147,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
+ MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 7e5c6a8d0212..75185a960fc4 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -744,7 +744,7 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
return sgt;
}
-EXPORT_SYMBOL(drm_gem_shmem_get_pages_sgt);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
/**
* drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 3d1cd04ac5fa..98f4e44976e0 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -118,9 +118,6 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
-config DRM_I915_GVT
- bool
-
config DRM_I915_GVT_KVMGT
tristate "Enable KVM host support Intel GVT-g graphics virtualization"
depends on DRM_I915
@@ -172,3 +169,6 @@ menu "drm/i915 Unstable Evolution"
depends on DRM_I915
source "drivers/gpu/drm/i915/Kconfig.unstable"
endmenu
+
+config DRM_I915_GVT
+ bool
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 6e48d3bcdfec..a280448df771 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -199,6 +199,8 @@ static struct intel_quirk intel_quirks[] = {
/* ECS Liva Q2 */
{ 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ /* HP Notebook - 14-r206nv */
+ { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
};
void intel_init_quirks(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index 169393a7ad88..3bb1c701d5ff 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -559,12 +559,15 @@ static bool reg_needs_read_steering(struct intel_gt *gt,
i915_mcr_reg_t reg,
enum intel_steering_type type)
{
- const u32 offset = i915_mmio_reg_offset(reg);
+ u32 offset = i915_mmio_reg_offset(reg);
const struct intel_mmio_range *entry;
if (likely(!gt->steering_table[type]))
return false;
+ if (IS_GSI_REG(offset))
+ offset += gt->uncore->gsi_offset;
+
for (entry = gt->steering_table[type]; entry->end; entry++) {
if (offset >= entry->start && offset <= entry->end)
return true;
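Annotation: the intel_gt_mcr fix rebases GSI register offsets by the uncore's gsi_offset before walking the steering table, so media-GT registers are compared against the ranges they actually occupy. A self-contained sketch of that lookup follows; the IS_GSI_REG() threshold and the table contents are invented for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mmio_range { uint32_t start, end; };

#define IS_GSI_REG(offset) ((offset) < 0x40000) /* assumed placeholder predicate */

static bool needs_steering(uint32_t offset, uint32_t gsi_offset,
			   const struct mmio_range *table)
{
	const struct mmio_range *entry;

	if (IS_GSI_REG(offset))
		offset += gsi_offset;   /* rebase GSI registers first */

	for (entry = table; entry->end; entry++)
		if (offset >= entry->start && offset <= entry->end)
			return true;
	return false;
}

int main(void)
{
	static const struct mmio_range table[] = {
		{ 0x380000, 0x38ffff },
		{ 0, 0 },               /* sentinel */
	};

	printf("steered: %d\n", needs_steering(0x8000, 0x380000, table));
	return 0;
}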
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 15ec64d881c4..fb99143be98e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -53,7 +53,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
if (unlikely(ret))
goto err_unpin;
- if (i915_vma_is_map_and_fenceable(vma)) {
+ if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
addr = (void __force *)i915_vma_pin_iomap(vma);
} else {
int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
@@ -98,7 +98,7 @@ void intel_ring_unpin(struct intel_ring *ring)
return;
i915_vma_unset_ggtt_write(vma);
- if (i915_vma_is_map_and_fenceable(vma))
+ if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
i915_vma_unpin_iomap(vma);
else
i915_gem_object_unpin_map(vma->obj);
@@ -116,7 +116,7 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
I915_BO_ALLOC_PM_VOLATILE);
- if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt))
+ if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt) && !HAS_LLC(i915))
obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 0616b73175f3..baccbf1761b7 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -147,9 +147,9 @@ vgpu_scan_nonprivbb_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
- vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
- "0x%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
+ vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
+ "0x%llx\n");
static int vgpu_status_get(void *data, u64 *val)
{
@@ -165,7 +165,7 @@ static int vgpu_status_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n");
/**
* intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
@@ -180,10 +180,10 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
&vgpu_mmio_diff_fops);
- debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
- &vgpu_scan_nonprivbb_fops);
- debugfs_create_file("status", 0644, vgpu->debugfs, vgpu,
- &vgpu_status_fops);
+ debugfs_create_file_unsafe("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
+ &vgpu_scan_nonprivbb_fops);
+ debugfs_create_file_unsafe("status", 0644, vgpu->debugfs, vgpu,
+ &vgpu_status_fops);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index dce93738e98a..4dd52ac2043e 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -171,7 +171,7 @@ static int verify_firmware(struct intel_gvt *gvt,
mem = (fw->data + h->cfg_space_offset);
id = *(u16 *)(mem + PCI_VENDOR_ID);
- VERIFY("vender id", id, pdev->vendor);
+ VERIFY("vendor id", id, pdev->vendor);
id = *(u16 *)(mem + PCI_DEVICE_ID);
VERIFY("device id", id, pdev->device);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 8ae7039b3683..de675d799c7d 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -699,7 +699,7 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
- debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
+ debugfs_lookup_and_remove(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs);
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
&vgpu->track_node);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index a5497440484f..08ad1bd651f1 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -323,7 +323,7 @@ int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
GFP_KERNEL);
if (ret < 0)
- goto out_unlock;;
+ goto out_unlock;
vgpu->id = ret;
vgpu->sched_ctl.weight = conf->weight;
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c1356aff87da..d26aa52217ce 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -136,7 +136,7 @@ static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_fbdev *fbdev = NULL;
+ struct msm_fbdev *fbdev;
struct drm_fb_helper *helper;
int ret;
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 84429728347f..a6c8542087ec 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -233,7 +233,7 @@ void omap_fbdev_init(struct drm_device *dev)
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev)
- goto fail;
+ return;
INIT_WORK(&fbdev->work, pan_worker);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 1471c3a96602..4aca09cab4b8 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2123,11 +2123,12 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
/*
* On DCE32 any encoder can drive any block so usually just use crtc id,
- * but Apple thinks different at least on iMac10,1, so there use linkb,
+ * but Apple thinks different at least on iMac10,1 and iMac11,2, so there use linkb,
* otherwise the internal eDP panel will stay dark.
*/
if (ASIC_IS_DCE32(rdev)) {
- if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
+ if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1") ||
+ dmi_match(DMI_PRODUCT_NAME, "iMac11,2"))
enc_idx = (dig->linkb) ? 1 : 0;
else
enc_idx = radeon_crtc->crtc_id;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 842afc88a949..22623eb4f72f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -256,6 +256,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
{
struct hid_report *report;
struct hid_field *field;
+ unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
unsigned int usages;
unsigned int offset;
unsigned int i;
@@ -286,8 +287,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
offset = report->size;
report->size += parser->global.report_size * parser->global.report_count;
+ if (parser->device->ll_driver->max_buffer_size)
+ max_buffer_size = parser->device->ll_driver->max_buffer_size;
+
/* Total size check: Allow for possible report index byte */
- if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+ if (report->size > (max_buffer_size - 1) << 3) {
hid_err(parser->device, "report is too long\n");
return -1;
}
@@ -1963,6 +1967,7 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
struct hid_driver *hdrv;
+ int max_buffer_size = HID_MAX_BUFFER_SIZE;
u32 rsize, csize = size;
u8 *cdata = data;
int ret = 0;
@@ -1978,10 +1983,13 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
rsize = hid_compute_report_size(report);
- if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
- rsize = HID_MAX_BUFFER_SIZE - 1;
- else if (rsize > HID_MAX_BUFFER_SIZE)
- rsize = HID_MAX_BUFFER_SIZE;
+ if (hid->ll_driver->max_buffer_size)
+ max_buffer_size = hid->ll_driver->max_buffer_size;
+
+ if (report_enum->numbered && rsize >= max_buffer_size)
+ rsize = max_buffer_size - 1;
+ else if (rsize > max_buffer_size)
+ rsize = max_buffer_size;
if (csize < rsize) {
dbg_hid("report %d is too short, (%d < %d)\n", report->id,
@@ -2396,7 +2404,12 @@ int hid_hw_raw_request(struct hid_device *hdev,
unsigned char reportnum, __u8 *buf,
size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
+ unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
+
+ if (hdev->ll_driver->max_buffer_size)
+ max_buffer_size = hdev->ll_driver->max_buffer_size;
+
+ if (len < 1 || len > max_buffer_size || !buf)
return -EINVAL;
return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
@@ -2415,7 +2428,12 @@ EXPORT_SYMBOL_GPL(hid_hw_raw_request);
*/
int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
+ unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
+
+ if (hdev->ll_driver->max_buffer_size)
+ max_buffer_size = hdev->ll_driver->max_buffer_size;
+
+ if (len < 1 || len > max_buffer_size || !buf)
return -EINVAL;
if (hdev->ll_driver->output_report)
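Annotation: rather than hard-coding HID_MAX_BUFFER_SIZE everywhere, the core now lets each low-level transport declare its own ceiling via ll_driver->max_buffer_size (uhid advertises UHID_DATA_MAX) and falls back to the global limit otherwise. The sketch below reproduces the clamp used in hid_report_raw_event(); the HID_MAX_BUFFER_SIZE value is assumed from include/linux/hid.h.

#include <stdint.h>
#include <stdio.h>

#define HID_MAX_BUFFER_SIZE 16384 /* value assumed from hid.h */

static uint32_t clamp_report_size(uint32_t rsize, unsigned int ll_max,
				  int numbered)
{
	unsigned int max = ll_max ? ll_max : HID_MAX_BUFFER_SIZE;

	if (numbered && rsize >= max)
		rsize = max - 1;   /* leave room for the report index byte */
	else if (rsize > max)
		rsize = max;
	return rsize;
}

int main(void)
{
	printf("%u\n", clamp_report_size(8192, 4096, 1)); /* -> 4095 */
	printf("%u\n", clamp_report_size(8192, 0, 0));    /* -> 8192 */
	return 0;
}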
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 1e16b0fa310d..27cadadda7c9 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1354,6 +1354,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
girq->parents = NULL;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
+ girq->threaded = true;
ret = gpiochip_add_data(&dev->gc, dev);
if (ret < 0) {
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 25dcda76d6c7..5fc88a063297 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -4399,6 +4399,8 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb02a) },
{ /* MX Master 3 mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023) },
+ { /* MX Master 3S mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb034) },
{}
};
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 15e14239af82..a49c6affd7c4 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -5,6 +5,7 @@
* Copyright (c) 2014-2016, Intel Corporation.
*/
+#include <linux/devm-helpers.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
@@ -621,7 +622,6 @@ static void recv_ipc(struct ishtp_device *dev, uint32_t doorbell_val)
case MNG_RESET_NOTIFY:
if (!ishtp_dev) {
ishtp_dev = dev;
- INIT_WORK(&fw_reset_work, fw_reset_work_fn);
}
schedule_work(&fw_reset_work);
break;
@@ -940,6 +940,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
{
struct ishtp_device *dev;
int i;
+ int ret;
dev = devm_kzalloc(&pdev->dev,
sizeof(struct ishtp_device) + sizeof(struct ish_hw),
@@ -975,6 +976,12 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
list_add_tail(&tx_buf->link, &dev->wr_free_list);
}
+ ret = devm_work_autocancel(&pdev->dev, &fw_reset_work, fw_reset_work_fn);
+ if (ret) {
+ dev_err(dev->devc, "Failed to initialise FW reset work\n");
+ return NULL;
+ }
+
dev->ops = &ish_hw_ops;
dev->devc = &pdev->dev;
dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index f161c95a1ad2..4588d2cd4ea4 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -395,6 +395,7 @@ static const struct hid_ll_driver uhid_hid_driver = {
.parse = uhid_hid_parse,
.raw_request = uhid_hid_raw_request,
.output_report = uhid_hid_output_report,
+ .max_buffer_size = UHID_DATA_MAX,
};
#ifdef CONFIG_COMPAT
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 9b8e84f20604..25eb4e8fd22f 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -914,6 +914,7 @@ config I2C_PASEMI
config I2C_APPLE
tristate "Apple SMBus platform driver"
+ depends on !I2C_PASEMI
depends on ARCH_APPLE || COMPILE_TEST
default ARCH_APPLE
help
diff --git a/drivers/i2c/busses/i2c-gxp.c b/drivers/i2c/busses/i2c-gxp.c
index da4c8e5a8039..d4b55d989a26 100644
--- a/drivers/i2c/busses/i2c-gxp.c
+++ b/drivers/i2c/busses/i2c-gxp.c
@@ -126,22 +126,13 @@ static int gxp_i2c_master_xfer(struct i2c_adapter *adapter,
time_left = wait_for_completion_timeout(&drvdata->completion,
adapter->timeout);
ret = num - drvdata->msgs_remaining;
- if (time_left == 0) {
- switch (drvdata->state) {
- case GXP_I2C_WDATA_PHASE:
- break;
- case GXP_I2C_RDATA_PHASE:
- break;
- case GXP_I2C_ADDR_PHASE:
- break;
- default:
- break;
- }
+ if (time_left == 0)
return -ETIMEDOUT;
- }
- if (drvdata->state == GXP_I2C_ADDR_NACK ||
- drvdata->state == GXP_I2C_DATA_NACK)
+ if (drvdata->state == GXP_I2C_ADDR_NACK)
+ return -ENXIO;
+
+ if (drvdata->state == GXP_I2C_DATA_NACK)
return -EIO;
return ret;
@@ -525,7 +516,7 @@ static int gxp_i2c_probe(struct platform_device *pdev)
i2cg_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"hpe,sysreg");
if (IS_ERR(i2cg_map)) {
- return dev_err_probe(&pdev->dev, IS_ERR(i2cg_map),
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2cg_map),
"failed to map i2cg_handle\n");
}
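Annotation: the i2c-gxp fix matters because dev_err_probe() expects the error code itself: IS_ERR() only returns 0 or 1, so the probe would have logged and returned 1 instead of the real negative errno. The stand-alone snippet below re-implements the ERR_PTR helpers in user space just to make that distinction visible; it is an illustration, not the kernel definitions.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *map = ERR_PTR(-ENODEV);   /* pretend the syscon lookup failed */

	if (IS_ERR(map)) {
		printf("IS_ERR(map)  = %d (not an error code)\n", IS_ERR(map));
		printf("PTR_ERR(map) = %ld (what probe should return)\n",
		       PTR_ERR(map));
	}
	return 0;
}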
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 7a60e1c5e587..54e4c34b4a22 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1438,6 +1438,7 @@ static int i3c_master_early_i3c_dev_add(struct i3c_master_controller *master,
{
struct i3c_device_info info = {
.static_addr = boardinfo->static_addr,
+ .pid = boardinfo->pid,
};
struct i3c_dev_desc *i3cdev;
int ret;
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 51a8608203de..48954d3e6571 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -531,7 +531,7 @@ static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
if (hcnt < SCL_I3C_TIMING_CNT_MIN)
hcnt = SCL_I3C_TIMING_CNT_MIN;
- lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_TYP_I3C_SCL_RATE) - hcnt;
+ lcnt = DIV_ROUND_UP(core_rate, master->base.bus.scl_rate.i3c) - hcnt;
if (lcnt < SCL_I3C_TIMING_CNT_MIN)
lcnt = SCL_I3C_TIMING_CNT_MIN;
@@ -541,7 +541,8 @@ static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
if (!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_I2C_SLAVE_PRESENT))
writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
- lcnt = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period);
+ lcnt = max_t(u8,
+ DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period), lcnt);
scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
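Annotation: both dw-i3c changes concern the derived SCL counts: the push-pull low count is now computed from the bus's requested i3c rate rather than the typical-rate constant, and the open-drain low count is clamped so it can never come out shorter than that (which would overclock the open-drain phase). Below is a rough user-space rendering of the arithmetic; the clock numbers are made up, the hcnt derivation is simplified, and the constants are copied from the driver as read here.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SCL_I3C_TIMING_CNT_MIN	8
#define I3C_BUS_TLOW_OD_MIN_NS	200

static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

int main(void)
{
	uint32_t core_rate = 250000000;                              /* 250 MHz, example */
	uint32_t core_period = DIV_ROUND_UP(1000000000, core_rate);  /* ns per tick */
	uint32_t scl_rate = 2000000;                                 /* requested i3c rate */
	uint32_t hcnt, lcnt;

	hcnt = DIV_ROUND_UP(core_rate, scl_rate) / 2;                /* rough high period */
	if (hcnt < SCL_I3C_TIMING_CNT_MIN)
		hcnt = SCL_I3C_TIMING_CNT_MIN;

	lcnt = DIV_ROUND_UP(core_rate, scl_rate) - hcnt;             /* rate-derived low */
	if (lcnt < SCL_I3C_TIMING_CNT_MIN)
		lcnt = SCL_I3C_TIMING_CNT_MIN;

	/* open-drain low count: at least the spec minimum, never shorter than lcnt */
	lcnt = max_u32(DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period), lcnt);

	printf("hcnt=%u od_lcnt=%u\n", hcnt, lcnt);
	return 0;
}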
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 75eaecc8639f..1de87062c67b 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -35,7 +35,6 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
-#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
@@ -62,7 +61,6 @@ struct ubiblock_param {
};
struct ubiblock_pdu {
- struct work_struct work;
struct ubi_sgl usgl;
};
@@ -82,8 +80,6 @@ struct ubiblock {
struct gendisk *gd;
struct request_queue *rq;
- struct workqueue_struct *wq;
-
struct mutex dev_mutex;
struct list_head list;
struct blk_mq_tag_set tag_set;
@@ -181,20 +177,29 @@ static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
return NULL;
}
-static int ubiblock_read(struct ubiblock_pdu *pdu)
+static blk_status_t ubiblock_read(struct request *req)
{
- int ret, leb, offset, bytes_left, to_read;
- u64 pos;
- struct request *req = blk_mq_rq_from_pdu(pdu);
+ struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
struct ubiblock *dev = req->q->queuedata;
+ u64 pos = blk_rq_pos(req) << 9;
+ int to_read = blk_rq_bytes(req);
+ int bytes_left = to_read;
+ /* Get LEB:offset address to read from */
+ int offset = do_div(pos, dev->leb_size);
+ int leb = pos;
+ struct req_iterator iter;
+ struct bio_vec bvec;
+ int ret;
- to_read = blk_rq_bytes(req);
- pos = blk_rq_pos(req) << 9;
+ blk_mq_start_request(req);
- /* Get LEB:offset address to read from */
- offset = do_div(pos, dev->leb_size);
- leb = pos;
- bytes_left = to_read;
+ /*
+ * It is safe to ignore the return value of blk_rq_map_sg() because
+ * the number of sg entries is limited to UBI_MAX_SG_COUNT
+ * and ubi_read_sg() will check that limit.
+ */
+ ubi_sgl_init(&pdu->usgl);
+ blk_rq_map_sg(req->q, req, pdu->usgl.sg);
while (bytes_left) {
/*
@@ -206,14 +211,17 @@ static int ubiblock_read(struct ubiblock_pdu *pdu)
ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
if (ret < 0)
- return ret;
+ break;
bytes_left -= to_read;
to_read = bytes_left;
leb += 1;
offset = 0;
}
- return 0;
+
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
+ return errno_to_blk_status(ret);
}
static int ubiblock_open(struct block_device *bdev, fmode_t mode)
@@ -289,47 +297,15 @@ static const struct block_device_operations ubiblock_ops = {
.getgeo = ubiblock_getgeo,
};
-static void ubiblock_do_work(struct work_struct *work)
-{
- int ret;
- struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
- struct request *req = blk_mq_rq_from_pdu(pdu);
- struct req_iterator iter;
- struct bio_vec bvec;
-
- blk_mq_start_request(req);
-
- /*
- * It is safe to ignore the return value of blk_rq_map_sg() because
- * the number of sg entries is limited to UBI_MAX_SG_COUNT
- * and ubi_read_sg() will check that limit.
- */
- blk_rq_map_sg(req->q, req, pdu->usgl.sg);
-
- ret = ubiblock_read(pdu);
-
- rq_for_each_segment(bvec, req, iter)
- flush_dcache_page(bvec.bv_page);
-
- blk_mq_end_request(req, errno_to_blk_status(ret));
-}
-
static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
- struct request *req = bd->rq;
- struct ubiblock *dev = hctx->queue->queuedata;
- struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
-
- switch (req_op(req)) {
+ switch (req_op(bd->rq)) {
case REQ_OP_READ:
- ubi_sgl_init(&pdu->usgl);
- queue_work(dev->wq, &pdu->work);
- return BLK_STS_OK;
+ return ubiblock_read(bd->rq);
default:
return BLK_STS_IOERR;
}
-
}
static int ubiblock_init_request(struct blk_mq_tag_set *set,
@@ -339,8 +315,6 @@ static int ubiblock_init_request(struct blk_mq_tag_set *set,
struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
- INIT_WORK(&pdu->work, ubiblock_do_work);
-
return 0;
}
@@ -354,9 +328,12 @@ static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
u64 size = vi->used_bytes >> 9;
if (vi->used_bytes % 512) {
- pr_warn("UBI: block: volume size is not a multiple of 512, "
- "last %llu bytes are ignored!\n",
- vi->used_bytes - (size << 9));
+ if (vi->vol_type == UBI_DYNAMIC_VOLUME)
+ pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
+ vi->used_bytes - (size << 9));
+ else
+ pr_info("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
+ vi->used_bytes - (size << 9));
}
if ((sector_t)size != size)
@@ -401,7 +378,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->tag_set.ops = &ubiblock_mq_ops;
dev->tag_set.queue_depth = 64;
dev->tag_set.numa_node = NUMA_NO_NODE;
- dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
dev->tag_set.driver_data = dev;
dev->tag_set.nr_hw_queues = 1;
@@ -439,32 +416,20 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->rq = gd->queue;
blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
- /*
- * Create one workqueue per volume (per registered block device).
- * Remember workqueues are cheap, they're not threads.
- */
- dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
- if (!dev->wq) {
- ret = -ENOMEM;
- goto out_remove_minor;
- }
-
list_add_tail(&dev->list, &ubiblock_devices);
/* Must be the last step: anyone can call file ops from now on */
- ret = add_disk(dev->gd);
+ ret = device_add_disk(vi->dev, dev->gd, NULL);
if (ret)
- goto out_destroy_wq;
+ goto out_remove_minor;
dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
dev->ubi_num, dev->vol_id, vi->name);
mutex_unlock(&devices_mutex);
return 0;
-out_destroy_wq:
- list_del(&dev->list);
- destroy_workqueue(dev->wq);
out_remove_minor:
+ list_del(&dev->list);
idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
put_disk(dev->gd);
@@ -482,8 +447,6 @@ static void ubiblock_cleanup(struct ubiblock *dev)
{
/* Stop new requests to arrive */
del_gendisk(dev->gd);
- /* Flush pending work */
- destroy_workqueue(dev->wq);
/* Finally destroy the blk queue */
dev_info(disk_to_dev(dev->gd), "released");
put_disk(dev->gd);
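Annotation: the ubiblock rework drops the per-volume workqueue and performs the read directly from the blk-mq queue_rq hook, which is why the tag set gains BLK_MQ_F_BLOCKING (ubi_read_sg() may sleep). The core of ubiblock_read() is the LEB:offset split plus per-LEB chunking shown in the runnable sketch below; the sizes are invented and the scatter-gather copy is replaced by a printf.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t leb_size = 126976;              /* e.g. 128 KiB PEB minus headers */
	uint64_t sector = 300, nbytes = 131072;
	uint64_t pos = sector << 9;              /* byte position on the volume */
	uint64_t offset = pos % leb_size;        /* do_div() equivalent */
	uint64_t leb = pos / leb_size;
	uint64_t bytes_left = nbytes;
	uint64_t to_read = nbytes;

	while (bytes_left) {
		if (offset + to_read > leb_size)
			to_read = leb_size - offset;  /* cap at the LEB end */
		printf("read LEB %llu, offset %llu, %llu bytes\n",
		       (unsigned long long)leb, (unsigned long long)offset,
		       (unsigned long long)to_read);
		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;
	}
	return 0;
}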
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index a901f8edfa41..0904eb40c95f 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -35,7 +35,7 @@
#define MTD_PARAM_LEN_MAX 64
/* Maximum number of comma-separated items in the 'mtd=' parameter */
-#define MTD_PARAM_MAX_COUNT 4
+#define MTD_PARAM_MAX_COUNT 5
/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768
@@ -53,12 +53,14 @@
* @ubi_num: UBI number
* @vid_hdr_offs: VID header offset
* @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
+ * @enable_fm: enable fastmap when value is non-zero
*/
struct mtd_dev_param {
char name[MTD_PARAM_LEN_MAX];
int ubi_num;
int vid_hdr_offs;
int max_beb_per1024;
+ int enable_fm;
};
/* Numbers of elements set in the @mtd_dev_param array */
@@ -468,6 +470,7 @@ static int uif_init(struct ubi_device *ubi)
err = ubi_add_volume(ubi, ubi->volumes[i]);
if (err) {
ubi_err(ubi, "cannot add volume %d", i);
+ ubi->volumes[i] = NULL;
goto out_volumes;
}
}
@@ -663,6 +666,12 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
+ if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
+ ubi->vid_hdr_alsize)) {
+ ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
+ return -EINVAL;
+ }
+
dbg_gen("min_io_size %d", ubi->min_io_size);
dbg_gen("max_write_size %d", ubi->max_write_size);
dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
@@ -906,6 +915,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
ubi->dev.release = dev_release;
ubi->dev.class = &ubi_class;
ubi->dev.groups = ubi_dev_groups;
+ ubi->dev.parent = &mtd->dev;
ubi->mtd = mtd;
ubi->ubi_num = ubi_num;
@@ -1248,7 +1258,7 @@ static int __init ubi_init(void)
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, p->ubi_num,
p->vid_hdr_offs, p->max_beb_per1024,
- false);
+ p->enable_fm == 0 ? true : false);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
pr_err("UBI error: cannot attach mtd%d\n",
@@ -1427,7 +1437,7 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
int err = kstrtoint(token, 10, &p->max_beb_per1024);
if (err) {
- pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
+ pr_err("UBI error: bad value for max_beb_per1024 parameter: %s\n",
token);
return -EINVAL;
}
@@ -1438,13 +1448,25 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
int err = kstrtoint(token, 10, &p->ubi_num);
if (err) {
- pr_err("UBI error: bad value for ubi_num parameter: %s",
+ pr_err("UBI error: bad value for ubi_num parameter: %s\n",
token);
return -EINVAL;
}
} else
p->ubi_num = UBI_DEV_NUM_AUTO;
+ token = tokens[4];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->enable_fm);
+
+ if (err) {
+ pr_err("UBI error: bad value for enable_fm parameter: %s\n",
+ token);
+ return -EINVAL;
+ }
+ } else
+ p->enable_fm = 0;
+
mtd_devs += 1;
return 0;
}
@@ -1457,11 +1479,13 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
"Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
__stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
"Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
+ "Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. Default value is 0.\n"
"\n"
"Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
"Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
"Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
"Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n"
+ "example 5: mtd=1,0,0,5 mtd=2,0,0,6,1 - attach MTD device /dev/mtd1 to UBI 5 and disable fastmap; attach MTD device /dev/mtd2 to UBI 6 and enable fastmap.(only works when fastmap is enabled and fm_autoconvert=Y).\n"
"\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
#ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644);
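Annotation: the mtd= module parameter grows a fifth, optional enable_fm field, so MTD_PARAM_MAX_COUNT goes from 4 to 5 and a value of 0 (the default) keeps fastmap off for that attachment. The standalone sketch below mirrors the token handling with strtok(); it is an illustration of the format only, not the kernel parser, which uses strsep() and kstrtoint().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MTD_PARAM_MAX_COUNT 5

int main(void)
{
	char arg[] = "/dev/mtd2,0,0,6,1";        /* device,vid_hdr_offs,max_beb,ubi_num,enable_fm */
	char *tokens[MTD_PARAM_MAX_COUNT] = { NULL };
	char *tok = strtok(arg, ",");
	int i, enable_fm = 0;

	for (i = 0; tok && i < MTD_PARAM_MAX_COUNT; i++) {
		tokens[i] = tok;
		tok = strtok(NULL, ",");
	}

	if (tokens[4])
		enable_fm = atoi(tokens[4]);     /* defaults to 0 when absent */

	printf("device=%s ubi_num=%s enable_fm=%d\n",
	       tokens[0], tokens[3] ? tokens[3] : "auto", enable_fm);
	return 0;
}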
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index fcca6942dbdd..27168f511d6d 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -504,6 +504,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
{
unsigned long ubi_num = ubi->ubi_num;
struct ubi_debug_info *d = &ubi->dbg;
+ umode_t mode = S_IRUSR | S_IWUSR;
int n;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
@@ -518,41 +519,41 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
- d->dfs_chk_gen = debugfs_create_file("chk_gen", S_IWUSR, d->dfs_dir,
+ d->dfs_chk_gen = debugfs_create_file("chk_gen", mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
- d->dfs_chk_io = debugfs_create_file("chk_io", S_IWUSR, d->dfs_dir,
+ d->dfs_chk_io = debugfs_create_file("chk_io", mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
- d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", S_IWUSR,
+ d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", mode,
d->dfs_dir, (void *)ubi_num,
&dfs_fops);
- d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", S_IWUSR,
+ d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", mode,
d->dfs_dir, (void *)ubi_num,
&dfs_fops);
d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips",
- S_IWUSR, d->dfs_dir,
+ mode, d->dfs_dir,
(void *)ubi_num,
&dfs_fops);
d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures",
- S_IWUSR, d->dfs_dir,
+ mode, d->dfs_dir,
(void *)ubi_num,
&dfs_fops);
d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut",
- S_IWUSR, d->dfs_dir,
+ mode, d->dfs_dir,
(void *)ubi_num,
&dfs_fops);
d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min",
- S_IWUSR, d->dfs_dir,
+ mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max",
- S_IWUSR, d->dfs_dir,
+ mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 09c408c45a62..403b79d6efd5 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -61,7 +61,7 @@ struct ubi_eba_table {
};
/**
- * next_sqnum - get next sequence number.
+ * ubi_next_sqnum - get next sequence number.
* @ubi: UBI device description object
*
* This function returns next sequence number to use, which is just the current
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 0ee452275578..863f571f1adb 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -146,13 +146,15 @@ void ubi_refill_pools(struct ubi_device *ubi)
if (ubi->fm_anchor) {
wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->free_count++;
+ ubi->fm_anchor = NULL;
}
- /*
- * All available PEBs are in ubi->free, now is the time to get
- * the best anchor PEBs.
- */
- ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
+ if (!ubi->fm_disabled)
+ /*
+ * All available PEBs are in ubi->free, now is the time to get
+ * the best anchor PEBs.
+ */
+ ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
for (;;) {
enough = 0;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index ca2d9efe62c3..28c8151a0725 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -93,7 +93,7 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
/**
- * new_fm_vhdr - allocate a new volume header for fastmap usage.
+ * new_fm_vbuf() - allocate a new volume header for fastmap usage.
* @ubi: UBI device description object
* @vol_id: the VID of the new header
*
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 0fce99ff29b5..5db653eacbd4 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -79,6 +79,7 @@ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
vi->name_len = vol->name_len;
vi->name = vol->name;
vi->cdev = vol->cdev.dev;
+ vi->dev = &vol->dev;
}
/**
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 7b30c8ee3e82..1794d66b6eb7 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -10,7 +10,7 @@
#include "ubi.h"
/**
- * calc_data_len - calculate how much real data is stored in a buffer.
+ * ubi_calc_data_len - calculate how much real data is stored in a buffer.
* @ubi: UBI device description object
* @buf: a buffer with the contents of the physical eraseblock
* @length: the buffer length
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 8fcc0bdf0635..2c867d16f89f 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -464,7 +464,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
for (i = 0; i < -pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
if (err)
- goto out_acc;
+ goto out_free;
}
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs += pebs;
@@ -512,8 +512,10 @@ out_acc:
ubi->avail_pebs += pebs;
spin_unlock(&ubi->volumes_lock);
}
+ return err;
+
out_free:
- kfree(new_eba_tbl);
+ ubi_eba_destroy_table(new_eba_tbl);
return err;
}
@@ -580,6 +582,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
if (err) {
ubi_err(ubi, "cannot add character device for volume %d, error %d",
vol_id, err);
+ vol_release(&vol->dev);
return err;
}
@@ -590,15 +593,14 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
- if (err)
- goto out_cdev;
+ if (err) {
+ cdev_del(&vol->cdev);
+ put_device(&vol->dev);
+ return err;
+ }
self_check_volumes(ubi);
return err;
-
-out_cdev:
- cdev_del(&vol->cdev);
- return err;
}
/**
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 68eb0f21b3fe..40f39e5d6dfc 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -165,7 +165,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
}
/**
- * wl_tree_destroy - destroy a wear-leveling entry.
+ * wl_entry_destroy - destroy a wear-leveling entry.
* @ubi: UBI device description object
* @e: the wear-leveling entry to add
*
@@ -890,8 +890,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
- if (e2)
+ if (e2) {
+ spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e2);
+ spin_unlock(&ubi->wl_lock);
+ }
goto out_ro;
}
@@ -973,11 +976,11 @@ out_error:
spin_lock(&ubi->wl_lock);
ubi->move_from = ubi->move_to = NULL;
ubi->move_to_put = ubi->wl_scheduled = 0;
+ wl_entry_destroy(ubi, e1);
+ wl_entry_destroy(ubi, e2);
spin_unlock(&ubi->wl_lock);
ubi_free_vid_buf(vidb);
- wl_entry_destroy(ubi, e1);
- wl_entry_destroy(ubi, e2);
out_ro:
ubi_ro_mode(ubi);
@@ -1130,14 +1133,18 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
/* Re-schedule the LEB for erasure */
err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
if (err1) {
+ spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);
+ spin_unlock(&ubi->wl_lock);
err = err1;
goto out_ro;
}
return err;
}
+ spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);
+ spin_unlock(&ubi->wl_lock);
if (err != -EIO)
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
@@ -1253,6 +1260,18 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
+ if (!e) {
+ /*
+ * This wl entry has been removed for some errors by other
+ * process (e.g. wear leveling worker), corresponding process
+ * (except __erase_worker, which cannot run concurrently with
+ * ubi_wl_put_peb) will set ubi ro_mode at the same time,
+ * just ignore this wl entry.
+ */
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_protect);
+ return 0;
+ }
if (e == ubi->move_from) {
/*
* User is putting the physical eraseblock which was selected to
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 3a15015bc409..a508402c4ecb 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -393,6 +393,24 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
}
+/* Set up switch core clock for MT7530 */
+static void mt7530_pll_setup(struct mt7530_priv *priv)
+{
+ /* Disable PLL */
+ core_write(priv, CORE_GSWPLL_GRP1, 0);
+
+ /* Set core clock into 500Mhz */
+ core_write(priv, CORE_GSWPLL_GRP2,
+ RG_GSWPLL_POSDIV_500M(1) |
+ RG_GSWPLL_FBKDIV_500M(25));
+
+ /* Enable PLL */
+ core_write(priv, CORE_GSWPLL_GRP1,
+ RG_GSWPLL_EN_PRE |
+ RG_GSWPLL_POSDIV_200M(2) |
+ RG_GSWPLL_FBKDIV_200M(32));
+}
+
/* Setup TX circuit including relevant PAD and driving */
static int
mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
@@ -453,21 +471,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
REG_GSWCK_EN | REG_TRGMIICK_EN);
- /* Setup core clock for MT7530 */
- /* Disable PLL */
- core_write(priv, CORE_GSWPLL_GRP1, 0);
-
- /* Set core clock into 500Mhz */
- core_write(priv, CORE_GSWPLL_GRP2,
- RG_GSWPLL_POSDIV_500M(1) |
- RG_GSWPLL_FBKDIV_500M(25));
-
- /* Enable PLL */
- core_write(priv, CORE_GSWPLL_GRP1,
- RG_GSWPLL_EN_PRE |
- RG_GSWPLL_POSDIV_200M(2) |
- RG_GSWPLL_FBKDIV_200M(32));
-
/* Setup the MT7530 TRGMII Tx Clock */
core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
@@ -2196,6 +2199,8 @@ mt7530_setup(struct dsa_switch *ds)
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
+ mt7530_pll_setup(priv);
+
/* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
val = mt7530_read(priv, MT7530_MHWTRAP);
val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
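The mt7530 hunks above are a code move plus a reordering: the GSW PLL programming that used to run inside mt7530_pad_clk_setup() is factored out into the new mt7530_pll_setup() helper, which mt7530_setup() now calls right after the software reset and before the MHWTRAP configuration. A minimal sketch of the resulting setup-time ordering, with everything except the calls visible in the diff elided (the sketch function name is hypothetical):

	/* sketch only -- error handling and surrounding register writes omitted */
	static int mt7530_setup_order_sketch(struct dsa_switch *ds)
	{
		struct mt7530_priv *priv = ds->priv;

		/* 1. reset the switch core (SYS_CTRL_*_RST writes) */
		/* 2. program the core clock PLL, now done unconditionally */
		mt7530_pll_setup(priv);
		/* 3. continue with MHWTRAP / port configuration as before */
		return 0;
	}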
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 323ec56e8a74..1917da784191 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -132,6 +132,16 @@ source "drivers/net/ethernet/mscc/Kconfig"
source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
+
+config FEALNX
+ tristate "Myson MTD-8xx PCI Ethernet support"
+ depends on PCI
+ select CRC32
+ select MII
+ help
+ Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
+ cards. <http://www.myson.com.tw/>
+
source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/natsemi/Kconfig"
source "drivers/net/ethernet/neterion/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 2fedbaa545eb..0d872d4efcd1 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
+obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 3038386a5afd..1761df8fb7f9 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -890,13 +890,13 @@ static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
if (iost & BGMAC_BCMA_IOST_ATTACHED) {
flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
- if (!bgmac->has_robosw)
+ if (bgmac->in_init || !bgmac->has_robosw)
flags |= BGMAC_BCMA_IOCTL_SW_RESET;
}
bgmac_clk_enable(bgmac, flags);
}
- if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
+ if (iost & BGMAC_BCMA_IOST_ATTACHED && (bgmac->in_init || !bgmac->has_robosw))
bgmac_idm_write(bgmac, BCMA_IOCTL,
bgmac_idm_read(bgmac, BCMA_IOCTL) &
~BGMAC_BCMA_IOCTL_SW_RESET);
@@ -1490,6 +1490,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
struct net_device *net_dev = bgmac->net_dev;
int err;
+ bgmac->in_init = true;
+
bgmac_chip_intrs_off(bgmac);
net_dev->irq = bgmac->irq;
@@ -1542,6 +1544,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
/* Omit FCS from max MTU size */
net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;
+ bgmac->in_init = false;
+
err = register_netdev(bgmac->net_dev);
if (err) {
dev_err(bgmac->dev, "Cannot register net device\n");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index e05ac92c0650..d73ef262991d 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -472,6 +472,8 @@ struct bgmac {
int irq;
u32 int_mask;
+ bool in_init;
+
/* Current MAC state */
int mac_speed;
int mac_duplex;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7245fee13ad0..dceaecab6605 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3144,7 +3144,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
static void bnxt_free_tpa_info(struct bnxt *bp)
{
- int i;
+ int i, j;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
@@ -3152,8 +3152,10 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
kfree(rxr->rx_tpa_idx_map);
rxr->rx_tpa_idx_map = NULL;
if (rxr->rx_tpa) {
- kfree(rxr->rx_tpa[0].agg_arr);
- rxr->rx_tpa[0].agg_arr = NULL;
+ for (j = 0; j < bp->max_tpa; j++) {
+ kfree(rxr->rx_tpa[j].agg_arr);
+ rxr->rx_tpa[j].agg_arr = NULL;
+ }
}
kfree(rxr->rx_tpa);
rxr->rx_tpa = NULL;
@@ -3162,14 +3164,13 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
- int i, j, total_aggs = 0;
+ int i, j;
bp->max_tpa = MAX_TPA;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
if (!bp->max_tpa_v2)
return 0;
bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
- total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
}
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -3183,12 +3184,12 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_CHIP_P5))
continue;
- agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
- rxr->rx_tpa[0].agg_arr = agg;
- if (!agg)
- return -ENOMEM;
- for (j = 1; j < bp->max_tpa; j++)
- rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
+ for (j = 0; j < bp->max_tpa; j++) {
+ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+ if (!agg)
+ return -ENOMEM;
+ rxr->rx_tpa[j].agg_arr = agg;
+ }
rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
GFP_KERNEL);
if (!rxr->rx_tpa_idx_map)
@@ -13200,8 +13201,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_free_hwrm_resources(bp);
bnxt_ethtool_free(bp);
bnxt_dcb_free(bp);
- kfree(bp->edev);
- bp->edev = NULL;
kfree(bp->ptp_cfg);
bp->ptp_cfg = NULL;
kfree(bp->fw_health);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index d4cc9c371e7b..e7b5e28ee29f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -317,9 +317,11 @@ static void bnxt_aux_dev_release(struct device *dev)
{
struct bnxt_aux_priv *aux_priv =
container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
+ struct bnxt *bp = netdev_priv(aux_priv->edev->net);
ida_free(&bnxt_aux_dev_ids, aux_priv->id);
kfree(aux_priv->edev->ulp_tbl);
+ bp->edev = NULL;
kfree(aux_priv->edev);
kfree(aux_priv);
}
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
new file mode 100644
index 000000000000..ed18450fd2cc
--- /dev/null
+++ b/drivers/net/ethernet/fealnx.c
@@ -0,0 +1,1953 @@
+/*
+ Written 1998-2000 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/pci-skeleton.html
+
+ Linux kernel updates:
+
+ Version 2.51, Nov 17, 2001 (jgarzik):
+ - Add ethtool support
+ - Replace some MII-related magic numbers with constants
+
+*/
+
+#define DRV_NAME "fealnx"
+
+static int debug; /* 1-> print debug message */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
+/* Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak;
+
+/* Used to pass the media type, etc. */
+/* Both 'options[]' and 'full_duplex[]' should exist for driver */
+/* interoperability. */
+/* The media type is usually passed in 'options[]'. */
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+
+/* Operational parameters that are set at compile time. */
+/* Keep the ring sizes a power of two for compile efficiency. */
+/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
+/* Making the Tx ring too large decreases the effectiveness of channel */
+/* bonding and packet priority. */
+/* There are no ill effects from too-large receive rings. */
+// 88-12-9 modify,
+// #define TX_RING_SIZE 16
+// #define RX_RING_SIZE 32
+#define TX_RING_SIZE 6
+#define RX_RING_SIZE 12
+#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct fealnx_desc)
+#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct fealnx_desc)
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
+
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+
+/* This driver was written to use PCI memory space; however, some x86 systems
+ work only with I/O space accesses. */
+#ifndef __alpha__
+#define USE_IO_OPS
+#endif
+
+/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
+/* This is only in the support-all-kernels source code. */
+
+#define RUN_AT(x) (jiffies + (x))
+
+MODULE_AUTHOR("Myson or whoever");
+MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
+MODULE_LICENSE("GPL");
+module_param(max_interrupt_work, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(multicast_filter_limit, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
+MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
+
+enum {
+ MIN_REGION_SIZE = 136,
+};
+
+/* A chip capabilities table, matching the entries in pci_tbl[] above. */
+enum chip_capability_flags {
+ HAS_MII_XCVR,
+ HAS_CHIP_XCVR,
+};
+
+/* 89/6/13 add, */
+/* for different PHY */
+enum phy_type_flags {
+ MysonPHY = 1,
+ AhdocPHY = 2,
+ SeeqPHY = 3,
+ MarvellPHY = 4,
+ Myson981 = 5,
+ LevelOnePHY = 6,
+ OtherPHY = 10,
+};
+
+struct chip_info {
+ char *chip_name;
+ int flags;
+};
+
+static const struct chip_info skel_netdrv_tbl[] = {
+ { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
+ { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
+ { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
+};
+
+/* Offsets to the Command and Status Registers. */
+enum fealnx_offsets {
+ PAR0 = 0x0, /* physical address 0-3 */
+ PAR1 = 0x04, /* physical address 4-5 */
+ MAR0 = 0x08, /* multicast address 0-3 */
+ MAR1 = 0x0C, /* multicast address 4-7 */
+ FAR0 = 0x10, /* flow-control address 0-3 */
+ FAR1 = 0x14, /* flow-control address 4-5 */
+ TCRRCR = 0x18, /* receive & transmit configuration */
+ BCR = 0x1C, /* bus command */
+ TXPDR = 0x20, /* transmit polling demand */
+ RXPDR = 0x24, /* receive polling demand */
+ RXCWP = 0x28, /* receive current word pointer */
+ TXLBA = 0x2C, /* transmit list base address */
+ RXLBA = 0x30, /* receive list base address */
+ ISR = 0x34, /* interrupt status */
+ IMR = 0x38, /* interrupt mask */
+ FTH = 0x3C, /* flow control high/low threshold */
+ MANAGEMENT = 0x40, /* bootrom/eeprom and mii management */
+ TALLY = 0x44, /* tally counters for crc and mpa */
+ TSR = 0x48, /* tally counter for transmit status */
+ BMCRSR = 0x4c, /* basic mode control and status */
+ PHYIDENTIFIER = 0x50, /* phy identifier */
+ ANARANLPAR = 0x54, /* auto-negotiation advertisement and link
+ partner ability */
+ ANEROCR = 0x58, /* auto-negotiation expansion and pci conf. */
+ BPREMRPSR = 0x5c, /* bypass & receive error mask and phy status */
+};
+
+/* Bits in the interrupt status/enable registers. */
+/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
+enum intr_status_bits {
+ RFCON = 0x00020000, /* receive flow control xon packet */
+ RFCOFF = 0x00010000, /* receive flow control xoff packet */
+ LSCStatus = 0x00008000, /* link status change */
+ ANCStatus = 0x00004000, /* autonegotiation completed */
+ FBE = 0x00002000, /* fatal bus error */
+ FBEMask = 0x00001800, /* mask bit12-11 */
+ ParityErr = 0x00000000, /* parity error */
+ TargetErr = 0x00001000, /* target abort */
+ MasterErr = 0x00000800, /* master error */
+ TUNF = 0x00000400, /* transmit underflow */
+ ROVF = 0x00000200, /* receive overflow */
+ ETI = 0x00000100, /* transmit early int */
+ ERI = 0x00000080, /* receive early int */
+ CNTOVF = 0x00000040, /* counter overflow */
+ RBU = 0x00000020, /* receive buffer unavailable */
+ TBU = 0x00000010, /* transmit buffer unavailable */
+ TI = 0x00000008, /* transmit interrupt */
+ RI = 0x00000004, /* receive interrupt */
+ RxErr = 0x00000002, /* receive error */
+};
+
+/* Bits in the NetworkConfig register, W for writing, R for reading */
+/* FIXME: some names are invented by me. Marked with (name?) */
+/* If you have docs and know bit names, please fix 'em */
+enum rx_mode_bits {
+ CR_W_ENH = 0x02000000, /* enhanced mode (name?) */
+ CR_W_FD = 0x00100000, /* full duplex */
+ CR_W_PS10 = 0x00080000, /* 10 mbit */
+ CR_W_TXEN = 0x00040000, /* tx enable (name?) */
+ CR_W_PS1000 = 0x00010000, /* 1000 mbit */
+ /* CR_W_RXBURSTMASK= 0x00000e00, I'm unsure about this */
+ CR_W_RXMODEMASK = 0x000000e0,
+ CR_W_PROM = 0x00000080, /* promiscuous mode */
+ CR_W_AB = 0x00000040, /* accept broadcast */
+ CR_W_AM = 0x00000020, /* accept multicast */
+ CR_W_ARP = 0x00000008, /* receive runt pkt */
+ CR_W_ALP = 0x00000004, /* receive long pkt */
+ CR_W_SEP = 0x00000002, /* receive error pkt */
+ CR_W_RXEN = 0x00000001, /* rx enable (unicast?) (name?) */
+
+ CR_R_TXSTOP = 0x04000000, /* tx stopped (name?) */
+ CR_R_FD = 0x00100000, /* full duplex detected */
+ CR_R_PS10 = 0x00080000, /* 10 mbit detected */
+ CR_R_RXSTOP = 0x00008000, /* rx stopped (name?) */
+};
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct fealnx_desc {
+ s32 status;
+ s32 control;
+ u32 buffer;
+ u32 next_desc;
+ struct fealnx_desc *next_desc_logical;
+ struct sk_buff *skbuff;
+ u32 reserved1;
+ u32 reserved2;
+};
+
+/* Bits in network_desc.status */
+enum rx_desc_status_bits {
+ RXOWN = 0x80000000, /* own bit */
+ FLNGMASK = 0x0fff0000, /* frame length */
+ FLNGShift = 16,
+ MARSTATUS = 0x00004000, /* multicast address received */
+ BARSTATUS = 0x00002000, /* broadcast address received */
+ PHYSTATUS = 0x00001000, /* physical address received */
+ RXFSD = 0x00000800, /* first descriptor */
+ RXLSD = 0x00000400, /* last descriptor */
+ ErrorSummary = 0x80, /* error summary */
+ RUNTPKT = 0x40, /* runt packet received */
+ LONGPKT = 0x20, /* long packet received */
+ FAE = 0x10, /* frame align error */
+ CRC = 0x08, /* crc error */
+ RXER = 0x04, /* receive error */
+};
+
+enum rx_desc_control_bits {
+ RXIC = 0x00800000, /* interrupt control */
+ RBSShift = 0,
+};
+
+enum tx_desc_status_bits {
+ TXOWN = 0x80000000, /* own bit */
+ JABTO = 0x00004000, /* jabber timeout */
+ CSL = 0x00002000, /* carrier sense lost */
+ LC = 0x00001000, /* late collision */
+ EC = 0x00000800, /* excessive collision */
+ UDF = 0x00000400, /* fifo underflow */
+ DFR = 0x00000200, /* deferred */
+ HF = 0x00000100, /* heartbeat fail */
+ NCRMask = 0x000000ff, /* collision retry count */
+ NCRShift = 0,
+};
+
+enum tx_desc_control_bits {
+ TXIC = 0x80000000, /* interrupt control */
+ ETIControl = 0x40000000, /* early transmit interrupt */
+ TXLD = 0x20000000, /* last descriptor */
+ TXFD = 0x10000000, /* first descriptor */
+ CRCEnable = 0x08000000, /* crc control */
+ PADEnable = 0x04000000, /* padding control */
+ RetryTxLC = 0x02000000, /* retry late collision */
+ PKTSMask = 0x3ff800, /* packet size bit21-11 */
+ PKTSShift = 11,
+ TBSMask = 0x000007ff, /* transmit buffer bit 10-0 */
+ TBSShift = 0,
+};
+
+/* BootROM/EEPROM/MII Management Register */
+#define MASK_MIIR_MII_READ 0x00000000
+#define MASK_MIIR_MII_WRITE 0x00000008
+#define MASK_MIIR_MII_MDO 0x00000004
+#define MASK_MIIR_MII_MDI 0x00000002
+#define MASK_MIIR_MII_MDC 0x00000001
+
+/* ST+OP+PHYAD+REGAD+TA */
+#define OP_READ 0x6000 /* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
+#define OP_WRITE 0x5002 /* ST:01+OP:01+PHYAD+REGAD+TA:10 */
+
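OP_READ and OP_WRITE above are the fixed head of the 16-bit MII management frame (start bits, opcode and turnaround); m80x_send_cmd_to_phy() below ORs the PHY and register addresses into the middle before clocking the word out MSB-first. A minimal sketch of that composition, mirroring the expression used in m80x_send_cmd_to_phy() (the helper name is hypothetical):

	/* ST in bits 15-14, OP in bits 13-12, PHYAD in bits 11-7, REGAD in bits 6-2, TA in bits 1-0 */
	static unsigned int mii_cmd_word(unsigned int opcode, int phyad, int regad)
	{
		return opcode | (phyad << 7) | (regad << 2);
	}

	/* Example: reading register 2 of the PHY at address 1 gives
	 * mii_cmd_word(OP_READ, 1, 2) == 0x6000 | 0x80 | 0x08 == 0x6088. */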
+/* ------------------------------------------------------------------------- */
+/* Constants for Myson PHY */
+/* ------------------------------------------------------------------------- */
+#define MysonPHYID 0xd0000302
+/* 89-7-27 add, (begin) */
+#define MysonPHYID0 0x0302
+#define StatusRegister 18
+#define SPEED100 0x0400 // bit10
+#define FULLMODE 0x0800 // bit11
+/* 89-7-27 add, (end) */
+
+/* ------------------------------------------------------------------------- */
+/* Constants for Seeq 80225 PHY */
+/* ------------------------------------------------------------------------- */
+#define SeeqPHYID0 0x0016
+
+#define MIIRegister18 18
+#define SPD_DET_100 0x80
+#define DPLX_DET_FULL 0x40
+
+/* ------------------------------------------------------------------------- */
+/* Constants for Ahdoc 101 PHY */
+/* ------------------------------------------------------------------------- */
+#define AhdocPHYID0 0x0022
+
+#define DiagnosticReg 18
+#define DPLX_FULL 0x0800
+#define Speed_100 0x0400
+
+/* 89/6/13 add, */
+/* -------------------------------------------------------------------------- */
+/* Constants */
+/* -------------------------------------------------------------------------- */
+#define MarvellPHYID0 0x0141
+#define LevelOnePHYID0 0x0013
+
+#define MII1000BaseTControlReg 9
+#define MII1000BaseTStatusReg 10
+#define SpecificReg 17
+
+/* for 1000BaseT Control Register */
+#define PHYAbletoPerform1000FullDuplex 0x0200
+#define PHYAbletoPerform1000HalfDuplex 0x0100
+#define PHY1000AbilityMask 0x300
+
+// for phy specific status register, marvell phy.
+#define SpeedMask 0x0c000
+#define Speed_1000M 0x08000
+#define Speed_100M 0x4000
+#define Speed_10M 0
+#define Full_Duplex 0x2000
+
+// 89/12/29 add, for phy specific status register, levelone phy, (begin)
+#define LXT1000_100M 0x08000
+#define LXT1000_1000M 0x0c000
+#define LXT1000_Full 0x200
+// 89/12/29 add, for phy specific status register, levelone phy, (end)
+
+/* for 3-in-1 case, BMCRSR register */
+#define LinkIsUp2 0x00040000
+
+/* for PHY */
+#define LinkIsUp 0x0004
+
+
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct fealnx_desc *rx_ring;
+ struct fealnx_desc *tx_ring;
+
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+
+ spinlock_t lock;
+
+ /* Media monitoring timer. */
+ struct timer_list timer;
+
+ /* Reset timer */
+ struct timer_list reset_timer;
+ int reset_timer_armed;
+ unsigned long crvalue_sv;
+ unsigned long imrvalue_sv;
+
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int flags;
+ struct pci_dev *pci_dev;
+ unsigned long crvalue;
+ unsigned long bcrvalue;
+ unsigned long imrvalue;
+ struct fealnx_desc *cur_rx;
+ struct fealnx_desc *lack_rxbuf;
+ int really_rx_count;
+ struct fealnx_desc *cur_tx;
+ struct fealnx_desc *cur_tx_copy;
+ int really_tx_count;
+ int free_tx_count;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int linkok;
+ unsigned int line_speed;
+ unsigned int duplexmode;
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int PHYType;
+
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ unsigned char phys[2]; /* MII device addresses. */
+ struct mii_if_info mii;
+ void __iomem *mem;
+};
+
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void getlinktype(struct net_device *dev);
+static void getlinkstatus(struct net_device *dev);
+static void netdev_timer(struct timer_list *t);
+static void reset_timer(struct timer_list *t);
+static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue);
+static void init_ring(struct net_device *dev);
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance);
+static int netdev_rx(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void __set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static const struct ethtool_ops netdev_ethtool_ops;
+static int netdev_close(struct net_device *dev);
+static void reset_rx_descriptors(struct net_device *dev);
+static void reset_tx_descriptors(struct net_device *dev);
+
+static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
+{
+ int delay = 0x1000;
+ iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
+ while (--delay) {
+ if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
+ break;
+ }
+}
+
+
+static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
+{
+ int delay = 0x1000;
+ iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
+ while (--delay) {
+ if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
+ == (CR_R_RXSTOP+CR_R_TXSTOP) )
+ break;
+ }
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_start_xmit = start_tx,
+ .ndo_get_stats = get_stats,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_eth_ioctl = mii_ioctl,
+ .ndo_tx_timeout = fealnx_tx_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int fealnx_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct netdev_private *np;
+ int i, option, err, irq;
+ static int card_idx = -1;
+ char boardname[12];
+ void __iomem *ioaddr;
+ unsigned long len;
+ unsigned int chip_id = ent->driver_data;
+ struct net_device *dev;
+ void *ring_space;
+ dma_addr_t ring_dma;
+ u8 addr[ETH_ALEN];
+#ifdef USE_IO_OPS
+ int bar = 0;
+#else
+ int bar = 1;
+#endif
+
+ card_idx++;
+ sprintf(boardname, "fealnx%d", card_idx);
+
+ option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ i = pci_enable_device(pdev);
+ if (i) return i;
+ pci_set_master(pdev);
+
+ len = pci_resource_len(pdev, bar);
+ if (len < MIN_REGION_SIZE) {
+ dev_err(&pdev->dev,
+ "region size %ld too small, aborting\n", len);
+ return -ENODEV;
+ }
+
+ i = pci_request_regions(pdev, boardname);
+ if (i)
+ return i;
+
+ irq = pdev->irq;
+
+ ioaddr = pci_iomap(pdev, bar, len);
+ if (!ioaddr) {
+ err = -ENOMEM;
+ goto err_out_res;
+ }
+
+ dev = alloc_etherdev(sizeof(struct netdev_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out_unmap;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* read ethernet id */
+ for (i = 0; i < 6; ++i)
+ addr[i] = ioread8(ioaddr + PAR0 + i);
+ eth_hw_addr_set(dev, addr);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ iowrite32(0x00000001, ioaddr + BCR);
+
+ /* Make certain the descriptor lists are aligned. */
+ np = netdev_priv(dev);
+ np->mem = ioaddr;
+ spin_lock_init(&np->lock);
+ np->pci_dev = pdev;
+ np->flags = skel_netdrv_tbl[chip_id].flags;
+ pci_set_drvdata(pdev, dev);
+ np->mii.dev = dev;
+ np->mii.mdio_read = mdio_read;
+ np->mii.mdio_write = mdio_write;
+ np->mii.phy_id_mask = 0x1f;
+ np->mii.reg_num_mask = 0x1f;
+
+ ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
+ if (!ring_space) {
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+ np->rx_ring = ring_space;
+ np->rx_ring_dma = ring_dma;
+
+ ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
+ if (!ring_space) {
+ err = -ENOMEM;
+ goto err_out_free_rx;
+ }
+ np->tx_ring = ring_space;
+ np->tx_ring_dma = ring_dma;
+
+ /* find the connected MII xcvrs */
+ if (np->flags == HAS_MII_XCVR) {
+ int phy, phy_idx = 0;
+
+ for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
+ phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ dev_info(&pdev->dev,
+ "MII PHY found at address %d, status "
+ "0x%4.4x.\n", phy, mii_status);
+ /* get phy type */
+ {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], 2);
+ if (data == SeeqPHYID0)
+ np->PHYType = SeeqPHY;
+ else if (data == AhdocPHYID0)
+ np->PHYType = AhdocPHY;
+ else if (data == MarvellPHYID0)
+ np->PHYType = MarvellPHY;
+ else if (data == MysonPHYID0)
+ np->PHYType = Myson981;
+ else if (data == LevelOnePHYID0)
+ np->PHYType = LevelOnePHY;
+ else
+ np->PHYType = OtherPHY;
+ }
+ }
+ }
+
+ np->mii_cnt = phy_idx;
+ if (phy_idx == 0)
+ dev_warn(&pdev->dev,
+ "MII PHY not found -- this device may "
+ "not operate correctly.\n");
+ } else {
+ np->phys[0] = 32;
+/* 89/6/23 add, (begin) */
+ /* get phy type */
+ if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
+ np->PHYType = MysonPHY;
+ else
+ np->PHYType = OtherPHY;
+ }
+ np->mii.phy_id = np->phys[0];
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x200)
+ np->mii.full_duplex = 1;
+ np->default_port = option & 15;
+ }
+
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->mii.full_duplex = full_duplex[card_idx];
+
+ if (np->mii.full_duplex) {
+ dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
+/* 89/6/13 add, (begin) */
+// if (np->PHYType==MarvellPHY)
+ if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], 9);
+ data = (data & 0xfcff) | 0x0200;
+ mdio_write(dev, np->phys[0], 9, data);
+ }
+/* 89/6/13 add, (end) */
+ if (np->flags == HAS_MII_XCVR)
+ mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
+ else
+ iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
+ np->mii.force_media = 1;
+ }
+
+ dev->netdev_ops = &netdev_ops;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_out_free_tx;
+
+ printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
+ dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
+ dev->dev_addr, irq);
+
+ return 0;
+
+err_out_free_tx:
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
+err_out_free_rx:
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
+err_out_free_dev:
+ free_netdev(dev);
+err_out_unmap:
+ pci_iounmap(pdev, ioaddr);
+err_out_res:
+ pci_release_regions(pdev);
+ return err;
+}
+
+
+static void fealnx_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
+
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
+ unregister_netdev(dev);
+ pci_iounmap(pdev, np->mem);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ } else
+ printk(KERN_ERR "fealnx: remove for unknown device\n");
+}
+
+
+static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
+{
+ ulong miir;
+ int i;
+ unsigned int mask, data;
+
+ /* enable MII output */
+ miir = (ulong) ioread32(miiport);
+ miir &= 0xfffffff0;
+
+ miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;
+
+ /* send 32 1's preamble */
+ for (i = 0; i < 32; i++) {
+ /* low MDC; MDO is already high (miir) */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ /* high MDC */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+ }
+
+ /* calculate ST+OP+PHYAD+REGAD+TA */
+ data = opcode | (phyad << 7) | (regad << 2);
+
+ /* sent out */
+ mask = 0x8000;
+ while (mask) {
+ /* low MDC, prepare MDO */
+ miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
+ if (mask & data)
+ miir |= MASK_MIIR_MII_MDO;
+
+ iowrite32(miir, miiport);
+ /* high MDC */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+ udelay(30);
+
+ /* next */
+ mask >>= 1;
+ if (mask == 0x2 && opcode == OP_READ)
+ miir &= ~MASK_MIIR_MII_WRITE;
+ }
+ return miir;
+}
+
+
+static int mdio_read(struct net_device *dev, int phyad, int regad)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *miiport = np->mem + MANAGEMENT;
+ ulong miir;
+ unsigned int mask, data;
+
+ miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);
+
+ /* read data */
+ mask = 0x8000;
+ data = 0;
+ while (mask) {
+ /* low MDC */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ /* read MDI */
+ miir = ioread32(miiport);
+ if (miir & MASK_MIIR_MII_MDI)
+ data |= mask;
+
+ /* high MDC, and wait */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+ udelay(30);
+
+ /* next */
+ mask >>= 1;
+ }
+
+ /* low MDC */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ return data & 0xffff;
+}
+
+
+static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *miiport = np->mem + MANAGEMENT;
+ ulong miir;
+ unsigned int mask;
+
+ miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);
+
+ /* write data */
+ mask = 0x8000;
+ while (mask) {
+ /* low MDC, prepare MDO */
+ miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
+ if (mask & data)
+ miir |= MASK_MIIR_MII_MDO;
+ iowrite32(miir, miiport);
+
+ /* high MDC */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ /* next */
+ mask >>= 1;
+ }
+
+ /* low MDC */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ const int irq = np->pci_dev->irq;
+ int rc, i;
+
+ iowrite32(0x00000001, ioaddr + BCR); /* Reset */
+
+ rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
+ if (rc)
+ return -EAGAIN;
+
+ for (i = 0; i < 3; i++)
+ iowrite16(((const unsigned short *)dev->dev_addr)[i],
+ ioaddr + PAR0 + i*2);
+
+ init_ring(dev);
+
+ iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
+ iowrite32(np->tx_ring_dma, ioaddr + TXLBA);
+
+ /* Initialize other registers. */
+ /* Configure the PCI bus bursts and FIFO thresholds.
+ 486: Set 8 longword burst.
+ 586: no burst limit.
+ Burst length 5:3
+ 0 0 0 1
+ 0 0 1 4
+ 0 1 0 8
+ 0 1 1 16
+ 1 0 0 32
+ 1 0 1 64
+ 1 1 0 128
+ 1 1 1 256
+ Wait the specified 50 PCI cycles after a reset by initializing
+ Tx and Rx queues and the address filter list.
+ FIXME (Ueimor): optimistic for alpha + posted writes ? */
+
+ np->bcrvalue = 0x10; /* little-endian, 8 burst length */
+#ifdef __BIG_ENDIAN
+ np->bcrvalue |= 0x04; /* big-endian */
+#endif
+
+#if defined(__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
+ if (boot_cpu_data.x86 <= 4)
+ np->crvalue = 0xa00;
+ else
+#endif
+ np->crvalue = 0xe00; /* rx 128 burst length */
+
+
+// 89/12/29 add,
+// 90/1/16 modify,
+// np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
+ np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
+ if (np->pci_dev->device == 0x891) {
+ np->bcrvalue |= 0x200; /* set PROG bit */
+ np->crvalue |= CR_W_ENH; /* set enhanced bit */
+ np->imrvalue |= ETI;
+ }
+ iowrite32(np->bcrvalue, ioaddr + BCR);
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ iowrite32(0, ioaddr + RXPDR);
+// 89/9/1 modify,
+// np->crvalue = 0x00e40001; /* tx store and forward, tx/rx enable */
+ np->crvalue |= 0x00e40001; /* tx store and forward, tx/rx enable */
+ np->mii.full_duplex = np->mii.force_media;
+ getlinkstatus(dev);
+ if (np->linkok)
+ getlinktype(dev);
+ __set_rx_mode(dev);
+
+ netif_start_queue(dev);
+
+ /* Clear and Enable interrupts by setting the interrupt mask. */
+ iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
+ iowrite32(np->imrvalue, ioaddr + IMR);
+
+ if (debug)
+ printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
+
+ /* Set the timer to check for link beat. */
+ timer_setup(&np->timer, netdev_timer, 0);
+ np->timer.expires = RUN_AT(3 * HZ);
+
+ /* timer handler */
+ add_timer(&np->timer);
+
+ timer_setup(&np->reset_timer, reset_timer, 0);
+ np->reset_timer_armed = 0;
+ return rc;
+}
+
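Decoding the bcrvalue chosen in netdev_open() against the burst-length table in the comment above: 0x10 puts 010 into BCR bits 5:3, i.e. an 8-longword burst, and 0x04 switches the descriptors to big-endian on __BIG_ENDIAN kernels. A small sketch of the same composition (the function name is hypothetical; the constants are taken straight from netdev_open()):

	static unsigned long fealnx_default_bcr(void)
	{
		unsigned long bcr = 0x10;	/* bits 5:3 = 010 -> 8-longword burst */

	#ifdef __BIG_ENDIAN
		bcr |= 0x04;			/* big-endian descriptor format */
	#endif
		/* ((bcr >> 3) & 0x7) == 2, which the table above maps to a burst of 8 */
		return bcr;
	}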
+
+static void getlinkstatus(struct net_device *dev)
+/* function: Routine will read MII Status Register to get link status. */
+/* input : dev... pointer to the adapter block. */
+/* output : none. */
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned int i, DelayTime = 0x1000;
+
+ np->linkok = 0;
+
+ if (np->PHYType == MysonPHY) {
+ for (i = 0; i < DelayTime; ++i) {
+ if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
+ np->linkok = 1;
+ return;
+ }
+ udelay(100);
+ }
+ } else {
+ for (i = 0; i < DelayTime; ++i) {
+ if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
+ np->linkok = 1;
+ return;
+ }
+ udelay(100);
+ }
+ }
+}
+
+
+static void getlinktype(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (np->PHYType == MysonPHY) { /* 3-in-1 case */
+ if (ioread32(np->mem + TCRRCR) & CR_R_FD)
+ np->duplexmode = 2; /* full duplex */
+ else
+ np->duplexmode = 1; /* half duplex */
+ if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
+ np->line_speed = 1; /* 10M */
+ else
+ np->line_speed = 2; /* 100M */
+ } else {
+ if (np->PHYType == SeeqPHY) { /* this PHY is SEEQ 80225 */
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], MIIRegister18);
+ if (data & SPD_DET_100)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ if (data & DPLX_DET_FULL)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ } else if (np->PHYType == AhdocPHY) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], DiagnosticReg);
+ if (data & Speed_100)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ if (data & DPLX_FULL)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ }
+/* 89/6/13 add, (begin) */
+ else if (np->PHYType == MarvellPHY) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], SpecificReg);
+ if (data & Full_Duplex)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ data &= SpeedMask;
+ if (data == Speed_1000M)
+ np->line_speed = 3; /* 1000M */
+ else if (data == Speed_100M)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ }
+/* 89/6/13 add, (end) */
+/* 89/7/27 add, (begin) */
+ else if (np->PHYType == Myson981) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], StatusRegister);
+
+ if (data & SPEED100)
+ np->line_speed = 2;
+ else
+ np->line_speed = 1;
+
+ if (data & FULLMODE)
+ np->duplexmode = 2;
+ else
+ np->duplexmode = 1;
+ }
+/* 89/7/27 add, (end) */
+/* 89/12/29 add */
+ else if (np->PHYType == LevelOnePHY) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], SpecificReg);
+ if (data & LXT1000_Full)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ data &= SpeedMask;
+ if (data == LXT1000_1000M)
+ np->line_speed = 3; /* 1000M */
+ else if (data == LXT1000_100M)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ }
+ np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
+ if (np->line_speed == 1)
+ np->crvalue |= CR_W_PS10;
+ else if (np->line_speed == 3)
+ np->crvalue |= CR_W_PS1000;
+ if (np->duplexmode == 2)
+ np->crvalue |= CR_W_FD;
+ }
+}
+
+
+/* Take lock before calling this */
+static void allocate_rx_buffers(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ /* allocate skb for rx buffers */
+ while (np->really_rx_count != RX_RING_SIZE) {
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, np->rx_buf_sz);
+ if (skb == NULL)
+ break; /* Better luck next round. */
+
+ while (np->lack_rxbuf->skbuff)
+ np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
+
+ np->lack_rxbuf->skbuff = skb;
+ np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
+ skb->data,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ np->lack_rxbuf->status = RXOWN;
+ ++np->really_rx_count;
+ }
+}
+
+
+static void netdev_timer(struct timer_list *t)
+{
+ struct netdev_private *np = from_timer(np, t, timer);
+ struct net_device *dev = np->mii.dev;
+ void __iomem *ioaddr = np->mem;
+ int old_crvalue = np->crvalue;
+ unsigned int old_linkok = np->linkok;
+ unsigned long flags;
+
+ if (debug)
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
+ "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
+ ioread32(ioaddr + TCRRCR));
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ if (np->flags == HAS_MII_XCVR) {
+ getlinkstatus(dev);
+ if ((old_linkok == 0) && (np->linkok == 1)) { /* we need to detect the media type again */
+ getlinktype(dev);
+ if (np->crvalue != old_crvalue) {
+ stop_nic_rxtx(ioaddr, np->crvalue);
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+ }
+ }
+ }
+
+ allocate_rx_buffers(dev);
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ np->timer.expires = RUN_AT(10 * HZ);
+ add_timer(&np->timer);
+}
+
+
+/* Take lock before calling */
+/* Reset chip and disable rx, tx and interrupts */
+static void reset_and_disable_rxtx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ int delay=51;
+
+ /* Reset the chip's Tx and Rx processes. */
+ stop_nic_rxtx(ioaddr, 0);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite32(0, ioaddr + IMR);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ iowrite32(0x00000001, ioaddr + BCR);
+
+ /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
+ We surely wait too long (address+data phase). Who cares? */
+ while (--delay) {
+ ioread32(ioaddr + BCR);
+ rmb();
+ }
+}
+
+
+/* Take lock before calling */
+/* Restore chip after reset */
+static void enable_rxtx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+
+ reset_rx_descriptors(dev);
+
+ iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
+ ioaddr + TXLBA);
+ iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
+ ioaddr + RXLBA);
+
+ iowrite32(np->bcrvalue, ioaddr + BCR);
+
+ iowrite32(0, ioaddr + RXPDR);
+ __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */
+
+ /* Clear and Enable interrupts by setting the interrupt mask. */
+ iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
+ iowrite32(np->imrvalue, ioaddr + IMR);
+
+ iowrite32(0, ioaddr + TXPDR);
+}
+
+
+static void reset_timer(struct timer_list *t)
+{
+ struct netdev_private *np = from_timer(np, t, reset_timer);
+ struct net_device *dev = np->mii.dev;
+ unsigned long flags;
+
+ printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);
+
+ spin_lock_irqsave(&np->lock, flags);
+ np->crvalue = np->crvalue_sv;
+ np->imrvalue = np->imrvalue_sv;
+
+ reset_and_disable_rxtx(dev);
+ /* works for me without this:
+ reset_tx_descriptors(dev); */
+ enable_rxtx(dev);
+ netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */
+
+ np->reset_timer_armed = 0;
+
+ spin_unlock_irqrestore(&np->lock, flags);
+}
+
+
+static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ unsigned long flags;
+ int i;
+
+ printk(KERN_WARNING
+ "%s: Transmit timed out, status %8.8x, resetting...\n",
+ dev->name, ioread32(ioaddr + ISR));
+
+ {
+ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(KERN_CONT " %8.8x",
+ (unsigned int) np->rx_ring[i].status);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
+ printk(KERN_CONT "\n");
+ }
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ reset_and_disable_rxtx(dev);
+ reset_tx_descriptors(dev);
+ enable_rxtx(dev);
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ netif_trans_update(dev); /* prevent tx timeout */
+ dev->stats.tx_errors++;
+ netif_wake_queue(dev); /* or .._start_.. ?? */
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ /* initialize rx variables */
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+ np->cur_rx = &np->rx_ring[0];
+ np->lack_rxbuf = np->rx_ring;
+ np->really_rx_count = 0;
+
+ /* initial rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
+ np->rx_ring[i].next_desc = np->rx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
+ np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
+ np->rx_ring[i].skbuff = NULL;
+ }
+
+ /* for the last rx descriptor */
+ np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
+ np->rx_ring[i - 1].next_desc_logical = np->rx_ring;
+
+ /* allocate skb for rx buffers */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
+
+ if (skb == NULL) {
+ np->lack_rxbuf = &np->rx_ring[i];
+ break;
+ }
+
+ ++np->really_rx_count;
+ np->rx_ring[i].skbuff = skb;
+ np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev,
+ skb->data,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ np->rx_ring[i].status = RXOWN;
+ np->rx_ring[i].control |= RXIC;
+ }
+
+ /* initialize tx variables */
+ np->cur_tx = &np->tx_ring[0];
+ np->cur_tx_copy = &np->tx_ring[0];
+ np->really_tx_count = 0;
+ np->free_tx_count = TX_RING_SIZE;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].status = 0;
+ /* do we need np->tx_ring[i].control = XXX; ?? */
+ np->tx_ring[i].next_desc = np->tx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
+ np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
+ np->tx_ring[i].skbuff = NULL;
+ }
+
+ /* for the last tx descriptor */
+ np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
+ np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
+}
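init_ring() links every descriptor twice: next_desc carries the bus address the NIC follows, next_desc_logical the kernel pointer the driver follows, and the last entry of each ring is patched to point back at the first. A minimal sketch of that invariant for the rx ring (a hypothetical self-check, not part of the driver):

	/* Descriptor i's links must point at descriptor (i + 1) % RX_RING_SIZE. */
	static bool rx_ring_links_ok(struct netdev_private *np)
	{
		int i;

		for (i = 0; i < RX_RING_SIZE; i++) {
			int next = (i + 1) % RX_RING_SIZE;

			if (np->rx_ring[i].next_desc !=
			    np->rx_ring_dma + next * sizeof(struct fealnx_desc))
				return false;
			if (np->rx_ring[i].next_desc_logical != &np->rx_ring[next])
				return false;
		}
		return true;
	}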
+
+
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ np->cur_tx_copy->skbuff = skb;
+
+#define one_buffer
+#define BPT 1022
+#if defined(one_buffer)
+ np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
+ np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
+ np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
+// 89/12/29 add,
+ if (np->pci_dev->device == 0x891)
+ np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+ np->cur_tx_copy->status = TXOWN;
+ np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
+ --np->free_tx_count;
+#elif defined(two_buffer)
+ if (skb->len > BPT) {
+ struct fealnx_desc *next;
+
+ /* for the first descriptor */
+ np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
+ skb->data, BPT,
+ DMA_TO_DEVICE);
+ np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
+ np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
+ np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */
+
+ /* for the last descriptor */
+ next = np->cur_tx_copy->next_desc_logical;
+ next->skbuff = skb;
+ next->control = TXIC | TXLD | CRCEnable | PADEnable;
+ next->control |= (skb->len << PKTSShift); /* pkt size */
+ next->control |= ((skb->len - BPT) << TBSShift); /* buf size */
+// 89/12/29 add,
+ if (np->pci_dev->device == 0x891)
+ np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+ next->buffer = dma_map_single(&np->pci_dev->dev,
+ skb->data + BPT, skb->len - BPT,
+ DMA_TO_DEVICE);
+
+ next->status = TXOWN;
+ np->cur_tx_copy->status = TXOWN;
+
+ np->cur_tx_copy = next->next_desc_logical;
+ np->free_tx_count -= 2;
+ } else {
+ np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
+ skb->data, skb->len,
+ DMA_TO_DEVICE);
+ np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
+ np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
+ np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
+// 89/12/29 add,
+ if (np->pci_dev->device == 0x891)
+ np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+ np->cur_tx_copy->status = TXOWN;
+ np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
+ --np->free_tx_count;
+ }
+#endif
+
+ if (np->free_tx_count < 2)
+ netif_stop_queue(dev);
+ ++np->really_tx_count;
+ iowrite32(0, np->mem + TXPDR);
+
+ spin_unlock_irqrestore(&np->lock, flags);
+ return NETDEV_TX_OK;
+}
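The control word built in start_tx() packs two lengths into one 32-bit field: the full packet size in bits 21-11 (PKTSMask/PKTSShift) and the size of this particular buffer in bits 10-0 (TBSMask/TBSShift); with one_buffer defined the two are identical. A sketch of that packing, mirroring the one_buffer path (the helper name is hypothetical):

	static u32 fealnx_tx_control(unsigned int pkt_len, unsigned int buf_len)
	{
		u32 control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;

		control |= pkt_len << PKTSShift;	/* packet size, bits 21-11 */
		control |= buf_len << TBSShift;		/* buffer size, bits 10-0  */
		return control;
	}

	/* Example: a 1514-byte frame in one buffer gives both fields the value
	 * 0x5ea, i.e. (control & PKTSMask) >> PKTSShift == 1514 and
	 * (control & TBSMask) == 1514. */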
+
+
+/* Take lock before calling */
+/* Chip probably hosed tx ring. Clean up. */
+static void reset_tx_descriptors(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct fealnx_desc *cur;
+ int i;
+
+ /* initialize tx variables */
+ np->cur_tx = &np->tx_ring[0];
+ np->cur_tx_copy = &np->tx_ring[0];
+ np->really_tx_count = 0;
+ np->free_tx_count = TX_RING_SIZE;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ cur = &np->tx_ring[i];
+ if (cur->skbuff) {
+ dma_unmap_single(&np->pci_dev->dev, cur->buffer,
+ cur->skbuff->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(cur->skbuff);
+ cur->skbuff = NULL;
+ }
+ cur->status = 0;
+ cur->control = 0; /* needed? */
+ /* probably not needed. We do it for purely paranoid reasons */
+ cur->next_desc = np->tx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
+ cur->next_desc_logical = &np->tx_ring[i + 1];
+ }
+ /* for the last tx descriptor */
+ np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
+ np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
+}
+
+
+/* Take lock and stop rx before calling this */
+static void reset_rx_descriptors(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct fealnx_desc *cur = np->cur_rx;
+ int i;
+
+ allocate_rx_buffers(dev);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (cur->skbuff)
+ cur->status = RXOWN;
+ cur = cur->next_desc_logical;
+ }
+
+ iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
+ np->mem + RXLBA);
+}
+
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t intr_handler(int irq, void *dev_instance)
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ long boguscnt = max_interrupt_work;
+ unsigned int num_tx = 0;
+ int handled = 0;
+
+ spin_lock(&np->lock);
+
+ iowrite32(0, ioaddr + IMR);
+
+ do {
+ u32 intr_status = ioread32(ioaddr + ISR);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ iowrite32(intr_status, ioaddr + ISR);
+
+ if (debug)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
+ intr_status);
+
+ if (!(intr_status & np->imrvalue))
+ break;
+
+ handled = 1;
+
+// 90/1/16 delete,
+//
+// if (intr_status & FBE)
+// { /* fatal error */
+// stop_nic_tx(ioaddr, 0);
+// stop_nic_rx(ioaddr, 0);
+// break;
+// };
+
+ if (intr_status & TUNF)
+ iowrite32(0, ioaddr + TXPDR);
+
+ if (intr_status & CNTOVF) {
+ /* missed pkts */
+ dev->stats.rx_missed_errors +=
+ ioread32(ioaddr + TALLY) & 0x7fff;
+
+ /* crc error */
+ dev->stats.rx_crc_errors +=
+ (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+ }
+
+ if (intr_status & (RI | RBU)) {
+ if (intr_status & RI)
+ netdev_rx(dev);
+ else {
+ stop_nic_rx(ioaddr, np->crvalue);
+ reset_rx_descriptors(dev);
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+ }
+ }
+
+ while (np->really_tx_count) {
+ long tx_status = np->cur_tx->status;
+ long tx_control = np->cur_tx->control;
+
+ if (!(tx_control & TXLD)) { /* this pkt is combined by two tx descriptors */
+ struct fealnx_desc *next;
+
+ next = np->cur_tx->next_desc_logical;
+ tx_status = next->status;
+ tx_control = next->control;
+ }
+
+ if (tx_status & TXOWN)
+ break;
+
+ if (!(np->crvalue & CR_W_ENH)) {
+ if (tx_status & (CSL | LC | EC | UDF | HF)) {
+ dev->stats.tx_errors++;
+ if (tx_status & EC)
+ dev->stats.tx_aborted_errors++;
+ if (tx_status & CSL)
+ dev->stats.tx_carrier_errors++;
+ if (tx_status & LC)
+ dev->stats.tx_window_errors++;
+ if (tx_status & UDF)
+ dev->stats.tx_fifo_errors++;
+ if ((tx_status & HF) && np->mii.full_duplex == 0)
+ dev->stats.tx_heartbeat_errors++;
+
+ } else {
+ dev->stats.tx_bytes +=
+ ((tx_control & PKTSMask) >> PKTSShift);
+
+ dev->stats.collisions +=
+ ((tx_status & NCRMask) >> NCRShift);
+ dev->stats.tx_packets++;
+ }
+ } else {
+ dev->stats.tx_bytes +=
+ ((tx_control & PKTSMask) >> PKTSShift);
+ dev->stats.tx_packets++;
+ }
+
+ /* Free the original skb. */
+ dma_unmap_single(&np->pci_dev->dev,
+ np->cur_tx->buffer,
+ np->cur_tx->skbuff->len,
+ DMA_TO_DEVICE);
+ dev_consume_skb_irq(np->cur_tx->skbuff);
+ np->cur_tx->skbuff = NULL;
+ --np->really_tx_count;
+ if (np->cur_tx->control & TXLD) {
+ np->cur_tx = np->cur_tx->next_desc_logical;
+ ++np->free_tx_count;
+ } else {
+ np->cur_tx = np->cur_tx->next_desc_logical;
+ np->cur_tx = np->cur_tx->next_desc_logical;
+ np->free_tx_count += 2;
+ }
+ num_tx++;
+ } /* end of for loop */
+
+ if (num_tx && np->free_tx_count >= 2)
+ netif_wake_queue(dev);
+
+ /* read transmit status for enhanced mode only */
+ if (np->crvalue & CR_W_ENH) {
+ long data;
+
+ data = ioread32(ioaddr + TSR);
+ dev->stats.tx_errors += (data & 0xff000000) >> 24;
+ dev->stats.tx_aborted_errors +=
+ (data & 0xff000000) >> 24;
+ dev->stats.tx_window_errors +=
+ (data & 0x00ff0000) >> 16;
+ dev->stats.collisions += (data & 0x0000ffff);
+ }
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n", dev->name, intr_status);
+ if (!np->reset_timer_armed) {
+ np->reset_timer_armed = 1;
+ np->reset_timer.expires = RUN_AT(HZ/2);
+ add_timer(&np->reset_timer);
+ stop_nic_rxtx(ioaddr, 0);
+ netif_stop_queue(dev);
+ /* or netif_tx_disable(dev); ?? */
+ /* Prevent other paths from enabling tx,rx,intrs */
+ np->crvalue_sv = np->crvalue;
+ np->imrvalue_sv = np->imrvalue;
+ np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
+ np->imrvalue = 0;
+ }
+
+ break;
+ }
+ } while (1);
+
+ /* read the tally counters */
+ /* missed pkts */
+ dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
+
+ /* crc error */
+ dev->stats.rx_crc_errors +=
+ (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+
+ if (debug)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, ioread32(ioaddr + ISR));
+
+ iowrite32(np->imrvalue, ioaddr + IMR);
+
+ spin_unlock(&np->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
+ s32 rx_status = np->cur_rx->status;
+
+ if (np->really_rx_count == 0)
+ break;
+
+ if (debug)
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", rx_status);
+
+ if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
+ (rx_status & ErrorSummary)) {
+ if (rx_status & ErrorSummary) { /* there was a fatal error */
+ if (debug)
+ printk(KERN_DEBUG
+ "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, rx_status);
+
+ dev->stats.rx_errors++; /* end of a packet. */
+ if (rx_status & (LONGPKT | RUNTPKT))
+ dev->stats.rx_length_errors++;
+ if (rx_status & RXER)
+ dev->stats.rx_frame_errors++;
+ if (rx_status & CRC)
+ dev->stats.rx_crc_errors++;
+ } else {
+ int need_to_reset = 0;
+ int desno = 0;
+
+ if (rx_status & RXFSD) { /* this pkt is too long, over one rx buffer */
+ struct fealnx_desc *cur;
+
+ /* check whether this packet has been received completely */
+ cur = np->cur_rx;
+ while (desno <= np->really_rx_count) {
+ ++desno;
+ if ((!(cur->status & RXOWN)) &&
+ (cur->status & RXLSD))
+ break;
+ /* goto next rx descriptor */
+ cur = cur->next_desc_logical;
+ }
+ if (desno > np->really_rx_count)
+ need_to_reset = 1;
+ } else /* RXLSD not found, something is wrong */
+ need_to_reset = 1;
+
+ if (need_to_reset == 0) {
+ int i;
+
+ dev->stats.rx_length_errors++;
+
+ /* free all rx descriptors related to this long pkt */
+ for (i = 0; i < desno; ++i) {
+ if (!np->cur_rx->skbuff) {
+ printk(KERN_DEBUG
+ "%s: I'm scared\n", dev->name);
+ break;
+ }
+ np->cur_rx->status = RXOWN;
+ np->cur_rx = np->cur_rx->next_desc_logical;
+ }
+ continue;
+ } else { /* rx error, need to reset this chip */
+ stop_nic_rx(ioaddr, np->crvalue);
+ reset_rx_descriptors(dev);
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+ }
+ break; /* exit the while loop */
+ }
+ } else { /* this received pkt is ok */
+
+ struct sk_buff *skb;
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;
+
+#ifndef final_version
+ if (debug)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ " status %x.\n", pkt_len, rx_status);
+#endif
+
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak &&
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ np->cur_rx->buffer,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ /* Call copy + cksum if available. */
+
+#if ! defined(__alpha__)
+ skb_copy_to_linear_data(skb,
+ np->cur_rx->skbuff->data, pkt_len);
+ skb_put(skb, pkt_len);
+#else
+ skb_put_data(skb, np->cur_rx->skbuff->data,
+ pkt_len);
+#endif
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ np->cur_rx->buffer,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_single(&np->pci_dev->dev,
+ np->cur_rx->buffer,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ skb_put(skb = np->cur_rx->skbuff, pkt_len);
+ np->cur_rx->skbuff = NULL;
+ --np->really_rx_count;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+
+ np->cur_rx = np->cur_rx->next_desc_logical;
+ } /* end of while loop */
+
+ /* allocate skb for rx buffers */
+ allocate_rx_buffers(dev);
+
+ return 0;
+}
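netdev_rx() derives the payload length from the descriptor status word (the frame-length field under FLNGMASK, minus the 4-byte CRC) and then chooses between two hand-off strategies: frames shorter than rx_copybreak are copied into a freshly allocated skb so the full-size ring buffer can be reused immediately, while larger frames are unmapped and passed up directly, with allocate_rx_buffers() refilling the emptied slot later. A compact restatement of those two steps (helper names hypothetical):

	/* Frame length exactly as netdev_rx() computes it: length field minus CRC. */
	static int fealnx_rx_pkt_len(s32 rx_status)
	{
		return ((rx_status & FLNGMASK) >> FLNGShift) - 4;
	}

	/* Copy-only-tiny-frames policy: small packets are copied, large packets
	 * take the zero-copy path. */
	static bool fealnx_rx_should_copy(int pkt_len)
	{
		return pkt_len < rx_copybreak;
	}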
+
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+
+ /* The chip only needs to report frames it silently dropped. */
+ if (netif_running(dev)) {
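+ /* TALLY[14:0] counts missed frames, TALLY[30:16] counts CRC errors */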
+ dev->stats.rx_missed_errors +=
+ ioread32(ioaddr + TALLY) & 0x7fff;
+ dev->stats.rx_crc_errors +=
+ (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+ }
+
+ return &dev->stats;
+}
+
+
+/* for dev->set_multicast_list */
+static void set_rx_mode(struct net_device *dev)
+{
+ spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
+ unsigned long flags;
+ spin_lock_irqsave(lp, flags);
+ __set_rx_mode(dev);
+ spin_unlock_irqrestore(lp, flags);
+}
+
+
+/* The caller must hold np->lock */
+static void __set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = CR_W_AB | CR_W_AM;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bit;
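+ /* hash on the complemented top six bits of the Ethernet CRC */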
+ bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
+ mc_filter[bit >> 5] |= (1 << bit);
+ }
+ rx_mode = CR_W_AB | CR_W_AM;
+ }
+
+ stop_nic_rxtx(ioaddr, np->crvalue);
+
+ iowrite32(mc_filter[0], ioaddr + MAR0);
+ iowrite32(mc_filter[1], ioaddr + MAR1);
+ np->crvalue &= ~CR_W_RXMODEMASK;
+ np->crvalue |= rx_mode;
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+}
+
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ spin_lock_irq(&np->lock);
+ mii_ethtool_get_link_ksettings(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return 0;
+}
+
+static int netdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii);
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ debug = value;
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
+};
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+
+static int netdev_close(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ int i;
+
+ netif_stop_queue(dev);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite32(0x0000, ioaddr + IMR);
+
+ /* Stop the chip's Tx and Rx processes. */
+ stop_nic_rxtx(ioaddr, 0);
+
+ del_timer_sync(&np->timer);
+ del_timer_sync(&np->reset_timer);
+
+ free_irq(np->pci_dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = np->rx_ring[i].skbuff;
+
+ np->rx_ring[i].status = 0;
+ if (skb) {
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_ring[i].buffer, np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ np->rx_ring[i].skbuff = NULL;
+ }
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct sk_buff *skb = np->tx_ring[i].skbuff;
+
+ if (skb) {
+ dma_unmap_single(&np->pci_dev->dev,
+ np->tx_ring[i].buffer, skb->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ np->tx_ring[i].skbuff = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct pci_device_id fealnx_pci_tbl[] = {
+ {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
+ {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
+ {} /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
+
+
+static struct pci_driver fealnx_driver = {
+ .name = "fealnx",
+ .id_table = fealnx_pci_tbl,
+ .probe = fealnx_init_one,
+ .remove = fealnx_remove_one,
+};
+
+module_pci_driver(fealnx_driver);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index c557dfc50aad..396e555023aa 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -1411,7 +1411,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
tlv->ouisubtype = htonl(ouisubtype);
buf[0] = dcbcfg->pfc.pfccap & 0xF;
- buf[1] = dcbcfg->pfc.pfcena & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index b360bd8f1599..f86e814354a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4331,6 +4331,8 @@ ice_get_module_eeprom(struct net_device *netdev,
* SFP modules only ever use page 0.
*/
if (page == 0 || !(data[0x2] & 0x4)) {
+ u32 copy_len;
+
/* If i2c bus is busy due to slow page change or
* link management access, call can fail. This is normal.
* So we retry this a few times.
@@ -4354,8 +4356,8 @@ ice_get_module_eeprom(struct net_device *netdev,
}
/* Make sure we have enough room for the new block */
- if ((i + SFF_READ_BLOCK_SIZE) < ee->len)
- memcpy(data + i, value, SFF_READ_BLOCK_SIZE);
+ copy_len = min_t(u32, SFF_READ_BLOCK_SIZE, ee->len - i);
+ memcpy(data + i, value, copy_len);
}
}
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 781475480ff2..0f52ea38b6f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2126,7 +2126,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
ice_for_each_rxq(vsi, i)
ice_tx_xsk_pool(vsi, i);
- return ret;
+ return 0;
}
/**
@@ -2693,12 +2693,14 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
return ret;
/* allocate memory for Tx/Rx ring stat pointers */
- if (ice_vsi_alloc_stat_arrays(vsi))
+ ret = ice_vsi_alloc_stat_arrays(vsi);
+ if (ret)
goto unroll_vsi_alloc;
ice_alloc_fd_res(vsi);
- if (ice_vsi_get_qs(vsi)) {
+ ret = ice_vsi_get_qs(vsi);
+ if (ret) {
dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
vsi->idx);
goto unroll_vsi_alloc_stat;
@@ -2811,6 +2813,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
break;
default:
/* clean up the resources and exit */
+ ret = -EINVAL;
goto unroll_vsi_init;
}
@@ -3508,10 +3511,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (vsi_flags & ICE_VSI_FLAG_INIT) {
ret = -EIO;
goto err_vsi_cfg_tc_lan;
- } else {
- kfree(coalesce);
- return ice_schedule_reset(pf, ICE_RESET_PFR);
}
+
+ kfree(coalesce);
+ return ice_schedule_reset(pf, ICE_RESET_PFR);
}
ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
@@ -3759,7 +3762,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
dev = ice_pf_to_dev(pf);
if (vsi->tc_cfg.ena_tc == ena_tc &&
vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
- return ret;
+ return 0;
ice_for_each_traffic_class(i) {
/* build bitmap of enabled TCs */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 6b48cbc049c6..76f29a5bf8d7 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -1455,8 +1455,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
if (match.mask->vlan_priority) {
fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
headers->vlan_hdr.vlan_prio =
- cpu_to_be16((match.key->vlan_priority <<
- VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+ be16_encode_bits(match.key->vlan_priority,
+ VLAN_PRIO_MASK);
}
if (match.mask->vlan_tpid)
@@ -1489,8 +1489,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
if (match.mask->vlan_priority) {
fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
headers->cvlan_hdr.vlan_prio =
- cpu_to_be16((match.key->vlan_priority <<
- VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+ be16_encode_bits(match.key->vlan_priority,
+ VLAN_PRIO_MASK);
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 389663a13d1d..ef721caeac49 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -884,6 +884,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
int rvu_cpt_init(struct rvu *rvu);
+#define NDC_AF_BANK_MASK GENMASK_ULL(7, 0)
+#define NDC_AF_BANK_LINE_MASK GENMASK_ULL(31, 16)
+
/* CN10K RVU */
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);
@@ -902,6 +905,8 @@ static inline void rvu_dbg_init(struct rvu *rvu) {}
static inline void rvu_dbg_exit(struct rvu *rvu) {}
#endif
+int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
+
/* RVU Switch */
void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index fa280ebd3052..26cfa501f1a1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -198,9 +198,6 @@ enum cpt_eng_type {
CPT_IE_TYPE = 3,
};
-#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
- blk_addr, NDC_AF_CONST) & 0xFF)
-
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL
@@ -1448,6 +1445,7 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
struct nix_hw *nix_hw;
struct rvu *rvu;
int bank, max_bank;
+ u64 ndc_af_const;
if (blk_addr == BLKADDR_NDC_NPA0) {
rvu = s->private;
@@ -1456,7 +1454,8 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
rvu = nix_hw->rvu;
}
- max_bank = NDC_MAX_BANK(rvu, blk_addr);
+ ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
+ max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
for (bank = 0; bank < max_bank; bank++) {
seq_printf(s, "BANK:%d\n", bank);
seq_printf(s, "\tHits:\t%lld\n",
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 26e639e57dae..4ad707e758b9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -790,6 +790,7 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
struct nix_aq_res_s *result;
int timeout = 1000;
u64 reg, head;
+ int ret;
result = (struct nix_aq_res_s *)aq->res->base;
@@ -813,9 +814,22 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return -EBUSY;
}
- if (result->compcode != NIX_AQ_COMP_GOOD)
+ if (result->compcode != NIX_AQ_COMP_GOOD) {
/* TODO: Replace this with some error code */
+ if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
+ result->compcode == NIX_AQ_COMP_LOCKERR ||
+ result->compcode == NIX_AQ_COMP_CTX_POISON) {
+ ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
+ if (ret)
+ dev_err(rvu->dev,
+ "%s: Not able to unlock cachelines\n", __func__);
+ }
+
return -EBUSY;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 70bd036ed76e..4f5ca5ab13a4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -4,7 +4,7 @@
* Copyright (C) 2018 Marvell.
*
*/
-
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -42,9 +42,18 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return -EBUSY;
}
- if (result->compcode != NPA_AQ_COMP_GOOD)
+ if (result->compcode != NPA_AQ_COMP_GOOD) {
/* TODO: Replace this with some error code */
+ if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
+ result->compcode == NPA_AQ_COMP_LOCKERR ||
+ result->compcode == NPA_AQ_COMP_CTX_POISON) {
+ if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
+ dev_err(rvu->dev,
+ "%s: Not able to unlock cachelines\n", __func__);
+ }
+
return -EBUSY;
+ }
return 0;
}
@@ -545,3 +554,48 @@ void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
npa_ctx_free(rvu, pfvf);
}
+
+/* Due to a hardware erratum, in some corner cases, AQ context lock
+ * operations can result in an NDC way getting into an illegal state
+ * of being locked while not valid.
+ *
+ * This API solves the problem by clearing the lock bit of the NDC block.
+ * The operation needs to be done for each line of all the NDC banks.
+ */
+int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
+{
+ int bank, max_bank, line, max_line, err;
+ u64 reg, ndc_af_const;
+
+ /* Set the ENABLE bit(63) to '0' */
+ reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
+ rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));
+
+ /* Poll until the BUSY bits(47:32) are set to '0' */
+ err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true);
+ if (err) {
+ dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n");
+ return err;
+ }
+
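+ /* NDC_AF_CONST[7:0] gives the bank count, [31:16] the lines per bank */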
+ ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
+ max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
+ max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
+ for (bank = 0; bank < max_bank; bank++) {
+ for (line = 0; line < max_line; line++) {
+ /* If 'cache line valid bit(63)' is not set but
+ * 'cache line lock bit(60)' is set, clear the
+ * lock bit(60).
+ */
+ reg = rvu_read64(rvu, blkaddr,
+ NDC_AF_BANKX_LINEX_METADATA(bank, line));
+ if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
+ rvu_write64(rvu, blkaddr,
+ NDC_AF_BANKX_LINEX_METADATA(bank, line),
+ reg & ~BIT_ULL(60));
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 1729b22580ce..7007f0b8e659 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -694,6 +694,7 @@
#define NDC_AF_INTR_ENA_W1S (0x00068)
#define NDC_AF_INTR_ENA_W1C (0x00070)
#define NDC_AF_ACTIVE_PC (0x00078)
+#define NDC_AF_CAMS_RD_INTERVAL (0x00080)
#define NDC_AF_BP_TEST_ENABLE (0x001F8)
#define NDC_AF_BP_TEST(a) (0x00200 | (a) << 3)
#define NDC_AF_BLK_RST (0x002F0)
@@ -709,6 +710,8 @@
(0x00F00 | (a) << 5 | (b) << 4)
#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
+#define NDC_AF_BANKX_LINEX_METADATA(a, b) \
+ (0x10000 | (a) << 12 | (b) << 3)
/* LBK */
#define LBK_CONST (0x10ull)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b22cd160554e..52aa71f0c499 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -591,7 +591,8 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur;
mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
- MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
+ MAC_MCR_RX_FIFO_CLR_DIS;
/* Only update control register when needed! */
if (mcr_new != mcr_cur)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index afc9d52e79bf..b65de174c3d9 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -397,6 +397,7 @@
#define MAC_MCR_FORCE_MODE BIT(15)
#define MAC_MCR_TX_EN BIT(14)
#define MAC_MCR_RX_EN BIT(13)
+#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12)
#define MAC_MCR_BACKOFF_EN BIT(9)
#define MAC_MCR_BACKPR_EN BIT(8)
#define MAC_MCR_FORCE_RX_FC BIT(5)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
index a9aec900d608..7d66fe75cd3b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
@@ -194,7 +194,7 @@ int lan966x_police_port_del(struct lan966x_port *port,
return -EINVAL;
}
- err = lan966x_police_del(port, port->tc.police_id);
+ err = lan966x_police_del(port, POL_IDX_PORT + port->chip_port);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to add policer to port");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
index 871a3e62f852..2d763664dcda 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
@@ -249,6 +249,21 @@ static int sparx5_dcb_ieee_dscp_setdel(struct net_device *dev,
return 0;
}
+static int sparx5_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
+{
+ int err;
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_delapp);
+ else
+ err = dcb_ieee_delapp(dev, app);
+
+ if (err < 0)
+ return err;
+
+ return sparx5_dcb_app_update(dev);
+}
+
static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
struct dcb_app app_itr;
@@ -264,7 +279,7 @@ static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
if (prio) {
app_itr = *app;
app_itr.priority = prio;
- dcb_ieee_delapp(dev, &app_itr);
+ sparx5_dcb_ieee_delapp(dev, &app_itr);
}
if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
@@ -281,21 +296,6 @@ out:
return err;
}
-static int sparx5_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
-{
- int err;
-
- if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
- err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_delapp);
- else
- err = dcb_ieee_delapp(dev, app);
-
- if (err < 0)
- return err;
-
- return sparx5_dcb_app_update(dev);
-}
-
static int sparx5_dcb_setapptrust(struct net_device *dev, u8 *selectors,
int nselectors)
{
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 59fb0583cc08..0cc026b0aefd 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -324,14 +324,15 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
/* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
nfp_nfd3_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
- nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
+ if (ipsec)
+ nfp_nfd3_ipsec_tx(txd, skb);
+ else
+ nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
txd->flags |= NFD3_DESC_TX_VLAN;
txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
}
- if (ipsec)
- nfp_nfd3_ipsec_tx(txd, skb);
/* Gather DMA */
if (nr_frags > 0) {
__le64 second_half;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c b/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
index e90f8c975903..51087693072c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
@@ -10,9 +10,30 @@
void nfp_nfd3_ipsec_tx(struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb)
{
struct xfrm_state *x = xfrm_input_state(skb);
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct iphdr *iph = ip_hdr(skb);
+ int l4_proto;
if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)) {
- txd->flags |= NFD3_DESC_TX_CSUM | NFD3_DESC_TX_IP4_CSUM |
- NFD3_DESC_TX_TCP_CSUM | NFD3_DESC_TX_UDP_CSUM;
+ txd->flags |= NFD3_DESC_TX_CSUM;
+
+ if (iph->version == 4)
+ txd->flags |= NFD3_DESC_TX_IP4_CSUM;
+
+ if (x->props.mode == XFRM_MODE_TRANSPORT)
+ l4_proto = xo->proto;
+ else if (x->props.mode == XFRM_MODE_TUNNEL)
+ l4_proto = xo->inner_ipproto;
+ else
+ return;
+
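+ /* request the checksum offload flag matching the L4 protocol */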
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ txd->flags |= NFD3_DESC_TX_UDP_CSUM;
+ return;
+ case IPPROTO_TCP:
+ txd->flags |= NFD3_DESC_TX_TCP_CSUM;
+ return;
+ }
}
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index d60c0e991a91..33b6d74adb4b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -387,7 +387,8 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
if (!skb_is_gso(skb)) {
real_len = skb->len;
/* Metadata desc */
- metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata);
+ if (!ipsec)
+ metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata);
txd->raw = cpu_to_le64(metadata);
txd++;
} else {
@@ -395,7 +396,8 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
(txd + 1)->raw = nfp_nfdk_tx_tso(r_vec, txbuf, skb);
real_len = txbuf->real_len;
/* Metadata desc */
- metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata);
+ if (!ipsec)
+ metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata);
txd->raw = cpu_to_le64(metadata);
txd += 2;
txbuf++;
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c b/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
index 58d8f59eb885..cec199f4c852 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
@@ -9,9 +9,13 @@
u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb)
{
struct xfrm_state *x = xfrm_input_state(skb);
+ struct iphdr *iph = ip_hdr(skb);
- if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM))
- flags |= NFDK_DESC_TX_L3_CSUM | NFDK_DESC_TX_L4_CSUM;
+ if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)) {
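+ /* IPv6 has no header checksum, so L3 csum is only requested for IPv4 */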
+ if (iph->version == 4)
+ flags |= NFDK_DESC_TX_L3_CSUM;
+ flags |= NFDK_DESC_TX_L4_CSUM;
+ }
return flags;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 81b7ca0ad222..62f0bf91d1e1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -38,6 +38,7 @@
#include <net/tls.h>
#include <net/vxlan.h>
#include <net/xdp_sock_drv.h>
+#include <net/xfrm.h>
#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nsp.h"
@@ -1897,6 +1898,9 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
features &= ~NETIF_F_GSO_MASK;
}
+ if (xfrm_offload(skb))
+ return features;
+
/* VXLAN/GRE check */
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e4902a7bb61e..8f543c3ab5c5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1170,6 +1170,7 @@ static int stmmac_init_phy(struct net_device *dev)
phylink_ethtool_get_wol(priv->phylink, &wol);
device_set_wakeup_capable(priv->device, !!wol.supported);
+ device_set_wakeup_enable(priv->device, !!wol.wolopts);
}
return ret;
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index e1a569b99e4a..0b0c6c0764fe 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -1913,6 +1913,8 @@ static int ca8210_skb_tx(
* packet
*/
mac_len = ieee802154_hdr_peek_addrs(skb, &header);
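+ /* a negative return means the 802.15.4 header could not be parsed */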
+ if (mac_len < 0)
+ return mac_len;
secspec.security_level = header.sec.level;
secspec.key_id_mode = header.sec.key_id_mode;
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index ccecee2524ce..0b88635f4fbc 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -342,6 +342,37 @@ static int lan88xx_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static void lan88xx_link_change_notify(struct phy_device *phydev)
+{
+ int temp;
+
+ /* In forced 100 F/H mode the chip may fail to set the mode correctly
+ * when the cable is switched between a long (~50+ m) and a short one.
+ * As a workaround, set the speed to 10 before setting it to 100
+ * while in forced 100 F/H mode.
+ */
+ if (!phydev->autoneg && phydev->speed == 100) {
+ /* disable phy interrupt */
+ temp = phy_read(phydev, LAN88XX_INT_MASK);
+ temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+ phy_write(phydev, LAN88XX_INT_MASK, temp);
+
+ temp = phy_read(phydev, MII_BMCR);
+ temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+ phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
+ temp |= BMCR_SPEED100;
+ phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
+
+ /* clear any interrupt generated while applying the workaround */
+ temp = phy_read(phydev, LAN88XX_INT_STS);
+
+ /* enable phy interrupt back */
+ temp = phy_read(phydev, LAN88XX_INT_MASK);
+ temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+ phy_write(phydev, LAN88XX_INT_MASK, temp);
+ }
+}
+
static struct phy_driver microchip_phy_driver[] = {
{
.phy_id = 0x0007c132,
@@ -359,6 +390,7 @@ static struct phy_driver microchip_phy_driver[] = {
.config_init = lan88xx_config_init,
.config_aneg = lan88xx_config_aneg,
+ .link_change_notify = lan88xx_link_change_notify,
.config_intr = lan88xx_phy_config_intr,
.handle_interrupt = lan88xx_handle_interrupt,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 3f8a64fb9d71..1785f1cead97 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -3098,8 +3098,6 @@ static int phy_probe(struct device *dev)
if (phydrv->flags & PHY_IS_INTERNAL)
phydev->is_internal = true;
- mutex_lock(&phydev->lock);
-
/* Deassert the reset signal */
phy_device_reset(phydev, 0);
@@ -3146,7 +3144,7 @@ static int phy_probe(struct device *dev)
*/
err = genphy_c45_read_eee_adv(phydev, phydev->advertising_eee);
if (err)
- return err;
+ goto out;
/* There is no "enabled" flag. If PHY is advertising, assume it is
* kind of enabled.
@@ -3188,12 +3186,10 @@ static int phy_probe(struct device *dev)
phydev->state = PHY_READY;
out:
- /* Assert the reset signal */
+ /* Re-assert the reset signal on error */
if (err)
phy_device_reset(phydev, 1);
- mutex_unlock(&phydev->lock);
-
return err;
}
@@ -3203,9 +3199,7 @@ static int phy_remove(struct device *dev)
cancel_delayed_work_sync(&phydev->state_queue);
- mutex_lock(&phydev->lock);
phydev->state = PHY_DOWN;
- mutex_unlock(&phydev->lock);
sfp_bus_del_upstream(phydev->sfp_bus);
phydev->sfp_bus = NULL;
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index af89f3ef1c4f..feb6ac3939ac 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -44,7 +44,6 @@ static struct smsc_hw_stat smsc_hw_stats[] = {
};
struct smsc_phy_priv {
- u16 intmask;
bool energy_enable;
};
@@ -57,7 +56,6 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_intr(struct phy_device *phydev)
{
- struct smsc_phy_priv *priv = phydev->priv;
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
@@ -65,14 +63,9 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
if (rc)
return rc;
- priv->intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6;
- if (priv->energy_enable)
- priv->intmask |= MII_LAN83C185_ISF_INT7;
-
- rc = phy_write(phydev, MII_LAN83C185_IM, priv->intmask);
+ rc = phy_write(phydev, MII_LAN83C185_IM,
+ MII_LAN83C185_ISF_INT_PHYLIB_EVENTS);
} else {
- priv->intmask = 0;
-
rc = phy_write(phydev, MII_LAN83C185_IM, 0);
if (rc)
return rc;
@@ -85,7 +78,6 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
{
- struct smsc_phy_priv *priv = phydev->priv;
int irq_status;
irq_status = phy_read(phydev, MII_LAN83C185_ISF);
@@ -96,7 +88,7 @@ static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
return IRQ_NONE;
}
- if (!(irq_status & priv->intmask))
+ if (!(irq_status & MII_LAN83C185_ISF_INT_PHYLIB_EVENTS))
return IRQ_NONE;
phy_trigger_machine(phydev);
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c89639381eca..cd4083e0b3b9 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -665,6 +665,11 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
+ /* Telit FE990 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ },
+
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f18ab8e220db..068488890d57 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2115,33 +2115,8 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
static void lan78xx_link_status_change(struct net_device *net)
{
struct phy_device *phydev = net->phydev;
- int temp;
-
- /* At forced 100 F/H mode, chip may fail to set mode correctly
- * when cable is switched between long(~50+m) and short one.
- * As workaround, set to 10 before setting to 100
- * at forced 100 F/H mode.
- */
- if (!phydev->autoneg && (phydev->speed == 100)) {
- /* disable phy interrupt */
- temp = phy_read(phydev, LAN88XX_INT_MASK);
- temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
- phy_write(phydev, LAN88XX_INT_MASK, temp);
- temp = phy_read(phydev, MII_BMCR);
- temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
- phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
- temp |= BMCR_SPEED100;
- phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
-
- /* clear pending interrupt generated while workaround */
- temp = phy_read(phydev, LAN88XX_INT_STS);
-
- /* enable phy interrupt back */
- temp = phy_read(phydev, LAN88XX_INT_MASK);
- temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
- phy_write(phydev, LAN88XX_INT_MASK, temp);
- }
+ phy_print_status(phydev);
}
static int irq_map(struct irq_domain *d, unsigned int irq,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index a808d718c012..571e37e67f9c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1364,6 +1364,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 583adb37ee1e..125284b346a7 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -106,7 +106,7 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
unsigned int cpu = *stored_cpu, cpu_index, i;
- if (unlikely(cpu == nr_cpumask_bits ||
+ if (unlikely(cpu >= nr_cpu_ids ||
!cpumask_test_cpu(cpu, cpu_online_mask))) {
cpu_index = id % cpumask_weight(cpu_online_mask);
cpu = cpumask_first(cpu_online_mask);
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 2d53e0f88d2f..1e0f2297f9c6 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -247,6 +247,9 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
len, sizeof(**fw_vsc_cfg),
GFP_KERNEL);
+ if (!*fw_vsc_cfg)
+ goto alloc_err;
+
r = device_property_read_u8_array(dev, FDP_DP_FW_VSC_CFG_NAME,
*fw_vsc_cfg, len);
@@ -260,6 +263,7 @@ vsc_read_err:
*fw_vsc_cfg = NULL;
}
+alloc_err:
dev_dbg(dev, "Clock type: %d, clock frequency: %d, VSC: %s",
*clock_type, *clock_freq, *fw_vsc_cfg != NULL ? "yes" : "no");
}
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 901c59145811..ea16a0aba679 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -256,7 +256,7 @@ select_kpp:
chap->qid, ret, gid_name);
chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
chap->dh_tfm = NULL;
- return -ret;
+ return ret;
}
dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
chap->qid, gid_name);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8698410aeb84..c2730b116dc6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -38,6 +38,7 @@ struct nvme_ns_info {
bool is_shared;
bool is_readonly;
bool is_ready;
+ bool is_removed;
};
unsigned int admin_timeout = 60;
@@ -1402,16 +1403,8 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
if (error) {
dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
- goto out_free_id;
+ kfree(*id);
}
-
- error = NVME_SC_INVALID_NS | NVME_SC_DNR;
- if ((*id)->ncap == 0) /* namespace not allocated or attached */
- goto out_free_id;
- return 0;
-
-out_free_id:
- kfree(*id);
return error;
}
@@ -1425,6 +1418,13 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
ret = nvme_identify_ns(ctrl, info->nsid, &id);
if (ret)
return ret;
+
+ if (id->ncap == 0) {
+ /* namespace not allocated or attached */
+ info->is_removed = true;
+ return -ENODEV;
+ }
+
info->anagrpid = id->anagrpid;
info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
@@ -3104,7 +3104,7 @@ static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
* Rather than blindly freezing the IO queues for this effect that
* doesn't even apply to IO, mask it off.
*/
- log->acs[nvme_admin_security_recv] &= ~NVME_CMD_EFFECTS_CSE_MASK;
+ log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK);
log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
@@ -4429,6 +4429,7 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns_info info = { .nsid = nsid };
struct nvme_ns *ns;
+ int ret;
if (nvme_identify_ns_descs(ctrl, &info))
return;
@@ -4445,19 +4446,19 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
* set up a namespace. If not fall back to the legacy version.
*/
if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
- (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) {
- if (nvme_ns_info_from_id_cs_indep(ctrl, &info))
- return;
- } else {
- if (nvme_ns_info_from_identify(ctrl, &info))
- return;
- }
+ (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS))
+ ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
+ else
+ ret = nvme_ns_info_from_identify(ctrl, &info);
+
+ if (info.is_removed)
+ nvme_ns_remove_by_nsid(ctrl, nsid);
/*
* Ignore the namespace if it is not ready. We will get an AEN once it
* becomes ready and restart the scan.
*/
- if (!info.is_ready)
+ if (ret || !info.is_ready)
return;
ns = nvme_find_get_ns(ctrl, nsid);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a6e22116e139..dcac3df8a5f7 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -189,7 +189,8 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl)
{
- if (!ctrl->subsys)
+ if (!ctrl->subsys ||
+ !strcmp(ctrl->opts->subsysnqn, NVME_DISC_SUBSYS_NAME))
return ctrl->opts->subsysnqn;
return ctrl->subsys->subnqn;
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 1955c0ec209e..7723a4989524 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2492,6 +2492,10 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
len = nvmf_get_address(ctrl, buf, size);
+ mutex_lock(&queue->queue_lock);
+
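+ /* the queue and its socket can be torn down concurrently; only touch them while live */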
+ if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ goto done;
ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
if (ret > 0) {
if (len > 0)
@@ -2499,6 +2503,8 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
(len) ? "," : "", &src_addr);
}
+done:
+ mutex_unlock(&queue->queue_lock);
return len;
}
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index b8009aa11f3c..be679aa5db64 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -163,11 +163,11 @@ EXPORT_SYMBOL_GPL(pci_msix_alloc_irq_at);
/**
* pci_msix_free_irq - Free an interrupt on a PCI/MSIX interrupt domain
- * which was allocated via pci_msix_alloc_irq_at()
*
* @dev: The PCI device to operate on
* @map: A struct msi_map describing the interrupt to free
- * as returned from the allocation function.
+ *
+ * Undo an interrupt vector allocation. Does not disable MSI-X.
*/
void pci_msix_free_irq(struct pci_dev *dev, struct msi_map map)
{
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index c9116d9e4b57..70cb50fd41c2 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -436,11 +436,8 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
bSoftware = config >> 63;
raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
if (bSoftware) {
- if (raw_config_val < SBI_PMU_FW_MAX)
- ret = (raw_config_val & 0xFFFF) |
- (SBI_PMU_EVENT_TYPE_FW << 16);
- else
- return -EINVAL;
+ ret = (raw_config_val & 0xFFFF) |
+ (SBI_PMU_EVENT_TYPE_FW << 16);
} else {
ret = RISCV_PMU_RAW_EVENT_IDX;
*econfig = raw_config_val;
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index 09c7829e95c4..382793e73a60 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -16,17 +16,17 @@ if MELLANOX_PLATFORM
config MLXREG_HOTPLUG
tristate "Mellanox platform hotplug driver support"
- depends on REGMAP
depends on HWMON
depends on I2C
+ select REGMAP
help
This driver handles hot-plug events for the power suppliers, power
cables and fans on the wide range Mellanox IB and Ethernet systems.
config MLXREG_IO
tristate "Mellanox platform register access driver support"
- depends on REGMAP
depends on HWMON
+ select REGMAP
help
This driver allows access to Mellanox programmable device register
space through sysfs interface. The sets of registers for sysfs access
@@ -36,9 +36,9 @@ config MLXREG_IO
config MLXREG_LC
tristate "Mellanox line card platform driver support"
- depends on REGMAP
depends on HWMON
depends on I2C
+ select REGMAP
help
This driver provides support for the Mellanox MSN4800-XX line cards,
which are the part of MSN4800 Ethernet modular switch systems
@@ -80,10 +80,9 @@ config MLXBF_PMC
config NVSW_SN2201
tristate "Nvidia SN2201 platform driver support"
- depends on REGMAP
depends on HWMON
depends on I2C
- depends on REGMAP_I2C
+ select REGMAP_I2C
help
This driver provides support for the Nvidia SN2201 platform.
The SN2201 is a highly integrated for one rack unit system with
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index ec7c2b4e1721..4a01b315e0a9 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -955,7 +955,8 @@ config SERIAL_MULTI_INSTANTIATE
config MLX_PLATFORM
tristate "Mellanox Technologies platform support"
- depends on I2C && REGMAP
+ depends on I2C
+ select REGMAP
help
This option enables system support for the Mellanox Technologies
platform. The Mellanox systems provide data center networking
diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
index ab05b9ee6655..2edaae04a691 100644
--- a/drivers/platform/x86/amd/pmc.c
+++ b/drivers/platform/x86/amd/pmc.c
@@ -171,9 +171,7 @@ MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs");
static struct amd_pmc_dev pmc;
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
-#ifdef CONFIG_SUSPEND
static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
-#endif
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
@@ -386,7 +384,6 @@ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table
return 0;
}
-#ifdef CONFIG_SUSPEND
static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
{
struct smu_metrics table;
@@ -400,7 +397,6 @@ static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
dev_dbg(pdev->dev, "Last suspend in deepest state for %lluus\n",
table.timein_s0i3_lastcapture);
}
-#endif
static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
{
@@ -673,7 +669,6 @@ out_unlock:
return rc;
}
-#ifdef CONFIG_SUSPEND
static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
{
switch (dev->cpu_id) {
@@ -861,9 +856,7 @@ static int __maybe_unused amd_pmc_suspend_handler(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
-
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
@@ -905,7 +898,6 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
return 0;
}
-#ifdef CONFIG_SUSPEND
static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
{
int err;
@@ -926,7 +918,6 @@ static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
return 0;
}
-#endif
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
{
@@ -1017,11 +1008,11 @@ static int amd_pmc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, dev);
-#ifdef CONFIG_SUSPEND
- err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
- if (err)
- dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
-#endif
+ if (IS_ENABLED(CONFIG_SUSPEND)) {
+ err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
+ if (err)
+ dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
+ }
amd_pmc_dbgfs_register(dev);
return 0;
@@ -1035,9 +1026,8 @@ static int amd_pmc_remove(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
-#ifdef CONFIG_SUSPEND
- acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
-#endif
+ if (IS_ENABLED(CONFIG_SUSPEND))
+ acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
amd_pmc_dbgfs_unregister(dev);
pci_dev_put(dev->rdev);
mutex_destroy(&dev->lock);
@@ -1061,9 +1051,7 @@ static struct platform_driver amd_pmc_driver = {
.name = "amd_pmc",
.acpi_match_table = amd_pmc_acpi_ids,
.dev_groups = pmc_groups,
-#ifdef CONFIG_SUSPEND
- .pm = &amd_pmc_pm,
-#endif
+ .pm = pm_sleep_ptr(&amd_pmc_pm),
},
.probe = amd_pmc_probe,
.remove = amd_pmc_remove,
diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c
index d547c9d09725..2750dee99c3e 100644
--- a/drivers/platform/x86/dell/dell-wmi-ddv.c
+++ b/drivers/platform/x86/dell/dell-wmi-ddv.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/hwmon.h>
#include <linux/kstrtox.h>
-#include <linux/math.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -96,6 +95,7 @@ struct combined_chip_info {
};
struct dell_wmi_ddv_sensors {
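+ /* true while the hwmon sensor cache below is initialized and usable */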
+ bool active;
struct mutex lock; /* protect caching */
unsigned long timestamp;
union acpi_object *obj;
@@ -520,6 +520,9 @@ static struct hwmon_channel_info *dell_wmi_ddv_channel_create(struct device *dev
static void dell_wmi_ddv_hwmon_cache_invalidate(struct dell_wmi_ddv_sensors *sensors)
{
+ if (!sensors->active)
+ return;
+
mutex_lock(&sensors->lock);
kfree(sensors->obj);
sensors->obj = NULL;
@@ -530,6 +533,7 @@ static void dell_wmi_ddv_hwmon_cache_destroy(void *data)
{
struct dell_wmi_ddv_sensors *sensors = data;
+ sensors->active = false;
mutex_destroy(&sensors->lock);
kfree(sensors->obj);
}
@@ -549,6 +553,7 @@ static struct hwmon_channel_info *dell_wmi_ddv_channel_init(struct wmi_device *w
return ERR_PTR(ret);
mutex_init(&sensors->lock);
+ sensors->active = true;
ret = devm_add_action_or_reset(&wdev->dev, dell_wmi_ddv_hwmon_cache_destroy, sensors);
if (ret < 0)
@@ -659,7 +664,8 @@ static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char
if (ret < 0)
return ret;
- return sysfs_emit(buf, "%d\n", DIV_ROUND_CLOSEST(value, 10));
+ /* Use 2731 instead of 2731.5 to avoid unnecessary rounding */
+ return sysfs_emit(buf, "%d\n", value - 2731);
}
static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -852,7 +858,7 @@ static int dell_wmi_ddv_resume(struct device *dev)
{
struct dell_wmi_ddv_data *data = dev_get_drvdata(dev);
- /* Force re-reading of all sensors */
+ /* Force re-reading of all active sensors */
dell_wmi_ddv_hwmon_cache_invalidate(&data->fans);
dell_wmi_ddv_hwmon_cache_invalidate(&data->temps);
diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
index 309eab9c0558..322237e056f3 100644
--- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c
+++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
@@ -159,9 +159,10 @@ static const struct int3472_tps68470_board_data surface_go_tps68470_board_data =
static const struct int3472_tps68470_board_data surface_go3_tps68470_board_data = {
.dev_name = "i2c-INT3472:01",
.tps68470_regulator_pdata = &surface_go_tps68470_pdata,
- .n_gpiod_lookups = 1,
+ .n_gpiod_lookups = 2,
.tps68470_gpio_lookup_tables = {
- &surface_go_int347a_gpios
+ &surface_go_int347a_gpios,
+ &surface_go_int347e_gpios,
},
};
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index a7e02b24a87a..0954a04623ed 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -47,7 +47,7 @@ struct isst_cmd_set_req_type {
static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
{0xD0, 0x00, 0x03},
- {0x7F, 0x00, 0x0B},
+ {0x7F, 0x00, 0x0C},
{0x7F, 0x10, 0x12},
{0x7F, 0x20, 0x23},
{0x94, 0x03, 0x03},
@@ -112,6 +112,7 @@ static void isst_delete_hash(void)
* isst_store_cmd() - Store command to a hash table
* @cmd: Mailbox command.
* @sub_cmd: Mailbox sub-command or MSR id.
+ * @cpu: Target CPU for the command
* @mbox_cmd_type: Mailbox or MSR command.
* @param: Mailbox parameter.
* @data: Mailbox request data or MSR data.
@@ -363,7 +364,7 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
/**
* isst_if_get_pci_dev() - Get the PCI device instance for a CPU
* @cpu: Logical CPU number.
- * @bus_number: The bus number assigned by the hardware.
+ * @bus_no: The bus number assigned by the hardware.
* @dev: The device number assigned by the hardware.
* @fn: The function number assigned by the hardware.
*
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
index fdecdae248d7..35ff506b402e 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
@@ -40,6 +40,7 @@
* @offset: Offset to the first valid member in command structure.
* This will be the offset of the start of the command
* after command count field
+ * @owner: Registered module owner
* @cmd_callback: Callback function to handle IOCTL. The callback has the
* command pointer with data for command. There is a pointer
* called write_only, which when set, will not copy the
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
index c60733261c89..c999732b0f1e 100644
--- a/drivers/platform/x86/intel/tpmi.c
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -209,14 +209,14 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
if (!name)
return -EOPNOTSUPP;
- feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL);
- if (!feature_vsec_dev)
+ res = kcalloc(pfs->pfs_header.num_entries, sizeof(*res), GFP_KERNEL);
+ if (!res)
return -ENOMEM;
- res = kcalloc(pfs->pfs_header.num_entries, sizeof(*res), GFP_KERNEL);
- if (!res) {
+ feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL);
+ if (!feature_vsec_dev) {
ret = -ENOMEM;
- goto free_vsec;
+ goto free_res;
}
snprintf(feature_id_name, sizeof(feature_id_name), "tpmi-%s", name);
@@ -239,6 +239,8 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
/*
* intel_vsec_add_aux() is resource managed, no explicit
* delete is required on error or on module unload.
+ * feature_vsec_dev memory is also freed as part of device
+ * deletion.
*/
ret = intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev,
feature_vsec_dev, feature_id_name);
@@ -249,8 +251,6 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
free_res:
kfree(res);
-free_vsec:
- kfree(feature_vsec_dev);
return ret;
}
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 7b6779cdb134..67367f010139 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -5980,7 +5980,7 @@ MODULE_DEVICE_TABLE(dmi, mlxplat_dmi_table);
static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
{
struct i2c_adapter *search_adap;
- int shift, i;
+ int i, shift = 0;
/* Scan adapters from expected id to verify it is free. */
*nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR;
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index cc5b2e22b42a..f3d7c1da299f 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -1207,13 +1207,13 @@ __power_supply_register(struct device *parent,
struct power_supply *psy;
int rc;
+ if (!desc || !desc->name || !desc->properties || !desc->num_properties)
+ return ERR_PTR(-EINVAL);
+
if (!parent)
pr_warn("%s: Expected proper parent device for '%s'\n",
__func__, desc->name);
- if (!desc || !desc->name || !desc->properties || !desc->num_properties)
- return ERR_PTR(-EINVAL);
-
if (psy_has_property(desc, POWER_SUPPLY_PROP_USB_TYPE) &&
(!desc->usb_types || !desc->num_usb_types))
return ERR_PTR(-EINVAL);
diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
index ec31f887184f..de77df97b3a4 100644
--- a/drivers/power/supply/qcom_battmgr.c
+++ b/drivers/power/supply/qcom_battmgr.c
@@ -1126,8 +1126,7 @@ static void qcom_battmgr_sm8350_callback(struct qcom_battmgr *battmgr,
battmgr->info.charge_type = le32_to_cpu(resp->intval.value);
break;
case BATT_CAPACITY:
- battmgr->status.percent = le32_to_cpu(resp->intval.value);
- do_div(battmgr->status.percent, 100);
+ battmgr->status.percent = le32_to_cpu(resp->intval.value) / 100;
break;
case BATT_VOLT_OCV:
battmgr->status.voltage_ocv = le32_to_cpu(resp->intval.value);
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index bc6adda58883..a27673706c3d 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -143,6 +143,8 @@ static const struct x86_cpu_id pl4_support_ids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_N, X86_FEATURE_ANY },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE, X86_FEATURE_ANY },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE_P, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_METEORLAKE, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_METEORLAKE_L, X86_FEATURE_ANY },
{}
};
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index e180dee0f83d..52c32dcbf7d8 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -679,4 +679,3 @@ fs_initcall(powercap_init);
MODULE_DESCRIPTION("PowerCap sysfs Driver");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index ad37bc46f272..507ff0d5f7bd 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -3,6 +3,7 @@
* Copyright (C) ST-Ericsson SA 2010
*
* Author: Arun R Murthy <arun.murthy@stericsson.com>
+ * Datasheet: https://web.archive.org/web/20130614115108/http://www.stericsson.com/developers/CD00291561_UM1031_AB8500_user_manual-rev5_CTDS_public.pdf
*/
#include <linux/err.h>
#include <linux/platform_device.h>
@@ -20,6 +21,8 @@
#define AB8500_PWM_OUT_CTRL2_REG 0x61
#define AB8500_PWM_OUT_CTRL7_REG 0x66
+#define AB8500_PWM_CLKRATE 9600000
+
struct ab8500_pwm_chip {
struct pwm_chip chip;
unsigned int hwid;
@@ -35,13 +38,60 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
{
int ret;
u8 reg;
- unsigned int higher_val, lower_val;
+ u8 higher_val, lower_val;
+ unsigned int duty_steps, div;
struct ab8500_pwm_chip *ab8500 = ab8500_pwm_from_chip(chip);
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
- if (!state->enabled) {
+ if (state->enabled) {
+ /*
+ * A time quantum is
+ * q = (32 - FreqPWMOutx[3:0]) / AB8500_PWM_CLKRATE
+ * The period is always 1024 q, duty_cycle is between 1q and 1024q.
+ *
+ * FreqPWMOutx[3:0] | output frequency | output frequency | 1024q = period
+ * | (from manual) | (1 / 1024q) | = 1 / freq
+ * -----------------+------------------+------------------+--------------
+ * b0000 | 293 Hz | 292.968750 Hz | 3413333.33 ns
+ * b0001 | 302 Hz | 302.419355 Hz | 3306666.66 ns
+ * b0010 | 312 Hz | 312.500000 Hz | 3200000 ns
+ * b0011 | 323 Hz | 323.275862 Hz | 3093333.33 ns
+ * b0100 | 334 Hz | 334.821429 Hz | 2986666.66 ns
+ * b0101 | 347 Hz | 347.222222 Hz | 2880000 ns
+ * b0110 | 360 Hz | 360.576923 Hz | 2773333.33 ns
+ * b0111 | 375 Hz | 375.000000 Hz | 2666666.66 ns
+ * b1000 | 390 Hz | 390.625000 Hz | 2560000 ns
+ * b1001 | 407 Hz | 407.608696 Hz | 2453333.33 ns
+ * b1010 | 426 Hz | 426.136364 Hz | 2346666.66 ns
+ * b1011 | 446 Hz | 446.428571 Hz | 2240000 ns
+ * b1100 | 468 Hz | 468.750000 Hz | 2133333.33 ns
+ * b1101 | 493 Hz | 493.421053 Hz | 2026666.66 ns
+ * b1110 | 520 Hz | 520.833333 Hz | 1920000 ns
+ * b1111 | 551 Hz | 551.470588 Hz | 1813333.33 ns
+ *
+ *
+ * AB8500_PWM_CLKRATE is a multiple of 1024, so the division by
+ * 1024 can be done in this factor without loss of precision.
+ */
+ div = min_t(u64, mul_u64_u64_div_u64(state->period,
+ AB8500_PWM_CLKRATE >> 10,
+ NSEC_PER_SEC), 32); /* 32 - FreqPWMOutx[3:0] */
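+ /* e.g. period = 2560000 ns gives div = 24, i.e. FreqPWMOutx = b1000 (390.625 Hz) */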
+ if (div <= 16)
+ /* requested period < 1813333.33 ns */
+ return -EINVAL;
+
+ duty_steps = max_t(u64, mul_u64_u64_div_u64(state->duty_cycle,
+ AB8500_PWM_CLKRATE,
+ (u64)NSEC_PER_SEC * div), 1024);
+ }
+
+ /*
+ * The hardware doesn't support duty_steps = 0 explicitly, but emits low
+ * when disabled.
+ */
+ if (!state->enabled || duty_steps == 0) {
ret = abx500_mask_and_set_register_interruptible(chip->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
1 << ab8500->hwid, 0);
@@ -53,28 +103,29 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
/*
- * get the first 8 bits that are be written to
+ * The lower 8 bits of duty_steps - 1 are written to ...
* AB8500_PWM_OUT_CTRL1_REG[0:7]
*/
- lower_val = state->duty_cycle & 0x00FF;
+ lower_val = (duty_steps - 1) & 0x00ff;
/*
- * get bits [9:10] that are to be written to
- * AB8500_PWM_OUT_CTRL2_REG[0:1]
+ * The two remaining high bits go to
+ * AB8500_PWM_OUT_CTRL2_REG[0:1], together with FreqPWMOutx.
*/
- higher_val = ((state->duty_cycle & 0x0300) >> 8);
+ higher_val = ((duty_steps - 1) & 0x0300) >> 8 | (32 - div) << 4;
reg = AB8500_PWM_OUT_CTRL1_REG + (ab8500->hwid * 2);
ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
- reg, (u8)lower_val);
+ reg, lower_val);
if (ret < 0)
return ret;
ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
- (reg + 1), (u8)higher_val);
+ (reg + 1), higher_val);
if (ret < 0)
return ret;
+ /* enable */
ret = abx500_mask_and_set_register_interruptible(chip->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
1 << ab8500->hwid, 1 << ab8500->hwid);
@@ -85,8 +136,51 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
+static int ab8500_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ u8 ctrl7, lower_val, higher_val;
+ int ret;
+ struct ab8500_pwm_chip *ab8500 = ab8500_pwm_from_chip(chip);
+ unsigned int div, duty_steps;
+
+ ret = abx500_get_register_interruptible(chip->dev, AB8500_MISC,
+ AB8500_PWM_OUT_CTRL7_REG,
+ &ctrl7);
+ if (ret)
+ return ret;
+
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ if (!(ctrl7 & 1 << ab8500->hwid)) {
+ state->enabled = false;
+ return 0;
+ }
+
+ ret = abx500_get_register_interruptible(chip->dev, AB8500_MISC,
+ AB8500_PWM_OUT_CTRL1_REG + (ab8500->hwid * 2),
+ &lower_val);
+ if (ret)
+ return ret;
+
+ ret = abx500_get_register_interruptible(chip->dev, AB8500_MISC,
+ AB8500_PWM_OUT_CTRL2_REG + (ab8500->hwid * 2),
+ &higher_val);
+ if (ret)
+ return ret;
+
+ div = 32 - ((higher_val & 0xf0) >> 4);
+ duty_steps = ((higher_val & 3) << 8 | lower_val) + 1;
+
+	state->period = DIV64_U64_ROUND_UP(((u64)div << 10) * NSEC_PER_SEC, AB8500_PWM_CLKRATE);
+	state->duty_cycle = DIV64_U64_ROUND_UP((u64)div * duty_steps * NSEC_PER_SEC, AB8500_PWM_CLKRATE);
+
+ return 0;
+}
+
static const struct pwm_ops ab8500_pwm_ops = {
.apply = ab8500_pwm_apply,
+ .get_state = ab8500_pwm_get_state,
.owner = THIS_MODULE,
};
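
For a quick sanity check of the quantum arithmetic in ab8500_pwm_apply() above, here is a minimal userspace sketch that redoes the div and duty_steps computation for one request. Only AB8500_PWM_CLKRATE and the fixed 1024 steps per period come from the patch; the requested period and duty cycle are invented for the example.

	/* Illustrative sketch only, not driver code. */
	#include <stdint.h>
	#include <stdio.h>

	#define AB8500_PWM_CLKRATE	9600000ULL
	#define NSEC_PER_SEC		1000000000ULL

	int main(void)
	{
		uint64_t period_ns = 3000000;	/* example request, roughly 333 Hz */
		uint64_t duty_ns = 1500000;	/* about 50% duty cycle */

		/* div == 32 - FreqPWMOutx[3:0]; valid requests land in (16, 32] */
		uint64_t div = period_ns * (AB8500_PWM_CLKRATE >> 10) / NSEC_PER_SEC;
		if (div > 32)
			div = 32;

		/* active quanta, clamped to the 1024 steps that make up a period */
		uint64_t duty_steps = duty_ns * AB8500_PWM_CLKRATE / (NSEC_PER_SEC * div);
		if (duty_steps > 1024)
			duty_steps = 1024;

		printf("FreqPWMOutx = %llu, duty_steps = %llu\n",
		       (unsigned long long)(32 - div),
		       (unsigned long long)duty_steps);
		return 0;
	}

For this request div ends up as 28, i.e. FreqPWMOutx = b0100 with a real period of 2986666.66 ns from the table above, and 514 of 1024 duty steps, close to the requested 50%.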
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
index bd2308812096..3bbb26c862c3 100644
--- a/drivers/pwm/pwm-dwc.c
+++ b/drivers/pwm/pwm-dwc.c
@@ -198,20 +198,35 @@ static const struct pwm_ops dwc_pwm_ops = {
.owner = THIS_MODULE,
};
+static struct dwc_pwm *dwc_pwm_alloc(struct device *dev)
+{
+ struct dwc_pwm *dwc;
+
+ dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return NULL;
+
+ dwc->chip.dev = dev;
+ dwc->chip.ops = &dwc_pwm_ops;
+ dwc->chip.npwm = DWC_TIMERS_TOTAL;
+
+ dev_set_drvdata(dev, dwc);
+ return dwc;
+}
+
static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct device *dev = &pci->dev;
struct dwc_pwm *dwc;
int ret;
- dwc = devm_kzalloc(&pci->dev, sizeof(*dwc), GFP_KERNEL);
+ dwc = dwc_pwm_alloc(dev);
if (!dwc)
return -ENOMEM;
ret = pcim_enable_device(pci);
if (ret) {
- dev_err(&pci->dev,
- "Failed to enable device (%pe)\n", ERR_PTR(ret));
+ dev_err(dev, "Failed to enable device (%pe)\n", ERR_PTR(ret));
return ret;
}
@@ -219,24 +234,17 @@ static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
if (ret) {
- dev_err(&pci->dev,
- "Failed to iomap PCI BAR (%pe)\n", ERR_PTR(ret));
+ dev_err(dev, "Failed to iomap PCI BAR (%pe)\n", ERR_PTR(ret));
return ret;
}
dwc->base = pcim_iomap_table(pci)[0];
if (!dwc->base) {
- dev_err(&pci->dev, "Base address missing\n");
+ dev_err(dev, "Base address missing\n");
return -ENOMEM;
}
- pci_set_drvdata(pci, dwc);
-
- dwc->chip.dev = dev;
- dwc->chip.ops = &dwc_pwm_ops;
- dwc->chip.npwm = DWC_TIMERS_TOTAL;
-
- ret = pwmchip_add(&dwc->chip);
+ ret = devm_pwmchip_add(dev, &dwc->chip);
if (ret)
return ret;
@@ -248,12 +256,8 @@ static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
static void dwc_pwm_remove(struct pci_dev *pci)
{
- struct dwc_pwm *dwc = pci_get_drvdata(pci);
-
pm_runtime_forbid(&pci->dev);
pm_runtime_get_noresume(&pci->dev);
-
- pwmchip_remove(&dwc->chip);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 4987ca940b64..8362b4870c66 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -55,8 +55,8 @@ static int iqs620_pwm_init(struct iqs620_pwm_private *iqs620_pwm,
if (ret)
return ret;
- return regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS,
- IQS620_PWR_SETTINGS_PWM_OUT, 0xff);
+ return regmap_set_bits(iqs62x->regmap, IQS620_PWR_SETTINGS,
+ IQS620_PWR_SETTINGS_PWM_OUT);
}
static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 215ef9069114..35675e4058c6 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -8,7 +8,6 @@
*/
#include <linux/err.h>
-#include <linux/i2c.h>
#include <linux/mfd/lp3943.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 62b6acc6373d..393a4b97fc19 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -161,7 +161,13 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
mutex_lock(&ddata->lock);
if (state->period != ddata->approx_period) {
- if (ddata->user_count != 1) {
+ /*
+ * Don't let a 2nd user change the period underneath the 1st user.
+		 * However if ddata->approx_period == 0 this is the first time we set
+ * any period, so let whoever gets here first set the period so other
+ * users who agree on the period won't fail.
+ */
+ if (ddata->user_count != 1 && ddata->approx_period) {
mutex_unlock(&ddata->lock);
return -EBUSY;
}
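
From the consumer side, the pwm-sifive change above means the period is settled by whichever user applies a state first, and later users only need to agree with it. A hedged sketch of that behaviour (pwm0 and pwm1 are placeholders for pwm_device pointers obtained earlier, e.g. via devm_pwm_get()):

	/* Sketch of consumer behaviour; not part of the patch. */
	struct pwm_state state;
	int ret;

	pwm_init_state(pwm0, &state);
	state.period = 10000000;		/* 10 ms: first user fixes the period */
	state.duty_cycle = 2500000;
	state.enabled = true;
	ret = pwm_apply_state(pwm0, &state);	/* ok */

	pwm_init_state(pwm1, &state);
	state.period = 10000000;		/* same period: accepted */
	state.duty_cycle = 5000000;
	state.enabled = true;
	ret = pwm_apply_state(pwm1, &state);	/* ok */

	state.period = 5000000;			/* different period while shared */
	ret = pwm_apply_state(pwm1, &state);	/* -EBUSY */

Before the fix even the very first pwm_apply_state() could fail with -EBUSY when more than one consumer had already claimed the chip's PWMs, because ddata->approx_period started out as 0 and so never matched the requested period.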
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 514ff58a4471..f315fa106be8 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -127,7 +127,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
/* ensure CMP & ARR registers are properly written */
ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
- (val & STM32_LPTIM_CMPOK_ARROK),
+ (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
100, 1000);
if (ret) {
dev_err(priv->chip.dev, "ARR/CMP registers write issue\n");
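
The condition change above matters because STM32_LPTIM_CMPOK_ARROK covers two status bits (CMP write OK and ARR write OK); val & mask is already non-zero when only one of them has latched, so the poll could return before both registers were actually written. A minimal sketch of the difference, with invented bit positions:

	/* Sketch: testing a multi-bit mask needs an explicit comparison. */
	#define CMPOK		(1U << 3)	/* bit positions invented for the example */
	#define ARROK		(1U << 4)
	#define CMPOK_ARROK	(CMPOK | ARROK)

	unsigned int val = CMPOK;				/* only CMP latched so far */

	int done_old = (val & CMPOK_ARROK) != 0;		/* 1: old check stops polling too early */
	int done_new = (val & CMPOK_ARROK) == CMPOK_ARROK;	/* 0: new check keeps waiting */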
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ae69e493913d..4fcd36055b02 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1584,7 +1584,7 @@ static int set_machine_constraints(struct regulator_dev *rdev)
}
if (rdev->desc->off_on_delay)
- rdev->last_off = ktime_get();
+ rdev->last_off = ktime_get_boottime();
/* If the constraints say the regulator should be on at this point
* and we have control then make sure it is enabled.
@@ -2673,7 +2673,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
* this regulator was disabled.
*/
ktime_t end = ktime_add_us(rdev->last_off, rdev->desc->off_on_delay);
- s64 remaining = ktime_us_delta(end, ktime_get());
+ s64 remaining = ktime_us_delta(end, ktime_get_boottime());
if (remaining > 0)
_regulator_delay_helper(remaining);
@@ -2912,7 +2912,7 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
}
if (rdev->desc->off_on_delay)
- rdev->last_off = ktime_get();
+ rdev->last_off = ktime_get_boottime();
trace_regulator_disable_complete(rdev_get_name(rdev));
diff --git a/drivers/regulator/max597x-regulator.c b/drivers/regulator/max597x-regulator.c
index f0fb0f56e420..648e3641885a 100644
--- a/drivers/regulator/max597x-regulator.c
+++ b/drivers/regulator/max597x-regulator.c
@@ -193,7 +193,7 @@ static int max597x_get_status(struct regulator_dev *rdev)
ret = regmap_read(rdev->regmap, MAX5970_REG_STATUS3, &val);
if (ret)
- return REGULATOR_FAILED_RETRY;
+ return ret;
if (val & MAX5970_STATUS3_ALERT)
return REGULATOR_STATUS_ERROR;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 2ba72de0fa47..5a71579af0a1 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1677,7 +1677,7 @@ config RTC_DRV_MPC5121
config RTC_DRV_JZ4740
tristate "Ingenic JZ4740 SoC"
depends on MIPS || COMPILE_TEST
- depends on OF
+ depends on OF && COMMON_CLK
help
If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
controllers.
@@ -1773,6 +1773,18 @@ config RTC_DRV_SNVS
This driver can also be built as a module, if so, the module
will be called "rtc-snvs".
+config RTC_DRV_BBNSM
+ tristate "NXP BBNSM RTC support"
+ select REGMAP_MMIO
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ help
+ If you say yes here you get support for the NXP BBNSM RTC module.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-bbnsm".
+
config RTC_DRV_IMX_SC
depends on IMX_SCU
depends on HAVE_ARM_SMCCC
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 59eb30289335..ea445d1ebb17 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_RTC_DRV_ASPEED) += rtc-aspeed.o
obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o
+obj-$(CONFIG_RTC_DRV_BBNSM) += rtc-nxp-bbnsm.o
obj-$(CONFIG_RTC_DRV_BD70528) += rtc-bd70528.o
obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o
obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 7c30cb3c764d..499d89150afc 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -392,7 +392,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
return err;
if (!rtc->ops) {
err = -ENODEV;
- } else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->read_alarm) {
+ } else if (!test_bit(RTC_FEATURE_ALARM, rtc->features)) {
err = -EINVAL;
} else {
memset(alarm, 0, sizeof(struct rtc_wkalrm));
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index 2f8deb8c4cd3..34611f6dedcb 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -536,9 +536,14 @@ static int abeoz9_probe(struct i2c_client *client)
clear_bit(RTC_FEATURE_ALARM, data->rtc->features);
if (client->irq > 0) {
+ unsigned long irqflags = IRQF_TRIGGER_LOW;
+
+ if (dev_fwnode(&client->dev))
+ irqflags = 0;
+
ret = devm_request_threaded_irq(dev, client->irq, NULL,
abeoz9_rtc_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ irqflags | IRQF_ONESHOT,
dev_name(dev), dev);
if (ret) {
dev_err(dev, "failed to request alarm irq\n");
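
The same irqflags pattern recurs in several RTC probe functions further down (hym8563, m41t80, pcf2123, pcf85063, pcf8523, pcf85363, pcf8563): when the device has a fwnode, the interrupt trigger type already comes from the DT/ACPI interrupt description, so the driver passes 0 and only adds IRQF_ONESHOT, while the hard-coded IRQF_TRIGGER_LOW remains as a fallback for devices instantiated without firmware data. A condensed sketch of the pattern ("foo" names are placeholders):

	if (client->irq > 0) {
		unsigned long irqflags = IRQF_TRIGGER_LOW;	/* fallback without fwnode */

		if (dev_fwnode(&client->dev))
			irqflags = 0;	/* trigger type comes from the firmware description */

		ret = devm_request_threaded_irq(&client->dev, client->irq,
						NULL, foo_rtc_irq,
						irqflags | IRQF_ONESHOT,
						"foo", foo);
	}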
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index 2e0e6432901b..f34a2e59cac7 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -11,6 +11,7 @@
*/
#include <linux/bcd.h>
+#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
@@ -88,6 +89,16 @@
#define ABX8XX_TRICKLE_STANDARD_DIODE 0x8
#define ABX8XX_TRICKLE_SCHOTTKY_DIODE 0x4
+#define ABX8XX_REG_EXTRAM 0x3f
+#define ABX8XX_EXTRAM_XADS GENMASK(1, 0)
+
+#define ABX8XX_SRAM_BASE 0x40
+#define ABX8XX_SRAM_WIN_SIZE 0x40
+#define ABX8XX_RAM_SIZE 256
+
+#define NVMEM_ADDR_LOWER GENMASK(5, 0)
+#define NVMEM_ADDR_UPPER GENMASK(7, 6)
+
static u8 trickle_resistors[] = {0, 3, 6, 11};
enum abx80x_chip {AB0801, AB0803, AB0804, AB0805,
@@ -674,6 +685,68 @@ static int abx80x_setup_watchdog(struct abx80x_priv *priv)
}
#endif
+static int abx80x_nvmem_xfer(struct abx80x_priv *priv, unsigned int offset,
+ void *val, size_t bytes, bool write)
+{
+ int ret;
+
+ while (bytes) {
+ u8 extram, reg, len, lower, upper;
+
+ lower = FIELD_GET(NVMEM_ADDR_LOWER, offset);
+ upper = FIELD_GET(NVMEM_ADDR_UPPER, offset);
+ extram = FIELD_PREP(ABX8XX_EXTRAM_XADS, upper);
+ reg = ABX8XX_SRAM_BASE + lower;
+ len = min(lower + bytes, (size_t)ABX8XX_SRAM_WIN_SIZE) - lower;
+ len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX);
+
+ ret = i2c_smbus_write_byte_data(priv->client, ABX8XX_REG_EXTRAM,
+ extram);
+ if (ret)
+ return ret;
+
+ if (write)
+ ret = i2c_smbus_write_i2c_block_data(priv->client, reg,
+ len, val);
+ else
+ ret = i2c_smbus_read_i2c_block_data(priv->client, reg,
+ len, val);
+ if (ret)
+ return ret;
+
+ offset += len;
+ val += len;
+ bytes -= len;
+ }
+
+ return 0;
+}
+
+static int abx80x_nvmem_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ return abx80x_nvmem_xfer(priv, offset, val, bytes, false);
+}
+
+static int abx80x_nvmem_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ return abx80x_nvmem_xfer(priv, offset, val, bytes, true);
+}
+
+static int abx80x_setup_nvmem(struct abx80x_priv *priv)
+{
+ struct nvmem_config config = {
+ .type = NVMEM_TYPE_BATTERY_BACKED,
+ .reg_read = abx80x_nvmem_read,
+ .reg_write = abx80x_nvmem_write,
+ .size = ABX8XX_RAM_SIZE,
+ .priv = priv,
+ };
+
+ return devm_rtc_nvmem_register(priv->rtc, &config);
+}
+
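
A worked example of the address split done by abx80x_nvmem_xfer() above: the 256 bytes of battery-backed RAM are only reachable through a 64-byte window at ABX8XX_SRAM_BASE, with the upper two offset bits selected via ABX8XX_EXTRAM_XADS. With an invented nvmem offset of 0x9c:

	/* Illustrative only: how a 0..255 offset maps onto the SRAM window. */
	unsigned int offset = 0x9c;			/* example offset into the 256-byte RAM */
	unsigned int lower = offset & 0x3f;		/* NVMEM_ADDR_LOWER -> 0x1c */
	unsigned int upper = (offset >> 6) & 0x3;	/* NVMEM_ADDR_UPPER -> 0x2  */
	unsigned int reg = 0x40 + lower;		/* ABX8XX_SRAM_BASE + lower -> 0x5c */

	/*
	 * The transfer first writes XADS = upper to ABX8XX_REG_EXTRAM and then
	 * accesses register reg; a single transfer never crosses the 0x40-byte
	 * window, which is what the length clamping in the loop above enforces.
	 */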
static const struct i2c_device_id abx80x_id[] = {
{ "abx80x", ABX80X },
{ "ab0801", AB0801 },
@@ -840,6 +913,10 @@ static int abx80x_probe(struct i2c_client *client)
return err;
}
+ err = abx80x_setup_nvmem(priv);
+ if (err)
+ return err;
+
if (client->irq > 0) {
dev_info(&client->dev, "IRQ %d supplied\n", client->irq);
err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
index c74130e8f496..1efa81cecc27 100644
--- a/drivers/rtc/rtc-brcmstb-waketimer.c
+++ b/drivers/rtc/rtc-brcmstb-waketimer.c
@@ -27,13 +27,17 @@ struct brcmstb_waketmr {
struct rtc_device *rtc;
struct device *dev;
void __iomem *base;
- int irq;
+ unsigned int wake_irq;
+ unsigned int alarm_irq;
struct notifier_block reboot_notifier;
struct clk *clk;
u32 rate;
+ unsigned long rtc_alarm;
+ bool alarm_en;
};
#define BRCMSTB_WKTMR_EVENT 0x00
+#define WKTMR_ALARM_EVENT BIT(0)
#define BRCMSTB_WKTMR_COUNTER 0x04
#define BRCMSTB_WKTMR_ALARM 0x08
#define BRCMSTB_WKTMR_PRESCALER 0x0C
@@ -41,28 +45,71 @@ struct brcmstb_waketmr {
#define BRCMSTB_WKTMR_DEFAULT_FREQ 27000000
+static inline bool brcmstb_waketmr_is_pending(struct brcmstb_waketmr *timer)
+{
+ u32 reg;
+
+ reg = readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT);
+ return !!(reg & WKTMR_ALARM_EVENT);
+}
+
static inline void brcmstb_waketmr_clear_alarm(struct brcmstb_waketmr *timer)
{
- writel_relaxed(1, timer->base + BRCMSTB_WKTMR_EVENT);
+ u32 reg;
+
+ if (timer->alarm_en && timer->alarm_irq)
+ disable_irq(timer->alarm_irq);
+ timer->alarm_en = false;
+ reg = readl_relaxed(timer->base + BRCMSTB_WKTMR_COUNTER);
+ writel_relaxed(reg - 1, timer->base + BRCMSTB_WKTMR_ALARM);
+ writel_relaxed(WKTMR_ALARM_EVENT, timer->base + BRCMSTB_WKTMR_EVENT);
(void)readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT);
}
static void brcmstb_waketmr_set_alarm(struct brcmstb_waketmr *timer,
unsigned int secs)
{
+ unsigned int now;
+
brcmstb_waketmr_clear_alarm(timer);
/* Make sure we are actually counting in seconds */
writel_relaxed(timer->rate, timer->base + BRCMSTB_WKTMR_PRESCALER);
- writel_relaxed(secs + 1, timer->base + BRCMSTB_WKTMR_ALARM);
+ writel_relaxed(secs, timer->base + BRCMSTB_WKTMR_ALARM);
+ now = readl_relaxed(timer->base + BRCMSTB_WKTMR_COUNTER);
+
+ while ((int)(secs - now) <= 0 &&
+ !brcmstb_waketmr_is_pending(timer)) {
+ secs = now + 1;
+ writel_relaxed(secs, timer->base + BRCMSTB_WKTMR_ALARM);
+ now = readl_relaxed(timer->base + BRCMSTB_WKTMR_COUNTER);
+ }
}
static irqreturn_t brcmstb_waketmr_irq(int irq, void *data)
{
struct brcmstb_waketmr *timer = data;
- pm_wakeup_event(timer->dev, 0);
+ if (!timer->alarm_irq)
+ pm_wakeup_event(timer->dev, 0);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t brcmstb_alarm_irq(int irq, void *data)
+{
+ struct brcmstb_waketmr *timer = data;
+
+ /* Ignore spurious interrupts */
+ if (!brcmstb_waketmr_is_pending(timer))
+ return IRQ_HANDLED;
+
+ if (timer->alarm_en) {
+ if (!device_may_wakeup(timer->dev))
+ writel_relaxed(WKTMR_ALARM_EVENT,
+ timer->base + BRCMSTB_WKTMR_EVENT);
+ rtc_update_irq(timer->rtc, 1, RTC_IRQF | RTC_AF);
+ }
return IRQ_HANDLED;
}
@@ -88,17 +135,25 @@ static void wktmr_read(struct brcmstb_waketmr *timer,
static int brcmstb_waketmr_prepare_suspend(struct brcmstb_waketmr *timer)
{
struct device *dev = timer->dev;
- int ret = 0;
+ int ret;
if (device_may_wakeup(dev)) {
- ret = enable_irq_wake(timer->irq);
+ ret = enable_irq_wake(timer->wake_irq);
if (ret) {
dev_err(dev, "failed to enable wake-up interrupt\n");
return ret;
}
+ if (timer->alarm_en && timer->alarm_irq) {
+ ret = enable_irq_wake(timer->alarm_irq);
+ if (ret) {
+ dev_err(dev, "failed to enable rtc interrupt\n");
+ disable_irq_wake(timer->wake_irq);
+ return ret;
+ }
+ }
}
- return ret;
+ return 0;
}
/* If enabled as a wakeup-source, arm the timer when powering off */
@@ -146,46 +201,47 @@ static int brcmstb_waketmr_getalarm(struct device *dev,
struct rtc_wkalrm *alarm)
{
struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
- time64_t sec;
- u32 reg;
- sec = readl_relaxed(timer->base + BRCMSTB_WKTMR_ALARM);
- if (sec != 0) {
- /* Alarm is enabled */
- alarm->enabled = 1;
- rtc_time64_to_tm(sec, &alarm->time);
- }
+ alarm->enabled = timer->alarm_en;
+ rtc_time64_to_tm(timer->rtc_alarm, &alarm->time);
- reg = readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT);
- alarm->pending = !!(reg & 1);
+ alarm->pending = brcmstb_waketmr_is_pending(timer);
return 0;
}
-static int brcmstb_waketmr_setalarm(struct device *dev,
- struct rtc_wkalrm *alarm)
+static int brcmstb_waketmr_alarm_enable(struct device *dev,
+ unsigned int enabled)
{
struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
- time64_t sec;
-
- if (alarm->enabled)
- sec = rtc_tm_to_time64(&alarm->time);
- else
- sec = 0;
- brcmstb_waketmr_set_alarm(timer, sec);
+ if (enabled && !timer->alarm_en) {
+ if ((int)(readl_relaxed(timer->base + BRCMSTB_WKTMR_COUNTER) -
+ readl_relaxed(timer->base + BRCMSTB_WKTMR_ALARM)) >= 0 &&
+ !brcmstb_waketmr_is_pending(timer))
+ return -EINVAL;
+ timer->alarm_en = true;
+ if (timer->alarm_irq)
+ enable_irq(timer->alarm_irq);
+ } else if (!enabled && timer->alarm_en) {
+ if (timer->alarm_irq)
+ disable_irq(timer->alarm_irq);
+ timer->alarm_en = false;
+ }
return 0;
}
-/*
- * Does not do much but keep the RTC class happy. We always support
- * alarms.
- */
-static int brcmstb_waketmr_alarm_enable(struct device *dev,
- unsigned int enabled)
+static int brcmstb_waketmr_setalarm(struct device *dev,
+ struct rtc_wkalrm *alarm)
{
- return 0;
+ struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
+
+ timer->rtc_alarm = rtc_tm_to_time64(&alarm->time);
+
+ brcmstb_waketmr_set_alarm(timer, timer->rtc_alarm);
+
+ return brcmstb_waketmr_alarm_enable(dev, alarm->enabled);
}
static const struct rtc_class_ops brcmstb_waketmr_ops = {
@@ -221,12 +277,12 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev)
* Set wakeup capability before requesting wakeup interrupt, so we can
* process boot-time "wakeups" (e.g., from S5 soft-off)
*/
- device_set_wakeup_capable(dev, true);
- device_wakeup_enable(dev);
+ device_init_wakeup(dev, true);
- timer->irq = platform_get_irq(pdev, 0);
- if (timer->irq < 0)
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
return -ENODEV;
+ timer->wake_irq = (unsigned int)ret;
timer->clk = devm_clk_get(dev, NULL);
if (!IS_ERR(timer->clk)) {
@@ -241,11 +297,24 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev)
timer->clk = NULL;
}
- ret = devm_request_irq(dev, timer->irq, brcmstb_waketmr_irq, 0,
+ ret = devm_request_irq(dev, timer->wake_irq, brcmstb_waketmr_irq, 0,
"brcmstb-waketimer", timer);
if (ret < 0)
goto err_clk;
+ brcmstb_waketmr_clear_alarm(timer);
+
+ /* Attempt to initialize non-wake irq */
+ ret = platform_get_irq(pdev, 1);
+ if (ret > 0) {
+ timer->alarm_irq = (unsigned int)ret;
+ ret = devm_request_irq(dev, timer->alarm_irq, brcmstb_alarm_irq,
+ IRQF_NO_AUTOEN, "brcmstb-waketimer-rtc",
+ timer);
+ if (ret < 0)
+ timer->alarm_irq = 0;
+ }
+
timer->reboot_notifier.notifier_call = brcmstb_waketmr_reboot;
register_reboot_notifier(&timer->reboot_notifier);
@@ -256,8 +325,6 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev)
if (ret)
goto err_notifier;
- dev_info(dev, "registered, with irq %d\n", timer->irq);
-
return 0;
err_notifier:
@@ -295,7 +362,9 @@ static int brcmstb_waketmr_resume(struct device *dev)
if (!device_may_wakeup(dev))
return 0;
- ret = disable_irq_wake(timer->irq);
+ ret = disable_irq_wake(timer->wake_irq);
+ if (timer->alarm_en && timer->alarm_irq)
+ disable_irq_wake(timer->alarm_irq);
brcmstb_waketmr_clear_alarm(timer);
@@ -325,4 +394,5 @@ module_platform_driver(brcmstb_waketmr_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Brian Norris");
MODULE_AUTHOR("Markus Mayer");
+MODULE_AUTHOR("Doug Berger");
MODULE_DESCRIPTION("Wake-up timer driver for STB chips");
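
Two details in the brcmstb changes above deserve a note: the alarm register is reprogrammed until it is strictly in the future of the free-running counter, and the comparisons against that counter use a signed difference of the 32-bit values so they stay correct across wraparound. A small sketch of the wraparound-safe test, with invented values:

	/* Sketch: signed difference of u32 values survives counter wraparound. */
	uint32_t now = 0xfffffff0;			/* counter close to wrapping */
	uint32_t secs = 0x00000010;			/* alarm just after the wrap */

	int in_past_naive = (secs <= now);		/* 1: plain compare gets it wrong    */
	int in_past = ((int32_t)(secs - now) <= 0);	/* 0: alarm is still in the future   */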
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index def9b7f9d957..e86ba84df6cb 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -1712,9 +1712,9 @@ static const struct regmap_config regmap_config = {
.val_bits = 8,
};
-static int ds1307_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds1307_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct ds1307 *ds1307;
const void *match;
int err = -ENODEV;
@@ -2011,7 +2011,7 @@ static struct i2c_driver ds1307_driver = {
.name = "rtc-ds1307",
.of_match_table = ds1307_of_match,
},
- .probe = ds1307_probe,
+ .probe_new = ds1307_probe,
.id_table = ds1307_id,
};
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index 1e8bc6cc1e12..dc6b0f4a54e2 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -164,7 +164,7 @@ static int efi_read_time(struct device *dev, struct rtc_time *tm)
if (status != EFI_SUCCESS) {
/* should never happen */
- dev_err(dev, "can't read time\n");
+ dev_err_once(dev, "can't read time\n");
return -EINVAL;
}
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index cc710d682121..7d5a298a9a3b 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -518,9 +518,14 @@ static int hym8563_probe(struct i2c_client *client)
}
if (client->irq > 0) {
+ unsigned long irqflags = IRQF_TRIGGER_LOW;
+
+ if (dev_fwnode(&client->dev))
+ irqflags = 0;
+
ret = devm_request_threaded_irq(&client->dev, client->irq,
NULL, hym8563_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ irqflags | IRQF_ONESHOT,
client->name, hym8563);
if (ret < 0) {
dev_err(&client->dev, "irq %d request failed, %d\n",
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index a3b0de3393f5..e68a79b5e00e 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -8,16 +8,16 @@
* by Alessandro Zummo <a.zummo@towertech.it>.
*/
-#include <linux/i2c.h>
#include <linux/bcd.h>
-#include <linux/rtc.h>
-#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/regmap.h>
#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#include <asm/byteorder.h>
/* ISL register offsets */
#define ISL12022_REG_SC 0x00
@@ -44,13 +44,6 @@
#define ISL12022_BETA_TSE (1 << 7)
-static struct i2c_driver isl12022_driver;
-
-struct isl12022 {
- struct rtc_device *rtc;
- struct regmap *regmap;
-};
-
static umode_t isl12022_hwmon_is_visible(const void *data,
enum hwmon_sensor_types type,
u32 attr, int channel)
@@ -67,19 +60,17 @@ static umode_t isl12022_hwmon_is_visible(const void *data,
*/
static int isl12022_hwmon_read_temp(struct device *dev, long *mC)
{
- struct isl12022 *isl12022 = dev_get_drvdata(dev);
- struct regmap *regmap = isl12022->regmap;
- u8 temp_buf[2];
+ struct regmap *regmap = dev_get_drvdata(dev);
int temp, ret;
+ __le16 buf;
- ret = regmap_bulk_read(regmap, ISL12022_REG_TEMP_L,
- temp_buf, sizeof(temp_buf));
+ ret = regmap_bulk_read(regmap, ISL12022_REG_TEMP_L, &buf, sizeof(buf));
if (ret)
return ret;
/*
* Temperature is represented as a 10-bit number, unit half-Kelvins.
*/
- temp = (temp_buf[1] << 8) | temp_buf[0];
+ temp = le16_to_cpu(buf);
temp *= 500;
temp -= 273000;
@@ -115,23 +106,21 @@ static const struct hwmon_chip_info isl12022_hwmon_chip_info = {
static void isl12022_hwmon_register(struct device *dev)
{
- struct isl12022 *isl12022;
+ struct regmap *regmap = dev_get_drvdata(dev);
struct device *hwmon;
int ret;
if (!IS_REACHABLE(CONFIG_HWMON))
return;
- isl12022 = dev_get_drvdata(dev);
-
- ret = regmap_update_bits(isl12022->regmap, ISL12022_REG_BETA,
+ ret = regmap_update_bits(regmap, ISL12022_REG_BETA,
ISL12022_BETA_TSE, ISL12022_BETA_TSE);
if (ret) {
dev_warn(dev, "unable to enable temperature sensor\n");
return;
}
- hwmon = devm_hwmon_device_register_with_info(dev, "isl12022", isl12022,
+ hwmon = devm_hwmon_device_register_with_info(dev, "isl12022", regmap,
&isl12022_hwmon_chip_info,
NULL);
if (IS_ERR(hwmon))
@@ -144,8 +133,7 @@ static void isl12022_hwmon_register(struct device *dev)
*/
static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct isl12022 *isl12022 = dev_get_drvdata(dev);
- struct regmap *regmap = isl12022->regmap;
+ struct regmap *regmap = dev_get_drvdata(dev);
uint8_t buf[ISL12022_REG_INT + 1];
int ret;
@@ -155,16 +143,12 @@ static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (buf[ISL12022_REG_SR] & (ISL12022_SR_LBAT85 | ISL12022_SR_LBAT75)) {
dev_warn(dev,
- "voltage dropped below %u%%, "
- "date and time is not reliable.\n",
+ "voltage dropped below %u%%, date and time is not reliable.\n",
buf[ISL12022_REG_SR] & ISL12022_SR_LBAT85 ? 85 : 75);
}
dev_dbg(dev,
- "%s: raw data is sec=%02x, min=%02x, hr=%02x, "
- "mday=%02x, mon=%02x, year=%02x, wday=%02x, "
- "sr=%02x, int=%02x",
- __func__,
+ "raw data is sec=%02x, min=%02x, hr=%02x, mday=%02x, mon=%02x, year=%02x, wday=%02x, sr=%02x, int=%02x",
buf[ISL12022_REG_SC],
buf[ISL12022_REG_MN],
buf[ISL12022_REG_HR],
@@ -190,8 +174,7 @@ static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct isl12022 *isl12022 = dev_get_drvdata(dev);
- struct regmap *regmap = isl12022->regmap;
+ struct regmap *regmap = dev_get_drvdata(dev);
int ret;
uint8_t buf[ISL12022_REG_DW + 1];
@@ -218,8 +201,7 @@ static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm)
buf[ISL12022_REG_DW] = tm->tm_wday & 0x07;
- return regmap_bulk_write(isl12022->regmap, ISL12022_REG_SC,
- buf, sizeof(buf));
+ return regmap_bulk_write(regmap, ISL12022_REG_SC, buf, sizeof(buf));
}
static const struct rtc_class_ops isl12022_rtc_ops = {
@@ -235,44 +217,39 @@ static const struct regmap_config regmap_config = {
static int isl12022_probe(struct i2c_client *client)
{
- struct isl12022 *isl12022;
+ struct rtc_device *rtc;
+ struct regmap *regmap;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
- isl12022 = devm_kzalloc(&client->dev, sizeof(struct isl12022),
- GFP_KERNEL);
- if (!isl12022)
- return -ENOMEM;
- dev_set_drvdata(&client->dev, isl12022);
-
- isl12022->regmap = devm_regmap_init_i2c(client, &regmap_config);
- if (IS_ERR(isl12022->regmap)) {
+ regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(regmap)) {
dev_err(&client->dev, "regmap allocation failed\n");
- return PTR_ERR(isl12022->regmap);
+ return PTR_ERR(regmap);
}
+ dev_set_drvdata(&client->dev, regmap);
+
isl12022_hwmon_register(&client->dev);
- isl12022->rtc = devm_rtc_allocate_device(&client->dev);
- if (IS_ERR(isl12022->rtc))
- return PTR_ERR(isl12022->rtc);
+ rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- isl12022->rtc->ops = &isl12022_rtc_ops;
- isl12022->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
- isl12022->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->ops = &isl12022_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
- return devm_rtc_register_device(isl12022->rtc);
+ return devm_rtc_register_device(rtc);
}
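
For the temperature path touched above: the ISL12022 reports a 10-bit value in half-Kelvin units, and the hwmon callback converts that to millidegrees Celsius with temp * 500 - 273000 (0 degrees C is approximated as 273 K). A worked example with an invented raw reading:

	/* Sketch: raw half-Kelvin reading -> millidegrees Celsius. */
	int temp = 586;		/* example raw value: 586 * 0.5 K = 293 K */

	temp *= 500;		/* 293000, now in millikelvin   */
	temp -= 273000;		/* 20000, i.e. 20.000 degrees C */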
-#ifdef CONFIG_OF
static const struct of_device_id isl12022_dt_match[] = {
{ .compatible = "isl,isl12022" }, /* for backward compat., don't use */
{ .compatible = "isil,isl12022" },
{ },
};
MODULE_DEVICE_TABLE(of, isl12022_dt_match);
-#endif
static const struct i2c_device_id isl12022_id[] = {
{ "isl12022", 0 },
@@ -283,9 +260,7 @@ MODULE_DEVICE_TABLE(i2c, isl12022_id);
static struct i2c_driver isl12022_driver = {
.driver = {
.name = "rtc-isl12022",
-#ifdef CONFIG_OF
- .of_match_table = of_match_ptr(isl12022_dt_match),
-#endif
+ .of_match_table = isl12022_dt_match,
},
.probe_new = isl12022_probe,
.id_table = isl12022_id,
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index c383719292c7..59d279e3e6f5 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -6,12 +6,15 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
+#include <linux/property.h>
#include <linux/reboot.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -25,6 +28,7 @@
#define JZ_REG_RTC_WAKEUP_FILTER 0x24
#define JZ_REG_RTC_RESET_COUNTER 0x28
#define JZ_REG_RTC_SCRATCHPAD 0x34
+#define JZ_REG_RTC_CKPCR 0x40
/* The following are present on the jz4780 */
#define JZ_REG_RTC_WENR 0x3C
@@ -44,6 +48,9 @@
#define JZ_RTC_WAKEUP_FILTER_MASK 0x0000FFE0
#define JZ_RTC_RESET_COUNTER_MASK 0x00000FE0
+#define JZ_RTC_CKPCR_CK32PULL_DIS BIT(4)
+#define JZ_RTC_CKPCR_CK32CTL_EN (BIT(2) | BIT(1))
+
enum jz4740_rtc_type {
ID_JZ4740,
ID_JZ4760,
@@ -56,6 +63,8 @@ struct jz4740_rtc {
struct rtc_device *rtc;
+ struct clk_hw clk32k;
+
spinlock_t lock;
};
@@ -69,19 +78,15 @@ static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg)
static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
{
uint32_t ctrl;
- int timeout = 10000;
- do {
- ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL);
- } while (!(ctrl & JZ_RTC_CTRL_WRDY) && --timeout);
-
- return timeout ? 0 : -EIO;
+ return readl_poll_timeout(rtc->base + JZ_REG_RTC_CTRL, ctrl,
+ ctrl & JZ_RTC_CTRL_WRDY, 0, 1000);
}
static inline int jz4780_rtc_enable_write(struct jz4740_rtc *rtc)
{
uint32_t ctrl;
- int ret, timeout = 10000;
+ int ret;
ret = jz4740_rtc_wait_write_ready(rtc);
if (ret != 0)
@@ -89,11 +94,8 @@ static inline int jz4780_rtc_enable_write(struct jz4740_rtc *rtc)
writel(JZ_RTC_WENR_MAGIC, rtc->base + JZ_REG_RTC_WENR);
- do {
- ctrl = readl(rtc->base + JZ_REG_RTC_WENR);
- } while (!(ctrl & JZ_RTC_WENR_WEN) && --timeout);
-
- return timeout ? 0 : -EIO;
+ return readl_poll_timeout(rtc->base + JZ_REG_RTC_WENR, ctrl,
+ ctrl & JZ_RTC_WENR_WEN, 0, 1000);
}
static inline int jz4740_rtc_reg_write(struct jz4740_rtc *rtc, size_t reg,
@@ -260,6 +262,7 @@ static void jz4740_rtc_power_off(void)
static const struct of_device_id jz4740_rtc_of_match[] = {
{ .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 },
{ .compatible = "ingenic,jz4760-rtc", .data = (void *)ID_JZ4760 },
+ { .compatible = "ingenic,jz4770-rtc", .data = (void *)ID_JZ4780 },
{ .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
{},
};
@@ -301,6 +304,38 @@ static void jz4740_rtc_set_wakeup_params(struct jz4740_rtc *rtc,
jz4740_rtc_reg_write(rtc, JZ_REG_RTC_RESET_COUNTER, reset_ticks);
}
+static int jz4740_rtc_clk32k_enable(struct clk_hw *hw)
+{
+ struct jz4740_rtc *rtc = container_of(hw, struct jz4740_rtc, clk32k);
+
+ return jz4740_rtc_reg_write(rtc, JZ_REG_RTC_CKPCR,
+ JZ_RTC_CKPCR_CK32PULL_DIS |
+ JZ_RTC_CKPCR_CK32CTL_EN);
+}
+
+static void jz4740_rtc_clk32k_disable(struct clk_hw *hw)
+{
+ struct jz4740_rtc *rtc = container_of(hw, struct jz4740_rtc, clk32k);
+
+ jz4740_rtc_reg_write(rtc, JZ_REG_RTC_CKPCR, 0);
+}
+
+static int jz4740_rtc_clk32k_is_enabled(struct clk_hw *hw)
+{
+ struct jz4740_rtc *rtc = container_of(hw, struct jz4740_rtc, clk32k);
+ u32 ckpcr;
+
+ ckpcr = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CKPCR);
+
+ return !!(ckpcr & JZ_RTC_CKPCR_CK32CTL_EN);
+}
+
+static const struct clk_ops jz4740_rtc_clk32k_ops = {
+ .enable = jz4740_rtc_clk32k_enable,
+ .disable = jz4740_rtc_clk32k_disable,
+ .is_enabled = jz4740_rtc_clk32k_is_enabled,
+};
+
static int jz4740_rtc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -335,17 +370,13 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
device_init_wakeup(dev, 1);
ret = dev_pm_set_wake_irq(dev, irq);
- if (ret) {
- dev_err(dev, "Failed to set wake irq: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set wake irq\n");
rtc->rtc = devm_rtc_allocate_device(dev);
- if (IS_ERR(rtc->rtc)) {
- ret = PTR_ERR(rtc->rtc);
- dev_err(dev, "Failed to allocate rtc device: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(rtc->rtc))
+ return dev_err_probe(dev, PTR_ERR(rtc->rtc),
+ "Failed to allocate rtc device\n");
rtc->rtc->ops = &jz4740_rtc_ops;
rtc->rtc->range_max = U32_MAX;
@@ -362,10 +393,8 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, jz4740_rtc_irq, 0,
pdev->name, rtc);
- if (ret) {
- dev_err(dev, "Failed to request rtc irq: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request rtc irq\n");
if (of_device_is_system_power_controller(np)) {
dev_for_power_off = dev;
@@ -376,6 +405,21 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
dev_warn(dev, "Poweroff handler already present!\n");
}
+ if (device_property_present(dev, "#clock-cells")) {
+ rtc->clk32k.init = CLK_HW_INIT_HW("clk32k", __clk_get_hw(clk),
+ &jz4740_rtc_clk32k_ops, 0);
+
+ ret = devm_clk_hw_register(dev, &rtc->clk32k);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Unable to register clk32k clock\n");
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &rtc->clk32k);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Unable to register clk32k clock provider\n");
+ }
+
return 0;
}
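
With the clk32k provider registered above (only when the RTC node carries "#clock-cells"), other devices can consume the RTC's 32 kHz output through the regular clock API. A hedged consumer-side sketch; the wifi node, unit address, and the "lpo" clock name are invented for illustration:

	/* Illustrative device tree fragment:
	 *
	 *	rtc: rtc@10003000 {
	 *		compatible = "ingenic,jz4770-rtc";
	 *		#clock-cells = <0>;
	 *	};
	 *
	 *	wifi {
	 *		clocks = <&rtc>;
	 *		clock-names = "lpo";
	 *	};
	 */
	struct clk *lpo;

	lpo = devm_clk_get(dev, "lpo");
	if (IS_ERR(lpo))
		return PTR_ERR(lpo);

	return clk_prepare_enable(lpo);		/* ends up in jz4740_rtc_clk32k_enable() */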
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 494052dbd39f..c1963f7c424d 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -914,9 +914,14 @@ static int m41t80_probe(struct i2c_client *client)
"wakeup-source");
#endif
if (client->irq > 0) {
+ unsigned long irqflags = IRQF_TRIGGER_LOW;
+
+ if (dev_fwnode(&client->dev))
+ irqflags = 0;
+
rc = devm_request_threaded_irq(&client->dev, client->irq,
NULL, m41t80_handle_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ irqflags | IRQF_ONESHOT,
"m41t80", client);
if (rc) {
dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
diff --git a/drivers/rtc/rtc-max8907.c b/drivers/rtc/rtc-max8907.c
index db3495d10274..af97140dd00a 100644
--- a/drivers/rtc/rtc-max8907.c
+++ b/drivers/rtc/rtc-max8907.c
@@ -9,7 +9,6 @@
*/
#include <linux/bcd.h>
-#include <linux/i2c.h>
#include <linux/mfd/max8907.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/rtc/rtc-moxart.c b/drivers/rtc/rtc-moxart.c
index 6b24ac9e1cfa..2247dd39ee4b 100644
--- a/drivers/rtc/rtc-moxart.c
+++ b/drivers/rtc/rtc-moxart.c
@@ -10,14 +10,15 @@
* Moxa Technology Co., Ltd. <www.moxa.com>
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/mod_devicetable.h>
+#include <linux/gpio/consumer.h>
#define GPIO_RTC_RESERVED 0x0C
#define GPIO_RTC_DATA_SET 0x10
@@ -55,7 +56,9 @@
struct moxart_rtc {
struct rtc_device *rtc;
spinlock_t rtc_lock;
- int gpio_data, gpio_sclk, gpio_reset;
+ struct gpio_desc *gpio_data;
+ struct gpio_desc *gpio_sclk;
+ struct gpio_desc *gpio_reset;
};
static int day_of_year[12] = { 0, 31, 59, 90, 120, 151, 181,
@@ -67,10 +70,10 @@ static void moxart_rtc_write_byte(struct device *dev, u8 data)
int i;
for (i = 0; i < 8; i++, data >>= 1) {
- gpio_set_value(moxart_rtc->gpio_sclk, 0);
- gpio_set_value(moxart_rtc->gpio_data, ((data & 1) == 1));
+ gpiod_set_value(moxart_rtc->gpio_sclk, 0);
+ gpiod_set_value(moxart_rtc->gpio_data, ((data & 1) == 1));
udelay(GPIO_RTC_DELAY_TIME);
- gpio_set_value(moxart_rtc->gpio_sclk, 1);
+ gpiod_set_value(moxart_rtc->gpio_sclk, 1);
udelay(GPIO_RTC_DELAY_TIME);
}
}
@@ -82,11 +85,11 @@ static u8 moxart_rtc_read_byte(struct device *dev)
u8 data = 0;
for (i = 0; i < 8; i++) {
- gpio_set_value(moxart_rtc->gpio_sclk, 0);
+ gpiod_set_value(moxart_rtc->gpio_sclk, 0);
udelay(GPIO_RTC_DELAY_TIME);
- gpio_set_value(moxart_rtc->gpio_sclk, 1);
+ gpiod_set_value(moxart_rtc->gpio_sclk, 1);
udelay(GPIO_RTC_DELAY_TIME);
- if (gpio_get_value(moxart_rtc->gpio_data))
+ if (gpiod_get_value(moxart_rtc->gpio_data))
data |= (1 << i);
udelay(GPIO_RTC_DELAY_TIME);
}
@@ -101,15 +104,15 @@ static u8 moxart_rtc_read_register(struct device *dev, u8 cmd)
local_irq_save(flags);
- gpio_direction_output(moxart_rtc->gpio_data, 0);
- gpio_set_value(moxart_rtc->gpio_reset, 1);
+ gpiod_direction_output(moxart_rtc->gpio_data, 0);
+ gpiod_set_value(moxart_rtc->gpio_reset, 1);
udelay(GPIO_RTC_DELAY_TIME);
moxart_rtc_write_byte(dev, cmd);
- gpio_direction_input(moxart_rtc->gpio_data);
+ gpiod_direction_input(moxart_rtc->gpio_data);
udelay(GPIO_RTC_DELAY_TIME);
data = moxart_rtc_read_byte(dev);
- gpio_set_value(moxart_rtc->gpio_sclk, 0);
- gpio_set_value(moxart_rtc->gpio_reset, 0);
+ gpiod_set_value(moxart_rtc->gpio_sclk, 0);
+ gpiod_set_value(moxart_rtc->gpio_reset, 0);
udelay(GPIO_RTC_DELAY_TIME);
local_irq_restore(flags);
@@ -124,13 +127,13 @@ static void moxart_rtc_write_register(struct device *dev, u8 cmd, u8 data)
local_irq_save(flags);
- gpio_direction_output(moxart_rtc->gpio_data, 0);
- gpio_set_value(moxart_rtc->gpio_reset, 1);
+ gpiod_direction_output(moxart_rtc->gpio_data, 0);
+ gpiod_set_value(moxart_rtc->gpio_reset, 1);
udelay(GPIO_RTC_DELAY_TIME);
moxart_rtc_write_byte(dev, cmd);
moxart_rtc_write_byte(dev, data);
- gpio_set_value(moxart_rtc->gpio_sclk, 0);
- gpio_set_value(moxart_rtc->gpio_reset, 0);
+ gpiod_set_value(moxart_rtc->gpio_sclk, 0);
+ gpiod_set_value(moxart_rtc->gpio_reset, 0);
udelay(GPIO_RTC_DELAY_TIME);
local_irq_restore(flags);
@@ -247,53 +250,33 @@ static int moxart_rtc_probe(struct platform_device *pdev)
if (!moxart_rtc)
return -ENOMEM;
- moxart_rtc->gpio_data = of_get_named_gpio(pdev->dev.of_node,
- "gpio-rtc-data", 0);
- if (!gpio_is_valid(moxart_rtc->gpio_data)) {
- dev_err(&pdev->dev, "invalid gpio (data): %d\n",
- moxart_rtc->gpio_data);
- return moxart_rtc->gpio_data;
- }
-
- moxart_rtc->gpio_sclk = of_get_named_gpio(pdev->dev.of_node,
- "gpio-rtc-sclk", 0);
- if (!gpio_is_valid(moxart_rtc->gpio_sclk)) {
- dev_err(&pdev->dev, "invalid gpio (sclk): %d\n",
- moxart_rtc->gpio_sclk);
- return moxart_rtc->gpio_sclk;
- }
-
- moxart_rtc->gpio_reset = of_get_named_gpio(pdev->dev.of_node,
- "gpio-rtc-reset", 0);
- if (!gpio_is_valid(moxart_rtc->gpio_reset)) {
- dev_err(&pdev->dev, "invalid gpio (reset): %d\n",
- moxart_rtc->gpio_reset);
- return moxart_rtc->gpio_reset;
- }
-
- spin_lock_init(&moxart_rtc->rtc_lock);
- platform_set_drvdata(pdev, moxart_rtc);
-
- ret = devm_gpio_request(&pdev->dev, moxart_rtc->gpio_data, "rtc_data");
+ moxart_rtc->gpio_data = devm_gpiod_get(&pdev->dev, "rtc-data",
+ GPIOD_IN);
+ ret = PTR_ERR_OR_ZERO(moxart_rtc->gpio_data);
if (ret) {
- dev_err(&pdev->dev, "can't get rtc_data gpio\n");
+ dev_err(&pdev->dev, "can't get rtc data gpio: %d\n", ret);
return ret;
}
- ret = devm_gpio_request_one(&pdev->dev, moxart_rtc->gpio_sclk,
- GPIOF_DIR_OUT, "rtc_sclk");
+ moxart_rtc->gpio_sclk = devm_gpiod_get(&pdev->dev, "rtc-sclk",
+ GPIOD_ASIS);
+ ret = PTR_ERR_OR_ZERO(moxart_rtc->gpio_sclk);
if (ret) {
- dev_err(&pdev->dev, "can't get rtc_sclk gpio\n");
+ dev_err(&pdev->dev, "can't get rtc sclk gpio: %d\n", ret);
return ret;
}
- ret = devm_gpio_request_one(&pdev->dev, moxart_rtc->gpio_reset,
- GPIOF_DIR_OUT, "rtc_reset");
+ moxart_rtc->gpio_reset = devm_gpiod_get(&pdev->dev, "rtc-reset",
+ GPIOD_ASIS);
+ ret = PTR_ERR_OR_ZERO(moxart_rtc->gpio_reset);
if (ret) {
- dev_err(&pdev->dev, "can't get rtc_reset gpio\n");
+ dev_err(&pdev->dev, "can't get rtc reset gpio: %d\n", ret);
return ret;
}
+ spin_lock_init(&moxart_rtc->rtc_lock);
+ platform_set_drvdata(pdev, moxart_rtc);
+
moxart_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&moxart_rtc_ops,
THIS_MODULE);
diff --git a/drivers/rtc/rtc-nxp-bbnsm.c b/drivers/rtc/rtc-nxp-bbnsm.c
new file mode 100644
index 000000000000..acbfbeb8b070
--- /dev/null
+++ b/drivers/rtc/rtc-nxp-bbnsm.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2022 NXP.
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define BBNSM_CTRL 0x8
+#define BBNSM_INT_EN 0x10
+#define BBNSM_EVENTS 0x14
+#define BBNSM_RTC_LS 0x40
+#define BBNSM_RTC_MS 0x44
+#define BBNSM_TA 0x50
+
+#define RTC_EN 0x2
+#define RTC_EN_MSK 0x3
+#define TA_EN (0x2 << 2)
+#define TA_DIS (0x1 << 2)
+#define TA_EN_MSK (0x3 << 2)
+#define RTC_INT_EN 0x2
+#define TA_INT_EN (0x2 << 2)
+
+#define BBNSM_EVENT_TA (0x2 << 2)
+
+#define CNTR_TO_SECS_SH 15
+
+struct bbnsm_rtc {
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+ int irq;
+ struct clk *clk;
+};
+
+static u32 bbnsm_read_counter(struct bbnsm_rtc *bbnsm)
+{
+ u32 rtc_msb, rtc_lsb;
+ unsigned int timeout = 100;
+ u32 time;
+ u32 tmp = 0;
+
+ do {
+ time = tmp;
+ /* read the msb */
+ regmap_read(bbnsm->regmap, BBNSM_RTC_MS, &rtc_msb);
+ /* read the lsb */
+ regmap_read(bbnsm->regmap, BBNSM_RTC_LS, &rtc_lsb);
+ /* convert to seconds */
+ tmp = (rtc_msb << 17) | (rtc_lsb >> 15);
+ } while (tmp != time && --timeout);
+
+ return time;
+}
+
+static int bbnsm_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct bbnsm_rtc *bbnsm = dev_get_drvdata(dev);
+ unsigned long time;
+ u32 val;
+
+ regmap_read(bbnsm->regmap, BBNSM_CTRL, &val);
+ if ((val & RTC_EN_MSK) != RTC_EN)
+ return -EINVAL;
+
+ time = bbnsm_read_counter(bbnsm);
+ rtc_time64_to_tm(time, tm);
+
+ return 0;
+}
+
+static int bbnsm_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct bbnsm_rtc *bbnsm = dev_get_drvdata(dev);
+ unsigned long time = rtc_tm_to_time64(tm);
+
+ /* disable the RTC first */
+ regmap_update_bits(bbnsm->regmap, BBNSM_CTRL, RTC_EN_MSK, 0);
+
+ /* write the 32bit sec time to 47 bit timer counter, leaving 15 LSBs blank */
+ regmap_write(bbnsm->regmap, BBNSM_RTC_LS, time << CNTR_TO_SECS_SH);
+ regmap_write(bbnsm->regmap, BBNSM_RTC_MS, time >> (32 - CNTR_TO_SECS_SH));
+
+ /* Enable the RTC again */
+ regmap_update_bits(bbnsm->regmap, BBNSM_CTRL, RTC_EN_MSK, RTC_EN);
+
+ return 0;
+}
+
+static int bbnsm_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct bbnsm_rtc *bbnsm = dev_get_drvdata(dev);
+ u32 bbnsm_events, bbnsm_ta;
+
+ regmap_read(bbnsm->regmap, BBNSM_TA, &bbnsm_ta);
+ rtc_time64_to_tm(bbnsm_ta, &alrm->time);
+
+ regmap_read(bbnsm->regmap, BBNSM_EVENTS, &bbnsm_events);
+ alrm->pending = (bbnsm_events & BBNSM_EVENT_TA) ? 1 : 0;
+
+ return 0;
+}
+
+static int bbnsm_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+ struct bbnsm_rtc *bbnsm = dev_get_drvdata(dev);
+
+ /* enable the alarm event */
+ regmap_update_bits(bbnsm->regmap, BBNSM_CTRL, TA_EN_MSK, enable ? TA_EN : TA_DIS);
+ /* enable the alarm interrupt */
+ regmap_update_bits(bbnsm->regmap, BBNSM_INT_EN, TA_EN_MSK, enable ? TA_EN : TA_DIS);
+
+ return 0;
+}
+
+static int bbnsm_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct bbnsm_rtc *bbnsm = dev_get_drvdata(dev);
+ unsigned long time = rtc_tm_to_time64(&alrm->time);
+
+ /* disable the alarm */
+ regmap_update_bits(bbnsm->regmap, BBNSM_CTRL, TA_EN, TA_EN);
+
+ /* write the seconds to TA */
+ regmap_write(bbnsm->regmap, BBNSM_TA, time);
+
+ return bbnsm_rtc_alarm_irq_enable(dev, alrm->enabled);
+}
+
+static const struct rtc_class_ops bbnsm_rtc_ops = {
+ .read_time = bbnsm_rtc_read_time,
+ .set_time = bbnsm_rtc_set_time,
+ .read_alarm = bbnsm_rtc_read_alarm,
+ .set_alarm = bbnsm_rtc_set_alarm,
+ .alarm_irq_enable = bbnsm_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t bbnsm_rtc_irq_handler(int irq, void *dev_id)
+{
+ struct device *dev = dev_id;
+ struct bbnsm_rtc *bbnsm = dev_get_drvdata(dev);
+ u32 val;
+
+ regmap_read(bbnsm->regmap, BBNSM_EVENTS, &val);
+ if (val & BBNSM_EVENT_TA) {
+ bbnsm_rtc_alarm_irq_enable(dev, false);
+ /* clear the alarm event */
+ regmap_write_bits(bbnsm->regmap, BBNSM_EVENTS, TA_EN_MSK, BBNSM_EVENT_TA);
+ rtc_update_irq(bbnsm->rtc, 1, RTC_AF | RTC_IRQF);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int bbnsm_rtc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct bbnsm_rtc *bbnsm;
+ int ret;
+
+ bbnsm = devm_kzalloc(&pdev->dev, sizeof(*bbnsm), GFP_KERNEL);
+ if (!bbnsm)
+ return -ENOMEM;
+
+ bbnsm->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(bbnsm->rtc))
+ return PTR_ERR(bbnsm->rtc);
+
+ bbnsm->regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(bbnsm->regmap)) {
+ dev_dbg(&pdev->dev, "bbnsm get regmap failed\n");
+ return PTR_ERR(bbnsm->regmap);
+ }
+
+ bbnsm->irq = platform_get_irq(pdev, 0);
+ if (bbnsm->irq < 0)
+ return bbnsm->irq;
+
+ platform_set_drvdata(pdev, bbnsm);
+
+ /* clear all the pending events */
+ regmap_write(bbnsm->regmap, BBNSM_EVENTS, 0x7A);
+
+ device_init_wakeup(&pdev->dev, true);
+ dev_pm_set_wake_irq(&pdev->dev, bbnsm->irq);
+
+ ret = devm_request_irq(&pdev->dev, bbnsm->irq, bbnsm_rtc_irq_handler,
+ IRQF_SHARED, "rtc alarm", &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq %d: %d\n",
+ bbnsm->irq, ret);
+ return ret;
+ }
+
+ bbnsm->rtc->ops = &bbnsm_rtc_ops;
+ bbnsm->rtc->range_max = U32_MAX;
+
+ return devm_rtc_register_device(bbnsm->rtc);
+}
+
+static const struct of_device_id bbnsm_dt_ids[] = {
+ { .compatible = "nxp,imx93-bbnsm-rtc" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bbnsm_dt_ids);
+
+static struct platform_driver bbnsm_rtc_driver = {
+ .driver = {
+ .name = "bbnsm_rtc",
+ .of_match_table = bbnsm_dt_ids,
+ },
+ .probe = bbnsm_rtc_probe,
+};
+module_platform_driver(bbnsm_rtc_driver);
+
+MODULE_AUTHOR("Jacky Bai <ping.bai@nxp.com>");
+MODULE_DESCRIPTION("NXP BBNSM RTC Driver");
+MODULE_LICENSE("GPL");
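
The BBNSM keeps time in the upper 32 bits of a 47-bit counter, so seconds are shifted by CNTR_TO_SECS_SH (15) across the LS/MS register pair. A small sketch of the packing and unpacking done by bbnsm_rtc_set_time() and bbnsm_read_counter() above, with an invented time value:

	/* Sketch: 32-bit seconds <-> 47-bit counter split over two 32-bit registers. */
	uint32_t secs = 0x12345678;			/* example time in seconds */

	uint32_t ls = secs << 15;			/* BBNSM_RTC_LS: low 17 bits of secs, 15 zero LSBs */
	uint32_t ms = secs >> (32 - 15);		/* BBNSM_RTC_MS: high 15 bits of secs */

	uint32_t back = (ms << 17) | (ls >> 15);	/* bbnsm_read_counter() reconstruction */
	/* back == secs */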
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index e13b5e695d06..e714661e61a9 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -413,9 +413,14 @@ static int pcf2123_probe(struct spi_device *spi)
/* Register alarm irq */
if (spi->irq > 0) {
+ unsigned long irqflags = IRQF_TRIGGER_LOW;
+
+ if (dev_fwnode(&spi->dev))
+ irqflags = 0;
+
ret = devm_request_threaded_irq(&spi->dev, spi->irq, NULL,
pcf2123_rtc_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ irqflags | IRQF_ONESHOT,
pcf2123_driver.driver.name, &spi->dev);
if (!ret)
device_init_wakeup(&spi->dev, true);
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 754e03984f98..71a456355981 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -621,9 +621,14 @@ static int pcf85063_probe(struct i2c_client *client)
clear_bit(RTC_FEATURE_ALARM, pcf85063->rtc->features);
if (config->has_alarms && client->irq > 0) {
+ unsigned long irqflags = IRQF_TRIGGER_LOW;
+
+ if (dev_fwnode(&client->dev))
+ irqflags = 0;
+
err = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pcf85063_rtc_handle_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ irqflags | IRQF_ONESHOT,
"pcf85063", pcf85063);
if (err) {
dev_warn(&pcf85063->rtc->dev,
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 92de99f11a7a..2e111cdb94f7 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -445,13 +445,18 @@ static int pcf8523_probe(struct i2c_client *client)
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
if (client->irq > 0) {
+ unsigned long irqflags = IRQF_TRIGGER_LOW;
+
+ if (dev_fwnode(&client->dev))
+ irqflags = 0;
+
err = regmap_write(pcf8523->regmap, PCF8523_TMR_CLKOUT_CTRL, 0x38);
if (err < 0)
return err;
err = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pcf8523_irq,
- IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ IRQF_SHARED | IRQF_ONESHOT | irqflags,
dev_name(&rtc->dev), pcf8523);
if (err)
return err;
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index c05b722f0060..8958eadf1c3e 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -101,6 +101,10 @@
#define PIN_IO_INTA_OUT 2
#define PIN_IO_INTA_HIZ 3
+#define OSC_CAP_SEL GENMASK(1, 0)
+#define OSC_CAP_6000 0x01
+#define OSC_CAP_12500 0x02
+
#define STOP_EN_STOP BIT(0)
#define RESET_CPR 0xa4
@@ -117,6 +121,32 @@ struct pcf85x63_config {
unsigned int num_nvram;
};
+static int pcf85363_load_capacitance(struct pcf85363 *pcf85363, struct device_node *node)
+{
+ u32 load = 7000;
+ u8 value = 0;
+
+ of_property_read_u32(node, "quartz-load-femtofarads", &load);
+
+ switch (load) {
+ default:
+ dev_warn(&pcf85363->rtc->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 7000",
+ load);
+ fallthrough;
+ case 7000:
+ break;
+ case 6000:
+ value = OSC_CAP_6000;
+ break;
+ case 12500:
+ value = OSC_CAP_12500;
+ break;
+ }
+
+ return regmap_update_bits(pcf85363->regmap, CTRL_OSCILLATOR,
+ OSC_CAP_SEL, value);
+}
+
static int pcf85363_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct pcf85363 *pcf85363 = dev_get_drvdata(dev);
@@ -372,7 +402,7 @@ static int pcf85363_probe(struct i2c_client *client)
.reg_write = pcf85363_nvram_write,
},
};
- int ret, i;
+ int ret, i, err;
if (data)
config = data;
@@ -394,18 +424,28 @@ static int pcf85363_probe(struct i2c_client *client)
if (IS_ERR(pcf85363->rtc))
return PTR_ERR(pcf85363->rtc);
+ err = pcf85363_load_capacitance(pcf85363, client->dev.of_node);
+ if (err < 0)
+ dev_warn(&client->dev, "failed to set xtal load capacitance: %d",
+ err);
+
pcf85363->rtc->ops = &rtc_ops;
pcf85363->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pcf85363->rtc->range_max = RTC_TIMESTAMP_END_2099;
clear_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
if (client->irq > 0) {
+ unsigned long irqflags = IRQF_TRIGGER_LOW;
+
+ if (dev_fwnode(&client->dev))
+ irqflags = 0;
+
regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
PIN_IO_INTA_OUT, PIN_IO_INTAPM);
ret = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pcf85363_rtc_handle_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ irqflags | IRQF_ONESHOT,
"pcf85363", client);
if (ret)
dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 0a7fd9478465..7e720472213c 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -558,9 +558,14 @@ static int pcf8563_probe(struct i2c_client *client)
pcf8563->rtc->set_start_time = true;
if (client->irq > 0) {
+ unsigned long irqflags = IRQF_TRIGGER_LOW;
+
+ if (dev_fwnode(&client->dev))
+ irqflags = 0;
+
err = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pcf8563_irq,
- IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ IRQF_SHARED | IRQF_ONESHOT | irqflags,
pcf8563_driver.driver.name, client);
if (err) {
dev_err(&client->dev, "unable to request IRQ %d\n",
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 716e5d9ad74d..372494e82f40 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -1,8 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/*
+ * pm8xxx RTC driver
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
*/
#include <linux/of.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
@@ -12,11 +17,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-/* RTC Register offsets from RTC CTRL REG */
-#define PM8XXX_ALARM_CTRL_OFFSET 0x01
-#define PM8XXX_RTC_WRITE_OFFSET 0x02
-#define PM8XXX_RTC_READ_OFFSET 0x06
-#define PM8XXX_ALARM_RW_OFFSET 0x0A
+#include <asm/unaligned.h>
/* RTC_CTRL register bit fields */
#define PM8xxx_RTC_ENABLE BIT(7)
@@ -27,13 +28,13 @@
/**
* struct pm8xxx_rtc_regs - describe RTC registers per PMIC versions
- * @ctrl: base address of control register
- * @write: base address of write register
- * @read: base address of read register
- * @alarm_ctrl: base address of alarm control register
- * @alarm_ctrl2: base address of alarm control2 register
- * @alarm_rw: base address of alarm read-write register
- * @alarm_en: alarm enable mask
+ * @ctrl: address of control register
+ * @write: base address of write registers
+ * @read: base address of read registers
+ * @alarm_ctrl: address of alarm control register
+ * @alarm_ctrl2: address of alarm control2 register
+ * @alarm_rw: base address of alarm read-write registers
+ * @alarm_en: alarm enable mask
*/
struct pm8xxx_rtc_regs {
unsigned int ctrl;
@@ -46,25 +47,135 @@ struct pm8xxx_rtc_regs {
};
/**
- * struct pm8xxx_rtc - rtc driver internal structure
- * @rtc: rtc device for this driver.
- * @regmap: regmap used to access RTC registers
- * @allow_set_time: indicates whether writing to the RTC is allowed
- * @rtc_alarm_irq: rtc alarm irq number.
- * @regs: rtc registers description.
- * @rtc_dev: device structure.
- * @ctrl_reg_lock: spinlock protecting access to ctrl_reg.
+ * struct pm8xxx_rtc - RTC driver internal structure
+ * @rtc: RTC device
+ * @regmap: regmap used to access registers
+ * @allow_set_time: whether the time can be set
+ * @alarm_irq: alarm irq number
+ * @regs: register description
+ * @dev: device structure
+ * @nvmem_cell: nvmem cell for offset
+ * @offset: offset from epoch in seconds
*/
struct pm8xxx_rtc {
struct rtc_device *rtc;
struct regmap *regmap;
bool allow_set_time;
- int rtc_alarm_irq;
+ int alarm_irq;
const struct pm8xxx_rtc_regs *regs;
- struct device *rtc_dev;
- spinlock_t ctrl_reg_lock;
+ struct device *dev;
+ struct nvmem_cell *nvmem_cell;
+ u32 offset;
};
+static int pm8xxx_rtc_read_nvmem_offset(struct pm8xxx_rtc *rtc_dd)
+{
+ size_t len;
+ void *buf;
+ int rc;
+
+ buf = nvmem_cell_read(rtc_dd->nvmem_cell, &len);
+ if (IS_ERR(buf)) {
+ rc = PTR_ERR(buf);
+ dev_dbg(rtc_dd->dev, "failed to read nvmem offset: %d\n", rc);
+ return rc;
+ }
+
+ if (len != sizeof(u32)) {
+ dev_dbg(rtc_dd->dev, "unexpected nvmem cell size %zu\n", len);
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ rtc_dd->offset = get_unaligned_le32(buf);
+
+ kfree(buf);
+
+ return 0;
+}
+
+static int pm8xxx_rtc_write_nvmem_offset(struct pm8xxx_rtc *rtc_dd, u32 offset)
+{
+ u8 buf[sizeof(u32)];
+ int rc;
+
+ put_unaligned_le32(offset, buf);
+
+ rc = nvmem_cell_write(rtc_dd->nvmem_cell, buf, sizeof(buf));
+ if (rc < 0) {
+ dev_dbg(rtc_dd->dev, "failed to write nvmem offset: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int pm8xxx_rtc_read_offset(struct pm8xxx_rtc *rtc_dd)
+{
+ if (!rtc_dd->nvmem_cell)
+ return 0;
+
+ return pm8xxx_rtc_read_nvmem_offset(rtc_dd);
+}
+
+static int pm8xxx_rtc_read_raw(struct pm8xxx_rtc *rtc_dd, u32 *secs)
+{
+ const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
+ u8 value[NUM_8_BIT_RTC_REGS];
+ unsigned int reg;
+ int rc;
+
+ rc = regmap_bulk_read(rtc_dd->regmap, regs->read, value, sizeof(value));
+ if (rc)
+ return rc;
+
+ /*
+ * Read the LSB again and check if there has been a carry over.
+ * If there has, redo the read operation.
+ */
+ rc = regmap_read(rtc_dd->regmap, regs->read, &reg);
+ if (rc < 0)
+ return rc;
+
+ if (reg < value[0]) {
+ rc = regmap_bulk_read(rtc_dd->regmap, regs->read, value,
+ sizeof(value));
+ if (rc)
+ return rc;
+ }
+
+ *secs = get_unaligned_le32(value);
+
+ return 0;
+}
+
+static int pm8xxx_rtc_update_offset(struct pm8xxx_rtc *rtc_dd, u32 secs)
+{
+ u32 raw_secs;
+ u32 offset;
+ int rc;
+
+ if (!rtc_dd->nvmem_cell)
+ return -ENODEV;
+
+ rc = pm8xxx_rtc_read_raw(rtc_dd, &raw_secs);
+ if (rc)
+ return rc;
+
+ offset = secs - raw_secs;
+
+ if (offset == rtc_dd->offset)
+ return 0;
+
+ rc = pm8xxx_rtc_write_nvmem_offset(rtc_dd, offset);
+ if (rc)
+ return rc;
+
+ rtc_dd->offset = offset;
+
+ return 0;
+}
+
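
The helpers above are what let pm8xxx_rtc_set_time() succeed even on PMICs where the RTC counter itself is read-only (allow_set_time false): instead of writing the hardware, the driver records the difference between the requested time and the raw counter in an nvmem cell and adds it back when reading. A minimal sketch of that bookkeeping with invented values, independent of the register access details:

	/* Sketch of the offset scheme; raw_secs stands for the hardware counter. */
	uint32_t raw_secs = 1000;	/* pretend counter value                      */
	uint32_t secs = 1700000000;	/* wall-clock seconds requested by userspace  */
	uint32_t offset;		/* value persisted in the nvmem cell          */

	offset = secs - raw_secs;		/* set_time(): stored via nvmem_cell_write() */

	uint32_t wall_secs = raw_secs + offset;	/* read_time(): raw counter + offset == secs */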
/*
* Steps to write the RTC registers.
* 1. Disable alarm if enabled.
@@ -74,269 +185,186 @@ struct pm8xxx_rtc {
* 5. Enable rtc if disabled in step 2.
* 6. Enable alarm if disabled in step 1.
*/
-static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
+static int __pm8xxx_rtc_set_time(struct pm8xxx_rtc *rtc_dd, u32 secs)
{
- int rc, i;
- unsigned long secs, irq_flags;
- u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0, rtc_disabled = 0;
- unsigned int ctrl_reg, rtc_ctrl_reg;
- struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
+ u8 value[NUM_8_BIT_RTC_REGS];
+ bool alarm_enabled;
+ int rc;
- if (!rtc_dd->allow_set_time)
- return -ENODEV;
-
- secs = rtc_tm_to_time64(tm);
-
- dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
-
- for (i = 0; i < NUM_8_BIT_RTC_REGS; i++) {
- value[i] = secs & 0xFF;
- secs >>= 8;
- }
-
- spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+ put_unaligned_le32(secs, value);
- rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
+ rc = regmap_update_bits_check(rtc_dd->regmap, regs->alarm_ctrl,
+ regs->alarm_en, 0, &alarm_enabled);
if (rc)
- goto rtc_rw_fail;
-
- if (ctrl_reg & regs->alarm_en) {
- alarm_enabled = 1;
- ctrl_reg &= ~regs->alarm_en;
- rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
- if (rc) {
- dev_err(dev, "Write to RTC Alarm control register failed\n");
- goto rtc_rw_fail;
- }
- }
+ return rc;
- /* Disable RTC H/w before writing on RTC register */
- rc = regmap_read(rtc_dd->regmap, regs->ctrl, &rtc_ctrl_reg);
+ /* Disable RTC */
+ rc = regmap_update_bits(rtc_dd->regmap, regs->ctrl, PM8xxx_RTC_ENABLE, 0);
if (rc)
- goto rtc_rw_fail;
-
- if (rtc_ctrl_reg & PM8xxx_RTC_ENABLE) {
- rtc_disabled = 1;
- rtc_ctrl_reg &= ~PM8xxx_RTC_ENABLE;
- rc = regmap_write(rtc_dd->regmap, regs->ctrl, rtc_ctrl_reg);
- if (rc) {
- dev_err(dev, "Write to RTC control register failed\n");
- goto rtc_rw_fail;
- }
- }
+ return rc;
/* Write 0 to Byte[0] */
rc = regmap_write(rtc_dd->regmap, regs->write, 0);
- if (rc) {
- dev_err(dev, "Write to RTC write data register failed\n");
- goto rtc_rw_fail;
- }
+ if (rc)
+ return rc;
/* Write Byte[1], Byte[2], Byte[3] */
rc = regmap_bulk_write(rtc_dd->regmap, regs->write + 1,
&value[1], sizeof(value) - 1);
- if (rc) {
- dev_err(dev, "Write to RTC write data register failed\n");
- goto rtc_rw_fail;
- }
+ if (rc)
+ return rc;
/* Write Byte[0] */
rc = regmap_write(rtc_dd->regmap, regs->write, value[0]);
- if (rc) {
- dev_err(dev, "Write to RTC write data register failed\n");
- goto rtc_rw_fail;
- }
+ if (rc)
+ return rc;
- /* Enable RTC H/w after writing on RTC register */
- if (rtc_disabled) {
- rtc_ctrl_reg |= PM8xxx_RTC_ENABLE;
- rc = regmap_write(rtc_dd->regmap, regs->ctrl, rtc_ctrl_reg);
- if (rc) {
- dev_err(dev, "Write to RTC control register failed\n");
- goto rtc_rw_fail;
- }
- }
+ /* Enable RTC */
+ rc = regmap_update_bits(rtc_dd->regmap, regs->ctrl, PM8xxx_RTC_ENABLE,
+ PM8xxx_RTC_ENABLE);
+ if (rc)
+ return rc;
if (alarm_enabled) {
- ctrl_reg |= regs->alarm_en;
- rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
- if (rc) {
- dev_err(dev, "Write to RTC Alarm control register failed\n");
- goto rtc_rw_fail;
- }
+ rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl,
+ regs->alarm_en, regs->alarm_en);
+ if (rc)
+ return rc;
}
-rtc_rw_fail:
- spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
-
- return rc;
+ return 0;
}
-static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
+static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- int rc;
- u8 value[NUM_8_BIT_RTC_REGS];
- unsigned long secs;
- unsigned int reg;
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
- const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
+ u32 secs;
+ int rc;
- rc = regmap_bulk_read(rtc_dd->regmap, regs->read, value, sizeof(value));
- if (rc) {
- dev_err(dev, "RTC read data register failed\n");
- return rc;
- }
+ secs = rtc_tm_to_time64(tm);
- /*
- * Read the LSB again and check if there has been a carry over.
- * If there is, redo the read operation.
- */
- rc = regmap_read(rtc_dd->regmap, regs->read, &reg);
- if (rc < 0) {
- dev_err(dev, "RTC read data register failed\n");
+ if (rtc_dd->allow_set_time)
+ rc = __pm8xxx_rtc_set_time(rtc_dd, secs);
+ else
+ rc = pm8xxx_rtc_update_offset(rtc_dd, secs);
+
+ if (rc)
return rc;
- }
- if (unlikely(reg < value[0])) {
- rc = regmap_bulk_read(rtc_dd->regmap, regs->read,
- value, sizeof(value));
- if (rc) {
- dev_err(dev, "RTC read data register failed\n");
- return rc;
- }
- }
+ dev_dbg(dev, "set time: %ptRd %ptRt (%u + %u)\n", tm, tm,
+ secs - rtc_dd->offset, rtc_dd->offset);
+ return 0;
+}
- secs = value[0] | (value[1] << 8) | (value[2] << 16) |
- ((unsigned long)value[3] << 24);
+static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+ u32 secs;
+ int rc;
- rtc_time64_to_tm(secs, tm);
+ rc = pm8xxx_rtc_read_raw(rtc_dd, &secs);
+ if (rc)
+ return rc;
- dev_dbg(dev, "secs = %lu, h:m:s == %ptRt, y-m-d = %ptRdr\n", secs, tm, tm);
+ secs += rtc_dd->offset;
+ rtc_time64_to_tm(secs, tm);
+ dev_dbg(dev, "read time: %ptRd %ptRt (%u + %u)\n", tm, tm,
+ secs - rtc_dd->offset, rtc_dd->offset);
return 0;
}
static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
- int rc, i;
- u8 value[NUM_8_BIT_RTC_REGS];
- unsigned int ctrl_reg;
- unsigned long secs, irq_flags;
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
+ u8 value[NUM_8_BIT_RTC_REGS];
+ u32 secs;
+ int rc;
secs = rtc_tm_to_time64(&alarm->time);
+ secs -= rtc_dd->offset;
+ put_unaligned_le32(secs, value);
- for (i = 0; i < NUM_8_BIT_RTC_REGS; i++) {
- value[i] = secs & 0xFF;
- secs >>= 8;
- }
-
- spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+ rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl,
+ regs->alarm_en, 0);
+ if (rc)
+ return rc;
rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value,
sizeof(value));
- if (rc) {
- dev_err(dev, "Write to RTC ALARM register failed\n");
- goto rtc_rw_fail;
- }
-
- rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
if (rc)
- goto rtc_rw_fail;
-
- if (alarm->enabled)
- ctrl_reg |= regs->alarm_en;
- else
- ctrl_reg &= ~regs->alarm_en;
+ return rc;
- rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
- if (rc) {
- dev_err(dev, "Write to RTC alarm control register failed\n");
- goto rtc_rw_fail;
+ if (alarm->enabled) {
+ rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl,
+ regs->alarm_en, regs->alarm_en);
+ if (rc)
+ return rc;
}
- dev_dbg(dev, "Alarm Set for h:m:s=%ptRt, y-m-d=%ptRdr\n",
- &alarm->time, &alarm->time);
-rtc_rw_fail:
- spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
- return rc;
+ dev_dbg(dev, "set alarm: %ptRd %ptRt\n", &alarm->time, &alarm->time);
+
+ return 0;
}
static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
- int rc;
- unsigned int ctrl_reg;
- u8 value[NUM_8_BIT_RTC_REGS];
- unsigned long secs;
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
+ u8 value[NUM_8_BIT_RTC_REGS];
+ unsigned int ctrl_reg;
+ u32 secs;
+ int rc;
rc = regmap_bulk_read(rtc_dd->regmap, regs->alarm_rw, value,
sizeof(value));
- if (rc) {
- dev_err(dev, "RTC alarm time read failed\n");
+ if (rc)
return rc;
- }
-
- secs = value[0] | (value[1] << 8) | (value[2] << 16) |
- ((unsigned long)value[3] << 24);
+ secs = get_unaligned_le32(value);
+ secs += rtc_dd->offset;
rtc_time64_to_tm(secs, &alarm->time);
rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
- if (rc) {
- dev_err(dev, "Read from RTC alarm control register failed\n");
+ if (rc)
return rc;
- }
+
alarm->enabled = !!(ctrl_reg & PM8xxx_RTC_ALARM_ENABLE);
- dev_dbg(dev, "Alarm set for - h:m:s=%ptRt, y-m-d=%ptRdr\n",
- &alarm->time, &alarm->time);
+ dev_dbg(dev, "read alarm: %ptRd %ptRt\n", &alarm->time, &alarm->time);
return 0;
}
static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
{
- int rc;
- unsigned long irq_flags;
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
- unsigned int ctrl_reg;
u8 value[NUM_8_BIT_RTC_REGS] = {0};
-
- spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
-
- rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
- if (rc)
- goto rtc_rw_fail;
+ unsigned int val;
+ int rc;
if (enable)
- ctrl_reg |= regs->alarm_en;
+ val = regs->alarm_en;
else
- ctrl_reg &= ~regs->alarm_en;
+ val = 0;
- rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
- if (rc) {
- dev_err(dev, "Write to RTC control register failed\n");
- goto rtc_rw_fail;
- }
+ rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl,
+ regs->alarm_en, val);
+ if (rc)
+ return rc;
- /* Clear Alarm register */
+ /* Clear alarm register */
if (!enable) {
rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value,
sizeof(value));
- if (rc) {
- dev_err(dev, "Clear RTC ALARM register failed\n");
- goto rtc_rw_fail;
- }
+ if (rc)
+ return rc;
}
-rtc_rw_fail:
- spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
- return rc;
+ return 0;
}
static const struct rtc_class_ops pm8xxx_rtc_ops = {
@@ -351,69 +379,31 @@ static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
{
struct pm8xxx_rtc *rtc_dd = dev_id;
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
- unsigned int ctrl_reg;
int rc;
rtc_update_irq(rtc_dd->rtc, 1, RTC_IRQF | RTC_AF);
- spin_lock(&rtc_dd->ctrl_reg_lock);
-
- /* Clear the alarm enable bit */
- rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
- if (rc) {
- spin_unlock(&rtc_dd->ctrl_reg_lock);
- goto rtc_alarm_handled;
- }
-
- ctrl_reg &= ~regs->alarm_en;
-
- rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
- if (rc) {
- spin_unlock(&rtc_dd->ctrl_reg_lock);
- dev_err(rtc_dd->rtc_dev,
- "Write to alarm control register failed\n");
- goto rtc_alarm_handled;
- }
-
- spin_unlock(&rtc_dd->ctrl_reg_lock);
-
- /* Clear RTC alarm register */
- rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl2, &ctrl_reg);
- if (rc) {
- dev_err(rtc_dd->rtc_dev,
- "RTC Alarm control2 register read failed\n");
- goto rtc_alarm_handled;
- }
+ /* Disable alarm */
+ rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl,
+ regs->alarm_en, 0);
+ if (rc)
+ return IRQ_NONE;
- ctrl_reg |= PM8xxx_RTC_ALARM_CLEAR;
- rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl2, ctrl_reg);
+ /* Clear alarm status */
+ rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl2,
+ PM8xxx_RTC_ALARM_CLEAR, 0);
if (rc)
- dev_err(rtc_dd->rtc_dev,
- "Write to RTC Alarm control2 register failed\n");
+ return IRQ_NONE;
-rtc_alarm_handled:
return IRQ_HANDLED;
}
static int pm8xxx_rtc_enable(struct pm8xxx_rtc *rtc_dd)
{
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
- unsigned int ctrl_reg;
- int rc;
-
- /* Check if the RTC is on, else turn it on */
- rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg);
- if (rc)
- return rc;
- if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
- ctrl_reg |= PM8xxx_RTC_ENABLE;
- rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
- if (rc)
- return rc;
- }
-
- return 0;
+ return regmap_update_bits(rtc_dd->regmap, regs->ctrl, PM8xxx_RTC_ENABLE,
+ PM8xxx_RTC_ENABLE);
}
static const struct pm8xxx_rtc_regs pm8921_regs = {
@@ -456,9 +446,6 @@ static const struct pm8xxx_rtc_regs pmk8350_regs = {
.alarm_en = BIT(7),
};
-/*
- * Hardcoded RTC bases until IORESOURCE_REG mapping is figured out
- */
static const struct of_device_id pm8xxx_id_table[] = {
{ .compatible = "qcom,pm8921-rtc", .data = &pm8921_regs },
{ .compatible = "qcom,pm8058-rtc", .data = &pm8058_regs },
@@ -470,9 +457,9 @@ MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
static int pm8xxx_rtc_probe(struct platform_device *pdev)
{
- int rc;
- struct pm8xxx_rtc *rtc_dd;
const struct of_device_id *match;
+ struct pm8xxx_rtc *rtc_dd;
+ int rc;
match = of_match_node(pm8xxx_id_table, pdev->dev.of_node);
if (!match)
@@ -482,24 +469,33 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
if (rtc_dd == NULL)
return -ENOMEM;
- /* Initialise spinlock to protect RTC control register */
- spin_lock_init(&rtc_dd->ctrl_reg_lock);
-
rtc_dd->regmap = dev_get_regmap(pdev->dev.parent, NULL);
- if (!rtc_dd->regmap) {
- dev_err(&pdev->dev, "Parent regmap unavailable.\n");
+ if (!rtc_dd->regmap)
return -ENXIO;
- }
- rtc_dd->rtc_alarm_irq = platform_get_irq(pdev, 0);
- if (rtc_dd->rtc_alarm_irq < 0)
+ rtc_dd->alarm_irq = platform_get_irq(pdev, 0);
+ if (rtc_dd->alarm_irq < 0)
return -ENXIO;
rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node,
"allow-set-time");
+ rtc_dd->nvmem_cell = devm_nvmem_cell_get(&pdev->dev, "offset");
+ if (IS_ERR(rtc_dd->nvmem_cell)) {
+ rc = PTR_ERR(rtc_dd->nvmem_cell);
+ if (rc != -ENOENT)
+ return rc;
+ rtc_dd->nvmem_cell = NULL;
+ }
+
rtc_dd->regs = match->data;
- rtc_dd->rtc_dev = &pdev->dev;
+ rtc_dd->dev = &pdev->dev;
+
+ if (!rtc_dd->allow_set_time) {
+ rc = pm8xxx_rtc_read_offset(rtc_dd);
+ if (rc)
+ return rc;
+ }
rc = pm8xxx_rtc_enable(rtc_dd);
if (rc)
@@ -509,7 +505,6 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
- /* Register the RTC device */
rtc_dd->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc_dd->rtc))
return PTR_ERR(rtc_dd->rtc);
@@ -517,21 +512,18 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
rtc_dd->rtc->ops = &pm8xxx_rtc_ops;
rtc_dd->rtc->range_max = U32_MAX;
- /* Request the alarm IRQ */
- rc = devm_request_any_context_irq(&pdev->dev, rtc_dd->rtc_alarm_irq,
+ rc = devm_request_any_context_irq(&pdev->dev, rtc_dd->alarm_irq,
pm8xxx_alarm_trigger,
IRQF_TRIGGER_RISING,
"pm8xxx_rtc_alarm", rtc_dd);
- if (rc < 0) {
- dev_err(&pdev->dev, "Request IRQ failed (%d)\n", rc);
+ if (rc < 0)
return rc;
- }
rc = devm_rtc_register_device(rtc_dd->rtc);
if (rc)
return rc;
- rc = dev_pm_set_wake_irq(&pdev->dev, rtc_dd->rtc_alarm_irq);
+ rc = dev_pm_set_wake_irq(&pdev->dev, rtc_dd->alarm_irq);
if (rc)
return rc;
@@ -559,3 +551,4 @@ MODULE_ALIAS("platform:rtc-pm8xxx");
MODULE_DESCRIPTION("PMIC8xxx RTC driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Anirudh Ghayal <aghayal@codeaurora.org>");
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index b0099e26e3b0..ec5d7a614e2d 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -982,6 +982,12 @@ static int rv3028_probe(struct i2c_client *client)
return 0;
}
+static const struct acpi_device_id rv3028_i2c_acpi_match[] = {
+ { "MCRY3028" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, rv3028_i2c_acpi_match);
+
static const __maybe_unused struct of_device_id rv3028_of_match[] = {
{ .compatible = "microcrystal,rv3028", },
{ }
@@ -991,6 +997,7 @@ MODULE_DEVICE_TABLE(of, rv3028_of_match);
static struct i2c_driver rv3028_driver = {
.driver = {
.name = "rtc-rv3028",
+ .acpi_match_table = rv3028_i2c_acpi_match,
.of_match_table = of_match_ptr(rv3028_of_match),
},
.probe_new = rv3028_probe,
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index e4fdd47ae066..0852f6709a85 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -735,9 +735,14 @@ static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
return PTR_ERR(rv3029->rtc);
if (rv3029->irq > 0) {
+ unsigned long irqflags = IRQF_TRIGGER_LOW;
+
+ if (dev_fwnode(dev))
+ irqflags = 0;
+
rc = devm_request_threaded_irq(dev, rv3029->irq,
NULL, rv3029_handle_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ irqflags | IRQF_ONESHOT,
"rv3029", dev);
if (rc) {
dev_warn(dev, "unable to request IRQ, alarms disabled\n");
diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
index c3bee305eacc..1ff4f2e6fa77 100644
--- a/drivers/rtc/rtc-rv3032.c
+++ b/drivers/rtc/rtc-rv3032.c
@@ -930,9 +930,14 @@ static int rv3032_probe(struct i2c_client *client)
return PTR_ERR(rv3032->rtc);
if (client->irq > 0) {
+ unsigned long irqflags = IRQF_TRIGGER_LOW;
+
+ if (dev_fwnode(&client->dev))
+ irqflags = 0;
+
ret = devm_request_threaded_irq(&client->dev, client->irq,
NULL, rv3032_handle_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ irqflags | IRQF_ONESHOT,
"rv3032", rv3032);
if (ret) {
dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
@@ -975,6 +980,12 @@ static int rv3032_probe(struct i2c_client *client)
return 0;
}
+static const struct acpi_device_id rv3032_i2c_acpi_match[] = {
+ { "MCRY3032" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, rv3032_i2c_acpi_match);
+
static const __maybe_unused struct of_device_id rv3032_of_match[] = {
{ .compatible = "microcrystal,rv3032", },
{ }
@@ -984,6 +995,7 @@ MODULE_DEVICE_TABLE(of, rv3032_of_match);
static struct i2c_driver rv3032_driver = {
.driver = {
.name = "rtc-rv3032",
+ .acpi_match_table = rv3032_i2c_acpi_match,
.of_match_table = of_match_ptr(rv3032_of_match),
},
.probe_new = rv3032_probe,
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index b581b6d5ad73..25c3b9e4f515 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -70,6 +70,7 @@ struct rv8803_data {
struct mutex flags_lock;
u8 ctrl;
u8 backup;
+ u8 alarm_invalid:1;
enum rv8803_type type;
};
@@ -165,13 +166,13 @@ static int rv8803_regs_init(struct rv8803_data *rv8803)
static int rv8803_regs_configure(struct rv8803_data *rv8803);
-static int rv8803_regs_reset(struct rv8803_data *rv8803)
+static int rv8803_regs_reset(struct rv8803_data *rv8803, bool full)
{
/*
* The RV-8803 resets all registers to POR defaults after voltage-loss,
* the Epson RTCs don't, so we manually reset the remainder here.
*/
- if (rv8803->type == rx_8803 || rv8803->type == rx_8900) {
+ if (full || rv8803->type == rx_8803 || rv8803->type == rx_8900) {
int ret = rv8803_regs_init(rv8803);
if (ret)
return ret;
@@ -238,6 +239,11 @@ static int rv8803_get_time(struct device *dev, struct rtc_time *tm)
u8 *date = date1;
int ret, flags;
+ if (rv8803->alarm_invalid) {
+ dev_warn(dev, "Corruption detected, data may be invalid.\n");
+ return -EINVAL;
+ }
+
flags = rv8803_read_reg(rv8803->client, RV8803_FLAG);
if (flags < 0)
return flags;
@@ -313,12 +319,19 @@ static int rv8803_set_time(struct device *dev, struct rtc_time *tm)
return flags;
}
- if (flags & RV8803_FLAG_V2F) {
- ret = rv8803_regs_reset(rv8803);
+ if ((flags & RV8803_FLAG_V2F) || rv8803->alarm_invalid) {
+ /*
+ * If we sense corruption in the alarm registers, but see no
+ * voltage loss flag, we can't rely on other registers having
+ * sensible values. Reset them fully.
+ */
+ ret = rv8803_regs_reset(rv8803, rv8803->alarm_invalid);
if (ret) {
mutex_unlock(&rv8803->flags_lock);
return ret;
}
+
+ rv8803->alarm_invalid = false;
}
ret = rv8803_write_reg(rv8803->client, RV8803_FLAG,
@@ -344,15 +357,33 @@ static int rv8803_get_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (flags < 0)
return flags;
+ alarmvals[0] &= 0x7f;
+ alarmvals[1] &= 0x3f;
+ alarmvals[2] &= 0x3f;
+
+ if (!bcd_is_valid(alarmvals[0]) ||
+ !bcd_is_valid(alarmvals[1]) ||
+ !bcd_is_valid(alarmvals[2]))
+ goto err_invalid;
+
alrm->time.tm_sec = 0;
- alrm->time.tm_min = bcd2bin(alarmvals[0] & 0x7f);
- alrm->time.tm_hour = bcd2bin(alarmvals[1] & 0x3f);
- alrm->time.tm_mday = bcd2bin(alarmvals[2] & 0x3f);
+ alrm->time.tm_min = bcd2bin(alarmvals[0]);
+ alrm->time.tm_hour = bcd2bin(alarmvals[1]);
+ alrm->time.tm_mday = bcd2bin(alarmvals[2]);
alrm->enabled = !!(rv8803->ctrl & RV8803_CTRL_AIE);
alrm->pending = (flags & RV8803_FLAG_AF) && alrm->enabled;
+ if ((unsigned int)alrm->time.tm_mday > 31 ||
+ (unsigned int)alrm->time.tm_hour >= 24 ||
+ (unsigned int)alrm->time.tm_min >= 60)
+ goto err_invalid;
+
return 0;
+
+err_invalid:
+ rv8803->alarm_invalid = true;
+ return -EINVAL;
}
static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -641,9 +672,14 @@ static int rv8803_probe(struct i2c_client *client)
return PTR_ERR(rv8803->rtc);
if (client->irq > 0) {
+ unsigned long irqflags = IRQF_TRIGGER_LOW;
+
+ if (dev_fwnode(&client->dev))
+ irqflags = 0;
+
err = devm_request_threaded_irq(&client->dev, client->irq,
NULL, rv8803_handle_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ irqflags | IRQF_ONESHOT,
"rv8803", client);
if (err) {
dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
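Editorial aside on the rv8803 hunks above (not part of the patch): the alarm registers are BCD-encoded, so the driver now rejects values that fail bcd_is_valid() before converting them with bcd2bin(), and latches an alarm_invalid flag until the registers are rewritten. A standalone sketch of that validation step, with local helpers that only mirror the semantics of the linux/bcd.h ones (an assumption; the register values below are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins mirroring bcd_is_valid()/bcd2bin() semantics. */
static bool bcd_valid(uint8_t v)
{
	return (v & 0x0f) <= 9 && (v >> 4) <= 9;
}

static unsigned int bcd_to_bin(uint8_t v)
{
	return (v & 0x0f) + (v >> 4) * 10;
}

int main(void)
{
	/* minutes, hours, mday as read back from the alarm registers (masked) */
	uint8_t alarmvals[3] = { 0x59, 0x23, 0x31 };

	for (int i = 0; i < 3; i++) {
		if (!bcd_valid(alarmvals[i])) {
			puts("alarm registers corrupt, would set alarm_invalid");
			return 1;
		}
	}
	printf("alarm %u:%u on day %u\n", bcd_to_bin(alarmvals[1]),
	       bcd_to_bin(alarmvals[0]), bcd_to_bin(alarmvals[2]));
	return 0;
}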
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 76a49838014b..37608883a796 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -10,7 +10,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/of.h>
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index d09056570739..b9c8dad26208 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -394,10 +394,14 @@ static int rx8010_probe(struct i2c_client *client)
return PTR_ERR(rx8010->rtc);
if (client->irq > 0) {
- dev_info(dev, "IRQ %d supplied\n", client->irq);
+ unsigned long irqflags = IRQF_TRIGGER_LOW;
+
+ if (dev_fwnode(&client->dev))
+ irqflags = 0;
+
err = devm_request_threaded_irq(dev, client->irq, NULL,
rx8010_irq_1_handler,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ irqflags | IRQF_ONESHOT,
"rx8010", client);
if (err) {
dev_err(dev, "unable to request IRQ\n");
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index ed5516089e9a..7038f47d77ff 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -136,7 +136,6 @@ struct sun6i_rtc_clk_data {
unsigned int fixed_prescaler : 16;
unsigned int has_prescaler : 1;
unsigned int has_out_clk : 1;
- unsigned int export_iosc : 1;
unsigned int has_losc_en : 1;
unsigned int has_auto_swt : 1;
};
@@ -271,10 +270,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
/* Yes, I know, this is ugly. */
sun6i_rtc = rtc;
- /* Only read IOSC name from device tree if it is exported */
- if (rtc->data->export_iosc)
- of_property_read_string_index(node, "clock-output-names", 2,
- &iosc_name);
+ of_property_read_string_index(node, "clock-output-names", 2,
+ &iosc_name);
rtc->int_osc = clk_hw_register_fixed_rate_with_accuracy(NULL,
iosc_name,
@@ -315,13 +312,10 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
goto err_register;
}
- clk_data->num = 2;
+ clk_data->num = 3;
clk_data->hws[0] = &rtc->hw;
clk_data->hws[1] = __clk_get_hw(rtc->ext_losc);
- if (rtc->data->export_iosc) {
- clk_data->hws[2] = rtc->int_osc;
- clk_data->num = 3;
- }
+ clk_data->hws[2] = rtc->int_osc;
of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
return;
@@ -361,7 +355,6 @@ static const struct sun6i_rtc_clk_data sun8i_h3_rtc_data = {
.fixed_prescaler = 32,
.has_prescaler = 1,
.has_out_clk = 1,
- .export_iosc = 1,
};
static void __init sun8i_h3_rtc_clk_init(struct device_node *node)
@@ -379,7 +372,6 @@ static const struct sun6i_rtc_clk_data sun50i_h6_rtc_data = {
.fixed_prescaler = 32,
.has_prescaler = 1,
.has_out_clk = 1,
- .export_iosc = 1,
.has_losc_en = 1,
.has_auto_swt = 1,
};
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index a32457b4cbb8..2637fe1df727 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -29,8 +29,8 @@ static void __ap_flush_queue(struct ap_queue *aq);
*/
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
+ union ap_qirq_ctrl qirqctrl = { .value = 0 };
struct ap_queue_status status;
- struct ap_qirq_ctrl qirqctrl = { 0 };
qirqctrl.ir = 1;
qirqctrl.isc = AP_ISC;
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 28a36e016ea9..72e10abb103a 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -301,7 +301,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
*/
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
- struct ap_qirq_ctrl aqic_gisa = {};
+ union ap_qirq_ctrl aqic_gisa = { .value = 0 };
struct ap_queue_status status;
int retries = 5;
@@ -384,7 +384,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
int isc,
struct kvm_vcpu *vcpu)
{
- struct ap_qirq_ctrl aqic_gisa = {};
+ union ap_qirq_ctrl aqic_gisa = { .value = 0 };
struct ap_queue_status status = {};
struct kvm_s390_gisa *gisa;
struct page *h_page;
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index cbc3b62cd9e5..d904625afd40 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -4,7 +4,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2020
+ * Copyright IBM Corp. 2002, 2023
*/
#define KMSG_COMPONENT "zfcp"
@@ -146,6 +146,48 @@ void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req, u64 wwpn,
}
/**
+ * zfcp_dbf_hba_fsf_reqid - trace only the tag and a request ID
+ * @tag: tag documenting the source
+ * @level: trace level
+ * @adapter: adapter instance the request ID belongs to
+ * @req_id: the request ID to trace
+ */
+void zfcp_dbf_hba_fsf_reqid(const char *const tag, const int level,
+ struct zfcp_adapter *const adapter,
+ const u64 req_id)
+{
+ struct zfcp_dbf *const dbf = adapter->dbf;
+ struct zfcp_dbf_hba *const rec = &dbf->hba_buf;
+ struct zfcp_dbf_hba_res *const res = &rec->u.res;
+ unsigned long flags;
+
+ if (unlikely(!debug_level_enabled(dbf->hba, level)))
+ return;
+
+ spin_lock_irqsave(&dbf->hba_lock, flags);
+ memset(rec, 0, sizeof(*rec));
+
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+
+ rec->id = ZFCP_DBF_HBA_RES;
+ rec->fsf_req_id = req_id;
+ rec->fsf_req_status = ~0u;
+ rec->fsf_cmd = ~0u;
+ rec->fsf_seq_no = ~0u;
+
+ res->req_issued = ~0ull;
+ res->prot_status = ~0u;
+ memset(res->prot_status_qual, 0xff, sizeof(res->prot_status_qual));
+ res->fsf_status = ~0u;
+ memset(res->fsf_status_qual, 0xff, sizeof(res->fsf_status_qual));
+ res->port_handle = ~0u;
+ res->lun_handle = ~0u;
+
+ debug_event(dbf->hba, level, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
+/**
* zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
* @tag: tag indicating which kind of unsolicited status has been received
* @req: request providing the unsolicited status
@@ -649,7 +691,7 @@ void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
rec->scsi_id = sc->device->id;
rec->scsi_lun = (u32)sc->device->lun;
rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
- rec->host_scribble = (unsigned long)sc->host_scribble;
+ rec->host_scribble = (u64)sc->host_scribble;
memcpy(rec->scsi_opcode, sc->cmnd,
min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 94de55304a02..6c761299a22f 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -129,7 +129,7 @@ struct zfcp_erp_action {
struct scsi_device *sdev;
u32 status; /* recovery status */
enum zfcp_erp_steps step; /* active step of this erp action */
- unsigned long fsf_req_id;
+ u64 fsf_req_id;
struct timer_list timer;
};
@@ -163,7 +163,7 @@ struct zfcp_adapter {
struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
struct list_head port_list; /* remote port list */
rwlock_t port_list_lock; /* port list lock */
- unsigned long req_no; /* unique FSF req number */
+ u64 req_no; /* unique FSF req number */
struct zfcp_reqlist *req_list;
u32 fsf_req_seq_no; /* FSF cmnd seq number */
rwlock_t abort_lock; /* Protects against SCSI
@@ -325,7 +325,7 @@ static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
*/
struct zfcp_fsf_req {
struct list_head list;
- unsigned long req_id;
+ u64 req_id;
struct zfcp_adapter *adapter;
struct zfcp_qdio_req qdio_req;
struct completion completion;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c302cbb18a55..9f5152b42b0e 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -4,7 +4,7 @@
*
* External function declarations.
*
- * Copyright IBM Corp. 2002, 2020
+ * Copyright IBM Corp. 2002, 2023
*/
#ifndef ZFCP_EXT_H
@@ -46,6 +46,9 @@ extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req,
u64 wwpn, u32 fc_security_old,
u32 fc_security_new);
+extern void zfcp_dbf_hba_fsf_reqid(const char *const tag, const int level,
+ struct zfcp_adapter *const adapter,
+ const u64 req_id);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index ab3ea529cca7..ceed1b6f7cb6 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4,7 +4,7 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corp. 2002, 2020
+ * Copyright IBM Corp. 2002, 2023
*/
#define KMSG_COMPONENT "zfcp"
@@ -884,7 +884,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
- unsigned long req_id = req->req_id;
+ u64 req_id = req->req_id;
zfcp_reqlist_add(adapter->req_list, req);
@@ -892,8 +892,11 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
req->issued = get_tod_clock();
if (zfcp_qdio_send(qdio, &req->qdio_req)) {
del_timer_sync(&req->timer);
+
/* lookup request again, list might have changed */
- zfcp_reqlist_find_rm(adapter->req_list, req_id);
+ if (zfcp_reqlist_find_rm(adapter->req_list, req_id) == NULL)
+ zfcp_dbf_hba_fsf_reqid("fsrsrmf", 1, adapter, req_id);
+
zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
return -EIO;
}
@@ -1042,7 +1045,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
struct scsi_device *sdev = scmnd->device;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
- unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
+ u64 old_req_id = (u64) scmnd->host_scribble;
spin_lock_irq(&qdio->req_q_lock);
if (zfcp_qdio_sbal_get(qdio))
@@ -1065,7 +1068,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
req->handler = zfcp_fsf_abort_fcp_command_handler;
req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
req->qtcb->header.port_handle = zfcp_sdev->port->handle;
- req->qtcb->bottom.support.req_handle = (u64) old_req_id;
+ req->qtcb->bottom.support.req_handle = old_req_id;
zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req)) {
@@ -1919,7 +1922,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
- unsigned long req_id = 0;
+ u64 req_id = 0;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1978,7 +1981,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
- unsigned long req_id = 0;
+ u64 req_id = 0;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -2587,6 +2590,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
goto out;
}
+ BUILD_BUG_ON(sizeof(scsi_cmnd->host_scribble) < sizeof(req->req_id));
scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
io = &req->qtcb->bottom.io;
@@ -2732,7 +2736,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *fsf_req;
- unsigned long req_id;
+ u64 req_id;
int idx;
for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
@@ -2747,7 +2751,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
* corruption and must stop the machine immediately.
*/
zfcp_qdio_siosl(adapter);
- panic("error: unknown req_id (%lx) on adapter %s.\n",
+ panic("error: unknown req_id (%llx) on adapter %s.\n",
req_id, dev_name(&adapter->ccw_device->dev));
}
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 390706867df3..90134d9b69a7 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -115,7 +115,7 @@ zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
*/
static inline
void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned long req_id, u8 sbtype, void *data, u32 len)
+ u64 req_id, u8 sbtype, void *data, u32 len)
{
struct qdio_buffer_element *sbale;
int count = min(atomic_read(&qdio->req_q_free),
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
index 9b8ff249e31c..59fbb1b128cb 100644
--- a/drivers/s390/scsi/zfcp_reqlist.h
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -5,14 +5,16 @@
* Data structure and helper functions for tracking pending FSF
* requests.
*
- * Copyright IBM Corp. 2009, 2016
+ * Copyright IBM Corp. 2009, 2023
*/
#ifndef ZFCP_REQLIST_H
#define ZFCP_REQLIST_H
+#include <linux/types.h>
+
/* number of hash buckets */
-#define ZFCP_REQ_LIST_BUCKETS 128
+#define ZFCP_REQ_LIST_BUCKETS 128u
/**
* struct zfcp_reqlist - Container for request list (reqlist)
@@ -24,7 +26,7 @@ struct zfcp_reqlist {
struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
};
-static inline int zfcp_reqlist_hash(unsigned long req_id)
+static inline size_t zfcp_reqlist_hash(u64 req_id)
{
return req_id % ZFCP_REQ_LIST_BUCKETS;
}
@@ -37,7 +39,7 @@ static inline int zfcp_reqlist_hash(unsigned long req_id)
*/
static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
{
- unsigned int i;
+ size_t i;
struct zfcp_reqlist *rl;
rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
@@ -60,7 +62,7 @@ static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
*/
static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
{
- unsigned int i;
+ size_t i;
for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
if (!list_empty(&rl->buckets[i]))
@@ -81,10 +83,10 @@ static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
}
static inline struct zfcp_fsf_req *
-_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+_zfcp_reqlist_find(struct zfcp_reqlist *rl, u64 req_id)
{
struct zfcp_fsf_req *req;
- unsigned int i;
+ size_t i;
i = zfcp_reqlist_hash(req_id);
list_for_each_entry(req, &rl->buckets[i], list)
@@ -102,7 +104,7 @@ _zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
* or NULL if there is no known FSF request with this id.
*/
static inline struct zfcp_fsf_req *
-zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+zfcp_reqlist_find(struct zfcp_reqlist *rl, u64 req_id)
{
unsigned long flags;
struct zfcp_fsf_req *req;
@@ -127,7 +129,7 @@ zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
* NULL if it has not been found.
*/
static inline struct zfcp_fsf_req *
-zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
+zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, u64 req_id)
{
unsigned long flags;
struct zfcp_fsf_req *req;
@@ -154,7 +156,7 @@ zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
struct zfcp_fsf_req *req)
{
- unsigned int i;
+ size_t i;
unsigned long flags;
i = zfcp_reqlist_hash(req->req_id);
@@ -172,7 +174,7 @@ static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
struct list_head *list)
{
- unsigned int i;
+ size_t i;
unsigned long flags;
spin_lock_irqsave(&rl->lock, flags);
@@ -200,7 +202,7 @@ zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
{
struct zfcp_fsf_req *req;
unsigned long flags;
- unsigned int i;
+ size_t i;
spin_lock_irqsave(&rl->lock, flags);
for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 526ac240d9fe..3dbf4b21d127 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -170,7 +170,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
(struct zfcp_adapter *) scsi_host->hostdata[0];
struct zfcp_fsf_req *old_req, *abrt_req;
unsigned long flags;
- unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
+ u64 old_reqid = (u64) scpnt->host_scribble;
int retval = SUCCESS, ret;
int retry = 3;
char *dbf_tag;
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 3687b5c0cf90..d8fc7beafa20 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -24,7 +24,6 @@
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
-#include <linux/version.h>
#include <scsi/scsi_device.h>
#include <scsi/libiscsi_tcp.h>
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 12346e2297fd..f7f62e56afca 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -181,6 +181,7 @@ void scsi_remove_host(struct Scsi_Host *shost)
scsi_forget_host(shost);
mutex_unlock(&shost->scan_mutex);
scsi_proc_host_rm(shost);
+ scsi_proc_hostdir_rm(shost->hostt);
/*
* New SCSI devices cannot be attached anymore because of the SCSI host
@@ -340,6 +341,7 @@ static void scsi_host_dev_release(struct device *dev)
struct Scsi_Host *shost = dev_to_shost(dev);
struct device *parent = dev->parent;
+ /* In case scsi_remove_host() has not been called. */
scsi_proc_hostdir_rm(shost->hostt);
/* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
@@ -356,7 +358,7 @@ static void scsi_host_dev_release(struct device *dev)
/*
* Free the shost_dev device name here if scsi_host_alloc()
* and scsi_host_put() have been called but neither
- * scsi_host_add() nor scsi_host_remove() has been called.
+ * scsi_host_add() nor scsi_remove_host() has been called.
* This avoids leaking the memory allocated for the shost_dev
* name.
*/
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 198d3f20d682..c74053f0b72f 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1516,23 +1516,22 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
}
/**
- * strip_and_pad_whitespace - Strip and pad trailing whitespace.
- * @i: index into buffer
- * @buf: string to modify
+ * strip_whitespace - Strip trailing whitespace.
+ * @i: size of buffer
+ * @buf: string to modify
*
- * This function will strip all trailing whitespace, pad the end
- * of the string with a single space, and NULL terminate the string.
+ * This function will strip all trailing whitespace and
+ * NUL terminate the string.
*
- * Return value:
- * new length of string
**/
-static int strip_and_pad_whitespace(int i, char *buf)
+static void strip_whitespace(int i, char *buf)
{
+ if (i < 1)
+ return;
+ i--;
while (i && buf[i] == ' ')
i--;
- buf[i+1] = ' ';
- buf[i+2] = '\0';
- return i + 2;
+ buf[i+1] = '\0';
}
/**
@@ -1547,19 +1546,21 @@ static int strip_and_pad_whitespace(int i, char *buf)
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
struct ipr_vpd *vpd)
{
- char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
- int i = 0;
+ char vendor_id[IPR_VENDOR_ID_LEN + 1];
+ char product_id[IPR_PROD_ID_LEN + 1];
+ char sn[IPR_SERIAL_NUM_LEN + 1];
- memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
- i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
+ memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
+ strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
- memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
- i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
+ memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
+ strip_whitespace(IPR_PROD_ID_LEN, product_id);
- memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
- buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
+ memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
+ strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
- ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
+ ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
+ vendor_id, product_id, sn);
}
/**
@@ -9495,11 +9496,10 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
* This function takes care of initializing the adapter to the point
* where it can accept new commands.
* Return value:
- * 0 on success / -EIO on failure
+ * none
**/
-static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
+static void ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
- int rc = 0;
unsigned long host_lock_flags = 0;
ENTER;
@@ -9515,7 +9515,6 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
LEAVE;
- return rc;
}
/**
@@ -10558,12 +10557,7 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
return rc;
ioa_cfg = pci_get_drvdata(pdev);
- rc = ipr_probe_ioa_part2(ioa_cfg);
-
- if (rc) {
- __ipr_remove(pdev);
- return rc;
- }
+ ipr_probe_ioa_part2(ioa_cfg);
rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 76c3434f8976..22f2e046e8eb 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2541,7 +2541,7 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
/**
* lpfc_enable_bbcr_set: Sets an attribute value.
- * @phba: pointer the the adapter structure.
+ * @phba: pointer to the adapter structure.
* @val: integer attribute value.
*
* Description:
@@ -2632,7 +2632,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
* takes a default argument, a minimum and maximum argument.
*
* lpfc_##attr##_init: Initializes an attribute.
- * @phba: pointer the the adapter structure.
+ * @phba: pointer to the adapter structure.
* @val: integer attribute value.
*
* Validates the min and max values then sets the adapter config field
@@ -2665,7 +2665,7 @@ lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
* into a function with the name lpfc_hba_queue_depth_set
*
* lpfc_##attr##_set: Sets an attribute value.
- * @phba: pointer the the adapter structure.
+ * @phba: pointer to the adapter structure.
* @val: integer attribute value.
*
* Description:
@@ -2794,7 +2794,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
* lpfc_##attr##_init: validates the min and max values then sets the
* adapter config field accordingly, or uses the default if out of range
* and prints an error message.
- * @phba: pointer the the adapter structure.
+ * @phba: pointer to the adapter structure.
* @val: integer attribute value.
*
* Returns:
@@ -2826,7 +2826,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
* lpfc_##attr##_set: validates the min and max values then sets the
* adapter config field if in the valid range. prints error message
* and does not set the parameter if invalid.
- * @phba: pointer the the adapter structure.
+ * @phba: pointer to the adapter structure.
* @val: integer attribute value.
*
* Returns:
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 569639dc8b2c..35b252f1ef73 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -8886,7 +8886,7 @@ reject_out:
* @rrq: Pointer to the rrq struct.
*
* Build an ELS RRQ command and send it to the target. If the issue_iocb is
- * Successful the the completion handler will clear the RRQ.
+ * successful, the completion handler will clear the RRQ.
*
* Return codes
* 0 - Successfully sent rrq els iocb.
@@ -10287,7 +10287,7 @@ lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length)
/* Send every descriptor individually to the upper layer */
if (deliver)
fc_host_fpin_rcv(lpfc_shost_from_vport(vport),
- fpin_length, (char *)fpin);
+ fpin_length, (char *)fpin, 0);
desc_cnt++;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a6df0a5b4006..66cd0b1dbbd0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -2459,7 +2459,7 @@ static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
* @phba: pointer to lpfc hba data structure.
* @fcf_index: the index of the fcf record to update
* This routine acquires the hbalock and then set the LPFC_FCF_FLOGI_FAILED
- * flag so the the round robin slection for the particular priority level
+ * flag so the round robin selection for the particular priority level
* will try a different fcf record that does not have this bit set.
* If the fcf record is re-read for any reason this flag is cleared before
* adding it to the priority list.
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6eb4085a3a22..73b544bfbb2e 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -5502,7 +5502,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
bf_set(lpfc_mbx_read_top_link_spd, la,
(bf_get(lpfc_acqe_link_speed, acqe_link)));
- /* Fake the the following irrelvant fields */
+ /* Fake the following irrelevant fields */
bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
bf_set(lpfc_mbx_read_top_il, la, 0);
@@ -12549,7 +12549,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
/* Mark CPU as IRQ not assigned by the kernel */
cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
- /* If so, find a new_cpup thats on the the SAME
+ /* If so, find a new_cpup that is on the SAME
* phys_id as cpup. start_cpu will start where we
* left off so all unassigned entries don't get assigned
* the IRQ of the first entry.
@@ -12563,7 +12563,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
goto found_same;
new_cpu = cpumask_next(
new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
+ if (new_cpu >= nr_cpu_ids)
new_cpu = first_cpu;
}
/* At this point, we leave the CPU as unassigned */
@@ -12577,7 +12577,7 @@ found_same:
* selecting the same IRQ.
*/
start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu == nr_cpumask_bits)
+ if (start_cpu >= nr_cpu_ids)
start_cpu = first_cpu;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -12613,7 +12613,7 @@ found_same:
goto found_any;
new_cpu = cpumask_next(
new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
+ if (new_cpu >= nr_cpu_ids)
new_cpu = first_cpu;
}
/* We should never leave an entry unassigned */
@@ -12631,7 +12631,7 @@ found_any:
* selecting the same IRQ.
*/
start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu == nr_cpumask_bits)
+ if (start_cpu >= nr_cpu_ids)
start_cpu = first_cpu;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -12704,7 +12704,7 @@ found_any:
goto found_hdwq;
}
new_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
+ if (new_cpu >= nr_cpu_ids)
new_cpu = first_cpu;
}
@@ -12719,7 +12719,7 @@ found_any:
goto found_hdwq;
new_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
+ if (new_cpu >= nr_cpu_ids)
new_cpu = first_cpu;
}
@@ -12730,7 +12730,7 @@ found_any:
found_hdwq:
/* We found an available entry, copy the IRQ info */
start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu == nr_cpumask_bits)
+ if (start_cpu >= nr_cpu_ids)
start_cpu = first_cpu;
cpup->hdwq = new_cpup->hdwq;
logit:
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 9858b1743769..0dfdc0c4c08c 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2509,7 +2509,7 @@ lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
* information via a READ_FCF mailbox command. This mailbox command also is used
* to indicate where received unsolicited frames from this FCF will be sent. By
* default this routine will set up the FCF to forward all unsolicited frames
- * the the RQ ID passed in the @phba. This can be overridden by the caller for
+ * to the RQ ID passed in the @phba. This can be overridden by the caller for
* more complicated setups.
**/
void
@@ -2577,7 +2577,7 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
* information via a READ_FCF mailbox command. This mailbox command also is used
* to indicate where received unsolicited frames from this FCF will be sent. By
* default this routine will set up the FCF to forward all unsolicited frames
- * the the RQ ID passed in the @phba. This can be overridden by the caller for
+ * to the RQ ID passed in the @phba. This can be overridden by the caller for
* more complicated setups.
**/
void
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index f7cfac0da9b6..7517dd55fe91 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1469,7 +1469,7 @@ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
if (!infop)
return;
- /* Cycle the the entire CPU context list for every MRQ */
+ /* Cycle the entire CPU context list for every MRQ */
for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
for_each_present_cpu(j) {
infop = lpfc_get_ctx_list(phba, j, i);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index edbd81c3b643..c5b69f313af3 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -20804,7 +20804,7 @@ lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
* the offset after the write object mailbox has completed. @size is used to
* determine the end of the object and whether the eof bit should be set.
*
- * Return 0 is successful and offset will contain the the new offset to use
+ * Return 0 if successful and offset will contain the new offset to use
* for the next write.
* Return negative value for error cases.
**/
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index def4c5e15cd8..23de2603e71f 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -29,7 +29,6 @@
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -955,19 +954,16 @@ struct scmd_priv {
* @chain_buf_count: Chain buffer count
* @chain_buf_pool: Chain buffer pool
* @chain_sgl_list: Chain SGL list
- * @chain_bitmap_sz: Chain buffer allocator bitmap size
* @chain_bitmap: Chain buffer allocator bitmap
* @chain_buf_lock: Chain buffer list lock
* @bsg_cmds: Command tracker for BSG command
* @host_tm_cmds: Command tracker for task management commands
* @dev_rmhs_cmds: Command tracker for device removal commands
* @evtack_cmds: Command tracker for event ack commands
- * @devrem_bitmap_sz: Device removal bitmap size
* @devrem_bitmap: Device removal bitmap
- * @dev_handle_bitmap_sz: Device handle bitmap size
+ * @dev_handle_bitmap_bits: Number of bits in device handle bitmap
* @removepend_bitmap: Remove pending bitmap
* @delayed_rmhs_list: Delayed device removal list
- * @evtack_cmds_bitmap_sz: Event Ack bitmap size
* @evtack_cmds_bitmap: Event Ack bitmap
* @delayed_evtack_cmds_list: Delayed event acknowledgment list
* @ts_update_counter: Timestamp update counter
@@ -1128,7 +1124,6 @@ struct mpi3mr_ioc {
u32 chain_buf_count;
struct dma_pool *chain_buf_pool;
struct chain_element *chain_sgl_list;
- u16 chain_bitmap_sz;
void *chain_bitmap;
spinlock_t chain_buf_lock;
@@ -1136,12 +1131,10 @@ struct mpi3mr_ioc {
struct mpi3mr_drv_cmd host_tm_cmds;
struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD];
- u16 devrem_bitmap_sz;
void *devrem_bitmap;
- u16 dev_handle_bitmap_sz;
+ u16 dev_handle_bitmap_bits;
void *removepend_bitmap;
struct list_head delayed_rmhs_list;
- u16 evtack_cmds_bitmap_sz;
void *evtack_cmds_bitmap;
struct list_head delayed_evtack_cmds_list;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 9baac224b213..bff637702397 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -293,7 +293,6 @@ out:
static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
struct bsg_job *job)
{
- long rval = -EINVAL;
u16 num_devices = 0, i = 0, size;
unsigned long flags;
struct mpi3mr_tgt_dev *tgtdev;
@@ -304,7 +303,7 @@ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
if (job->request_payload.payload_len < sizeof(u32)) {
dprint_bsg_err(mrioc, "%s: invalid size argument\n",
__func__);
- return rval;
+ return -EINVAL;
}
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
@@ -312,7 +311,7 @@ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
num_devices++;
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
- if ((job->request_payload.payload_len == sizeof(u32)) ||
+ if ((job->request_payload.payload_len <= sizeof(u64)) ||
list_empty(&mrioc->tgtdev_list)) {
sg_copy_from_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
@@ -320,14 +319,14 @@ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
return 0;
}
- kern_entrylen = (num_devices - 1) * sizeof(*devmap_info);
- size = sizeof(*alltgt_info) + kern_entrylen;
+ kern_entrylen = num_devices * sizeof(*devmap_info);
+ size = sizeof(u64) + kern_entrylen;
alltgt_info = kzalloc(size, GFP_KERNEL);
if (!alltgt_info)
return -ENOMEM;
devmap_info = alltgt_info->dmi;
- memset((u8 *)devmap_info, 0xFF, (kern_entrylen + sizeof(*devmap_info)));
+ memset((u8 *)devmap_info, 0xFF, kern_entrylen);
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
if (i < num_devices) {
@@ -344,25 +343,18 @@ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
num_devices = i;
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
- memcpy(&alltgt_info->num_devices, &num_devices, sizeof(num_devices));
+ alltgt_info->num_devices = num_devices;
- usr_entrylen = (job->request_payload.payload_len - sizeof(u32)) / sizeof(*devmap_info);
+ usr_entrylen = (job->request_payload.payload_len - sizeof(u64)) /
+ sizeof(*devmap_info);
usr_entrylen *= sizeof(*devmap_info);
min_entrylen = min(usr_entrylen, kern_entrylen);
- if (min_entrylen && (!memcpy(&alltgt_info->dmi, devmap_info, min_entrylen))) {
- dprint_bsg_err(mrioc, "%s:%d: device map info copy failed\n",
- __func__, __LINE__);
- rval = -EFAULT;
- goto out;
- }
sg_copy_from_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
- alltgt_info, job->request_payload.payload_len);
- rval = 0;
-out:
+ alltgt_info, (min_entrylen + sizeof(u64)));
kfree(alltgt_info);
- return rval;
+ return 0;
}
/**
* mpi3mr_get_change_count - Get topology change count
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 286a44506578..758f7ca9e0ee 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -1128,7 +1128,6 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
static int
mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
{
- u16 dev_handle_bitmap_sz;
void *removepend_bitmap;
if (mrioc->facts.reply_sz > mrioc->reply_sz) {
@@ -1160,25 +1159,23 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
"\tcontroller while sas transport support is enabled at the\n"
"\tdriver, please reboot the system or reload the driver\n");
- dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
- if (mrioc->facts.max_devhandle % 8)
- dev_handle_bitmap_sz++;
- if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) {
- removepend_bitmap = krealloc(mrioc->removepend_bitmap,
- dev_handle_bitmap_sz, GFP_KERNEL);
+ if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
+ removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
+ GFP_KERNEL);
if (!removepend_bitmap) {
ioc_err(mrioc,
- "failed to increase removepend_bitmap sz from: %d to %d\n",
- mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
+ "failed to increase removepend_bitmap bits from %d to %d\n",
+ mrioc->dev_handle_bitmap_bits,
+ mrioc->facts.max_devhandle);
return -EPERM;
}
- memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0,
- dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz);
+ bitmap_free(mrioc->removepend_bitmap);
mrioc->removepend_bitmap = removepend_bitmap;
ioc_info(mrioc,
- "increased dev_handle_bitmap_sz from %d to %d\n",
- mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
- mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
+ "increased bits of dev_handle_bitmap from %d to %d\n",
+ mrioc->dev_handle_bitmap_bits,
+ mrioc->facts.max_devhandle);
+ mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
}
return 0;
@@ -2957,27 +2954,18 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
if (!mrioc->pel_abort_cmd.reply)
goto out_failed;
- mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
- if (mrioc->facts.max_devhandle % 8)
- mrioc->dev_handle_bitmap_sz++;
- mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
- GFP_KERNEL);
+ mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
+ mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
+ GFP_KERNEL);
if (!mrioc->removepend_bitmap)
goto out_failed;
- mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
- if (MPI3MR_NUM_DEVRMCMD % 8)
- mrioc->devrem_bitmap_sz++;
- mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
- GFP_KERNEL);
+ mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
if (!mrioc->devrem_bitmap)
goto out_failed;
- mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8;
- if (MPI3MR_NUM_EVTACKCMD % 8)
- mrioc->evtack_cmds_bitmap_sz++;
- mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz,
- GFP_KERNEL);
+ mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
+ GFP_KERNEL);
if (!mrioc->evtack_cmds_bitmap)
goto out_failed;
@@ -3415,10 +3403,7 @@ static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
if (!mrioc->chain_sgl_list[i].addr)
goto out_failed;
}
- mrioc->chain_bitmap_sz = num_chains / 8;
- if (num_chains % 8)
- mrioc->chain_bitmap_sz++;
- mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL);
+ mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
if (!mrioc->chain_bitmap)
goto out_failed;
return retval;
@@ -4189,10 +4174,11 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
memset(mrioc->evtack_cmds[i].reply, 0,
sizeof(*mrioc->evtack_cmds[i].reply));
- memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
- memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
- memset(mrioc->evtack_cmds_bitmap, 0,
- mrioc->evtack_cmds_bitmap_sz);
+ bitmap_clear(mrioc->removepend_bitmap, 0,
+ mrioc->dev_handle_bitmap_bits);
+ bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
+ bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
+ MPI3MR_NUM_EVTACKCMD);
}
for (i = 0; i < mrioc->num_queues; i++) {
@@ -4318,16 +4304,16 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
mrioc->evtack_cmds[i].reply = NULL;
}
- kfree(mrioc->removepend_bitmap);
+ bitmap_free(mrioc->removepend_bitmap);
mrioc->removepend_bitmap = NULL;
- kfree(mrioc->devrem_bitmap);
+ bitmap_free(mrioc->devrem_bitmap);
mrioc->devrem_bitmap = NULL;
- kfree(mrioc->evtack_cmds_bitmap);
+ bitmap_free(mrioc->evtack_cmds_bitmap);
mrioc->evtack_cmds_bitmap = NULL;
- kfree(mrioc->chain_bitmap);
+ bitmap_free(mrioc->chain_bitmap);
mrioc->chain_bitmap = NULL;
kfree(mrioc->transport_cmds.reply);
@@ -4886,9 +4872,10 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mpi3mr_flush_delayed_cmd_lists(mrioc);
mpi3mr_flush_drv_cmds(mrioc);
- memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
- memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
- memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
+ bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
+ bitmap_clear(mrioc->removepend_bitmap, 0,
+ mrioc->dev_handle_bitmap_bits);
+ bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
mpi3mr_flush_host_io(mrioc);
mpi3mr_cleanup_fwevt_list(mrioc);
mpi3mr_invalidate_devhandles(mrioc);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 3306de7170f6..6eaeba41072c 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -4952,6 +4952,10 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
+ mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
+ MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
+
if (pdev->revision)
mrioc->enable_segqueue = true;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 3fc897336b5e..3b61815979da 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -1280,7 +1280,7 @@ void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc)
if (mrioc->sas_hba.enclosure_handle) {
if (!(mpi3mr_cfg_get_enclosure_pg0(mrioc, &ioc_status,
- &encl_pg0, sizeof(dev_pg0),
+ &encl_pg0, sizeof(encl_pg0),
MPI3_ENCLOS_PGAD_FORM_HANDLE,
mrioc->sas_hba.enclosure_handle)) &&
(ioc_status == MPI3_IOCSTATUS_SUCCESS))
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
index 37d084086fd4..fdda12ef13b0 100644
--- a/drivers/scsi/qedi/qedi_dbg.h
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <scsi/scsi_transport.h>
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 46e8b38603f0..030625ebb4e6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -45,7 +45,7 @@ qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
pkt, pkt_size);
- fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt);
+ fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt, 0);
}
const char *const port_state_str[] = {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index abe93ec8b7d0..b7c569a42aa4 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -229,6 +229,7 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
scmd->cmd_len = COMMAND_SIZE(cmd[0]);
memcpy(scmd->cmnd, cmd, scmd->cmd_len);
scmd->allowed = retries;
+ scmd->flags |= args->scmd_flags;
req->timeout = timeout;
req->rq_flags |= RQF_QUIET;
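
With scmd_flags now copied into the command here, callers can pass SCMD_* bits through struct scsi_exec_args instead of open-coding request setup; the UFS START STOP conversion later in this diff does exactly that. A hedged sketch of such a caller (the helper name and CDB choice are illustrative only):

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>

static int example_test_unit_ready(struct scsi_device *sdev)
{
	const unsigned char cdb[6] = { TEST_UNIT_READY };
	const struct scsi_exec_args args = {
		/* copied into scmd->flags by scsi_execute_cmd() */
		.scmd_flags = SCMD_FAIL_IF_RECOVERING,
	};

	return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
				/*bufflen=*/0, /*timeout=*/10 * HZ,
				/*retries=*/3, &args);
}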
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 0965f8a7134f..f12e9467ebb4 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -137,6 +137,7 @@ static const struct {
{ FCH_EVT_PORT_FABRIC, "port_fabric" },
{ FCH_EVT_LINK_UNKNOWN, "link_unknown" },
{ FCH_EVT_LINK_FPIN, "link_FPIN" },
+ { FCH_EVT_LINK_FPIN_ACK, "link_FPIN_ACK" },
{ FCH_EVT_VENDOR_UNIQUE, "vendor_unique" },
};
fc_enum_name_search(host_event_code, fc_host_event_code,
@@ -894,17 +895,20 @@ fc_fpin_congn_stats_update(struct Scsi_Host *shost,
* @shost: host the FPIN was received on
* @fpin_len: length of FPIN payload, in bytes
* @fpin_buf: pointer to FPIN payload
- *
+ * @event_acknowledge: 1 if the LLDD handles this event.
* Notes:
* This routine assumes no locks are held on entry.
*/
void
-fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf)
+fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf,
+ u8 event_acknowledge)
{
struct fc_els_fpin *fpin = (struct fc_els_fpin *)fpin_buf;
struct fc_tlv_desc *tlv;
u32 desc_cnt = 0, bytes_remain;
u32 dtag;
+ enum fc_host_event_code event_code =
+ event_acknowledge ? FCH_EVT_LINK_FPIN_ACK : FCH_EVT_LINK_FPIN;
/* Update Statistics */
tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
@@ -934,7 +938,7 @@ fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf)
}
fc_host_post_fc_event(shost, fc_get_event_number(),
- FCH_EVT_LINK_FPIN, fpin_len, fpin_buf, 0);
+ event_code, fpin_len, fpin_buf, 0);
}
EXPORT_SYMBOL(fc_host_fpin_rcv);
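
Every LLDD caller now supplies the extra flag; the qla2xxx hunk above passes 0, while a driver that also sends the FPIN acknowledgement itself would pass 1 so the transport posts FCH_EVT_LINK_FPIN_ACK. A sketch of a call site (everything except fc_host_fpin_rcv itself is illustrative):

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

static void example_fpin_frame(struct Scsi_Host *shost, char *els, u32 len)
{
	/* last argument: 0 = transport only records the FPIN,
	 *                1 = the LLDD acknowledges the FPIN itself */
	fc_host_fpin_rcv(shost, len, els, 0);
}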
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a38c71511bc9..4f28dd617eca 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -121,7 +121,6 @@ static void scsi_disk_release(struct device *cdev);
static DEFINE_IDA(sd_index_ida);
-static struct kmem_cache *sd_cdb_cache;
static mempool_t *sd_page_pool;
static struct lock_class_key sd_bio_compl_lkclass;
@@ -2252,23 +2251,20 @@ static void sd_config_protection(struct scsi_disk *sdkp)
{
struct scsi_device *sdp = sdkp->device;
- if (!sdkp->first_scan)
- return;
-
sd_dif_config_host(sdkp);
if (!sdkp->protection_type)
return;
if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) {
- sd_printk(KERN_NOTICE, sdkp,
- "Disabling DIF Type %u protection\n",
- sdkp->protection_type);
+ sd_first_printk(KERN_NOTICE, sdkp,
+ "Disabling DIF Type %u protection\n",
+ sdkp->protection_type);
sdkp->protection_type = 0;
}
- sd_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n",
- sdkp->protection_type);
+ sd_first_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n",
+ sdkp->protection_type);
}
static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
@@ -3851,19 +3847,11 @@ static int __init init_sd(void)
if (err)
goto err_out;
- sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
- 0, 0, NULL);
- if (!sd_cdb_cache) {
- printk(KERN_ERR "sd: can't init extended cdb cache\n");
- err = -ENOMEM;
- goto err_out_class;
- }
-
sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
if (!sd_page_pool) {
printk(KERN_ERR "sd: can't init discard page pool\n");
err = -ENOMEM;
- goto err_out_cache;
+ goto err_out_class;
}
err = scsi_register_driver(&sd_template.gendrv);
@@ -3874,10 +3862,6 @@ static int __init init_sd(void)
err_out_driver:
mempool_destroy(sd_page_pool);
-
-err_out_cache:
- kmem_cache_destroy(sd_cdb_cache);
-
err_out_class:
class_unregister(&sd_disk_class);
err_out:
@@ -3899,7 +3883,6 @@ static void __exit exit_sd(void)
scsi_unregister_driver(&sd_template.gendrv);
mempool_destroy(sd_page_pool);
- kmem_cache_destroy(sd_cdb_cache);
class_unregister(&sd_disk_class);
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 968993ee6d5d..1df847b5f747 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -39,8 +39,10 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
dif = 0; dix = 1;
}
- if (!dix)
+ if (!dix) {
+ blk_integrity_unregister(disk);
return;
+ }
memset(&bi, 0, sizeof(bi));
@@ -72,9 +74,9 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
bi.tag_size = sizeof(u16);
}
- sd_printk(KERN_NOTICE, sdkp,
- "Enabling DIX %s, application tag size %u bytes\n",
- bi.profile->name, bi.tag_size);
+ sd_first_printk(KERN_NOTICE, sdkp,
+ "Enabling DIX %s, application tag size %u bytes\n",
+ bi.profile->name, bi.tag_size);
out:
blk_integrity_register(disk, &bi);
}
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 869ca9c7f23f..b11a9162e73a 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -439,8 +439,8 @@ int ses_match_host(struct enclosure_device *edev, void *data)
}
#endif /* 0 */
-static void ses_process_descriptor(struct enclosure_component *ecomp,
- unsigned char *desc)
+static int ses_process_descriptor(struct enclosure_component *ecomp,
+ unsigned char *desc, int max_desc_len)
{
int eip = desc[0] & 0x10;
int invalid = desc[0] & 0x80;
@@ -451,22 +451,32 @@ static void ses_process_descriptor(struct enclosure_component *ecomp,
unsigned char *d;
if (invalid)
- return;
+ return 0;
switch (proto) {
case SCSI_PROTOCOL_FCP:
if (eip) {
+ if (max_desc_len <= 7)
+ return 1;
d = desc + 4;
slot = d[3];
}
break;
case SCSI_PROTOCOL_SAS:
+
if (eip) {
+ if (max_desc_len <= 27)
+ return 1;
d = desc + 4;
slot = d[3];
d = desc + 8;
- } else
+ } else {
+ if (max_desc_len <= 23)
+ return 1;
d = desc + 4;
+ }
+
+
/* only take the phy0 addr */
addr = (u64)d[12] << 56 |
(u64)d[13] << 48 |
@@ -483,6 +493,8 @@ static void ses_process_descriptor(struct enclosure_component *ecomp,
}
ecomp->slot = slot;
scomp->addr = addr;
+
+ return 0;
}
struct efd {
@@ -555,7 +567,7 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
/* skip past overall descriptor */
desc_ptr += len + 4;
}
- if (ses_dev->page10)
+ if (ses_dev->page10 && ses_dev->page10_len > 9)
addl_desc_ptr = ses_dev->page10 + 8;
type_ptr = ses_dev->page1_types;
components = 0;
@@ -563,17 +575,22 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
for (j = 0; j < type_ptr[1]; j++) {
char *name = NULL;
struct enclosure_component *ecomp;
+ int max_desc_len;
if (desc_ptr) {
- if (desc_ptr >= buf + page7_len) {
+ if (desc_ptr + 3 >= buf + page7_len) {
desc_ptr = NULL;
} else {
len = (desc_ptr[2] << 8) + desc_ptr[3];
desc_ptr += 4;
- /* Add trailing zero - pushes into
- * reserved space */
- desc_ptr[len] = '\0';
- name = desc_ptr;
+ if (desc_ptr + len > buf + page7_len)
+ desc_ptr = NULL;
+ else {
+ /* Add trailing zero - pushes into
+ * reserved space */
+ desc_ptr[len] = '\0';
+ name = desc_ptr;
+ }
}
}
if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
@@ -589,10 +606,14 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
ecomp = &edev->component[components++];
if (!IS_ERR(ecomp)) {
- if (addl_desc_ptr)
- ses_process_descriptor(
- ecomp,
- addl_desc_ptr);
+ if (addl_desc_ptr) {
+ max_desc_len = ses_dev->page10_len -
+ (addl_desc_ptr - ses_dev->page10);
+ if (ses_process_descriptor(ecomp,
+ addl_desc_ptr,
+ max_desc_len))
+ addl_desc_ptr = NULL;
+ }
if (create)
enclosure_component_register(
ecomp);
@@ -609,9 +630,11 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
/* these elements are optional */
type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
- type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
+ type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) {
addl_desc_ptr += addl_desc_ptr[1] + 2;
-
+ if (addl_desc_ptr + 1 >= ses_dev->page10 + ses_dev->page10_len)
+ addl_desc_ptr = NULL;
+ }
}
}
kfree(buf);
@@ -710,6 +733,12 @@ static int ses_intf_add(struct device *cdev,
type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
components += type_ptr[1];
}
+
+ if (components == 0) {
+ sdev_printk(KERN_WARNING, sdev, "enclosure has no enumerated components\n");
+ goto err_free;
+ }
+
ses_dev->page1 = buf;
ses_dev->page1_len = len;
buf = NULL;
@@ -833,7 +862,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
kfree(ses_dev->page2);
kfree(ses_dev);
- kfree(edev->component[0].scratch);
+ if (edev->components)
+ kfree(edev->component[0].scratch);
put_device(&edev->edev);
enclosure_unregister(edev);
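
The ses hunks above add explicit bounds checks while walking the variable-length descriptors in diagnostic pages 7 and 10, so a short or malformed page can no longer push the parser past the end of the transferred buffer. A generic sketch of that guarded-walk pattern (not the driver's code):

#include <linux/stddef.h>
#include <linux/types.h>

/* Advance to the next additional element descriptor, or return NULL if the
 * current one would overrun the buffer. Page 10 descriptors carry their
 * payload length in byte 1, so the full size is desc[1] + 2. */
static const u8 *example_next_desc(const u8 *desc, const u8 *end)
{
	size_t len;

	if (desc + 2 > end)
		return NULL;
	len = desc[1] + 2;
	if (desc + len > end)
		return NULL;
	return desc + len;
}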
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index d996782a7106..7a73f5e4a1fc 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -295,7 +295,7 @@ int clk_enable(struct clk *clk)
int ret;
if (!clk)
- return -EINVAL;
+ return 0;
spin_lock_irqsave(&clock_lock, flags);
ret = __clk_enable(clk);
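
Returning 0 for a NULL clk aligns the SuperH implementation with the common clk API convention that a NULL pointer means "no clock" and is treated as a successful no-op, so callers with optional clocks need no special-casing. A minimal sketch of a caller relying on that convention:

#include <linux/clk.h>

/* clk may legitimately be NULL when the clock is optional. */
static int example_enable_optional_clk(struct clk *clk)
{
	return clk_enable(clk);	/* 0 on success, including the NULL case */
}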
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 2954c06a7f57..64b6a460d739 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -786,7 +786,7 @@ failrd:
writel(0, reg_base + CQSPI_REG_IRQMASK);
/* Cancel the indirect read */
- writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
+ writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
reg_base + CQSPI_REG_INDIRECTRD);
return ret;
}
diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
index 348c6e1edd38..333b22dfd8db 100644
--- a/drivers/spi/spi-sn-f-ospi.c
+++ b/drivers/spi/spi-sn-f-ospi.c
@@ -611,7 +611,7 @@ static int f_ospi_probe(struct platform_device *pdev)
return -ENOMEM;
ctlr->mode_bits = SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL
- | SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL
+ | SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL
| SPI_MODE_0 | SPI_MODE_1 | SPI_LSB_FIRST;
ctlr->mem_ops = &f_ospi_mem_ops;
ctlr->bus_num = -1;
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 9f356612ba7e..0b9bc3b7f53a 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -1156,6 +1156,10 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
ret = -EIO;
goto exit;
}
+ if (!xfer->cs_change) {
+ tegra_qspi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ }
break;
default:
ret = -EINVAL;
@@ -1164,14 +1168,14 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
msg->actual_length += xfer->len;
transfer_phase++;
}
- if (!xfer->cs_change) {
- tegra_qspi_transfer_end(spi);
- spi_transfer_delay_exec(xfer);
- }
ret = 0;
exit:
msg->status = ret;
+ if (ret < 0) {
+ tegra_qspi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ }
return ret;
}
@@ -1297,7 +1301,7 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
if (xfer->len > 4 || xfer->len < 3)
return false;
xfer = list_next_entry(xfer, transfer_list);
- if (!tqspi->soc_data->has_dma || xfer->len > (QSPI_FIFO_DEPTH << 2))
+ if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
return false;
return true;
@@ -1532,6 +1536,7 @@ static int tegra_qspi_probe(struct platform_device *pdev)
master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+ master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->setup = tegra_qspi_setup;
master->transfer_one_message = tegra_qspi_transfer_one_message;
master->num_chipselect = 1;
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index b5808f92702d..cb7e7697cf1e 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -73,7 +73,8 @@ endmenu
config INTEL_BXT_PMIC_THERMAL
tristate "Intel Broxton PMIC thermal driver"
- depends on X86 && INTEL_SOC_PMIC_BXTWC && REGMAP
+ depends on X86 && INTEL_SOC_PMIC_BXTWC
+ select REGMAP
help
Select this driver for Intel Broxton PMIC with ADC channels monitoring
system temperature measurements and alerts.
diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c
index 97b843fa7568..ffdc95047838 100644
--- a/drivers/thermal/intel/intel_quark_dts_thermal.c
+++ b/drivers/thermal/intel/intel_quark_dts_thermal.c
@@ -400,22 +400,14 @@ MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids);
static int __init intel_quark_thermal_init(void)
{
- int err = 0;
-
if (!x86_match_cpu(qrk_thermal_ids) || !iosf_mbi_available())
return -ENODEV;
soc_dts = alloc_soc_dts();
- if (IS_ERR(soc_dts)) {
- err = PTR_ERR(soc_dts);
- goto err_free;
- }
+ if (IS_ERR(soc_dts))
+ return PTR_ERR(soc_dts);
return 0;
-
-err_free:
- free_soc_dts(soc_dts);
- return err;
}
static void __exit intel_quark_thermal_exit(void)
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index f566eb1839dc..1dc07f9214d5 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -403,10 +403,11 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
unsigned int this_round, skip = 0;
int size;
- ret = -ENXIO;
vc = vcs_vc(inode, &viewed);
- if (!vc)
- goto unlock_out;
+ if (!vc) {
+ ret = -ENXIO;
+ break;
+ }
/* Check whether we are above size each round,
* as copy_to_user at the end of this loop
@@ -414,10 +415,8 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
*/
size = vcs_size(vc, attr, uni_mode);
if (size < 0) {
- if (read)
- break;
ret = size;
- goto unlock_out;
+ break;
}
if (pos >= size)
break;
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 276a82b2e5ee..172d25fef740 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1409,6 +1409,13 @@ static int ufshcd_devfreq_target(struct device *dev,
struct ufs_clk_info *clki;
unsigned long irq_flags;
+ /*
+ * Skip devfreq if UFS initialization is not finished.
+ * Otherwise UFS could be in an inconsistent state.
+ */
+ if (!smp_load_acquire(&hba->logical_unit_scan_finished))
+ return 0;
+
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
@@ -8392,22 +8399,6 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
if (ret)
goto out;
- /* Initialize devfreq after UFS device is detected */
- if (ufshcd_is_clkscaling_supported(hba)) {
- memcpy(&hba->clk_scaling.saved_pwr_info.info,
- &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
- hba->clk_scaling.saved_pwr_info.is_valid = true;
- hba->clk_scaling.is_allowed = true;
-
- ret = ufshcd_devfreq_init(hba);
- if (ret)
- goto out;
-
- hba->clk_scaling.is_enabled = true;
- ufshcd_init_clk_scaling_sysfs(hba);
- }
-
ufs_bsg_probe(hba);
ufshpb_init(hba);
scsi_scan_host(hba->host);
@@ -8538,7 +8529,9 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
return ret;
if (is_mcq_supported(hba) && !hba->scsi_host_added) {
ret = ufshcd_alloc_mcq(hba);
- if (ret) {
+ if (!ret) {
+ ufshcd_config_mcq(hba);
+ } else {
/* Continue with SDB mode */
use_mcq_mode = false;
dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
@@ -8550,10 +8543,10 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
return ret;
}
hba->scsi_host_added = true;
- }
- /* MCQ may be disabled if ufshcd_alloc_mcq() fails */
- if (is_mcq_supported(hba) && use_mcq_mode)
+ } else if (is_mcq_supported(hba)) {
+ /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
ufshcd_config_mcq(hba);
+ }
}
ufshcd_tune_unipro_params(hba);
@@ -8677,6 +8670,12 @@ out:
if (ret) {
pm_runtime_put_sync(hba->dev);
ufshcd_hba_exit(hba);
+ } else {
+ /*
+ * Make sure that when reader code sees UFS initialization has finished,
+ * all initialization steps have really been executed.
+ */
+ smp_store_release(&hba->logical_unit_scan_finished, true);
}
}
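
The smp_store_release()/smp_load_acquire() pair above is the usual publish/subscribe idiom: the writer sets the flag only after all earlier initialization stores, and a reader that observes the flag set is guaranteed to also observe those stores. A small sketch of the idiom, assuming a hypothetical structure rather than struct ufs_hba:

#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct example_hba {
	int config;			/* written before the flag is published */
	bool scan_finished;
};

static void example_publish(struct example_hba *hba)
{
	hba->config = 42;				/* ordinary init store */
	smp_store_release(&hba->scan_finished, true);	/* publish */
}

static bool example_ready(struct example_hba *hba)
{
	/* pairs with the release above: if true, hba->config is visible */
	return smp_load_acquire(&hba->scan_finished);
}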
@@ -9143,34 +9142,15 @@ static int ufshcd_execute_start_stop(struct scsi_device *sdev,
enum ufs_dev_pwr_mode pwr_mode,
struct scsi_sense_hdr *sshdr)
{
- unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
- struct request *req;
- struct scsi_cmnd *scmd;
- int ret;
-
- req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN,
- BLK_MQ_REQ_PM);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- scmd = blk_mq_rq_to_pdu(req);
- scmd->cmd_len = COMMAND_SIZE(cdb[0]);
- memcpy(scmd->cmnd, cdb, scmd->cmd_len);
- scmd->allowed = 0/*retries*/;
- scmd->flags |= SCMD_FAIL_IF_RECOVERING;
- req->timeout = 1 * HZ;
- req->rq_flags |= RQF_PM | RQF_QUIET;
-
- blk_execute_rq(req, /*at_head=*/true);
-
- if (sshdr)
- scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
- sshdr);
- ret = scmd->result;
-
- blk_mq_free_request(req);
+ const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
+ const struct scsi_exec_args args = {
+ .sshdr = sshdr,
+ .req_flags = BLK_MQ_REQ_PM,
+ .scmd_flags = SCMD_FAIL_IF_RECOVERING,
+ };
- return ret;
+ return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
+ /*bufflen=*/0, /*timeout=*/HZ, /*retries=*/0, &args);
}
/**
@@ -10336,12 +10316,30 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
*/
ufshcd_set_ufs_dev_active(hba);
+ /* Initialize devfreq */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
+ hba->clk_scaling.is_allowed = true;
+
+ err = ufshcd_devfreq_init(hba);
+ if (err)
+ goto rpm_put_sync;
+
+ hba->clk_scaling.is_enabled = true;
+ ufshcd_init_clk_scaling_sysfs(hba);
+ }
+
async_schedule(ufshcd_async_scan, hba);
ufs_sysfs_add_nodes(hba->dev);
device_enable_async_suspend(dev);
return 0;
+rpm_put_sync:
+ pm_runtime_put_sync(dev);
free_tmf_queue:
blk_mq_destroy_queue(hba->tmf_queue);
blk_put_queue(hba->tmf_queue);
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
index 663881437921..8793e3433580 100644
--- a/drivers/ufs/host/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -48,7 +48,7 @@ config SCSI_UFS_CDNS_PLATFORM
config SCSI_UFS_DWC_TC_PLATFORM
tristate "DesignWare platform support using a G210 Test Chip"
- depends on SCSI_UFSHCD_PLATFORM
+ depends on OF && SCSI_UFSHCD_PLATFORM
help
Synopsys Test Chip is a PHY for prototyping purposes.
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 21d9b047539f..73e217260390 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -1613,6 +1613,7 @@ static int ufs_mtk_system_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM
static int ufs_mtk_runtime_suspend(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
@@ -1635,6 +1636,7 @@ static int ufs_mtk_runtime_resume(struct device *dev)
return ufshcd_runtime_resume(dev);
}
+#endif
static const struct dev_pm_ops ufs_mtk_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 4ec4174e05a3..7b4e9009f335 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -377,9 +377,26 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
snp_dev->input.data_npages = certs_npages;
}
+ /*
+ * Increment the message sequence number. There is no harm in doing
+ * this now because decryption uses the value stored in the response
+ * structure and any failure will wipe the VMPCK, preventing further
+ * use anyway.
+ */
+ snp_inc_msg_seqno(snp_dev);
+
if (fw_err)
*fw_err = err;
+ /*
+ * If an extended guest request was issued and the supplied certificate
+ * buffer was not large enough, a standard guest request was issued to
+ * prevent IV reuse. If that standard request was successful, return -EIO
+ * to the caller, as the extended request originally would have.
+ */
+ if (!rc && err == SNP_GUEST_REQ_INVALID_LEN)
+ return -EIO;
+
if (rc) {
dev_alert(snp_dev->dev,
"Detected error from ASP request. rc: %d, fw_err: %llu\n",
@@ -395,9 +412,6 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
goto disable_vmpck;
}
- /* Increment to new message sequence after payload decryption was successful. */
- snp_inc_msg_seqno(snp_dev);
-
return 0;
disable_vmpck:
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 5de74686f12b..f0872970daf9 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1871,7 +1871,9 @@ config GXP_WATCHDOG
config MT7621_WDT
tristate "Mediatek SoC watchdog"
select WATCHDOG_CORE
- depends on SOC_MT7620 || SOC_MT7621
+ select REGMAP_MMIO
+ select MFD_SYSCON
+ depends on SOC_MT7620 || SOC_MT7621 || COMPILE_TEST
help
Hardware driver for the Mediatek/Ralink MT7621/8 SoC Watchdog Timer.
diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c
index 16aca21f13d6..eddeb0fede89 100644
--- a/drivers/watchdog/apple_wdt.c
+++ b/drivers/watchdog/apple_wdt.c
@@ -136,11 +136,6 @@ static int apple_wdt_restart(struct watchdog_device *wdd, unsigned long mode,
return 0;
}
-static void apple_wdt_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static struct watchdog_ops apple_wdt_ops = {
.owner = THIS_MODULE,
.start = apple_wdt_start,
@@ -162,7 +157,6 @@ static int apple_wdt_probe(struct platform_device *pdev)
struct apple_wdt *wdt;
struct clk *clk;
u32 wdt_ctrl;
- int ret;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
@@ -172,19 +166,9 @@ static int apple_wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt->regs))
return PTR_ERR(wdt->regs);
- clk = devm_clk_get(dev, NULL);
+ clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
-
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, apple_wdt_clk_disable_unprepare,
- clk);
- if (ret)
- return ret;
-
wdt->clk_rate = clk_get_rate(clk);
if (!wdt->clk_rate)
return -EINVAL;
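
This is the first of many watchdog conversions in this diff that collapse devm_clk_get() + clk_prepare_enable() + a hand-rolled devm disable action into devm_clk_get_enabled(), which enables the clock and registers the matching cleanup itself. A minimal sketch of the resulting probe fragment (names are illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>

static int example_get_wdt_clk(struct device *dev, unsigned long *rate)
{
	struct clk *clk;

	/* get + prepare_enable + automatic disable/unprepare on detach */
	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*rate = clk_get_rate(clk);
	return *rate ? 0 : -EINVAL;
}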
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index ac9fed1ef681..e58652939f8a 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -246,11 +246,6 @@ static const struct watchdog_ops armada_37xx_wdt_ops = {
.get_timeleft = armada_37xx_wdt_get_timeleft,
};
-static void armada_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int armada_37xx_wdt_probe(struct platform_device *pdev)
{
struct armada_37xx_watchdog *dev;
@@ -280,18 +275,10 @@ static int armada_37xx_wdt_probe(struct platform_device *pdev)
return -ENOMEM;
/* init clock */
- dev->clk = devm_clk_get(&pdev->dev, NULL);
+ dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(dev->clk))
return PTR_ERR(dev->clk);
- ret = clk_prepare_enable(dev->clk);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(&pdev->dev,
- armada_clk_disable_unprepare, dev->clk);
- if (ret)
- return ret;
-
dev->clk_rate = clk_get_rate(dev->clk);
if (!dev->clk_rate)
return -EINVAL;
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index 86b5331bc491..c1e79874a2bb 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 5126454bb861..d57409c1a4d1 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -270,7 +270,7 @@ static int at91wdt_remove(struct platform_device *pdev)
misc_deregister(&at91wdt_miscdev);
at91wdt_miscdev.parent = NULL;
- return res;
+ return 0;
}
static void at91wdt_shutdown(struct platform_device *pdev)
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 292b5a1ca831..fed7be246442 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -206,10 +206,9 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
"min heartbeat and max heartbeat might be too close for the system to handle it correctly\n");
if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
- err = request_irq(wdt->irq, wdt_interrupt,
- IRQF_SHARED | IRQF_IRQPOLL |
- IRQF_NO_SUSPEND,
- pdev->name, wdt);
+ err = devm_request_irq(dev, wdt->irq, wdt_interrupt,
+ IRQF_SHARED | IRQF_IRQPOLL | IRQF_NO_SUSPEND,
+ pdev->name, wdt);
if (err)
return err;
}
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index 9388838899ac..e038dd66b819 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -127,11 +127,6 @@ static const struct watchdog_ops bcm7038_wdt_ops = {
.get_timeleft = bcm7038_wdt_get_timeleft,
};
-static void bcm7038_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int bcm7038_wdt_probe(struct platform_device *pdev)
{
struct bcm7038_wdt_platform_data *pdata = pdev->dev.platform_data;
@@ -153,17 +148,9 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
if (pdata && pdata->clk_name)
clk_name = pdata->clk_name;
- wdt->clk = devm_clk_get(dev, clk_name);
+ wdt->clk = devm_clk_get_enabled(dev, clk_name);
/* If unable to get clock, use default frequency */
if (!IS_ERR(wdt->clk)) {
- err = clk_prepare_enable(wdt->clk);
- if (err)
- return err;
- err = devm_add_action_or_reset(dev,
- bcm7038_clk_disable_unprepare,
- wdt->clk);
- if (err)
- return err;
wdt->rate = clk_get_rate(wdt->clk);
/* Prevent divide-by-zero exception */
if (!wdt->rate)
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index bc99e9164930..23d41043863f 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -274,11 +274,6 @@ static const struct watchdog_ops cdns_wdt_ops = {
.set_timeout = cdns_wdt_settimeout,
};
-static void cdns_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
/************************Platform Operations*****************************/
/**
* cdns_wdt_probe - Probe call for the device.
@@ -333,21 +328,11 @@ static int cdns_wdt_probe(struct platform_device *pdev)
watchdog_stop_on_reboot(cdns_wdt_device);
watchdog_set_drvdata(cdns_wdt_device, wdt);
- wdt->clk = devm_clk_get(dev, NULL);
+ wdt->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(wdt->clk))
return dev_err_probe(dev, PTR_ERR(wdt->clk),
"input clock not found\n");
- ret = clk_prepare_enable(wdt->clk);
- if (ret) {
- dev_err(dev, "unable to enable clock\n");
- return ret;
- }
- ret = devm_add_action_or_reset(dev, cdns_clk_disable_unprepare,
- wdt->clk);
- if (ret)
- return ret;
-
clock_f = clk_get_rate(wdt->clk);
if (clock_f <= CDNS_WDT_CLK_75MHZ) {
wdt->prescaler = CDNS_WDT_PRESCALE_512;
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index f02cbd530538..426962547df1 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -155,11 +155,20 @@ static int da9062_wdt_restart(struct watchdog_device *wdd, unsigned long action,
{
struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
struct i2c_client *client = to_i2c_client(wdt->hw->dev);
+ union i2c_smbus_data msg;
int ret;
- /* Don't use regmap because it is not atomic safe */
- ret = i2c_smbus_write_byte_data(client, DA9062AA_CONTROL_F,
- DA9062AA_SHUTDOWN_MASK);
+ /*
+ * Don't use regmap because it is not atomic-safe. Additionally, use the
+ * unlocked flavor of i2c_smbus_xfer to avoid a scenario where the i2c bus
+ * might already be locked by a process that cannot release the lock
+ * because interrupts are disabled at this late stage.
+ */
+ msg.byte = DA9062AA_SHUTDOWN_MASK;
+ ret = __i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, DA9062AA_CONTROL_F,
+ I2C_SMBUS_BYTE_DATA, &msg);
+
if (ret < 0)
dev_alert(wdt->hw->dev, "Failed to shutdown (err = %d)\n",
ret);
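
This restart handler, like the DA9063 hunk that follows, switches from i2c_smbus_write_byte_data() to the unlocked __i2c_smbus_xfer(), which skips the adapter lock; in a restart path running with interrupts disabled, a previously taken bus lock might never be released. A sketch of that call shape (the helper name is hypothetical):

#include <linux/i2c.h>

static int example_unlocked_byte_write(struct i2c_client *client, u8 reg, u8 val)
{
	union i2c_smbus_data msg = { .byte = val };

	/* bypasses the adapter bus lock on purpose; only safe in paths such
	 * as restart/shutdown where the lock may be unreleasable */
	return __i2c_smbus_xfer(client->adapter, client->addr, client->flags,
				I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA,
				&msg);
}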
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index 09a4af4c58fc..684667469b10 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -174,11 +174,20 @@ static int da9063_wdt_restart(struct watchdog_device *wdd, unsigned long action,
{
struct da9063 *da9063 = watchdog_get_drvdata(wdd);
struct i2c_client *client = to_i2c_client(da9063->dev);
+ union i2c_smbus_data msg;
int ret;
- /* Don't use regmap because it is not atomic safe */
- ret = i2c_smbus_write_byte_data(client, DA9063_REG_CONTROL_F,
- DA9063_SHUTDOWN);
+ /*
+ * Don't use regmap because it is not atomic-safe. Additionally, use the
+ * unlocked flavor of i2c_smbus_xfer to avoid a scenario where the i2c bus
+ * might already be locked by a process that cannot release the lock
+ * because interrupts are disabled at this late stage.
+ */
+ msg.byte = DA9063_SHUTDOWN;
+ ret = __i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, DA9063_REG_CONTROL_F,
+ I2C_SMBUS_BYTE_DATA, &msg);
+
if (ret < 0)
dev_alert(da9063->dev, "Failed to shutdown (err = %d)\n",
ret);
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 584a56893b81..5f2184bda7b2 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -189,14 +189,8 @@ static const struct watchdog_ops davinci_wdt_ops = {
.restart = davinci_wdt_restart,
};
-static void davinci_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int davinci_wdt_probe(struct platform_device *pdev)
{
- int ret = 0;
struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
struct davinci_wdt_device *davinci_wdt;
@@ -205,21 +199,11 @@ static int davinci_wdt_probe(struct platform_device *pdev)
if (!davinci_wdt)
return -ENOMEM;
- davinci_wdt->clk = devm_clk_get(dev, NULL);
+ davinci_wdt->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(davinci_wdt->clk))
return dev_err_probe(dev, PTR_ERR(davinci_wdt->clk),
"failed to get clock node\n");
- ret = clk_prepare_enable(davinci_wdt->clk);
- if (ret) {
- dev_err(dev, "failed to prepare clock\n");
- return ret;
- }
- ret = devm_add_action_or_reset(dev, davinci_clk_disable_unprepare,
- davinci_wdt->clk);
- if (ret)
- return ret;
-
platform_set_drvdata(pdev, davinci_wdt);
wdd = &davinci_wdt->wdd;
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index 52962e8d11a6..462f15bd5ffa 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -663,6 +663,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dw_wdt);
watchdog_set_restart_priority(wdd, 128);
+ watchdog_stop_on_reboot(wdd);
ret = watchdog_register_device(wdd);
if (ret)
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index e937b4dd28be..264857d314da 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -441,11 +441,10 @@ static bool iTCO_wdt_set_running(struct iTCO_wdt_private *p)
* Kernel Interfaces
*/
-static const struct watchdog_info ident = {
+static struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
- .firmware_version = 0,
.identity = DRV_NAME,
};
@@ -563,6 +562,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
break;
}
+ ident.firmware_version = p->iTCO_version;
p->wddev.info = &ident,
p->wddev.ops = &iTCO_wdt_ops,
p->wddev.bootstatus = 0;
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index b57ff3787052..a55f801895d4 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -175,16 +175,11 @@ static const struct watchdog_ops pdc_wdt_ops = {
.restart = pdc_wdt_restart,
};
-static void pdc_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int pdc_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
u64 div;
- int ret, val;
+ int val;
unsigned long clk_rate;
struct pdc_wdt_dev *pdc_wdt;
@@ -196,38 +191,18 @@ static int pdc_wdt_probe(struct platform_device *pdev)
if (IS_ERR(pdc_wdt->base))
return PTR_ERR(pdc_wdt->base);
- pdc_wdt->sys_clk = devm_clk_get(dev, "sys");
+ pdc_wdt->sys_clk = devm_clk_get_enabled(dev, "sys");
if (IS_ERR(pdc_wdt->sys_clk)) {
dev_err(dev, "failed to get the sys clock\n");
return PTR_ERR(pdc_wdt->sys_clk);
}
- pdc_wdt->wdt_clk = devm_clk_get(dev, "wdt");
+ pdc_wdt->wdt_clk = devm_clk_get_enabled(dev, "wdt");
if (IS_ERR(pdc_wdt->wdt_clk)) {
dev_err(dev, "failed to get the wdt clock\n");
return PTR_ERR(pdc_wdt->wdt_clk);
}
- ret = clk_prepare_enable(pdc_wdt->sys_clk);
- if (ret) {
- dev_err(dev, "could not prepare or enable sys clock\n");
- return ret;
- }
- ret = devm_add_action_or_reset(dev, pdc_clk_disable_unprepare,
- pdc_wdt->sys_clk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(pdc_wdt->wdt_clk);
- if (ret) {
- dev_err(dev, "could not prepare or enable wdt clock\n");
- return ret;
- }
- ret = devm_add_action_or_reset(dev, pdc_clk_disable_unprepare,
- pdc_wdt->wdt_clk);
- if (ret)
- return ret;
-
/* We use the clock rate to calculate the max timeout */
clk_rate = clk_get_rate(pdc_wdt->wdt_clk);
if (clk_rate == 0) {
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index d0c5d47ddede..19ab7b3d286b 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/watchdog.h>
@@ -35,6 +36,7 @@
#define IMX2_WDT_WCR 0x00 /* Control Register */
#define IMX2_WDT_WCR_WT (0xFF << 8) /* -> Watchdog Timeout Field */
+#define IMX2_WDT_WCR_WDW BIT(7) /* -> Watchdog disable for WAIT */
#define IMX2_WDT_WCR_WDA BIT(5) /* -> External Reset WDOG_B */
#define IMX2_WDT_WCR_SRS BIT(4) /* -> Software Reset Signal */
#define IMX2_WDT_WCR_WRE BIT(3) /* -> WDOG Reset Enable */
@@ -60,13 +62,19 @@
#define WDOG_SEC_TO_COUNT(s) ((s * 2 - 1) << 8)
+struct imx2_wdt_data {
+ bool wdw_supported;
+};
+
struct imx2_wdt_device {
struct clk *clk;
struct regmap *regmap;
struct watchdog_device wdog;
+ const struct imx2_wdt_data *data;
bool ext_reset;
bool clk_is_on;
bool no_ping;
+ bool sleep_wait;
};
static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -129,6 +137,9 @@ static inline void imx2_wdt_setup(struct watchdog_device *wdog)
/* Suspend timer in low power mode, write once-only */
val |= IMX2_WDT_WCR_WDZST;
+ /* Suspend timer in low power WAIT mode, write once-only */
+ if (wdev->sleep_wait)
+ val |= IMX2_WDT_WCR_WDW;
/* Strip the old watchdog Time-Out value */
val &= ~IMX2_WDT_WCR_WT;
/* Generate internal chip-level reset if WDOG times out */
@@ -292,6 +303,8 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
wdog->max_hw_heartbeat_ms = IMX2_WDT_MAX_TIME * 1000;
wdog->parent = dev;
+ wdev->data = of_device_get_match_data(dev);
+
ret = platform_get_irq(pdev, 0);
if (ret > 0)
if (!devm_request_irq(dev, ret, imx2_wdt_isr, 0,
@@ -313,9 +326,18 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
wdev->ext_reset = of_property_read_bool(dev->of_node,
"fsl,ext-reset-output");
+
+ if (of_property_read_bool(dev->of_node, "fsl,suspend-in-wait")) {
+ if (!wdev->data->wdw_supported) {
+ dev_err(dev, "suspend-in-wait not supported\n");
+ return -EINVAL;
+ }
+ wdev->sleep_wait = true;
+ }
+
/*
* The i.MX7D doesn't support low power mode, so we need to ping the watchdog
- * during suspend.
+ * during suspend. Interaction with "fsl,suspend-in-wait" is unknown!
*/
wdev->no_ping = !of_device_is_compatible(dev->of_node, "fsl,imx7d-wdt");
platform_set_drvdata(pdev, wdog);
@@ -417,9 +439,36 @@ static int __maybe_unused imx2_wdt_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(imx2_wdt_pm_ops, imx2_wdt_suspend,
imx2_wdt_resume);
+struct imx2_wdt_data imx_wdt = {
+ .wdw_supported = true,
+};
+
+struct imx2_wdt_data imx_wdt_legacy = {
+ .wdw_supported = false,
+};
+
static const struct of_device_id imx2_wdt_dt_ids[] = {
- { .compatible = "fsl,imx21-wdt", },
- { .compatible = "fsl,imx7d-wdt", },
+ { .compatible = "fsl,imx21-wdt", .data = &imx_wdt_legacy },
+ { .compatible = "fsl,imx25-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx27-wdt", .data = &imx_wdt_legacy },
+ { .compatible = "fsl,imx31-wdt", .data = &imx_wdt_legacy },
+ { .compatible = "fsl,imx35-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx50-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx51-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx53-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx6q-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx6sl-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx6sll-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx6sx-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx6ul-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx7d-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx8mm-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx8mn-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx8mp-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,imx8mq-wdt", .data = &imx_wdt },
+ { .compatible = "fsl,ls1012a-wdt", .data = &imx_wdt_legacy },
+ { .compatible = "fsl,ls1043a-wdt", .data = &imx_wdt_legacy },
+ { .compatible = "fsl,vf610-wdt", .data = &imx_wdt },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx2_wdt_dt_ids);
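
The new per-compatible imx2_wdt_data lets the probe reject "fsl,suspend-in-wait" on SoCs whose WDOG lacks the WCR_WDW bit. A hedged sketch of that match-data lookup (structure and helper names mirror, but are not, the driver's):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>

struct example_wdt_data {
	bool wdw_supported;
};

static int example_check_suspend_in_wait(struct device *dev)
{
	const struct example_wdt_data *data = of_device_get_match_data(dev);

	if (!of_property_read_bool(dev->of_node, "fsl,suspend-in-wait"))
		return 0;	/* property absent: nothing to validate */

	if (!data || !data->wdw_supported)
		return -EINVAL;	/* silicon cannot suspend the counter in WAIT */

	return 0;
}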
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 2897902090b3..7ca486794ba7 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -299,11 +299,6 @@ static int imx7ulp_wdt_init(struct imx7ulp_wdt_device *wdt, unsigned int timeout
return ret;
}
-static void imx7ulp_wdt_action(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int imx7ulp_wdt_probe(struct platform_device *pdev)
{
struct imx7ulp_wdt_device *imx7ulp_wdt;
@@ -321,7 +316,7 @@ static int imx7ulp_wdt_probe(struct platform_device *pdev)
if (IS_ERR(imx7ulp_wdt->base))
return PTR_ERR(imx7ulp_wdt->base);
- imx7ulp_wdt->clk = devm_clk_get(dev, NULL);
+ imx7ulp_wdt->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(imx7ulp_wdt->clk)) {
dev_err(dev, "Failed to get watchdog clock\n");
return PTR_ERR(imx7ulp_wdt->clk);
@@ -336,14 +331,6 @@ static int imx7ulp_wdt_probe(struct platform_device *pdev)
dev_info(dev, "imx7ulp wdt probe\n");
}
- ret = clk_prepare_enable(imx7ulp_wdt->clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, imx7ulp_wdt_action, imx7ulp_wdt->clk);
- if (ret)
- return ret;
-
wdog = &imx7ulp_wdt->wdd;
wdog->info = &imx7ulp_wdt_info;
wdog->ops = &imx7ulp_wdt_ops;
diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c
index 60b6d74f267d..1b9b5f21a0df 100644
--- a/drivers/watchdog/lpc18xx_wdt.c
+++ b/drivers/watchdog/lpc18xx_wdt.c
@@ -197,16 +197,10 @@ static const struct watchdog_ops lpc18xx_wdt_ops = {
.restart = lpc18xx_wdt_restart,
};
-static void lpc18xx_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int lpc18xx_wdt_probe(struct platform_device *pdev)
{
struct lpc18xx_wdt_dev *lpc18xx_wdt;
struct device *dev = &pdev->dev;
- int ret;
lpc18xx_wdt = devm_kzalloc(dev, sizeof(*lpc18xx_wdt), GFP_KERNEL);
if (!lpc18xx_wdt)
@@ -216,38 +210,18 @@ static int lpc18xx_wdt_probe(struct platform_device *pdev)
if (IS_ERR(lpc18xx_wdt->base))
return PTR_ERR(lpc18xx_wdt->base);
- lpc18xx_wdt->reg_clk = devm_clk_get(dev, "reg");
+ lpc18xx_wdt->reg_clk = devm_clk_get_enabled(dev, "reg");
if (IS_ERR(lpc18xx_wdt->reg_clk)) {
dev_err(dev, "failed to get the reg clock\n");
return PTR_ERR(lpc18xx_wdt->reg_clk);
}
- lpc18xx_wdt->wdt_clk = devm_clk_get(dev, "wdtclk");
+ lpc18xx_wdt->wdt_clk = devm_clk_get_enabled(dev, "wdtclk");
if (IS_ERR(lpc18xx_wdt->wdt_clk)) {
dev_err(dev, "failed to get the wdt clock\n");
return PTR_ERR(lpc18xx_wdt->wdt_clk);
}
- ret = clk_prepare_enable(lpc18xx_wdt->reg_clk);
- if (ret) {
- dev_err(dev, "could not prepare or enable sys clock\n");
- return ret;
- }
- ret = devm_add_action_or_reset(dev, lpc18xx_clk_disable_unprepare,
- lpc18xx_wdt->reg_clk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(lpc18xx_wdt->wdt_clk);
- if (ret) {
- dev_err(dev, "could not prepare or enable wdt clock\n");
- return ret;
- }
- ret = devm_add_action_or_reset(dev, lpc18xx_clk_disable_unprepare,
- lpc18xx_wdt->wdt_clk);
- if (ret)
- return ret;
-
/* We use the clock rate to calculate timeouts */
lpc18xx_wdt->clk_rate = clk_get_rate(lpc18xx_wdt->wdt_clk);
if (lpc18xx_wdt->clk_rate == 0) {
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index 981a2f7c3bec..35d80cb39856 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -146,16 +146,10 @@ static const struct of_device_id meson_gxbb_wdt_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, meson_gxbb_wdt_dt_ids);
-static void meson_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int meson_gxbb_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_gxbb_wdt *data;
- int ret;
u32 ctrl_reg;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
@@ -166,18 +160,10 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
if (IS_ERR(data->reg_base))
return PTR_ERR(data->reg_base);
- data->clk = devm_clk_get(dev, NULL);
+ data->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
- ret = clk_prepare_enable(data->clk);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(dev, meson_clk_disable_unprepare,
- data->clk);
- if (ret)
- return ret;
-
platform_set_drvdata(pdev, data);
data->wdt_dev.parent = dev;
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
index a8aa3522cfda..442731bba194 100644
--- a/drivers/watchdog/mt7621_wdt.c
+++ b/drivers/watchdog/mt7621_wdt.c
@@ -15,8 +15,8 @@
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
-
-#include <asm/mach-ralink/ralink_regs.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define SYSC_RSTSTAT 0x38
#define WDT_RST_CAUSE BIT(1)
@@ -31,8 +31,12 @@
#define TMR1CTL_RESTART BIT(9)
#define TMR1CTL_PRESCALE_SHIFT 16
-static void __iomem *mt7621_wdt_base;
-static struct reset_control *mt7621_wdt_reset;
+struct mt7621_wdt_data {
+ void __iomem *base;
+ struct reset_control *rst;
+ struct regmap *sysc;
+ struct watchdog_device wdt;
+};
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
@@ -40,27 +44,31 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static inline void rt_wdt_w32(unsigned reg, u32 val)
+static inline void rt_wdt_w32(void __iomem *base, unsigned int reg, u32 val)
{
- iowrite32(val, mt7621_wdt_base + reg);
+ iowrite32(val, base + reg);
}
-static inline u32 rt_wdt_r32(unsigned reg)
+static inline u32 rt_wdt_r32(void __iomem *base, unsigned int reg)
{
- return ioread32(mt7621_wdt_base + reg);
+ return ioread32(base + reg);
}
static int mt7621_wdt_ping(struct watchdog_device *w)
{
- rt_wdt_w32(TIMER_REG_TMRSTAT, TMR1CTL_RESTART);
+ struct mt7621_wdt_data *drvdata = watchdog_get_drvdata(w);
+
+ rt_wdt_w32(drvdata->base, TIMER_REG_TMRSTAT, TMR1CTL_RESTART);
return 0;
}
static int mt7621_wdt_set_timeout(struct watchdog_device *w, unsigned int t)
{
+ struct mt7621_wdt_data *drvdata = watchdog_get_drvdata(w);
+
w->timeout = t;
- rt_wdt_w32(TIMER_REG_TMR1LOAD, t * 1000);
+ rt_wdt_w32(drvdata->base, TIMER_REG_TMR1LOAD, t * 1000);
mt7621_wdt_ping(w);
return 0;
@@ -68,36 +76,41 @@ static int mt7621_wdt_set_timeout(struct watchdog_device *w, unsigned int t)
static int mt7621_wdt_start(struct watchdog_device *w)
{
+ struct mt7621_wdt_data *drvdata = watchdog_get_drvdata(w);
u32 t;
/* set the prescaler to 1ms == 1000us */
- rt_wdt_w32(TIMER_REG_TMR1CTL, 1000 << TMR1CTL_PRESCALE_SHIFT);
+ rt_wdt_w32(drvdata->base, TIMER_REG_TMR1CTL, 1000 << TMR1CTL_PRESCALE_SHIFT);
mt7621_wdt_set_timeout(w, w->timeout);
- t = rt_wdt_r32(TIMER_REG_TMR1CTL);
+ t = rt_wdt_r32(drvdata->base, TIMER_REG_TMR1CTL);
t |= TMR1CTL_ENABLE;
- rt_wdt_w32(TIMER_REG_TMR1CTL, t);
+ rt_wdt_w32(drvdata->base, TIMER_REG_TMR1CTL, t);
return 0;
}
static int mt7621_wdt_stop(struct watchdog_device *w)
{
+ struct mt7621_wdt_data *drvdata = watchdog_get_drvdata(w);
u32 t;
mt7621_wdt_ping(w);
- t = rt_wdt_r32(TIMER_REG_TMR1CTL);
+ t = rt_wdt_r32(drvdata->base, TIMER_REG_TMR1CTL);
t &= ~TMR1CTL_ENABLE;
- rt_wdt_w32(TIMER_REG_TMR1CTL, t);
+ rt_wdt_w32(drvdata->base, TIMER_REG_TMR1CTL, t);
return 0;
}
-static int mt7621_wdt_bootcause(void)
+static int mt7621_wdt_bootcause(struct mt7621_wdt_data *d)
{
- if (rt_sysc_r32(SYSC_RSTSTAT) & WDT_RST_CAUSE)
+ u32 val;
+
+ regmap_read(d->sysc, SYSC_RSTSTAT, &val);
+ if (val & WDT_RST_CAUSE)
return WDIOF_CARDRESET;
return 0;
@@ -105,7 +118,9 @@ static int mt7621_wdt_bootcause(void)
static int mt7621_wdt_is_running(struct watchdog_device *w)
{
- return !!(rt_wdt_r32(TIMER_REG_TMR1CTL) & TMR1CTL_ENABLE);
+ struct mt7621_wdt_data *drvdata = watchdog_get_drvdata(w);
+
+ return !!(rt_wdt_r32(drvdata->base, TIMER_REG_TMR1CTL) & TMR1CTL_ENABLE);
}
static const struct watchdog_info mt7621_wdt_info = {
@@ -121,30 +136,47 @@ static const struct watchdog_ops mt7621_wdt_ops = {
.set_timeout = mt7621_wdt_set_timeout,
};
-static struct watchdog_device mt7621_wdt_dev = {
- .info = &mt7621_wdt_info,
- .ops = &mt7621_wdt_ops,
- .min_timeout = 1,
- .max_timeout = 0xfffful / 1000,
-};
-
static int mt7621_wdt_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- mt7621_wdt_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mt7621_wdt_base))
- return PTR_ERR(mt7621_wdt_base);
+ struct watchdog_device *mt7621_wdt;
+ struct mt7621_wdt_data *drvdata;
+ int err;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->sysc = syscon_regmap_lookup_by_phandle(np, "mediatek,sysctl");
+ if (IS_ERR(drvdata->sysc)) {
+ drvdata->sysc = syscon_regmap_lookup_by_compatible("mediatek,mt7621-sysc");
+ if (IS_ERR(drvdata->sysc))
+ return PTR_ERR(drvdata->sysc);
+ }
+
+ drvdata->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+ drvdata->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (!IS_ERR(drvdata->rst))
+ reset_control_deassert(drvdata->rst);
+
+ mt7621_wdt = &drvdata->wdt;
+ mt7621_wdt->info = &mt7621_wdt_info;
+ mt7621_wdt->ops = &mt7621_wdt_ops;
+ mt7621_wdt->min_timeout = 1;
+ mt7621_wdt->max_timeout = 0xfffful / 1000;
+ mt7621_wdt->parent = dev;
- mt7621_wdt_reset = devm_reset_control_get_exclusive(dev, NULL);
- if (!IS_ERR(mt7621_wdt_reset))
- reset_control_deassert(mt7621_wdt_reset);
+ mt7621_wdt->bootstatus = mt7621_wdt_bootcause(drvdata);
- mt7621_wdt_dev.bootstatus = mt7621_wdt_bootcause();
+ watchdog_init_timeout(mt7621_wdt, mt7621_wdt->max_timeout, dev);
+ watchdog_set_nowayout(mt7621_wdt, nowayout);
+ watchdog_set_drvdata(mt7621_wdt, drvdata);
- watchdog_init_timeout(&mt7621_wdt_dev, mt7621_wdt_dev.max_timeout,
- dev);
- watchdog_set_nowayout(&mt7621_wdt_dev, nowayout);
- if (mt7621_wdt_is_running(&mt7621_wdt_dev)) {
+ if (mt7621_wdt_is_running(mt7621_wdt)) {
/*
* Make sure to apply timeout from watchdog core, taking
* the prescaler of this driver here into account (the
@@ -154,17 +186,25 @@ static int mt7621_wdt_probe(struct platform_device *pdev)
* we first disable the watchdog, set the new prescaler
* and timeout, and then re-enable the watchdog.
*/
- mt7621_wdt_stop(&mt7621_wdt_dev);
- mt7621_wdt_start(&mt7621_wdt_dev);
- set_bit(WDOG_HW_RUNNING, &mt7621_wdt_dev.status);
+ mt7621_wdt_stop(mt7621_wdt);
+ mt7621_wdt_start(mt7621_wdt);
+ set_bit(WDOG_HW_RUNNING, &mt7621_wdt->status);
}
- return devm_watchdog_register_device(dev, &mt7621_wdt_dev);
+ err = devm_watchdog_register_device(dev, &drvdata->wdt);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, drvdata);
+
+ return 0;
}
static void mt7621_wdt_shutdown(struct platform_device *pdev)
{
- mt7621_wdt_stop(&mt7621_wdt_dev);
+ struct mt7621_wdt_data *drvdata = platform_get_drvdata(pdev);
+
+ mt7621_wdt_stop(&drvdata->wdt);
}
static const struct of_device_id mt7621_wdt_match[] = {
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 3e6212591e69..a9c437598e7e 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -50,6 +50,7 @@
#define WDT_MODE_IRQ_EN (1 << 3)
#define WDT_MODE_AUTO_START (1 << 4)
#define WDT_MODE_DUAL_EN (1 << 6)
+#define WDT_MODE_CNT_SEL (1 << 8)
#define WDT_MODE_KEY 0x22000000
#define WDT_SWRST 0x14
@@ -70,6 +71,7 @@ struct mtk_wdt_dev {
spinlock_t lock; /* protects WDT_SWSYSRST reg */
struct reset_controller_dev rcdev;
bool disable_wdt_extrst;
+ bool reset_by_toprgu;
};
struct mtk_wdt_data {
@@ -279,6 +281,8 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev)
reg &= ~(WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
if (mtk_wdt->disable_wdt_extrst)
reg &= ~WDT_MODE_EXRST_EN;
+ if (mtk_wdt->reset_by_toprgu)
+ reg |= WDT_MODE_CNT_SEL;
reg |= (WDT_MODE_EN | WDT_MODE_KEY);
iowrite32(reg, wdt_base + WDT_MODE);
@@ -408,6 +412,9 @@ static int mtk_wdt_probe(struct platform_device *pdev)
mtk_wdt->disable_wdt_extrst =
of_property_read_bool(dev->of_node, "mediatek,disable-extrst");
+ mtk_wdt->reset_by_toprgu =
+ of_property_read_bool(dev->of_node, "mediatek,reset-by-toprgu");
+
return 0;
}
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 3318544366b8..2a079ca04aa3 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -154,11 +154,6 @@ static u32 xwdt_selftest(struct xwdt_device *xdev)
return XWT_TIMER_FAILED;
}
-static void xwdt_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int xwdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -193,7 +188,7 @@ static int xwdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(xilinx_wdt_wdd, enable_once);
- xdev->clk = devm_clk_get(dev, NULL);
+ xdev->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(xdev->clk)) {
if (PTR_ERR(xdev->clk) != -ENOENT)
return PTR_ERR(xdev->clk);
@@ -211,15 +206,6 @@ static int xwdt_probe(struct platform_device *pdev)
"The watchdog clock freq cannot be obtained\n");
} else {
pfreq = clk_get_rate(xdev->clk);
- rc = clk_prepare_enable(xdev->clk);
- if (rc) {
- dev_err(dev, "unable to enable clock\n");
- return rc;
- }
- rc = devm_add_action_or_reset(dev, xwdt_clk_disable_unprepare,
- xdev->clk);
- if (rc)
- return rc;
}
/*
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 1bdaf17c1d38..8202f0a6b093 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -325,7 +325,8 @@ static int usb_pcwd_set_heartbeat(struct usb_pcwd_private *usb_pcwd, int t)
static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd,
int *temperature)
{
- unsigned char msb, lsb;
+ unsigned char msb = 0x00;
+ unsigned char lsb = 0x00;
usb_pcwd_send_command(usb_pcwd, CMD_READ_TEMP, &msb, &lsb);
@@ -341,7 +342,8 @@ static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd,
static int usb_pcwd_get_timeleft(struct usb_pcwd_private *usb_pcwd,
int *time_left)
{
- unsigned char msb, lsb;
+ unsigned char msb = 0x00;
+ unsigned char lsb = 0x00;
/* Read the time that's left before rebooting */
/* Note: if the board is not yet armed then we will read 0xFFFF */
diff --git a/drivers/watchdog/pic32-dmt.c b/drivers/watchdog/pic32-dmt.c
index f43062b3c4c8..bc4ccddc75a3 100644
--- a/drivers/watchdog/pic32-dmt.c
+++ b/drivers/watchdog/pic32-dmt.c
@@ -164,11 +164,6 @@ static struct watchdog_device pic32_dmt_wdd = {
.ops = &pic32_dmt_fops,
};
-static void pic32_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int pic32_dmt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -184,20 +179,12 @@ static int pic32_dmt_probe(struct platform_device *pdev)
if (IS_ERR(dmt->regs))
return PTR_ERR(dmt->regs);
- dmt->clk = devm_clk_get(dev, NULL);
+ dmt->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(dmt->clk)) {
dev_err(dev, "clk not found\n");
return PTR_ERR(dmt->clk);
}
- ret = clk_prepare_enable(dmt->clk);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(dev, pic32_clk_disable_unprepare,
- dmt->clk);
- if (ret)
- return ret;
-
wdd->timeout = pic32_dmt_get_timeout_secs(dmt);
if (!wdd->timeout) {
dev_err(dev, "failed to read watchdog register timeout\n");
diff --git a/drivers/watchdog/pic32-wdt.c b/drivers/watchdog/pic32-wdt.c
index 41715d68d9e9..6d1a00222991 100644
--- a/drivers/watchdog/pic32-wdt.c
+++ b/drivers/watchdog/pic32-wdt.c
@@ -162,11 +162,6 @@ static const struct of_device_id pic32_wdt_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, pic32_wdt_dt_ids);
-static void pic32_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int pic32_wdt_drv_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -186,22 +181,12 @@ static int pic32_wdt_drv_probe(struct platform_device *pdev)
if (!wdt->rst_base)
return -ENOMEM;
- wdt->clk = devm_clk_get(dev, NULL);
+ wdt->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(wdt->clk)) {
dev_err(dev, "clk not found\n");
return PTR_ERR(wdt->clk);
}
- ret = clk_prepare_enable(wdt->clk);
- if (ret) {
- dev_err(dev, "clk enable failed\n");
- return ret;
- }
- ret = devm_add_action_or_reset(dev, pic32_clk_disable_unprepare,
- wdt->clk);
- if (ret)
- return ret;
-
if (pic32_wdt_is_win_enabled(wdt)) {
dev_err(dev, "windowed-clear mode is not supported.\n");
return -ENODEV;
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index e0ea133c1690..87a44a5675a1 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -179,11 +179,6 @@ static struct watchdog_device pnx4008_wdd = {
.max_timeout = MAX_HEARTBEAT,
};
-static void pnx4008_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int pnx4008_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -195,18 +190,10 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt_base))
return PTR_ERR(wdt_base);
- wdt_clk = devm_clk_get(dev, NULL);
+ wdt_clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(wdt_clk))
return PTR_ERR(wdt_clk);
- ret = clk_prepare_enable(wdt_clk);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(dev, pnx4008_clk_disable_unprepare,
- wdt_clk);
- if (ret)
- return ret;
-
pnx4008_wdd.bootstatus = (readl(WDTIM_RES(wdt_base)) & WDOG_RESET) ?
WDIOF_CARDRESET : 0;
pnx4008_wdd.parent = dev;
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 0d2209c5eaca..d776474dcdf3 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -175,11 +175,6 @@ static const struct watchdog_info qcom_wdt_pt_info = {
.identity = KBUILD_MODNAME,
};
-static void qcom_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static const struct qcom_wdt_match_data match_data_apcs_tmr = {
.offset = reg_offset_data_apcs_tmr,
.pretimeout = false,
@@ -226,21 +221,12 @@ static int qcom_wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- clk = devm_clk_get(dev, NULL);
+ clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
dev_err(dev, "failed to get input clock\n");
return PTR_ERR(clk);
}
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "failed to setup clock\n");
- return ret;
- }
- ret = devm_add_action_or_reset(dev, qcom_clk_disable_unprepare, clk);
- if (ret)
- return ret;
-
/*
* We use the clock rate to calculate the max timeout, so ensure it's
* not zero to avoid a divide-by-zero exception.
diff --git a/drivers/watchdog/realtek_otto_wdt.c b/drivers/watchdog/realtek_otto_wdt.c
index 2a5298c5e8e4..2c30ddd574c5 100644
--- a/drivers/watchdog/realtek_otto_wdt.c
+++ b/drivers/watchdog/realtek_otto_wdt.c
@@ -235,27 +235,14 @@ static const struct watchdog_info otto_wdt_info = {
WDIOF_PRETIMEOUT,
};
-static void otto_wdt_clock_action(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int otto_wdt_probe_clk(struct otto_wdt_ctrl *ctrl)
{
- struct clk *clk = devm_clk_get(ctrl->dev, NULL);
- int ret;
+ struct clk *clk;
+ clk = devm_clk_get_enabled(ctrl->dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(ctrl->dev, PTR_ERR(clk), "Failed to get clock\n");
- ret = clk_prepare_enable(clk);
- if (ret)
- return dev_err_probe(ctrl->dev, ret, "Failed to enable clock\n");
-
- ret = devm_add_action_or_reset(ctrl->dev, otto_wdt_clock_action, clk);
- if (ret)
- return ret;
-
ctrl->clk_rate_khz = clk_get_rate(clk) / 1000;
if (ctrl->clk_rate_khz == 0)
return dev_err_probe(ctrl->dev, -ENXIO, "Failed to get clock rate\n");
diff --git a/drivers/watchdog/rtd119x_wdt.c b/drivers/watchdog/rtd119x_wdt.c
index 834b94ff3f90..95c8d7abce42 100644
--- a/drivers/watchdog/rtd119x_wdt.c
+++ b/drivers/watchdog/rtd119x_wdt.c
@@ -94,16 +94,10 @@ static const struct of_device_id rtd119x_wdt_dt_ids[] = {
{ }
};
-static void rtd119x_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int rtd119x_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rtd119x_watchdog_device *data;
- int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -113,18 +107,10 @@ static int rtd119x_wdt_probe(struct platform_device *pdev)
if (IS_ERR(data->base))
return PTR_ERR(data->base);
- data->clk = devm_clk_get(dev, NULL);
+ data->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
- ret = clk_prepare_enable(data->clk);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(dev, rtd119x_clk_disable_unprepare,
- data->clk);
- if (ret)
- return ret;
-
data->wdt_dev.info = &rtd119x_wdt_info;
data->wdt_dev.ops = &rtd119x_wdt_ops;
data->wdt_dev.timeout = 120;
diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
index 974a4194a8fd..d404953d0e0f 100644
--- a/drivers/watchdog/rzg2l_wdt.c
+++ b/drivers/watchdog/rzg2l_wdt.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -35,6 +36,8 @@
#define F2CYCLE_NSEC(f) (1000000000 / (f))
+#define RZV2M_A_NSEC 730
+
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
@@ -51,11 +54,35 @@ struct rzg2l_wdt_priv {
struct reset_control *rstc;
unsigned long osc_clk_rate;
unsigned long delay;
+ unsigned long minimum_assertion_period;
struct clk *pclk;
struct clk *osc_clk;
enum rz_wdt_type devtype;
};
+static int rzg2l_wdt_reset(struct rzg2l_wdt_priv *priv)
+{
+ int err, status;
+
+ if (priv->devtype == WDT_RZV2M) {
+ /* WDT needs TYPE-B reset control */
+ err = reset_control_assert(priv->rstc);
+ if (err)
+ return err;
+ ndelay(priv->minimum_assertion_period);
+ err = reset_control_deassert(priv->rstc);
+ if (err)
+ return err;
+ err = read_poll_timeout(reset_control_status, status,
+ status != 1, 0, 1000, false,
+ priv->rstc);
+ } else {
+ err = reset_control_reset(priv->rstc);
+ }
+
+ return err;
+}
+
static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
{
/* delay timer when change the setting register */
@@ -115,25 +142,23 @@ static int rzg2l_wdt_stop(struct watchdog_device *wdev)
{
struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ rzg2l_wdt_reset(priv);
pm_runtime_put(wdev->parent);
- reset_control_reset(priv->rstc);
return 0;
}
static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout)
{
- struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
-
wdev->timeout = timeout;
/*
* If the watchdog is active, reset the module for updating the WDTSET
- * register so that it is updated with new timeout values.
+ * register by calling rzg2l_wdt_stop() (which internally calls reset_control_reset()
+ * to reset the module) so that it is updated with new timeout values.
*/
if (watchdog_active(wdev)) {
- pm_runtime_put(wdev->parent);
- reset_control_reset(priv->rstc);
+ rzg2l_wdt_stop(wdev);
rzg2l_wdt_start(wdev);
}
@@ -156,6 +181,7 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev,
rzg2l_wdt_write(priv, PEEN_FORCE, PEEN);
} else {
/* RZ/V2M doesn't have parity error registers */
+ rzg2l_wdt_reset(priv);
wdev->timeout = 0;
@@ -253,6 +279,13 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
priv->devtype = (uintptr_t)of_device_get_match_data(dev);
+ if (priv->devtype == WDT_RZV2M) {
+ priv->minimum_assertion_period = RZV2M_A_NSEC +
+ 3 * F2CYCLE_NSEC(pclk_rate) + 5 *
+ max(F2CYCLE_NSEC(priv->osc_clk_rate),
+ F2CYCLE_NSEC(pclk_rate));
+ }
+
pm_runtime_enable(&pdev->dev);
priv->wdev.info = &rzg2l_wdt_ident;
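
The new rzg2l_wdt_reset() helper implements the TYPE-B reset sequence RZ/V2M needs: assert the reset line, hold it for the minimum assertion period computed in probe (730 ns plus a few PCLK and OSC cycles), deassert, then poll reset_control_status() until the controller reports the line released. A condensed sketch of that assert/delay/deassert/poll shape, with hypothetical names:

#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/reset.h>

/* Sketch mirroring the RZ/V2M branch of rzg2l_wdt_reset() above. */
static int foo_typeb_reset(struct reset_control *rstc, unsigned long assert_ns)
{
	int err, status;

	err = reset_control_assert(rstc);
	if (err)
		return err;

	/* keep the line asserted for at least the minimum assertion period */
	ndelay(assert_ns);

	err = reset_control_deassert(rstc);
	if (err)
		return err;

	/* reset_control_status() reports 1 here while the line is still
	 * asserted; poll until that changes, waiting at most 1 ms */
	return read_poll_timeout(reset_control_status, status,
				 status != 1, 0, 1000, false, rstc);
}
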
diff --git a/drivers/watchdog/rzn1_wdt.c b/drivers/watchdog/rzn1_wdt.c
index 55ab384b9965..980c1717adb5 100644
--- a/drivers/watchdog/rzn1_wdt.c
+++ b/drivers/watchdog/rzn1_wdt.c
@@ -98,11 +98,6 @@ static const struct watchdog_ops rzn1_wdt_ops = {
.ping = rzn1_wdt_ping,
};
-static void rzn1_wdt_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int rzn1_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -132,23 +127,12 @@ static int rzn1_wdt_probe(struct platform_device *pdev)
return ret;
}
- clk = devm_clk_get(dev, NULL);
+ clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
dev_err(dev, "failed to get the clock\n");
return PTR_ERR(clk);
}
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "failed to prepare/enable the clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev, rzn1_wdt_clk_disable_unprepare,
- clk);
- if (ret)
- return ret;
-
clk_rate = clk_get_rate(clk);
if (!clk_rate) {
dev_err(dev, "failed to get the clock rate\n");
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index 9791c74aebd4..63862803421f 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -150,6 +150,7 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
wdd->timeout = timeout;
+ timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
if (action)
sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt);
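
The clamp keeps the value programmed into the hardware within what the counter can hold, while wdd->timeout still records the full value requested by user space; the watchdog core bridges the gap by pinging the device itself whenever the requested timeout exceeds max_hw_heartbeat_ms. A hedged sketch of that split, using a hypothetical set_timeout callback:

#include <linux/minmax.h>
#include <linux/watchdog.h>

/* Hypothetical register writer standing in for the device-specific code. */
static void foo_wdt_write_hw(struct watchdog_device *wdd, unsigned int secs)
{
	/* program 'secs' into the hardware countdown register here */
}

static int foo_wdt_set_timeout(struct watchdog_device *wdd,
			       unsigned int timeout)
{
	unsigned int hw_timeout;

	wdd->timeout = timeout;		/* keep what user space asked for */
	hw_timeout = clamp_t(unsigned int, timeout, 1,
			     wdd->max_hw_heartbeat_ms / 1000);

	foo_wdt_write_hw(wdd, hw_timeout);
	return 0;
}
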
diff --git a/drivers/watchdog/visconti_wdt.c b/drivers/watchdog/visconti_wdt.c
index 83ef55e66ca8..cef0794708e7 100644
--- a/drivers/watchdog/visconti_wdt.c
+++ b/drivers/watchdog/visconti_wdt.c
@@ -112,11 +112,6 @@ static const struct watchdog_ops visconti_wdt_ops = {
.set_timeout = visconti_wdt_set_timeout,
};
-static void visconti_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int visconti_wdt_probe(struct platform_device *pdev)
{
struct watchdog_device *wdev;
@@ -134,20 +129,10 @@ static int visconti_wdt_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- clk = devm_clk_get(dev, NULL);
+ clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(dev, PTR_ERR(clk), "Could not get clock\n");
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "Could not enable clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev, visconti_clk_disable_unprepare, clk);
- if (ret)
- return ret;
-
clk_freq = clk_get_rate(clk);
if (!clk_freq)
return -EINVAL;
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 55574ed42504..0122e8796879 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -35,6 +35,7 @@
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/hrtimer.h> /* For hrtimers */
#include <linux/kernel.h> /* For printk/panic/... */
+#include <linux/kstrtox.h> /* For kstrto* */
#include <linux/kthread.h> /* For kthread_work */
#include <linux/miscdevice.h> /* For handling misc devices */
#include <linux/module.h> /* For module stuff/... */
@@ -546,6 +547,24 @@ static ssize_t pretimeout_show(struct device *dev,
}
static DEVICE_ATTR_RO(pretimeout);
+static ssize_t options_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "0x%x\n", wdd->info->options);
+}
+static DEVICE_ATTR_RO(options);
+
+static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", wdd->info->firmware_version);
+}
+static DEVICE_ATTR_RO(fw_version);
+
static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -617,6 +636,8 @@ static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
}
static struct attribute *wdt_attrs[] = {
&dev_attr_state.attr,
+ &dev_attr_options.attr,
+ &dev_attr_fw_version.attr,
&dev_attr_identity.attr,
&dev_attr_timeout.attr,
&dev_attr_min_timeout.attr,
@@ -1061,8 +1082,8 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
if (wdd->id == 0) {
misc_deregister(&watchdog_miscdev);
old_wd_data = NULL;
- put_device(&wd_data->dev);
}
+ put_device(&wd_data->dev);
return err;
}
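
Moving put_device() out of the wdd->id == 0 branch fixes a reference leak: once the struct device embedded in wd_data has been initialized, every error path must drop its reference through put_device() so the release() callback can free the allocation, regardless of which watchdog id failed. A self-contained sketch of that ownership rule, with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>

struct foo_data {
	struct device dev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_data, dev));
}

static struct foo_data *foo_create(void)
{
	struct foo_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return NULL;

	device_initialize(&data->dev);
	data->dev.release = foo_release;
	return data;
}

static void foo_destroy_on_error(struct foo_data *data)
{
	/* never kfree() directly here - the refcount owns the memory */
	put_device(&data->dev);
}
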
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index ce7a4a9e4b03..0ba99bed59fc 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -301,13 +301,12 @@ static const struct watchdog_info wdat_wdt_info = {
.identity = "wdat_wdt",
};
-static const struct watchdog_ops wdat_wdt_ops = {
+static struct watchdog_ops wdat_wdt_ops = {
.owner = THIS_MODULE,
.start = wdat_wdt_start,
.stop = wdat_wdt_stop,
.ping = wdat_wdt_ping,
.set_timeout = wdat_wdt_set_timeout,
- .get_timeleft = wdat_wdt_get_timeleft,
};
static int wdat_wdt_probe(struct platform_device *pdev)
@@ -436,6 +435,9 @@ static int wdat_wdt_probe(struct platform_device *pdev)
list_add_tail(&instr->node, instructions);
}
+ if (wdat->instructions[ACPI_WDAT_GET_CURRENT_COUNTDOWN])
+ wdat_wdt_ops.get_timeleft = wdat_wdt_get_timeleft;
+
wdat_wdt_boot_status(wdat);
wdat_wdt_set_running(wdat);
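
wdat_wdt_ops loses its const qualifier so get_timeleft is only installed when the ACPI WDAT table supplies a GET_CURRENT_COUNTDOWN instruction; with the pointer left NULL, the watchdog core does not offer the time-left interface at all. A rough sketch of that conditional-callback pattern, with hypothetical names:

#include <linux/module.h>
#include <linux/watchdog.h>

static int foo_start(struct watchdog_device *wdd) { return 0; }
static int foo_stop(struct watchdog_device *wdd) { return 0; }
static unsigned int foo_get_timeleft(struct watchdog_device *wdd) { return 0; }

/* ops table deliberately non-const so optional callbacks can be wired up
 * at probe time, only when the firmware actually supports them */
static struct watchdog_ops foo_ops = {
	.owner = THIS_MODULE,
	.start = foo_start,
	.stop  = foo_stop,
};

static void foo_wire_up_optional(bool hw_has_countdown)
{
	if (hw_has_countdown)
		foo_ops.get_timeleft = foo_get_timeleft;
}
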
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index d0e88875443a..21ca08a694ee 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -593,8 +593,7 @@ static int ziirave_wdt_init_duration(struct i2c_client *client)
reset_duration);
}
-static int ziirave_wdt_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ziirave_wdt_probe(struct i2c_client *client)
{
int ret;
struct ziirave_wdt_data *w_priv;
@@ -732,7 +731,7 @@ static struct i2c_driver ziirave_wdt_driver = {
.name = "ziirave_wdt",
.of_match_table = zrv_wdt_of_match,
},
- .probe = ziirave_wdt_probe,
+ .probe_new = ziirave_wdt_probe,
.remove = ziirave_wdt_remove,
.id_table = ziirave_wdt_id,
};
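
The ziirave change switches to the probe_new-style i2c probe, whose callback takes only the client and drops the unused i2c_device_id argument. A minimal skeleton of a driver registered that way, with hypothetical names (a driver that still needs the matched id could look it up via i2c_client_get_device_id()):

#include <linux/i2c.h>
#include <linux/module.h>

static int foo_wdt_probe(struct i2c_client *client)
{
	/* device setup would go here */
	return 0;
}

static const struct i2c_device_id foo_wdt_id[] = {
	{ "foo_wdt", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_wdt_id);

static struct i2c_driver foo_wdt_driver = {
	.driver = {
		.name = "foo_wdt",
	},
	.probe_new = foo_wdt_probe,
	.id_table = foo_wdt_id,
};
module_i2c_driver(foo_wdt_driver);

MODULE_LICENSE("GPL");
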