path: root/drivers
author	Jens Axboe <axboe@kernel.dk>	2024-07-03 10:20:05 -0600
committer	Jens Axboe <axboe@kernel.dk>	2024-07-03 10:20:05 -0600
commit	1a50d1467077c0d6f504bdbb66e6fa0dcd5b3b01 (patch)
tree	ec53ad1b708908c196190a75721f72c2ee0489bc /drivers
parent	98d34c087249d39838874b83e17671e7d5eb1ca7 (diff)
parent	22a40d14b572deb80c0648557f4bd502d7e83826 (diff)
Merge tag 'v6.10-rc6' into for-6.11/block-post
Pull in v6.10-rc6 to resolve a conflict for the integrity cleanups.

* tag 'v6.10-rc6': (778 commits)
  Linux 6.10-rc6
  ata: ahci: Clean up sysfs file on error
  ata: libata-core: Fix double free on error
  ata,scsi: libata-core: Do not leak memory for ata_port struct members
  ata: libata-core: Fix null pointer dereference on error
  x86-32: fix cmpxchg8b_emu build error with clang
  x86: stop playing stack games in profile_pc()
  i2c: testunit: discard write requests while old command is running
  i2c: testunit: don't erase registers after STOP
  tty: mxser: Remove __counted_by from mxser_board.ports[]
  randomize_kstack: Remove non-functional per-arch entropy filtering
  string: kunit: add missing MODULE_DESCRIPTION() macros
  ata: libata-core: Add ATA_HORKAGE_NOLPM for all Crucial BX SSD1 models
  MAINTAINERS: Update IOMMU tree location
  tools/power turbostat: Add local build_bug.h header for snapshot target
  tools/power turbostat: Fix unc freq columns not showing with '-q' or '-l'
  tools/power turbostat: option '-n' is ambiguous
  drm/drm_file: Fix pid refcounting race
  kallsyms: rework symbol lookup return codes
  gpiolib: cdev: Ignore reconfiguration without direction
  ...

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpica/acevents.h4
-rw-r--r--drivers/acpi/acpica/evregion.c6
-rw-r--r--drivers/acpi/acpica/evxfregn.c54
-rw-r--r--drivers/acpi/acpica/exregion.c23
-rw-r--r--drivers/acpi/ec.c3
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/mipi-disco-img.c28
-rw-r--r--drivers/acpi/thermal.c8
-rw-r--r--drivers/acpi/x86/utils.c24
-rw-r--r--drivers/ata/ahci.c25
-rw-r--r--drivers/ata/libata-core.c32
-rw-r--r--drivers/ata/libata-scsi.c8
-rw-r--r--drivers/base/core.c48
-rw-r--r--drivers/block/loop.c23
-rw-r--r--drivers/block/nbd.c51
-rw-r--r--drivers/clk/clkdev.c11
-rw-r--r--drivers/counter/ti-eqep.c6
-rw-r--r--drivers/cpufreq/intel_pstate.c32
-rw-r--r--drivers/crypto/intel/qat/qat_common/Makefile5
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/idxd/irq.c4
-rw-r--r--drivers/dma/ioat/init.c55
-rw-r--r--drivers/dma/ti/k3-udma-glue.c5
-rw-r--r--drivers/dma/xilinx/xdma.c4
-rw-r--r--drivers/firewire/Kconfig2
-rw-r--r--drivers/firewire/core-card.c6
-rw-r--r--drivers/firewire/core-cdev.c6
-rw-r--r--drivers/firewire/core-topology.c2
-rw-r--r--drivers/firewire/core-transaction.c30
-rw-r--r--drivers/firmware/efi/memmap.c9
-rw-r--r--drivers/firmware/psci/psci.c4
-rw-r--r--drivers/gpio/gpio-davinci.c5
-rw-r--r--drivers/gpio/gpio-graniterapids.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpiolib-cdev.c28
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v14_0.c5
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c72
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c10
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h5
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c73
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c8
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c2
-rw-r--r--drivers/gpu/drm/bridge/panel.c7
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c5
-rw-r--r--drivers/gpu/drm/drm_file.c8
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c1
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c9
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c15
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c6
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c18
-rw-r--r--drivers/hv/hv.c37
-rw-r--r--drivers/hv/hv_balloon.c190
-rw-r--r--drivers/i2c/busses/Makefile6
-rw-r--r--drivers/i2c/busses/i2c-at91-slave.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c2
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-viai2c-common.c71
-rw-r--r--drivers/i2c/busses/i2c-viai2c-common.h2
-rw-r--r--drivers/i2c/busses/i2c-viai2c-wmt.c36
-rw-r--r--drivers/i2c/busses/i2c-viai2c-zhaoxin.c113
-rw-r--r--drivers/i2c/i2c-slave-testunit.c5
-rw-r--r--drivers/iio/accel/Kconfig2
-rw-r--r--drivers/iio/adc/ad7173.c37
-rw-r--r--drivers/iio/adc/ad7266.c2
-rw-r--r--drivers/iio/adc/ad9467.c4
-rw-r--r--drivers/iio/adc/xilinx-ams.c8
-rw-r--r--drivers/iio/chemical/bme680.h2
-rw-r--r--drivers/iio/chemical/bme680_core.c62
-rw-r--r--drivers/iio/common/inv_sensors/inv_sensors_timestamp.c6
-rw-r--r--drivers/iio/dac/Kconfig2
-rw-r--r--drivers/iio/dac/ad5592r-base.c2
-rw-r--r--drivers/iio/humidity/hdc3020.c325
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c5
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c4
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c19
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_core.c1
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c1
-rw-r--r--drivers/iio/inkern.c2
-rw-r--r--drivers/iio/pressure/bmp280-core.c10
-rw-r--r--drivers/iio/temperature/mlx90635.c6
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h4
-rw-r--r--drivers/infiniband/hw/mana/mr.c1
-rw-r--r--drivers/infiniband/hw/mlx5/main.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c8
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c2
-rw-r--r--drivers/input/joystick/xpad.c1
-rw-r--r--drivers/input/mouse/elantech.c31
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h18
-rw-r--r--drivers/input/touchscreen/ads7846.c12
-rw-r--r--drivers/input/touchscreen/ili210x.c4
-rw-r--r--drivers/iommu/amd/init.c3
-rw-r--r--drivers/iommu/amd/iommu.c12
-rw-r--r--drivers/iommu/intel/iommu.c20
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c5
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c4
-rw-r--r--drivers/leds/led-class.c6
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c2
-rw-r--r--drivers/mfd/axp20x.c1
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c9
-rw-r--r--drivers/misc/mei/main.c2
-rw-r--r--drivers/misc/mei/pci-me.c4
-rw-r--r--drivers/misc/mei/platform-vsc.c39
-rw-r--r--drivers/misc/mei/vsc-fw-loader.c2
-rw-r--r--drivers/mmc/host/moxart-mmc.c78
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c4
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c11
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c41
-rw-r--r--drivers/mmc/host/sdhci.c25
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c14
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c55
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h5
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c2
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c61
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h2
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h11
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c13
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h1
-rw-r--r--drivers/net/dsa/qca/qca8k-leds.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h52
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h311
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c11
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c8
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c21
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h55
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c20
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c44
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c48
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h28
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c55
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c16
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c55
-rw-r--r--drivers/net/geneve.c10
-rw-r--r--drivers/net/netdevsim/netdev.c3
-rw-r--r--drivers/net/phy/dp83tg720.c38
-rw-r--r--drivers/net/phy/micrel.c1
-rw-r--r--drivers/net/phy/mxl-gpy.c58
-rw-r--r--drivers/net/phy/sfp.c3
-rw-r--r--drivers/net/pse-pd/Kconfig1
-rw-r--r--drivers/net/usb/ax88179_178a.c24
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/rtl8150.c3
-rw-r--r--drivers/net/virtio_net.c32
-rw-r--r--drivers/net/vxlan/vxlan_core.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/nvme/host/apple.c1
-rw-r--r--drivers/nvme/host/core.c10
-rw-r--r--drivers/nvme/host/ioctl.c15
-rw-r--r--drivers/nvme/host/nvme.h2
-rw-r--r--drivers/nvme/target/configfs.c41
-rw-r--r--drivers/nvme/target/core.c1
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c3
-rw-r--r--drivers/nvme/target/fabrics-cmd.c6
-rw-r--r--drivers/nvme/target/fc.c2
-rw-r--r--drivers/nvme/target/passthru.c6
-rw-r--r--drivers/parport/parport_amiga.c8
-rw-r--r--drivers/pci/msi/msi.c10
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c189
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h32
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c2
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c68
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h1
-rw-r--r--drivers/pinctrl/pinctrl-tps6594.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c4
-rw-r--r--drivers/platform/mellanox/nvsw-sn2201.c5
-rw-r--r--drivers/platform/x86/amilo-rfkill.c1
-rw-r--r--drivers/platform/x86/firmware_attributes_class.c1
-rw-r--r--drivers/platform/x86/ibm_rtl.c1
-rw-r--r--drivers/platform/x86/intel/hid.c1
-rw-r--r--drivers/platform/x86/intel/pmc/pltdrv.c1
-rw-r--r--drivers/platform/x86/intel/rst.c1
-rw-r--r--drivers/platform/x86/intel/smartconnect.c1
-rw-r--r--drivers/platform/x86/intel/vbtn.c1
-rw-r--r--drivers/platform/x86/lg-laptop.c89
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c1
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c1
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c1
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt.c1
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc.c1
-rw-r--r--drivers/platform/x86/uv_sysfs.c1
-rw-r--r--drivers/platform/x86/wireless-hotkey.c3
-rw-r--r--drivers/platform/x86/xo1-rfkill.c1
-rw-r--r--drivers/ptp/ptp_sysfs.c3
-rw-r--r--drivers/pwm/pwm-stm32.c23
-rw-r--r--drivers/ras/amd/atl/internal.h2
-rw-r--r--drivers/ras/amd/atl/system.c2
-rw-r--r--drivers/ras/amd/atl/umc.c160
-rw-r--r--drivers/regulator/axp20x-regulator.c33
-rw-r--r--drivers/regulator/bd71815-regulator.c2
-rw-r--r--drivers/regulator/core.c1
-rw-r--r--drivers/regulator/tps6594-regulator.c12
-rw-r--r--drivers/s390/char/sclp.c1
-rw-r--r--drivers/s390/virtio/virtio_ccw.c4
-rw-r--r--drivers/scsi/libsas/sas_ata.c6
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c62
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c19
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c23
-rw-r--r--drivers/scsi/scsi.c7
-rw-r--r--drivers/scsi/scsi_transport_sas.c23
-rw-r--r--drivers/scsi/sd.c21
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c4
-rw-r--r--drivers/soundwire/amd_manager.c3
-rw-r--r--drivers/soundwire/intel_auxdevice.c6
-rw-r--r--drivers/soundwire/mipi_disco.c30
-rw-r--r--drivers/spi/spi-cs42l43.c6
-rw-r--r--drivers/spi/spi-imx.c14
-rw-r--r--drivers/spi/spi-stm32-qspi.c12
-rw-r--r--drivers/spi/spi.c16
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h5
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c10
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c7
-rw-r--r--drivers/thermal/gov_step_wise.c6
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c3
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c12
-rw-r--r--drivers/thermal/thermal_core.c40
-rw-r--r--drivers/thermal/thermal_core.h4
-rw-r--r--drivers/thunderbolt/debugfs.c5
-rw-r--r--drivers/tty/mxser.c2
-rw-r--r--drivers/tty/n_tty.c22
-rw-r--r--drivers/tty/serial/8250/8250_core.c5
-rw-r--r--drivers/tty/serial/8250/8250_dw.c36
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c3
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.h33
-rw-r--r--drivers/tty/serial/8250/8250_omap.c25
-rw-r--r--drivers/tty/serial/8250/8250_pci.c13
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c1
-rw-r--r--drivers/tty/serial/Kconfig3
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c7
-rw-r--r--drivers/tty/serial/imx.c4
-rw-r--r--drivers/tty/serial/mcf.c2
-rw-r--r--drivers/tty/serial/serial_base.h30
-rw-r--r--drivers/tty/serial/serial_base_bus.c129
-rw-r--r--drivers/tty/serial/serial_core.c6
-rw-r--r--drivers/tty/serial/serial_port.c7
-rw-r--r--drivers/ufs/core/ufshcd.c7
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/atm/cxacru.c14
-rw-r--r--drivers/usb/chipidea/core.c8
-rw-r--r--drivers/usb/chipidea/ulpi.c5
-rw-r--r--drivers/usb/class/cdc-wdm.c4
-rw-r--r--drivers/usb/core/hcd.c12
-rw-r--r--drivers/usb/dwc3/core.c26
-rw-r--r--drivers/usb/gadget/function/f_printer.c40
-rw-r--r--drivers/usb/gadget/function/u_ether.c4
-rw-r--r--drivers/usb/gadget/udc/aspeed_udc.c4
-rw-r--r--drivers/usb/host/xhci-pci.c7
-rw-r--r--drivers/usb/host/xhci-ring.c59
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/musb/da8xx.c8
-rw-r--r--drivers/usb/storage/alauda.c9
-rw-r--r--drivers/usb/storage/scsiglue.c6
-rw-r--r--drivers/usb/storage/uas.c7
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c5
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c61
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c19
-rw-r--r--drivers/vfio/device_cdev.c7
-rw-r--r--drivers/vfio/group.c7
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c271
-rw-r--r--drivers/vfio/vfio_main.c44
-rw-r--r--drivers/watchdog/Kconfig1
-rw-r--r--drivers/watchdog/menz69_wdt.c1
-rw-r--r--drivers/watchdog/omap_wdt.c1
-rw-r--r--drivers/watchdog/simatic-ipc-wdt.c1
-rw-r--r--drivers/watchdog/ts4800_wdt.c1
-rw-r--r--drivers/watchdog/twl4030_wdt.c1
355 files changed, 3889 insertions, 2097 deletions
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index ddd072cbc738..2133085deda7 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -191,6 +191,10 @@ void
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
acpi_adr_space_type space_id, u32 function);
+void
+acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id);
+
acpi_status
acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 18fdf2bc2d49..dc6004daf624 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -20,10 +20,6 @@ extern u8 acpi_gbl_default_address_spaces[];
/* Local prototypes */
-static void
-acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
- acpi_adr_space_type space_id);
-
static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@@ -818,7 +814,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
*
******************************************************************************/
-static void
+void
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
acpi_adr_space_type space_id)
{
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 3197e6303c5b..624361a5f34d 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -306,3 +306,57 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
}
ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_execute_orphan_reg_method
+ *
+ * PARAMETERS: device - Handle for the device
+ * space_id - The address space ID
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI
+ * device. This is a _REG method that has no corresponding region
+ * within the device's scope.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method);
+
+ /* Parameter validation */
+
+ if (!device) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_validate_handle(device);
+ if (node) {
+
+ /*
+ * If an "orphan" _REG method is present in the device's scope
+ * for the given address space ID, run it.
+ */
+
+ acpi_ev_execute_orphan_reg_method(node, space_id);
+ } else {
+ status = AE_BAD_PARAMETER;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method)
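
For reference, a minimal usage sketch of the newly exported acpi_execute_orphan_reg_method() helper; the handle names here are hypothetical, and the drivers/acpi/ec.c hunk further down performs essentially the same pairing:

	/* Run _REG methods for the EC address space under the chosen scope,
	 * then any "orphan" _REG that sits directly under the EC device
	 * without a matching operation region (sketch only). */
	acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC);
	if (scope_handle != ec_handle)
		acpi_execute_orphan_reg_method(ec_handle, ACPI_ADR_SPACE_EC);
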
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 8907b8bf4267..c49b9f8de723 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -44,7 +44,6 @@ acpi_ex_system_memory_space_handler(u32 function,
struct acpi_mem_mapping *mm = mem_info->cur_mm;
u32 length;
acpi_size map_length;
- acpi_size page_boundary_map_length;
#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
u32 remainder;
#endif
@@ -138,26 +137,8 @@ acpi_ex_system_memory_space_handler(u32 function,
map_length = (acpi_size)
((mem_info->address + mem_info->length) - address);
- /*
- * If mapping the entire remaining portion of the region will cross
- * a page boundary, just map up to the page boundary, do not cross.
- * On some systems, crossing a page boundary while mapping regions
- * can cause warnings if the pages have different attributes
- * due to resource management.
- *
- * This has the added benefit of constraining a single mapping to
- * one page, which is similar to the original code that used a 4k
- * maximum window.
- */
- page_boundary_map_length = (acpi_size)
- (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
- if (page_boundary_map_length == 0) {
- page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
- }
-
- if (map_length > page_boundary_map_length) {
- map_length = page_boundary_map_length;
- }
+ if (map_length > ACPI_DEFAULT_PAGE_SIZE)
+ map_length = ACPI_DEFAULT_PAGE_SIZE;
/* Create a new mapping starting at the address given */
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 68dd17f96f63..299ec653388c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1507,6 +1507,9 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC);
+ if (scope_handle != ec->handle)
+ acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC);
+
set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 2a0e9fc7b74c..601b670356e5 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -302,6 +302,10 @@ void acpi_mipi_check_crs_csi2(acpi_handle handle);
void acpi_mipi_scan_crs_csi2(void);
void acpi_mipi_init_crs_csi2_swnodes(void);
void acpi_mipi_crs_csi2_cleanup(void);
+#ifdef CONFIG_X86
bool acpi_graph_ignore_port(acpi_handle handle);
+#else
+static inline bool acpi_graph_ignore_port(acpi_handle handle) { return false; }
+#endif
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c
index d05413a0672a..92b658f92dc0 100644
--- a/drivers/acpi/mipi-disco-img.c
+++ b/drivers/acpi/mipi-disco-img.c
@@ -725,14 +725,20 @@ void acpi_mipi_crs_csi2_cleanup(void)
acpi_mipi_del_crs_csi2(csi2);
}
-static const struct dmi_system_id dmi_ignore_port_nodes[] = {
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 9315"),
- },
- },
- { }
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+/* CPU matches for Dell generations with broken ACPI MIPI DISCO info */
+static const struct x86_cpu_id dell_broken_mipi_disco_cpu_gens[] = {
+ X86_MATCH_VFM(INTEL_TIGERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
+ {}
};
static const char *strnext(const char *s1, const char *s2)
@@ -761,7 +767,10 @@ bool acpi_graph_ignore_port(acpi_handle handle)
static bool dmi_tested, ignore_port;
if (!dmi_tested) {
- ignore_port = dmi_first_match(dmi_ignore_port_nodes);
+ if (dmi_name_in_vendors("Dell Inc.") &&
+ x86_match_cpu(dell_broken_mipi_disco_cpu_gens))
+ ignore_port = true;
+
dmi_tested = true;
}
@@ -794,3 +803,4 @@ out_free:
kfree(orig_path);
return false;
}
+#endif
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index d67881b50bca..a0cfc857fb55 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -168,11 +168,17 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k)
{
+ int temp;
+
if (temp_deci_k == THERMAL_TEMP_INVALID)
return THERMAL_TEMP_INVALID;
- return deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
+ temp = deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
tz->kelvin_offset);
+ if (temp <= 0)
+ return THERMAL_TEMP_INVALID;
+
+ return temp;
}
static bool acpi_thermal_trip_valid(struct acpi_thermal_trip *acpi_trip)
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 7dca73417e2b..2fe0934dcd64 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -206,16 +206,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
}
/*
- * AMD systems from Renoir and Lucienne *require* that the NVME controller
+ * AMD systems from Renoir onwards *require* that the NVME controller
* is put into D3 over a Modern Standby / suspend-to-idle cycle.
*
* This is "typically" accomplished using the `StorageD3Enable`
* property in the _DSD that is checked via the `acpi_storage_d3` function
- * but this property was introduced after many of these systems launched
- * and most OEM systems don't have it in their BIOS.
+ * but some OEM systems still don't have it in their BIOS.
*
* The Microsoft documentation for StorageD3Enable mentioned that Windows has
- * a hardcoded allowlist for D3 support, which was used for these platforms.
+ * a hardcoded allowlist for D3 support as well as a registry key to override
+ * the BIOS, which has been used for these cases.
*
* This allows quirking on Linux in a similar fashion.
*
@@ -228,19 +228,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
* https://bugzilla.kernel.org/show_bug.cgi?id=216773
* https://bugzilla.kernel.org/show_bug.cgi?id=217003
* 2) On at least one HP system StorageD3Enable is missing on the second NVME
- disk in the system.
+ * disk in the system.
+ * 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only
+ * NVME device.
*/
-static const struct x86_cpu_id storage_d3_cpu_ids[] = {
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */
- {}
-};
-
bool force_storage_d3(void)
{
- return x86_match_cpu(storage_d3_cpu_ids);
+ if (!cpu_feature_enabled(X86_FEATURE_ZEN))
+ return false;
+ return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0;
}
/*
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 07d66d2c5f0d..fc6fd583faf8 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1735,6 +1735,14 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap)
if (ap->pflags & ATA_PFLAG_EXTERNAL)
return;
+ /* If no LPM states are supported by the HBA, do not bother with LPM */
+ if ((ap->host->flags & ATA_HOST_NO_PART) &&
+ (ap->host->flags & ATA_HOST_NO_SSC) &&
+ (ap->host->flags & ATA_HOST_NO_DEVSLP)) {
+ ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n");
+ return;
+ }
+
/* user modified policy via module param */
if (mobile_lpm_policy != -1) {
policy = mobile_lpm_policy;
@@ -1967,8 +1975,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
- if (!host)
- return -ENOMEM;
+ if (!host) {
+ rc = -ENOMEM;
+ goto err_rm_sysfs_file;
+ }
host->private_data = hpriv;
if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
@@ -2023,11 +2033,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* initialize adapter */
rc = ahci_configure_dma_masks(pdev, hpriv);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
rc = ahci_pci_reset_controller(host);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
ahci_pci_init_controller(host);
ahci_pci_print_info(host);
@@ -2036,10 +2046,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = ahci_host_activate(host, &ahci_sht);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
pm_runtime_put_noidle(&pdev->dev);
return 0;
+
+err_rm_sysfs_file:
+ sysfs_remove_file_from_group(&pdev->dev.kobj,
+ &dev_attr_remapped_nvme.attr, NULL);
+ return rc;
}
static void ahci_shutdown_one(struct pci_dev *pdev)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e1bf8a19b3c8..74b59b78d278 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4137,8 +4137,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
/* Crucial devices with broken LPM support */
- { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
- { "CT240BX500SSD1", NULL, ATA_HORKAGE_NOLPM },
+ { "CT*0BX*00SSD1", NULL, ATA_HORKAGE_NOLPM },
/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
{ "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
@@ -5490,6 +5489,18 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
return ap;
}
+void ata_port_free(struct ata_port *ap)
+{
+ if (!ap)
+ return;
+
+ kfree(ap->pmp_link);
+ kfree(ap->slave_link);
+ kfree(ap->ncq_sense_buf);
+ kfree(ap);
+}
+EXPORT_SYMBOL_GPL(ata_port_free);
+
static void ata_devres_release(struct device *gendev, void *res)
{
struct ata_host *host = dev_get_drvdata(gendev);
@@ -5516,12 +5527,7 @@ static void ata_host_release(struct kref *kref)
int i;
for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- kfree(ap->pmp_link);
- kfree(ap->slave_link);
- kfree(ap->ncq_sense_buf);
- kfree(ap);
+ ata_port_free(host->ports[i]);
host->ports[i] = NULL;
}
kfree(host);
@@ -5571,8 +5577,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
if (!host)
return NULL;
- if (!devres_open_group(dev, NULL, GFP_KERNEL))
- goto err_free;
+ if (!devres_open_group(dev, NULL, GFP_KERNEL)) {
+ kfree(host);
+ return NULL;
+ }
dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
if (!dr)
@@ -5604,8 +5612,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
err_out:
devres_release_group(dev, NULL);
- err_free:
- kfree(host);
return NULL;
}
EXPORT_SYMBOL_GPL(ata_host_alloc);
@@ -5904,7 +5910,7 @@ int ata_host_register(struct ata_host *host, const struct scsi_host_template *sh
* allocation time.
*/
for (i = host->n_ports; host->ports[i]; i++)
- kfree(host->ports[i]);
+ ata_port_free(host->ports[i]);
/* give ports names and add SCSI hosts */
for (i = 0; i < host->n_ports; i++) {
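
To illustrate the intent of exporting ata_port_free(), a hedged sketch of an error path that unwinds a freshly allocated port without open-coding the individual kfree() calls (the failure condition is hypothetical):

	struct ata_port *ap = ata_port_alloc(host);

	if (!ap)
		return -ENOMEM;

	if (setup_failed) {	/* hypothetical failure */
		/* frees pmp_link, slave_link, ncq_sense_buf and ap itself */
		ata_port_free(ap);
		return -EIO;
	}
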
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 682971c4cbe4..67ce756f8dfc 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1830,11 +1830,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
2
};
- /* set scsi removable (RMB) bit per ata bit, or if the
- * AHCI port says it's external (Hotplug-capable, eSATA).
+ /*
+ * Set the SCSI Removable Media Bit (RMB) if the ATA removable media
+ * device bit (obsolete since ATA-8 ACS) is set.
*/
- if (ata_id_removable(args->id) ||
- (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
+ if (ata_id_removable(args->id))
hdr[1] |= (1 << 7);
if (args->dev->class == ATA_DEV_ZAC) {
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 131d96c6090b..2b4c0624b704 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2739,8 +2739,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env)
return -ENOMEM;
+ /* Synchronize with really_probe() */
+ device_lock(dev);
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
+ device_unlock(dev);
if (retval)
goto out;
@@ -2845,15 +2848,6 @@ static void devm_attr_group_remove(struct device *dev, void *res)
sysfs_remove_group(&dev->kobj, group);
}
-static void devm_attr_groups_remove(struct device *dev, void *res)
-{
- union device_attr_group_devres *devres = res;
- const struct attribute_group **groups = devres->groups;
-
- dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
- sysfs_remove_groups(&dev->kobj, groups);
-}
-
/**
* devm_device_add_group - given a device, create a managed attribute group
* @dev: The device to create the group for
@@ -2886,42 +2880,6 @@ int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
-/**
- * devm_device_add_groups - create a bunch of managed attribute groups
- * @dev: The device to create the group for
- * @groups: The attribute groups to create, NULL terminated
- *
- * This function creates a bunch of managed attribute groups. If an error
- * occurs when creating a group, all previously created groups will be
- * removed, unwinding everything back to the original state when this
- * function was called. It will explicitly warn and error if any of the
- * attribute files being created already exist.
- *
- * Returns 0 on success or error code from sysfs_create_group on failure.
- */
-int devm_device_add_groups(struct device *dev,
- const struct attribute_group **groups)
-{
- union device_attr_group_devres *devres;
- int error;
-
- devres = devres_alloc(devm_attr_groups_remove,
- sizeof(*devres), GFP_KERNEL);
- if (!devres)
- return -ENOMEM;
-
- error = sysfs_create_groups(&dev->kobj, groups);
- if (error) {
- devres_free(devres);
- return error;
- }
-
- devres->groups = groups;
- devres_add(dev, devres);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_device_add_groups);
-
static int device_add_attrs(struct device *dev)
{
const struct class *class = dev->class;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4f0d96876b1f..bba4c920508f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -299,6 +299,21 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
return 0;
}
+static void loop_clear_limits(struct loop_device *lo, int mode)
+{
+ struct queue_limits lim = queue_limits_start_update(lo->lo_queue);
+
+ if (mode & FALLOC_FL_ZERO_RANGE)
+ lim.max_write_zeroes_sectors = 0;
+
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ lim.max_hw_discard_sectors = 0;
+ lim.discard_granularity = 0;
+ }
+
+ queue_limits_commit_update(lo->lo_queue, &lim);
+}
+
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
int mode)
{
@@ -317,6 +332,14 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
return -EIO;
+
+ /*
+ * We initially configure the limits in a hope that fallocate is
+ * supported and clear them here if that turns out not to be true.
+ */
+ if (unlikely(ret == -EOPNOTSUPP))
+ loop_clear_limits(lo, mode);
+
return ret;
}
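
The new loop_clear_limits() helper relies on the queue_limits_start_update()/queue_limits_commit_update() pair; as a rough sketch against a generic request queue q (assumed to exist in the caller), dropping discard support atomically looks like:

	struct queue_limits lim = queue_limits_start_update(q);

	/* advertise that discard is no longer supported */
	lim.max_hw_discard_sectors = 0;
	lim.discard_granularity = 0;
	queue_limits_commit_update(q, &lim);
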
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6cddf5baffe0..41a90150b501 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -597,10 +597,11 @@ static inline int was_interrupted(int result)
}
/*
- * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
- * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
+ * Returns BLK_STS_RESOURCE if the caller should retry after a delay.
+ * Returns BLK_STS_IOERR if sending failed.
*/
-static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
+ int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
struct nbd_config *config = nbd->config;
@@ -622,13 +623,13 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
type = req_to_nbd_cmd_type(req);
if (type == U32_MAX)
- return -EIO;
+ return BLK_STS_IOERR;
if (rq_data_dir(req) == WRITE &&
(config->flags & NBD_FLAG_READ_ONLY)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Write on read-only\n");
- return -EIO;
+ return BLK_STS_IOERR;
}
if (req->cmd_flags & REQ_FUA)
@@ -682,11 +683,11 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
nsock->sent = sent;
}
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return (__force int)BLK_STS_RESOURCE;
+ return BLK_STS_RESOURCE;
}
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
- return -EAGAIN;
+ goto requeue;
}
send_pages:
if (type != NBD_CMD_WRITE)
@@ -723,12 +724,12 @@ send_pages:
nsock->pending = req;
nsock->sent = sent;
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return (__force int)BLK_STS_RESOURCE;
+ return BLK_STS_RESOURCE;
}
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
result);
- return -EAGAIN;
+ goto requeue;
}
/*
* The completion might already have come in,
@@ -745,7 +746,16 @@ out:
trace_nbd_payload_sent(req, handle);
nsock->pending = NULL;
nsock->sent = 0;
- return 0;
+ __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+ return BLK_STS_OK;
+
+requeue:
+ /* retry on a different socket */
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Request send failed, requeueing\n");
+ nbd_mark_nsock_dead(nbd, nsock, 1);
+ nbd_requeue_cmd(cmd);
+ return BLK_STS_OK;
}
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
@@ -1026,7 +1036,7 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
struct nbd_device *nbd = cmd->nbd;
struct nbd_config *config;
struct nbd_sock *nsock;
- int ret;
+ blk_status_t ret;
lockdep_assert_held(&cmd->lock);
@@ -1080,28 +1090,11 @@ again:
ret = BLK_STS_OK;
goto out;
}
- /*
- * Some failures are related to the link going down, so anything that
- * returns EAGAIN can be retried on a different socket.
- */
ret = nbd_send_cmd(nbd, cmd, index);
- /*
- * Access to this flag is protected by cmd->lock, thus it's safe to set
- * the flag after nbd_send_cmd() succeed to send request to server.
- */
- if (!ret)
- __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
- else if (ret == -EAGAIN) {
- dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Request send failed, requeueing\n");
- nbd_mark_nsock_dead(nbd, nsock, 1);
- nbd_requeue_cmd(cmd);
- ret = BLK_STS_OK;
- }
out:
mutex_unlock(&nsock->tx_lock);
nbd_config_put(nbd);
- return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
+ return ret;
}
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 6a77d7e201a9..2f83fb97c6fb 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -204,8 +204,15 @@ fail:
pr_err("%pV:%s: %s ID is greater than %zu\n",
&vaf, con_id, failure, max_size);
va_end(ap_copy);
- kfree(cla);
- return NULL;
+
+ /*
+ * Don't fail in this case, but as the entry won't ever match just
+ * fill it with something that also won't match.
+ */
+ strscpy(cla->con_id, "bad", sizeof(cla->con_id));
+ strscpy(cla->dev_id, "bad", sizeof(cla->dev_id));
+
+ return &cla->cl;
}
static struct clk_lookup *
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index 072b11fd6b32..825ae22c3ebc 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -6,6 +6,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/counter.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
@@ -376,6 +377,7 @@ static int ti_eqep_probe(struct platform_device *pdev)
struct counter_device *counter;
struct ti_eqep_cnt *priv;
void __iomem *base;
+ struct clk *clk;
int err;
counter = devm_counter_alloc(dev, sizeof(*priv));
@@ -415,6 +417,10 @@ static int ti_eqep_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to enable clock\n");
+
err = counter_add(counter);
if (err < 0) {
pm_runtime_put_sync(dev);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 65d3f79104bd..c31914a9876f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -355,15 +355,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
int ret;
ret = cppc_get_perf_caps(cpu, &cppc_perf);
- if (ret)
- return;
-
/*
- * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
- * In this case we can't use CPPC.highest_perf to enable ITMT.
- * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
+ * If CPPC is not available, fall back to MSR_HWP_CAPABILITIES bits [8:0].
+ *
+ * Also, on some systems with overclocking enabled, CPPC.highest_perf is
+ * hardcoded to 0xff, so CPPC.highest_perf cannot be used to enable ITMT.
+ * Fall back to MSR_HWP_CAPABILITIES then too.
*/
- if (cppc_perf.highest_perf == CPPC_MAX_PERF)
+ if (ret || cppc_perf.highest_perf == CPPC_MAX_PERF)
cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
/*
@@ -1302,12 +1301,17 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
no_turbo = !!clamp_t(int, input, 0, 1);
- if (no_turbo == global.no_turbo)
- goto unlock_driver;
-
- if (global.turbo_disabled) {
- pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
+ WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
+ if (global.turbo_disabled && !no_turbo) {
+ pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
count = -EPERM;
+ if (global.no_turbo)
+ goto unlock_driver;
+ else
+ no_turbo = 1;
+ }
+
+ if (no_turbo == global.no_turbo) {
goto unlock_driver;
}
@@ -1762,7 +1766,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
u32 vid;
val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
val |= (u64)1 << 32;
vid_fp = cpudata->vid.min + mul_fp(
@@ -1927,7 +1931,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
u64 val;
val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
val |= (u64)1 << 32;
return val;
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 6f9266edc9f1..eac73cbfdd38 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -39,7 +39,8 @@ intel_qat-objs := adf_cfg.o \
adf_sysfs_rl.o \
qat_uclo.o \
qat_hal.o \
- qat_bl.o
+ qat_bl.o \
+ qat_mig_dev.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
adf_fw_counters.o \
@@ -56,6 +57,6 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
- adf_gen2_pfvf.o adf_gen4_pfvf.o qat_mig_dev.o
+ adf_gen2_pfvf.o adf_gen4_pfvf.o
intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 002a5ec80620..9fc99cfbef08 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -394,7 +394,7 @@ config LS2X_APB_DMA
config MCF_EDMA
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
- depends on M5441x || COMPILE_TEST
+ depends on M5441x || (COMPILE_TEST && FSL_EDMA=n)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 8dc029c86551..fc049c9c9892 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -611,11 +611,13 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
spin_unlock(&irq_entry->list_lock);
- list_for_each_entry(desc, &flist, list) {
+ list_for_each_entry_safe(desc, n, &flist, list) {
/*
* Check against the original status as ABORT is software defined
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
+ list_del(&desc->list);
+
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true);
continue;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 9c364e92cb82..e8f45a7fded4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -534,18 +534,6 @@ err_out:
return err;
}
-static int ioat_register(struct ioatdma_device *ioat_dma)
-{
- int err = dma_async_device_register(&ioat_dma->dma_dev);
-
- if (err) {
- ioat_disable_interrupts(ioat_dma);
- dma_pool_destroy(ioat_dma->completion_pool);
- }
-
- return err;
-}
-
static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
struct dma_device *dma = &ioat_dma->dma_dev;
@@ -1181,9 +1169,9 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
}
- err = ioat_register(ioat_dma);
+ err = dma_async_device_register(&ioat_dma->dma_dev);
if (err)
- return err;
+ goto err_disable_interrupts;
ioat_kobject_add(ioat_dma, &ioat_ktype);
@@ -1192,20 +1180,29 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
/* disable relaxed ordering */
err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16);
- if (err)
- return pcibios_err_to_errno(err);
+ if (err) {
+ err = pcibios_err_to_errno(err);
+ goto err_disable_interrupts;
+ }
/* clear relaxed ordering enable */
val16 &= ~PCI_EXP_DEVCTL_RELAX_EN;
err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16);
- if (err)
- return pcibios_err_to_errno(err);
+ if (err) {
+ err = pcibios_err_to_errno(err);
+ goto err_disable_interrupts;
+ }
if (ioat_dma->cap & IOAT_CAP_DPS)
writeb(ioat_pending_level + 1,
ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
return 0;
+
+err_disable_interrupts:
+ ioat_disable_interrupts(ioat_dma);
+ dma_pool_destroy(ioat_dma->completion_pool);
+ return err;
}
static void ioat_shutdown(struct pci_dev *pdev)
@@ -1350,6 +1347,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem * const *iomap;
struct device *dev = &pdev->dev;
struct ioatdma_device *device;
+ unsigned int i;
+ u8 version;
int err;
err = pcim_enable_device(pdev);
@@ -1363,6 +1362,10 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!iomap)
return -ENOMEM;
+ version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET);
+ if (version < IOAT_VER_3_0)
+ return -ENODEV;
+
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err)
return err;
@@ -1373,17 +1376,18 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
pci_set_drvdata(pdev, device);
- device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+ device->version = version;
if (device->version >= IOAT_VER_3_4)
ioat_dca_enabled = 0;
- if (device->version >= IOAT_VER_3_0) {
- if (is_skx_ioat(pdev))
- device->version = IOAT_VER_3_2;
- err = ioat3_dma_probe(device, ioat_dca_enabled);
- } else
- return -ENODEV;
+ if (is_skx_ioat(pdev))
+ device->version = IOAT_VER_3_2;
+
+ err = ioat3_dma_probe(device, ioat_dca_enabled);
if (err) {
+ for (i = 0; i < IOAT_MAX_CHANS; i++)
+ kfree(device->idx[i]);
+ kfree(device);
dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
return -ENODEV;
}
@@ -1445,6 +1449,7 @@ module_init(ioat_init_module);
static void __exit ioat_exit_module(void)
{
pci_unregister_driver(&ioat_pci_driver);
+ kmem_cache_destroy(ioat_sed_cache);
kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index c9b93055dc9d..f0a399cf45b2 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -200,12 +200,9 @@ of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glu
ret = of_k3_udma_glue_parse(udmax_np, common);
if (ret)
- goto out_put_spec;
+ return ret;
ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
-
-out_put_spec:
- of_node_put(udmax_np);
return ret;
}
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index e143a7330816..718842fdaf98 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -885,11 +885,11 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
u32 st;
bool repeat_tx;
+ spin_lock(&xchan->vchan.lock);
+
if (xchan->stop_requested)
complete(&xchan->last_interrupt);
- spin_lock(&xchan->vchan.lock);
-
/* get submitted request */
vd = vchan_next_desc(&xchan->vchan);
if (!vd)
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 869598b20e3a..5268b3f0a25a 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -11,7 +11,7 @@ config FIREWIRE
This is the new-generation IEEE 1394 (FireWire) driver stack
a.k.a. Juju, a new implementation designed for robustness and
simplicity.
- See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
+ See http://ieee1394.docs.kernel.org/en/latest/migration.html
for information about migration from the older Linux 1394 stack
to the new driver stack.
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 127d87e3a153..f8b99dd6cd82 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -222,14 +222,14 @@ static int reset_bus(struct fw_card *card, bool short_reset)
int reg = short_reset ? 5 : 1;
int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
- trace_bus_reset_initiate(card->generation, short_reset);
+ trace_bus_reset_initiate(card->index, card->generation, short_reset);
return card->driver->update_phy_reg(card, reg, 0, bit);
}
void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
{
- trace_bus_reset_schedule(card->generation, short_reset);
+ trace_bus_reset_schedule(card->index, card->generation, short_reset);
/* We don't try hard to sort out requests of long vs. short resets. */
card->br_short = short_reset;
@@ -249,7 +249,7 @@ static void br_work(struct work_struct *work)
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
- trace_bus_reset_postpone(card->generation, card->br_short);
+ trace_bus_reset_postpone(card->index, card->generation, card->br_short);
if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
fw_card_put(card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 55993c9e0b90..9a7dc90330a3 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1559,7 +1559,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
struct client *e_client = e->client;
u32 rcode;
- trace_async_phy_outbound_complete((uintptr_t)packet, status, packet->generation,
+ trace_async_phy_outbound_complete((uintptr_t)packet, card->index, status, packet->generation,
packet->timestamp);
switch (status) {
@@ -1659,8 +1659,8 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
memcpy(pp->data, a->data, sizeof(a->data));
}
- trace_async_phy_outbound_initiate((uintptr_t)&e->p, e->p.generation, e->p.header[1],
- e->p.header[2]);
+ trace_async_phy_outbound_initiate((uintptr_t)&e->p, card->index, e->p.generation,
+ e->p.header[1], e->p.header[2]);
card->driver->send_request(card, &e->p);
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 837cc44d8d9f..8107eebd4296 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -508,7 +508,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
struct fw_node *local_node;
unsigned long flags;
- trace_bus_reset_handle(generation, node_id, bm_abdicate, self_ids, self_id_count);
+ trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);
spin_lock_irqsave(&card->lock, flags);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 571fdff65c2b..76ab6a209768 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -174,8 +174,8 @@ static void transmit_complete_callback(struct fw_packet *packet,
struct fw_transaction *t =
container_of(packet, struct fw_transaction, packet);
- trace_async_request_outbound_complete((uintptr_t)t, packet->generation, packet->speed,
- status, packet->timestamp);
+ trace_async_request_outbound_complete((uintptr_t)t, card->index, packet->generation,
+ packet->speed, status, packet->timestamp);
switch (status) {
case ACK_COMPLETE:
@@ -398,7 +398,8 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
spin_unlock_irqrestore(&card->lock, flags);
- trace_async_request_outbound_initiate((uintptr_t)t, generation, speed, t->packet.header, payload,
+ trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed,
+ t->packet.header, payload,
tcode_is_read_request(tcode) ? 0 : length / 4);
card->driver->send_request(card, &t->packet);
@@ -463,7 +464,7 @@ static DECLARE_COMPLETION(phy_config_done);
static void transmit_phy_packet_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
- trace_async_phy_outbound_complete((uintptr_t)packet, packet->generation, status,
+ trace_async_phy_outbound_complete((uintptr_t)packet, card->index, packet->generation, status,
packet->timestamp);
complete(&phy_config_done);
}
@@ -503,7 +504,7 @@ void fw_send_phy_config(struct fw_card *card,
phy_config_packet.generation = generation;
reinit_completion(&phy_config_done);
- trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet,
+ trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, card->index,
phy_config_packet.generation, phy_config_packet.header[1],
phy_config_packet.header[2]);
@@ -674,7 +675,7 @@ static void free_response_callback(struct fw_packet *packet,
{
struct fw_request *request = container_of(packet, struct fw_request, response);
- trace_async_response_outbound_complete((uintptr_t)request, packet->generation,
+ trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation,
packet->speed, status, packet->timestamp);
// Decrease the reference count since not at in-flight.
@@ -879,9 +880,10 @@ void fw_send_response(struct fw_card *card,
// Increase the reference count so that the object is kept during in-flight.
fw_request_get(request);
- trace_async_response_outbound_initiate((uintptr_t)request, request->response.generation,
- request->response.speed, request->response.header,
- data, data ? data_length / 4 : 0);
+ trace_async_response_outbound_initiate((uintptr_t)request, card->index,
+ request->response.generation, request->response.speed,
+ request->response.header, data,
+ data ? data_length / 4 : 0);
card->driver->send_response(card, &request->response);
}
@@ -995,7 +997,7 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
tcode = async_header_get_tcode(p->header);
if (tcode_is_link_internal(tcode)) {
- trace_async_phy_inbound((uintptr_t)p, p->generation, p->ack, p->timestamp,
+ trace_async_phy_inbound((uintptr_t)p, card->index, p->generation, p->ack, p->timestamp,
p->header[1], p->header[2]);
fw_cdev_handle_phy_packet(card, p);
return;
@@ -1007,8 +1009,8 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
return;
}
- trace_async_request_inbound((uintptr_t)request, p->generation, p->speed, p->ack,
- p->timestamp, p->header, request->data,
+ trace_async_request_inbound((uintptr_t)request, card->index, p->generation, p->speed,
+ p->ack, p->timestamp, p->header, request->data,
tcode_is_read_request(tcode) ? 0 : request->length / 4);
offset = async_header_get_offset(p->header);
@@ -1078,8 +1080,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
}
spin_unlock_irqrestore(&card->lock, flags);
- trace_async_response_inbound((uintptr_t)t, p->generation, p->speed, p->ack, p->timestamp,
- p->header, data, data_length / 4);
+ trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
+ p->timestamp, p->header, data, data_length / 4);
if (!t) {
timed_out:
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 3365944f7965..34109fd86c55 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -15,10 +15,6 @@
#include <asm/early_ioremap.h>
#include <asm/efi.h>
-#ifndef __efi_memmap_free
-#define __efi_memmap_free(phys, size, flags) do { } while (0)
-#endif
-
/**
* __efi_memmap_init - Common code for mapping the EFI memory map
* @data: EFI memory map data
@@ -51,11 +47,6 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
return -ENOMEM;
}
- if (efi.memmap.flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB))
- __efi_memmap_free(efi.memmap.phys_map,
- efi.memmap.desc_size * efi.memmap.nr_map,
- efi.memmap.flags);
-
map.phys_map = data->phys_map;
map.nr_map = data->size / data->desc_size;
map.map_end = map.map + data->size;
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index d9629ff87861..2328ca58bba6 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -497,10 +497,12 @@ int psci_cpu_suspend_enter(u32 state)
static int psci_system_suspend(unsigned long unused)
{
+ int err;
phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
- return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
+ err = invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
pa_cpu_resume, 0, 0);
+ return psci_to_linux_errno(err);
}
static int psci_system_suspend_enter(suspend_state_t state)
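
psci_to_linux_errno() converts the raw PSCI status word into a negative errno before it reaches the suspend core, so callers never see firmware codes directly. As a rough illustration of that translation step, here is a standalone userspace sketch; the return codes follow the PSCI spec, but the mapping table and main() driver are illustrative only, not the kernel's implementation.

#include <errno.h>
#include <stdio.h>

/* PSCI return codes, per the PSCI specification. */
#define PSCI_RET_SUCCESS                 0
#define PSCI_RET_NOT_SUPPORTED          -1
#define PSCI_RET_INVALID_PARAMS         -2
#define PSCI_RET_DENIED                 -3

/* Fold a firmware status code into a negative Linux errno. */
static int psci_status_to_errno(int status)
{
        switch (status) {
        case PSCI_RET_SUCCESS:
                return 0;
        case PSCI_RET_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        case PSCI_RET_INVALID_PARAMS:
                return -EINVAL;
        case PSCI_RET_DENIED:
                return -EPERM;
        default:
                return -EINVAL;
        }
}

int main(void)
{
        /* The caller sees -EOPNOTSUPP instead of the raw -1. */
        printf("NOT_SUPPORTED -> %d\n",
               psci_status_to_errno(PSCI_RET_NOT_SUPPORTED));
        return 0;
}
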
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index bb499e362912..1d0175d6350b 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -225,6 +225,11 @@ static int davinci_gpio_probe(struct platform_device *pdev)
else
nirq = DIV_ROUND_UP(ngpio, 16);
+ if (nirq > MAX_INT_PER_BANK) {
+ dev_err(dev, "Too many IRQs!\n");
+ return -EINVAL;
+ }
+
chips = devm_kzalloc(dev, sizeof(*chips), GFP_KERNEL);
if (!chips)
return -ENOMEM;
diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
index c693fe05d50f..f2e911a3d2ca 100644
--- a/drivers/gpio/gpio-graniterapids.c
+++ b/drivers/gpio/gpio-graniterapids.c
@@ -296,6 +296,8 @@ static int gnr_gpio_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ raw_spin_lock_init(&priv->lock);
+
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 77a2812f2974..732a6964748c 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -758,6 +758,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
int level;
if (chip->driver_data & PCA_PCAL) {
+ guard(mutex)(&chip->i2c_lock);
+
/* Enable latch on interrupt-enabled inputs */
pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
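
The added guard(mutex)() relies on the kernel's scope-based cleanup helpers from linux/cleanup.h, so the i2c lock is dropped on every path out of the block with no explicit unlock. Below is a loose userspace analogue built on the compiler's cleanup attribute; the guard_mutex() macro and mutex_unlocker() helper are invented here for illustration and are far simpler than the kernel's macros. Build with -pthread.

#include <pthread.h>
#include <stdio.h>

static void mutex_unlocker(pthread_mutex_t **m)
{
        pthread_mutex_unlock(*m);
}

/* Lock now, unlock automatically when the guard goes out of scope. */
#define guard_mutex(lock)                                                    \
        pthread_mutex_t *__guard __attribute__((cleanup(mutex_unlocker))) = \
                (pthread_mutex_lock(lock), (lock))

static pthread_mutex_t i2c_lock = PTHREAD_MUTEX_INITIALIZER;

static void sync_unlock_path(void)
{
        guard_mutex(&i2c_lock);

        /* Every return from this scope releases i2c_lock automatically. */
        printf("latch registers written under i2c_lock\n");
}

int main(void)
{
        sync_unlock_path();
        return 0;
}
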
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 9dad67ea2597..5639abce6ec5 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -89,6 +89,10 @@ struct linehandle_state {
GPIOHANDLE_REQUEST_OPEN_DRAIN | \
GPIOHANDLE_REQUEST_OPEN_SOURCE)
+#define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
+ (GPIOHANDLE_REQUEST_INPUT | \
+ GPIOHANDLE_REQUEST_OUTPUT)
+
static int linehandle_validate_flags(u32 flags)
{
/* Return an error if an unknown flag is set */
@@ -169,21 +173,21 @@ static long linehandle_set_config(struct linehandle_state *lh,
if (ret)
return ret;
+ /* Lines must be reconfigured explicitly as input or output. */
+ if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
+ return -EINVAL;
+
for (i = 0; i < lh->num_descs; i++) {
desc = lh->descs[i];
- linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);
+ linehandle_flags_to_desc_flags(lflags, &desc->flags);
- /*
- * Lines have to be requested explicitly for input
- * or output, else the line will be treated "as is".
- */
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
int val = !!gcnf.default_values[i];
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
- } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
+ } else {
ret = gpiod_direction_input(desc);
if (ret)
return ret;
@@ -1530,12 +1534,14 @@ static long linereq_set_config(struct linereq *lr, void __user *ip)
line = &lr->lines[i];
desc = lr->lines[i].desc;
flags = gpio_v2_line_config_flags(&lc, i);
- gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
- edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
/*
- * Lines have to be requested explicitly for input
- * or output, else the line will be treated "as is".
+ * Lines not explicitly reconfigured as input or output
+ * are left unchanged.
*/
+ if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
+ continue;
+ gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
+ edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(&lc, i);
@@ -1543,7 +1549,7 @@ static long linereq_set_config(struct linereq *lr, void __user *ip)
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
- } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
+ } else {
ret = gpiod_direction_input(desc);
if (ret)
return ret;
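
With the hunks above, a SET_CONFIG call has to name the direction it wants: the v1 handle ABI now rejects a reconfiguration that sets neither direction flag, and the v2 ABI simply leaves such lines untouched. A minimal userspace sketch of a well-formed reconfiguration through the v2 character-device ABI is shown below; the chip path and line offset are placeholders chosen for the example.

#include <fcntl.h>
#include <linux/gpio.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Reconfigure an already-requested line, stating the direction explicitly. */
static int set_line_output(int line_fd)
{
        struct gpio_v2_line_config cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.flags = GPIO_V2_LINE_FLAG_OUTPUT;   /* explicit direction */

        return ioctl(line_fd, GPIO_V2_LINE_SET_CONFIG_IOCTL, &cfg);
}

int main(void)
{
        struct gpio_v2_line_request req;
        int chip_fd = open("/dev/gpiochip0", O_RDWR);   /* placeholder chip */

        if (chip_fd < 0)
                return 1;

        memset(&req, 0, sizeof(req));
        req.num_lines = 1;
        req.offsets[0] = 4;                             /* placeholder line */
        req.config.flags = GPIO_V2_LINE_FLAG_INPUT;
        strncpy(req.consumer, "demo", sizeof(req.consumer) - 1);

        if (ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req) == 0) {
                if (set_line_output(req.fd))
                        perror("GPIO_V2_LINE_SET_CONFIG_IOCTL");
                close(req.fd);
        }
        close(chip_fd);
        return 0;
}
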
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 026444eeb5c6..d0aa277fc3bf 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -450,6 +450,7 @@ config DRM_PRIVACY_SCREEN
config DRM_WERROR
bool "Compile the drm subsystem with warnings as errors"
depends on DRM && EXPERT
+ depends on !WERROR
default n
help
A kernel build should not cause any compiler warnings, and this
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 108003bdf1e9..2e13c7c4b2b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -400,7 +400,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = vram_info->v30.channel_num;
mem_channel_width = vram_info->v30.channel_width;
if (vram_width)
- *vram_width = mem_channel_number * (1 << mem_channel_width);
+ *vram_width = mem_channel_number * 16;
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 932dc93b2e63..33f791d92ddf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5220,11 +5220,14 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
dev_info(adev->dev, "GPU mode1 reset\n");
+ /* Cache the state before bus master disable. The saved config space
+ * values are used in other cases like restore after mode-2 reset.
+ */
+ amdgpu_device_cache_pci_state(adev->pdev);
+
/* disable BM */
pci_clear_master(adev->pdev);
- amdgpu_device_cache_pci_state(adev->pdev);
-
if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
dev_info(adev->dev, "GPU smu mode1 reset\n");
ret = amdgpu_dpm_mode1_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 055ba2ea4c12..662d0f28f358 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -41,8 +41,6 @@
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
-#include <linux/pm_runtime.h>
-#include "amdgpu_trace.h"
/**
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
@@ -58,42 +56,11 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- int r;
if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(1, __func__);
- if (r < 0)
- goto out;
-
return 0;
-
-out:
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(0, __func__);
- return r;
-}
-
-/**
- * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
- *
- * @dmabuf: DMA-buf where we remove the attachment from
- * @attach: the attachment to remove
- *
- * Called when an attachment is removed from the DMA-buf.
- */
-static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attach)
-{
- struct drm_gem_object *obj = dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(0, __func__);
}
/**
@@ -267,7 +234,6 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_dma_buf_attach,
- .detach = amdgpu_dma_buf_detach,
.pin = amdgpu_dma_buf_pin,
.unpin = amdgpu_dma_buf_unpin,
.map_dma_buf = amdgpu_dma_buf_map,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 10832b470448..bc3ac73b6b8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -181,7 +181,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(1, __func__);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
struct dma_fence *old;
@@ -309,7 +308,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(0, __func__);
} while (last_seq != seq);
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 67c234bcf89f..3adaa4670103 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -108,6 +108,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
memset(&bp, 0, sizeof(bp));
*obj = NULL;
+ flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
bp.size = size;
bp.byte_align = alignment;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index be4629cdac04..08b9dfb65335 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -684,12 +684,17 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
- signed long r;
+ int r;
uint32_t seq;
- if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready ||
- !down_read_trylock(&adev->reset_domain->sem)) {
+ /*
+ * A GPU reset should flush all TLBs anyway, so no need to do
+ * this while one is ongoing.
+ */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return 0;
+ if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
if (adev->gmc.flush_tlb_needs_extra_type_2)
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
2, all_hub,
@@ -703,43 +708,40 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
flush_type, all_hub,
inst);
- return 0;
- }
+ r = 0;
+ } else {
+ /* 2 dwords flush + 8 dwords fence */
+ ndw = kiq->pmf->invalidate_tlbs_size + 8;
- /* 2 dwords flush + 8 dwords fence */
- ndw = kiq->pmf->invalidate_tlbs_size + 8;
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ ndw += kiq->pmf->invalidate_tlbs_size;
- if (adev->gmc.flush_tlb_needs_extra_type_2)
- ndw += kiq->pmf->invalidate_tlbs_size;
+ if (adev->gmc.flush_tlb_needs_extra_type_0)
+ ndw += kiq->pmf->invalidate_tlbs_size;
- if (adev->gmc.flush_tlb_needs_extra_type_0)
- ndw += kiq->pmf->invalidate_tlbs_size;
+ spin_lock(&adev->gfx.kiq[inst].ring_lock);
+ amdgpu_ring_alloc(ring, ndw);
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
- spin_lock(&adev->gfx.kiq[inst].ring_lock);
- amdgpu_ring_alloc(ring, ndw);
- if (adev->gmc.flush_tlb_needs_extra_type_2)
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
+ if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
- if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r) {
+ amdgpu_ring_undo(ring);
+ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+ goto error_unlock_reset;
+ }
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
- r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
- if (r) {
- amdgpu_ring_undo(ring);
+ amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- goto error_unlock_reset;
- }
-
- amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
- if (r < 1) {
- dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
- r = -ETIME;
- goto error_unlock_reset;
+ if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
+ dev_err(adev->dev, "timeout waiting for kiq fence\n");
+ r = -ETIME;
+ }
}
- r = 0;
error_unlock_reset:
up_read(&adev->reset_domain->sem);
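
The rework above takes the reset-domain semaphore with a trylock up front and returns immediately when a reset holds it, since the reset will flush every TLB anyway, and all remaining paths leave through the shared error_unlock_reset exit. A small standalone sketch of that trylock-or-skip pattern, with a pthread rwlock standing in for the reset-domain semaphore:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Skip the flush entirely while an exclusive holder (a "reset") is active;
 * otherwise do the work under the shared lock and return its status. */
static int flush_if_no_reset(void)
{
        int ret = 0;

        if (pthread_rwlock_tryrdlock(&reset_lock) != 0)
                return 0;       /* reset in flight: nothing to do */

        /* ... issue the flush here, setting ret on failure ... */
        printf("flush issued\n");

        pthread_rwlock_unlock(&reset_lock);
        return ret;
}

int main(void)
{
        return flush_if_no_reset();
}
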
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8d8c39be6129..c556c8b653fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -604,8 +604,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (!amdgpu_bo_support_uswc(bo->flags))
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
-
bo->tbo.bdev = &adev->mman.bdev;
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
AMDGPU_GEM_DOMAIN_GDS))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4bd4602d11b1..cef9dd0a012b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -640,6 +640,20 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
}
}
+static bool psp_err_warn(struct psp_context *psp)
+{
+ struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
+
+ /* This response indicates reg list is already loaded */
+ if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
+ cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
+ cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
+ cmd->resp.status == TEE_ERROR_CANCEL)
+ return false;
+
+ return true;
+}
+
static int
psp_cmd_submit_buf(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
@@ -699,10 +713,13 @@ psp_cmd_submit_buf(struct psp_context *psp,
dev_warn(psp->adev->dev,
"failed to load ucode %s(0x%X) ",
amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
- dev_warn(psp->adev->dev,
- "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
- psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
- psp->cmd_buf_mem->resp.status);
+ if (psp_err_warn(psp))
+ dev_warn(
+ psp->adev->dev,
+ "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+ psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
+ psp->cmd_buf_mem->cmd_id,
+ psp->cmd_buf_mem->resp.status);
/* If any firmware (including CAP) load fails under SRIOV, it should
* return failure to stop the VF from initializing.
* Also return failure in case of timeout
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 7aafeb763e5d..383fce40d4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -554,21 +554,6 @@ TRACE_EVENT(amdgpu_reset_reg_dumps,
__entry->value)
);
-TRACE_EVENT(amdgpu_runpm_reference_dumps,
- TP_PROTO(uint32_t index, const char *func),
- TP_ARGS(index, func),
- TP_STRUCT__entry(
- __field(uint32_t, index)
- __string(func, func)
- ),
- TP_fast_assign(
- __entry->index = index;
- __assign_str(func);
- ),
- TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n",
- __entry->index,
- __get_str(func))
-);
#undef AMDGPU_JOB_GET_TIMELINE_NAME
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index e30eecd02ae1..fde66225c481 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -3,6 +3,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
@@ -314,7 +315,13 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
return 0;
}
afb = to_amdgpu_framebuffer(new_state->fb);
- obj = new_state->fb->obj[0];
+
+ obj = drm_gem_fb_get_obj(new_state->fb, 0);
+ if (!obj) {
+ DRM_ERROR("Failed to get obj from framebuffer\n");
+ return -EINVAL;
+ }
+
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
@@ -368,12 +375,19 @@ static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct amdgpu_bo *rbo;
+ struct drm_gem_object *obj;
int r;
if (!old_state->fb)
return;
- rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
+ obj = drm_gem_fb_get_obj(old_state->fb, 0);
+ if (!obj) {
+ DRM_ERROR("Failed to get obj from framebuffer\n");
+ return;
+ }
+
+ rbo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
DRM_ERROR("failed to reserve rbo before unpin\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 7b16e8cca86a..f5b9f443cfdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -4195,9 +4195,10 @@ static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_i
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info)
{
- int i, j, k, counter, xcc_id, active_cu_number = 0;
- u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+ int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
+ u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
unsigned disable_masks[4 * 4];
+ bool is_symmetric_cus;
if (!adev || !cu_info)
return -EINVAL;
@@ -4215,6 +4216,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
+ is_symmetric_cus = true;
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
@@ -4242,6 +4244,15 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
+ if (i && is_symmetric_cus && prev_counter != counter)
+ is_symmetric_cus = false;
+ prev_counter = counter;
+ }
+ if (is_symmetric_cus) {
+ tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
+ tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
}
gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
xcc_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 0d1407f25005..32d4519541c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -154,18 +154,18 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
void *pkt, int size,
int api_status_off)
{
- int ndw = size / 4;
- signed long r;
- union MESAPI__MISC *x_pkt = pkt;
- struct MES_API_STATUS *api_status;
+ union MESAPI__QUERY_MES_STATUS mes_status_pkt;
+ signed long timeout = 3000000; /* 3000 ms */
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring;
- unsigned long flags;
- signed long timeout = 3000000; /* 3000 ms */
+ struct MES_API_STATUS *api_status;
+ union MESAPI__MISC *x_pkt = pkt;
const char *op_str, *misc_op_str;
- u32 fence_offset;
- u64 fence_gpu_addr;
- u64 *fence_ptr;
+ unsigned long flags;
+ u64 status_gpu_addr;
+ u32 status_offset;
+ u64 *status_ptr;
+ signed long r;
int ret;
if (x_pkt->header.opcode >= MES_SCH_API_MAX)
@@ -177,28 +177,38 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
/* Worst case in sriov where all other 15 VF timeout, each VF needs about 600ms */
timeout = 15 * 600 * 1000;
}
- BUG_ON(size % 4 != 0);
- ret = amdgpu_device_wb_get(adev, &fence_offset);
+ ret = amdgpu_device_wb_get(adev, &status_offset);
if (ret)
return ret;
- fence_gpu_addr =
- adev->wb.gpu_addr + (fence_offset * 4);
- fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
- *fence_ptr = 0;
+
+ status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4);
+ status_ptr = (u64 *)&adev->wb.wb[status_offset];
+ *status_ptr = 0;
spin_lock_irqsave(&mes->ring_lock, flags);
- if (amdgpu_ring_alloc(ring, ndw)) {
- spin_unlock_irqrestore(&mes->ring_lock, flags);
- amdgpu_device_wb_free(adev, fence_offset);
- return -ENOMEM;
- }
+ r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
+ if (r)
+ goto error_unlock_free;
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
- api_status->api_completion_fence_addr = fence_gpu_addr;
+ api_status->api_completion_fence_addr = status_gpu_addr;
api_status->api_completion_fence_value = 1;
- amdgpu_ring_write_multiple(ring, pkt, ndw);
+ amdgpu_ring_write_multiple(ring, pkt, size / 4);
+
+ memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
+ mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
+ mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+ mes_status_pkt.api_status.api_completion_fence_addr =
+ ring->fence_drv.gpu_addr;
+ mes_status_pkt.api_status.api_completion_fence_value =
+ ++ring->fence_drv.sync_seq;
+
+ amdgpu_ring_write_multiple(ring, &mes_status_pkt,
+ sizeof(mes_status_pkt) / 4);
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&mes->ring_lock, flags);
@@ -206,15 +216,16 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);
if (misc_op_str)
- dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str, misc_op_str);
+ dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str,
+ misc_op_str);
else if (op_str)
dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
else
- dev_dbg(adev->dev, "MES msg=%d was emitted\n", x_pkt->header.opcode);
+ dev_dbg(adev->dev, "MES msg=%d was emitted\n",
+ x_pkt->header.opcode);
- r = amdgpu_mes_fence_wait_polling(fence_ptr, (u64)1, timeout);
- amdgpu_device_wb_free(adev, fence_offset);
- if (r < 1) {
+ r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
+ if (r < 1 || !*status_ptr) {
if (misc_op_str)
dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
@@ -229,10 +240,19 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
while (halt_if_hws_hang)
schedule();
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto error_wb_free;
}
+ amdgpu_device_wb_free(adev, status_offset);
return 0;
+
+error_unlock_free:
+ spin_unlock_irqrestore(&mes->ring_lock, flags);
+
+error_wb_free:
+ amdgpu_device_wb_free(adev, status_offset);
+ return r;
}
static int convert_to_mes_queue_type(int queue_type)
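
The restructured submit path funnels every failure through the error_unlock_free/error_wb_free labels so the ring lock and the writeback slot are each released exactly once. A compact userspace sketch of that goto-unwind idiom follows; the single error label and the malloc'd buffer only stand in for the two labels and the writeback slot of the real function.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

/* Acquire resources in order, release them in reverse order on failure. */
static int submit(int fail_alloc)
{
        int r = 0;
        void *slot = malloc(64);                /* stands in for the wb slot */

        if (!slot)
                return -1;

        pthread_mutex_lock(&ring_lock);
        if (fail_alloc) {                       /* stands in for ring_alloc() */
                r = -1;
                goto error_unlock_free;
        }

        printf("packet written\n");
        pthread_mutex_unlock(&ring_lock);
        free(slot);
        return 0;

error_unlock_free:
        pthread_mutex_unlock(&ring_lock);
        free(slot);
        return r;
}

int main(void)
{
        submit(0);
        submit(1);
        return 0;
}
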
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 7566973ed8f5..37b5ddd6f13b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -464,8 +464,9 @@ struct psp_gfx_rb_frame
#define PSP_ERR_UNKNOWN_COMMAND 0x00000100
enum tee_error_code {
- TEE_SUCCESS = 0x00000000,
- TEE_ERROR_NOT_SUPPORTED = 0xFFFF000A,
+ TEE_SUCCESS = 0x00000000,
+ TEE_ERROR_CANCEL = 0xFFFF0002,
+ TEE_ERROR_NOT_SUPPORTED = 0xFFFF000A,
};
#endif /* _PSP_TEE_GFX_IF_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index f08a32c18694..40b28298af30 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -32,7 +32,9 @@
#include "mp/mp_14_0_2_sh_mask.h"
MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -66,6 +68,9 @@ static int psp_v14_0_init_microcode(struct psp_context *psp)
err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ if (err)
+ return err;
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 5fcd4f778dc3..47b8b49da8a7 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -8,7 +8,7 @@ config DRM_AMD_DC
depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
- select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && (!ARM64 || !CC_IS_CLANG)
+ select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || RISCV))
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f1d67c6f4b98..e9ac20bed0f2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9169,9 +9169,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
trace_amdgpu_dm_atomic_commit_tail_begin(state);
- if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
- dc_allow_idle_optimizations(dm->dc, false);
-
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_dp_mst_atomic_wait_for_dependencies(state);
@@ -11440,6 +11437,12 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
mutex_unlock(&adev->dm.dc_lock);
}
+static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
+{
+ if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
+ dc_exit_ips_for_hw_access(dc);
+}
+
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
u32 value, const char *func_name)
{
@@ -11450,6 +11453,8 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
return;
}
#endif
+
+ amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
cgs_write_register(ctx->cgs_device, address, value);
trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
@@ -11473,6 +11478,8 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
return 0;
}
+ amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
+
value = cgs_read_register(ctx->cgs_device, address);
trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 60f251cf973b..beed7adbbd43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -177,7 +177,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
- .dram_clock_change_latency_us = 11.72,
+ .dram_clock_change_latency_us = 34.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index e4f333d4fb54..a201dbb743d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -215,7 +215,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
- .dram_clock_change_latency_us = 11.72,
+ .dram_clock_change_latency_us = 34,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 5295f52e4fc8..dcced89c07b3 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -1439,3 +1439,75 @@ void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
}
}
}
+
+static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
+{
+ /* Calculate average pixel count per TU, return false if under ~2.00 to
+ * avoid empty TUs. This is only required for DPIA tunneling as empty TUs
+ * are legal to generate for native DP links. Assume TU size 64 as there
+ * is currently no scenario where it's reprogrammed from HW default.
+ * MTPs have no such limitation, so this does not affect MST use cases.
+ */
+ unsigned int pix_clk_mhz;
+ unsigned int symclk_mhz;
+ unsigned int avg_pix_per_tu_x1000;
+ unsigned int tu_size_bytes = 64;
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
+ const struct dc *dc = pipe_ctx->stream->link->dc;
+
+ if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ return false;
+
+ // Not necessary for MST configurations
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ return false;
+
+ pix_clk_mhz = timing->pix_clk_100hz / 10000;
+
+ // If this is true, can't block due to dynamic ODM
+ if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz)
+ return false;
+
+ switch (link_settings->link_rate) {
+ case LINK_RATE_LOW:
+ symclk_mhz = 162;
+ break;
+ case LINK_RATE_HIGH:
+ symclk_mhz = 270;
+ break;
+ case LINK_RATE_HIGH2:
+ symclk_mhz = 540;
+ break;
+ case LINK_RATE_HIGH3:
+ symclk_mhz = 810;
+ break;
+ default:
+ // We shouldn't be tunneling any other rates, something is wrong
+ ASSERT(0);
+ return false;
+ }
+
+ avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes)
+ / (symclk_mhz * link_settings->lane_count);
+
+ // Add small empirically-decided margin to account for potential jitter
+ return (avg_pix_per_tu_x1000 < 2020);
+}
+
+bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
+ return false;
+
+ if (should_avoid_empty_tu(pipe_ctx))
+ return false;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ return true;
+
+ return false;
+}
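
should_avoid_empty_tu() keeps the average-pixels-per-TU estimate in integer math by scaling it by 1000 and comparing against 2020 (roughly 2.00 plus a small jitter margin). A standalone sketch of the same calculation with made-up link parameters:

#include <stdio.h>

/* Average pixels per 64-byte TU, scaled by 1000 to avoid floating point. */
static unsigned int avg_pix_per_tu_x1000(unsigned int pix_clk_mhz,
                                         unsigned int symclk_mhz,
                                         unsigned int lane_count)
{
        const unsigned int tu_size_bytes = 64;

        return (1000 * pix_clk_mhz * tu_size_bytes) /
               (symclk_mhz * lane_count);
}

int main(void)
{
        /* Example values only: 148 MHz pixel clock on HBR (270 MHz symbols),
         * 4 lanes, then 25 MHz on HBR2 (540 MHz symbols), 4 lanes. */
        unsigned int a = avg_pix_per_tu_x1000(148, 270, 4);
        unsigned int b = avg_pix_per_tu_x1000(25, 540, 4);

        printf("%u -> %s\n", a, a < 2020 ? "avoid empty TUs" : "ok");
        printf("%u -> %s\n", b, b < 2020 ? "avoid empty TUs" : "ok");
        return 0;
}
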
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index a731c8880d60..f0ea7d1511ae 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -95,4 +95,6 @@ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
+bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index df3bf77f3fb4..199781233fd5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -158,7 +158,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
- .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index a01d0842bf8e..d487dfcd219b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1590,9 +1590,17 @@ static bool retrieve_link_cap(struct dc_link *link)
return false;
}
- if (dp_is_lttpr_present(link))
+ if (dp_is_lttpr_present(link)) {
configure_lttpr_mode_transparent(link);
+ // Echo TOTAL_LTTPR_CNT back downstream
+ core_link_write_dpcd(
+ link,
+ DP_TOTAL_LTTPR_CNT,
+ &link->dpcd_caps.lttpr_caps.phy_repeater_cnt,
+ sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt));
+ }
+
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index 914f28e9f224..aee5170f5fb2 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -177,4 +177,9 @@ enum dpcd_psr_sink_states {
#define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379
#define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A
+/* Remove once drm_dp_helper.h is updated upstream */
+#ifndef DP_TOTAL_LTTPR_CNT
+#define DP_TOTAL_LTTPR_CNT 0xF000A /* 2.1 */
+#endif
+
#endif /* __DAL_DPCD_DEFS_H__ */
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 6bb42d04b247..e8b6989a40f3 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK != 0) {
+ if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
+ continue;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
table[i].usVoltageID;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 7789b313285c..e1796ecf9c05 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -324,6 +324,18 @@ static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
return ret;
}
+static int smu_set_mall_enable(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->ppt_funcs->set_mall_enable)
+ return 0;
+
+ ret = smu->ppt_funcs->set_mall_enable(smu);
+
+ return ret;
+}
+
/**
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
@@ -1791,6 +1803,7 @@ static int smu_hw_init(void *handle)
smu_dpm_set_jpeg_enable(smu, true);
smu_dpm_set_vpe_enable(smu, true);
smu_dpm_set_umsch_mm_enable(smu, true);
+ smu_set_mall_enable(smu);
smu_set_gfx_cgpg(smu, true);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 0917dec8efe3..64ccdb5f14ea 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1395,6 +1395,11 @@ struct pptable_funcs {
int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);
/**
+ * @set_mall_enable: Init MALL power gating control.
+ */
+ int (*set_mall_enable)(struct smu_context *smu);
+
+ /**
* @notify_rlc_state: Notify RLC power state to SMU.
*/
int (*notify_rlc_state)(struct smu_context *smu, bool en);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
index c4dc5881d8df..e7f5ef49049f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
@@ -106,8 +106,8 @@
#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
#define PPSMC_MSG_SetSoftMaxVpe 0x36 ///<
#define PPSMC_MSG_SetSoftMinVpe 0x37 ///<
-#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache
-#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache
+#define PPSMC_MSG_MALLPowerController 0x38 ///< Set MALL control
+#define PPSMC_MSG_MALLPowerState 0x39 ///< Enter/Exit MALL PG
#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
/** @}*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index c48214e3dc8e..2e32b085824a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -272,7 +272,9 @@
__SMU_DUMMY_MAP(SetSoftMinVpe), \
__SMU_DUMMY_MAP(GetMetricsVersion), \
__SMU_DUMMY_MAP(EnableUCLKShadow), \
- __SMU_DUMMY_MAP(RmaDueToBadPageThreshold),
+ __SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \
+ __SMU_DUMMY_MAP(MALLPowerController), \
+ __SMU_DUMMY_MAP(MALLPowerState),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index e4419e1561ef..18abfbd6d059 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -52,6 +52,19 @@
#define mmMP1_SMN_C2PMSG_90 0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+/* MALLPowerController message arguments (Defines for the Cache mode control) */
+#define SMU_MALL_PMFW_CONTROL 0
+#define SMU_MALL_DRIVER_CONTROL 1
+
+/*
+ * MALLPowerState message arguments
+ * (Defines for the Allocate/Release Cache mode if in driver mode)
+ */
+#define SMU_MALL_EXIT_PG 0
+#define SMU_MALL_ENTER_PG 1
+
+#define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
@@ -66,6 +79,12 @@
FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \
FEATURE_MASK(FEATURE_VPE_DPM_BIT))
+enum smu_mall_pg_config {
+ SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0,
+ SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON = 1,
+ SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF = 2,
+};
+
static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
@@ -113,6 +132,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PowerDownUmsch, PPSMC_MSG_PowerDownUmsch, 1),
MSG_MAP(SetSoftMaxVpe, PPSMC_MSG_SetSoftMaxVpe, 1),
MSG_MAP(SetSoftMinVpe, PPSMC_MSG_SetSoftMinVpe, 1),
+ MSG_MAP(MALLPowerController, PPSMC_MSG_MALLPowerController, 1),
+ MSG_MAP(MALLPowerState, PPSMC_MSG_MALLPowerState, 1),
};
static struct cmn2asic_mapping smu_v14_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -1423,6 +1444,57 @@ static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_cl
return 0;
}
+static int smu_v14_0_1_init_mall_power_gating(struct smu_context *smu, enum smu_mall_pg_config pg_config)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (pg_config == SMU_MALL_PG_CONFIG_PMFW_CONTROL) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController,
+ SMU_MALL_PMFW_CONTROL, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Init MALL PMFW CONTROL Failure\n");
+ return ret;
+ }
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController,
+ SMU_MALL_DRIVER_CONTROL, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Init MALL Driver CONTROL Failure\n");
+ return ret;
+ }
+
+ if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState,
+ SMU_MALL_EXIT_PG, NULL);
+ if (ret) {
+ dev_err(adev->dev, "EXIT MALL PG Failure\n");
+ return ret;
+ }
+ } else if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState,
+ SMU_MALL_ENTER_PG, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Enter MALL PG Failure\n");
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_common_set_mall_enable(struct smu_context *smu)
+{
+ enum smu_mall_pg_config pg_config = SMU_MALL_PG_CONFIG_DEFAULT;
+ int ret = 0;
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ ret = smu_v14_0_1_init_mall_power_gating(smu, pg_config);
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1454,6 +1526,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
+ .set_mall_enable = smu_v14_0_common_set_mall_enable,
};
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
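
smu_v14_0_1_init_mall_power_gating() first tells the PMFW who owns MALL power gating and, only when the driver takes control, follows up with an enter/exit PG message. A condensed sketch of that two-step flow, with send_msg() standing in for smu_cmn_send_smc_msg_with_param() and the argument values mirroring the SMU_MALL_* defines above:

#include <stdio.h>

enum mall_pg_config {
        MALL_PG_PMFW_CONTROL = 0,
        MALL_PG_DRIVER_ALWAYS_ON = 1,
        MALL_PG_DRIVER_ALWAYS_OFF = 2,
};

/* Stand-in for smu_cmn_send_smc_msg_with_param(); always succeeds here. */
static int send_msg(const char *msg, unsigned int arg)
{
        printf("SMU <- %s(%u)\n", msg, arg);
        return 0;
}

/* Pick the owner first, then the power-gate state when the driver owns it. */
static int init_mall_pg(enum mall_pg_config cfg)
{
        int ret;

        if (cfg == MALL_PG_PMFW_CONTROL)
                return send_msg("MALLPowerController", 0 /* PMFW control */);

        ret = send_msg("MALLPowerController", 1 /* driver control */);
        if (ret)
                return ret;

        if (cfg == MALL_PG_DRIVER_ALWAYS_ON)
                return send_msg("MALLPowerState", 0 /* exit PG */);

        return send_msg("MALLPowerState", 1 /* enter PG */);
}

int main(void)
{
        return init_mall_pg(MALL_PG_DRIVER_ALWAYS_ON);
}
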
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 14ee79becacb..5ba62e637a61 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -12,10 +12,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
-#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#endif
#include <drm/drm_print.h>
@@ -43,7 +41,6 @@ static int komeda_register_show(struct seq_file *sf, void *x)
DEFINE_SHOW_ATTRIBUTE(komeda_register);
-#ifdef CONFIG_DEBUG_FS
static void komeda_debugfs_init(struct komeda_dev *mdev)
{
if (!debugfs_initialized())
@@ -55,7 +52,6 @@ static void komeda_debugfs_init(struct komeda_dev *mdev)
debugfs_create_x16("err_verbosity", 0664, mdev->debugfs_root,
&mdev->err_verbosity);
}
-#endif
static ssize_t
core_id_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -265,9 +261,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
mdev->err_verbosity = KOMEDA_DEV_PRINT_ERR_EVENTS;
-#ifdef CONFIG_DEBUG_FS
komeda_debugfs_init(mdev);
-#endif
return mdev;
@@ -286,9 +280,7 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group);
-#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(mdev->debugfs_root);
-#endif
if (mdev->aclk)
clk_prepare_enable(mdev->aclk);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index f3e744172673..f4e76b46ca32 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -259,7 +259,7 @@ komeda_component_get_avail_scaler(struct komeda_component *c,
u32 avail_scalers;
pipe_st = komeda_pipeline_get_state(c->pipeline, state);
- if (!pipe_st)
+ if (IS_ERR_OR_NULL(pipe_st))
return NULL;
avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 32506524d9a2..fe5fb08c9fc4 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -360,9 +360,12 @@ EXPORT_SYMBOL(drm_panel_bridge_set_orientation);
static void devm_drm_panel_bridge_release(struct device *dev, void *res)
{
- struct drm_bridge **bridge = res;
+ struct drm_bridge *bridge = *(struct drm_bridge **)res;
- drm_panel_bridge_remove(*bridge);
+ if (!bridge)
+ return;
+
+ drm_bridge_remove(bridge);
}
/**
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d612133e2cf7..117237d3528b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -524,6 +524,9 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
if (!info)
return ERR_PTR(-ENOMEM);
+ if (!drm_leak_fbdev_smem)
+ info->flags |= FBINFO_HIDE_SMEM_START;
+
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret)
goto err_release;
@@ -1860,9 +1863,6 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
info = fb_helper->info;
info->var.pixclock = 0;
- if (!drm_leak_fbdev_smem)
- info->flags |= FBINFO_HIDE_SMEM_START;
-
/* Need to drop locks to avoid recursive deadlock in
* register_framebuffer. This is ok because the only thing left to do is
* register the fbdev emulation instance in kernel_fb_helper_list. */
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index 6c9427bb4053..13cd754af311 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -130,7 +130,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_size = sizes->surface_height * fb->pitches[0];
info->screen_buffer = map.vaddr;
- info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
+ if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
+ info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ }
info->fix.smem_len = info->screen_size;
return 0;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 638ffa4444f5..714e42b05108 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -469,14 +469,12 @@ void drm_file_update_pid(struct drm_file *filp)
dev = filp->minor->dev;
mutex_lock(&dev->filelist_mutex);
+ get_pid(pid);
old = rcu_replace_pointer(filp->pid, pid, 1);
mutex_unlock(&dev->filelist_mutex);
- if (pid != old) {
- get_pid(pid);
- synchronize_rcu();
- put_pid(old);
- }
+ synchronize_rcu();
+ put_pid(old);
}
/**
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index aa93129c3397..2166208a961d 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -202,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO KUN */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "KUN"),
+ },
+ .driver_data = (void *)&lcd1600x2560_rightside_up,
}, { /* Chuwi HiBook (CWI514) */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index f48c4343f469..3e6d4c6aa877 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -285,7 +285,6 @@ struct platform_driver dp_driver = {
.remove_new = exynos_dp_remove,
.driver = {
.name = "exynos-dp",
- .owner = THIS_MODULE,
.pm = pm_ptr(&exynos_dp_pm_ops),
.of_match_table = exynos_dp_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index fab135308b70..11a720fef32b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -309,6 +309,7 @@ static int vidi_get_modes(struct drm_connector *connector)
struct vidi_context *ctx = ctx_from_connector(connector);
struct edid *edid;
int edid_len;
+ int count;
/*
* the edid data comes from user side and it would be set
@@ -328,7 +329,11 @@ static int vidi_get_modes(struct drm_connector *connector)
drm_connector_update_edid_property(connector, edid);
- return drm_add_edid_modes(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+
+ return count;
}
static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index e968824a4c72..1e26cd4f8347 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
int ret;
if (!hdata->ddc_adpt)
- return 0;
+ goto no_edid;
edid = drm_get_edid(connector, hdata->ddc_adpt);
if (!edid)
- return 0;
+ goto no_edid;
hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
@@ -906,6 +906,9 @@ static int hdmi_get_modes(struct drm_connector *connector)
kfree(edid);
return ret;
+
+no_edid:
+ return drm_add_modes_noedid(connector, 640, 480);
}
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index e05e25cd4a94..5b3b6ae1e3d7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -442,6 +442,10 @@ bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ /* eDP MSO is not compatible with joiner */
+ if (intel_dp->mso_link_count)
+ return false;
+
return DISPLAY_VER(dev_priv) >= 12 ||
(DISPLAY_VER(dev_priv) == 11 &&
encoder->port != PORT_A);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 40371b8a9bbb..93bc1cc1ee7e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -298,6 +298,7 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
return;
GEM_BUG_ON(fence->vma != vma);
+ i915_active_wait(&fence->active);
GEM_BUG_ON(!i915_active_is_idle(&fence->active));
GEM_BUG_ON(atomic_read(&fence->pin_count));
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index b5f605751b0a..de811e2265da 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -952,6 +952,13 @@ static void mtk_drm_remove(struct platform_device *pdev)
of_node_put(private->comp_node[i]);
}
+static void mtk_drm_shutdown(struct platform_device *pdev)
+{
+ struct mtk_drm_private *private = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(private->drm);
+}
+
static int mtk_drm_sys_prepare(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
@@ -983,6 +990,7 @@ static const struct dev_pm_ops mtk_drm_pm_ops = {
static struct platform_driver mtk_drm_platform_driver = {
.probe = mtk_drm_probe,
.remove_new = mtk_drm_remove,
+ .shutdown = mtk_drm_shutdown,
.driver = {
.name = "mediatek-drm",
.pm = &mtk_drm_pm_ops,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 13705c5f1497..4b7497a8755c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -68,7 +68,7 @@ nv04_display_fini(struct drm_device *dev, bool runtime, bool suspend)
if (nv_two_heads(dev))
NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
- if (!runtime)
+ if (!runtime && !drm->headless)
cancel_work_sync(&drm->hpd_work);
if (!suspend)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 670c9739e5e1..2033214c4b78 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -209,6 +209,8 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode;
mode = drm_mode_duplicate(encoder->dev, tv_mode);
+ if (!mode)
+ continue;
mode->clock = tv_norm->tv_enc_mode.vrefresh *
mode->htotal / 1000 *
@@ -258,6 +260,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
if (modes[i].hdisplay == output_mode->hdisplay &&
modes[i].vdisplay == output_mode->vdisplay) {
mode = drm_mode_duplicate(encoder->dev, output_mode);
+ if (!mode)
+ continue;
mode->type |= DRM_MODE_TYPE_PREFERRED;
} else {
@@ -265,6 +269,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
modes[i].vdisplay, 60, false,
(output_mode->flags &
DRM_MODE_FLAG_INTERLACE), false);
+ if (!mode)
+ continue;
}
/* CVT modes are sometimes unsuitable... */
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 88728a0b2c25..674dc567e179 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2680,7 +2680,7 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
nv50_mstm_fini(nouveau_encoder(encoder));
}
- if (!runtime)
+ if (!runtime && !drm->headless)
cancel_work_sync(&drm->hpd_work);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 79cfab53f80e..8c3c1f1e01c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -43,11 +43,6 @@
#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
#define LOG_OLD_VALUE(x)
-struct init_exec {
- bool execute;
- bool repeat;
-};
-
static bool nv_cksum(const uint8_t *data, unsigned int length)
{
/*
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index aed5d5b51b43..d4725a968827 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -450,6 +450,9 @@ nouveau_display_hpd_resume(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->headless)
+ return;
+
spin_lock_irq(&drm->hpd_lock);
drm->hpd_pending = ~0;
spin_unlock_irq(&drm->hpd_lock);
@@ -635,7 +638,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
}
drm_connector_list_iter_end(&conn_iter);
- if (!runtime)
+ if (!runtime && !drm->headless)
cancel_work_sync(&drm->hpd_work);
drm_kms_helper_poll_disable(dev);
@@ -729,6 +732,7 @@ nouveau_display_create(struct drm_device *dev)
/* no display hw */
if (ret == -ENODEV) {
ret = 0;
+ drm->headless = true;
goto disp_create_err;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e239c6bf4afa..25fca98a20bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -276,6 +276,7 @@ struct nouveau_drm {
/* modesetting */
struct nvbios vbios;
struct nouveau_display *display;
+ bool headless;
struct work_struct hpd_work;
spinlock_t hpd_lock;
u32 hpd_pending;
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index dcb6d0b6ced0..c8cdc8356c58 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2752,6 +2752,7 @@ static const struct display_timing koe_tx26d202vm0bwa_timing = {
.vfront_porch = { 3, 5, 10 },
.vback_porch = { 2, 5, 10 },
.vsync_len = { 5, 5, 5 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc koe_tx26d202vm0bwa = {
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 21d27e6235f3..b11f7c5bbcbe 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1619,6 +1619,8 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK != 0) {
+ if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
+ continue;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
table[i].usVoltageID;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
index e83c3e52251d..0250d5f00bf1 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
@@ -171,6 +171,13 @@ static void shmob_drm_remove(struct platform_device *pdev)
drm_kms_helper_poll_fini(ddev);
}
+static void shmob_drm_shutdown(struct platform_device *pdev)
+{
+ struct shmob_drm_device *sdev = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(&sdev->ddev);
+}
+
static int shmob_drm_probe(struct platform_device *pdev)
{
struct shmob_drm_platform_data *pdata = pdev->dev.platform_data;
@@ -273,6 +280,7 @@ static const struct of_device_id shmob_drm_of_table[] __maybe_unused = {
static struct platform_driver shmob_drm_platform_driver = {
.probe = shmob_drm_probe,
.remove_new = shmob_drm_remove,
+ .shutdown = shmob_drm_shutdown,
.driver = {
.name = "shmob-drm",
.of_match_table = of_match_ptr(shmob_drm_of_table),
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index faddae3d6ac2..6f1ac940cbae 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -2,7 +2,7 @@
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
depends on DRM && PCI && MMU
- depends on X86 || ARM64
+ depends on (X86 && HYPERVISOR_GUEST) || ARM64
select DRM_TTM
select DRM_TTM_HELPER
select MAPPING_DIRTY_HELPERS
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index 8fc0f3f6ecc5..944770fb2daf 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -147,6 +147,13 @@ static const struct attribute *gt_idle_attrs[] = {
static void gt_idle_sysfs_fini(struct drm_device *drm, void *arg)
{
struct kobject *kobj = arg;
+ struct xe_gt *gt = kobj_to_gt(kobj->parent);
+
+ if (gt_to_xe(gt)->info.skip_guc_pc) {
+ XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ xe_gt_idle_disable_c6(gt);
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ }
sysfs_remove_files(kobj, gt_idle_attrs);
kobject_put(kobj);
@@ -199,7 +206,7 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt)
void xe_gt_idle_disable_c6(struct xe_gt *gt)
{
xe_device_assert_mem_access(gt_to_xe(gt));
- xe_force_wake_assert_held(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
xe_mmio_write32(gt, PG_ENABLE, 0);
xe_mmio_write32(gt, RC_CONTROL, 0);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 476d613333a9..6c2cfc54442c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1274,6 +1274,9 @@ static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
struct xe_tile *tile;
unsigned int tid;
+ xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
for_each_tile(tile, xe, tid) {
lmtt = &tile->sriov.pf.lmtt;
xe_lmtt_drop_pages(lmtt, vfid);
@@ -1292,6 +1295,9 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
unsigned int tid;
int err;
+ xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
total = 0;
for_each_tile(tile, xe, tid)
total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
@@ -1337,6 +1343,7 @@ fail:
static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
+ xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
xe_gt_assert(gt, !xe_gt_is_media_type(gt));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -1355,6 +1362,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
int err;
xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, IS_DGFX(xe));
xe_gt_assert(gt, !xe_gt_is_media_type(gt));
size = round_up(size, pf_get_lmem_alignment(gt));
@@ -1745,11 +1753,14 @@ static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *c
static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct xe_device *xe = gt_to_xe(gt);
if (!xe_gt_is_media_type(gt)) {
pf_release_vf_config_ggtt(gt, config);
- pf_release_vf_config_lmem(gt, config);
- pf_update_vf_lmtt(gt_to_xe(gt), vfid);
+ if (IS_DGFX(xe)) {
+ pf_release_vf_config_lmem(gt, config);
+ pf_update_vf_lmtt(xe, vfid);
+ }
}
pf_release_config_ctxs(gt, config);
pf_release_config_dbs(gt, config);
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 240e7a4bbff1..5faca4fc2fef 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -631,8 +631,6 @@ int xe_guc_enable_communication(struct xe_guc *guc)
struct xe_device *xe = guc_to_xe(guc);
int err;
- guc_enable_irq(guc);
-
if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
struct xe_gt *gt = guc_to_gt(guc);
struct xe_tile *tile = gt_to_tile(gt);
@@ -640,6 +638,8 @@ int xe_guc_enable_communication(struct xe_guc *guc)
err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
if (err)
return err;
+ } else {
+ guc_enable_irq(guc);
}
xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 509649d0e65e..23382ced4ea7 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -895,12 +895,6 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc)
static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
{
struct xe_guc_pc *pc = arg;
- struct xe_device *xe = pc_to_xe(pc);
-
- if (xe->info.skip_guc_pc) {
- xe_gt_idle_disable_c6(pc_to_gt(pc));
- return;
- }
XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index d42b3f33bd7a..aca7a9af6e84 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -80,6 +80,16 @@ static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i)
return i;
}
+static int emit_flush_dw(u32 *dw, int i)
+{
+ dw[i++] = MI_FLUSH_DW | MI_FLUSH_IMM_DW;
+ dw[i++] = 0;
+ dw[i++] = 0;
+ dw[i++] = 0;
+
+ return i;
+}
+
static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb,
u32 *dw, int i)
{
@@ -234,10 +244,12 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
- if (job->user_fence.used)
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
job->user_fence.value,
dw, i);
+ }
i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
@@ -293,10 +305,12 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
- if (job->user_fence.used)
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
job->user_fence.value,
dw, i);
+ }
i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
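
Note: the xe_ring_ops hunks above emit an extra MI_FLUSH_DW before posting the user fence value. A minimal userspace sketch of the dword-emission style used there; the opcode constants below are placeholders, not the real hardware encodings:

#include <stdint.h>
#include <stdio.h>

/* placeholder opcode bits; the real MI_FLUSH_DW / MI_FLUSH_IMM_DW encodings differ */
#define MI_FLUSH_DW      0x1000u
#define MI_FLUSH_IMM_DW  0x0001u

static int emit_flush_dw(uint32_t *dw, int i)
{
	dw[i++] = MI_FLUSH_DW | MI_FLUSH_IMM_DW;
	dw[i++] = 0;    /* address low  */
	dw[i++] = 0;    /* address high */
	dw[i++] = 0;    /* immediate    */
	return i;
}

static int emit_store_imm(uint32_t addr, uint32_t value, uint32_t *dw, int i)
{
	dw[i++] = 0x2000u;  /* placeholder "store immediate" opcode */
	dw[i++] = addr;
	dw[i++] = value;
	return i;
}

int main(void)
{
	uint32_t dw[16];
	int i = 0;
	int user_fence_used = 1;

	if (user_fence_used) {
		/* as in the patch: flush before posting the user fence value */
		i = emit_flush_dw(dw, i);
		i = emit_store_imm(0xdead0000u, 42, dw, i);
	}

	for (int j = 0; j < i; j++)
		printf("dw[%d] = 0x%08x\n", j, dw[j]);
	return 0;
}
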
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index a8ad728354cb..e0d676c74f14 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -45,8 +45,8 @@ int hv_init(void)
* This involves a hypercall.
*/
int hv_post_message(union hv_connection_id connection_id,
- enum hv_message_type message_type,
- void *payload, size_t payload_size)
+ enum hv_message_type message_type,
+ void *payload, size_t payload_size)
{
struct hv_input_post_message *aligned_msg;
unsigned long flags;
@@ -86,7 +86,7 @@ int hv_post_message(union hv_connection_id connection_id,
status = HV_STATUS_INVALID_PARAMETER;
} else {
status = hv_do_hypercall(HVCALL_POST_MESSAGE,
- aligned_msg, NULL);
+ aligned_msg, NULL);
}
local_irq_restore(flags);
@@ -111,7 +111,7 @@ int hv_synic_alloc(void)
hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
GFP_KERNEL);
- if (hv_context.hv_numa_map == NULL) {
+ if (!hv_context.hv_numa_map) {
pr_err("Unable to allocate NUMA map\n");
goto err;
}
@@ -120,11 +120,11 @@ int hv_synic_alloc(void)
hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
tasklet_init(&hv_cpu->msg_dpc,
- vmbus_on_msg_dpc, (unsigned long) hv_cpu);
+ vmbus_on_msg_dpc, (unsigned long)hv_cpu);
if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->post_msg_page == NULL) {
+ if (!hv_cpu->post_msg_page) {
pr_err("Unable to allocate post msg page\n");
goto err;
}
@@ -147,14 +147,14 @@ int hv_synic_alloc(void)
if (!ms_hyperv.paravisor_present && !hv_root_partition) {
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->synic_message_page == NULL) {
+ if (!hv_cpu->synic_message_page) {
pr_err("Unable to allocate SYNIC message page\n");
goto err;
}
hv_cpu->synic_event_page =
(void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->synic_event_page == NULL) {
+ if (!hv_cpu->synic_event_page) {
pr_err("Unable to allocate SYNIC event page\n");
free_page((unsigned long)hv_cpu->synic_message_page);
@@ -203,14 +203,13 @@ err:
return ret;
}
-
void hv_synic_free(void)
{
int cpu, ret;
for_each_present_cpu(cpu) {
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
+ struct hv_per_cpu_context *hv_cpu =
+ per_cpu_ptr(hv_context.cpu_context, cpu);
/* It's better to leak the page if the encryption fails. */
if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
@@ -262,8 +261,8 @@ void hv_synic_free(void)
*/
void hv_synic_enable_regs(unsigned int cpu)
{
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
+ struct hv_per_cpu_context *hv_cpu =
+ per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_simp simp;
union hv_synic_siefp siefp;
union hv_synic_sint shared_sint;
@@ -277,8 +276,8 @@ void hv_synic_enable_regs(unsigned int cpu)
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
- hv_cpu->synic_message_page
- = (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
+ hv_cpu->synic_message_page =
+ (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
if (!hv_cpu->synic_message_page)
pr_err("Fail to map synic message page.\n");
} else {
@@ -296,8 +295,8 @@ void hv_synic_enable_regs(unsigned int cpu)
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
- hv_cpu->synic_event_page
- = (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
+ hv_cpu->synic_event_page =
+ (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
if (!hv_cpu->synic_event_page)
pr_err("Fail to map synic event page.\n");
} else {
@@ -348,8 +347,8 @@ int hv_synic_init(unsigned int cpu)
*/
void hv_synic_disable_regs(unsigned int cpu)
{
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
+ struct hv_per_cpu_context *hv_cpu =
+ per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_sint shared_sint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index e000fa3b9f97..0e7427c2baf5 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -25,6 +25,7 @@
#include <linux/notifier.h>
#include <linux/percpu_counter.h>
#include <linux/page_reporting.h>
+#include <linux/sizes.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>
@@ -41,8 +42,6 @@
* Begin protocol definitions.
*/
-
-
/*
* Protocol versions. The low word is the minor version, the high word the major
* version.
@@ -71,8 +70,6 @@ enum {
DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};
-
-
/*
* Message Types
*/
@@ -101,7 +98,6 @@ enum dm_message_type {
DM_VERSION_1_MAX = 12
};
-
/*
* Structures defining the dynamic memory management
* protocol.
@@ -115,7 +111,6 @@ union dm_version {
__u32 version;
} __packed;
-
union dm_caps {
struct {
__u64 balloon:1;
@@ -148,8 +143,6 @@ union dm_mem_page_range {
__u64 page_range;
} __packed;
-
-
/*
* The header for all dynamic memory messages:
*
@@ -174,7 +167,6 @@ struct dm_message {
__u8 data[]; /* enclosed message */
} __packed;
-
/*
* Specific message types supporting the dynamic memory protocol.
*/
@@ -271,7 +263,6 @@ struct dm_status {
__u32 io_diff;
} __packed;
-
/*
* Message to ask the guest to allocate memory - balloon up message.
* This message is sent from the host to the guest. The guest may not be
@@ -286,14 +277,13 @@ struct dm_balloon {
__u32 reservedz;
} __packed;
-
/*
* Balloon response message; this message is sent from the guest
* to the host in response to the balloon message.
*
* reservedz: Reserved; must be set to zero.
* more_pages: If FALSE, this is the last message of the transaction.
- * if TRUE there will atleast one more message from the guest.
+ * if TRUE there will be at least one more message from the guest.
*
* range_count: The number of ranges in the range array.
*
@@ -314,7 +304,7 @@ struct dm_balloon_response {
* to the guest to give guest more memory.
*
* more_pages: If FALSE, this is the last message of the transaction.
- * if TRUE there will atleast one more message from the guest.
+ * if TRUE there will be at least one more message from the guest.
*
* reservedz: Reserved; must be set to zero.
*
@@ -342,7 +332,6 @@ struct dm_unballoon_response {
struct dm_header hdr;
} __packed;
-
/*
* Hot add request message. Message sent from the host to the guest.
*
@@ -390,7 +379,6 @@ enum dm_info_type {
MAX_INFO_TYPE
};
-
/*
* Header for the information message.
*/
@@ -425,11 +413,11 @@ struct dm_info_msg {
* The range start_pfn : end_pfn specifies the range
* that the host has asked us to hot add. The range
* start_pfn : ha_end_pfn specifies the range that we have
- * currently hot added. We hot add in multiples of 128M
- * chunks; it is possible that we may not be able to bring
- * online all the pages in the region. The range
+ * currently hot added. We hot add in chunks equal to the
+ * memory block size; it is possible that we may not be able
+ * to bring online all the pages in the region. The range
* covered_start_pfn:covered_end_pfn defines the pages that can
- * be brough online.
+ * be brought online.
*/
struct hv_hotadd_state {
@@ -480,10 +468,10 @@ static unsigned long last_post_time;
static int hv_hypercall_multi_failure;
-module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
+module_param(hot_add, bool, 0644);
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
-module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
+module_param(pressure_report_delay, uint, 0644);
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
@@ -502,11 +490,13 @@ enum hv_dm_state {
DM_INIT_ERROR
};
-
static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
+
+static unsigned long ha_pages_in_chunk;
+#define HA_BYTES_IN_CHUNK (ha_pages_in_chunk << PAGE_SHIFT)
+
#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
-#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
struct hv_dynmem_device {
struct hv_device *dev;
@@ -595,12 +585,12 @@ static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
struct hv_hotadd_gap *gap;
/* The page is not backed. */
- if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
+ if (pfn < has->covered_start_pfn || pfn >= has->covered_end_pfn)
return false;
/* Check for gaps. */
list_for_each_entry(gap, &has->gap_list, list) {
- if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
+ if (pfn >= gap->start_pfn && pfn < gap->end_pfn)
return false;
}
@@ -724,28 +714,21 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
unsigned long processed_pfn;
unsigned long total_pfn = pfn_count;
- for (i = 0; i < (size/HA_CHUNK); i++) {
- start_pfn = start + (i * HA_CHUNK);
+ for (i = 0; i < (size/ha_pages_in_chunk); i++) {
+ start_pfn = start + (i * ha_pages_in_chunk);
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
- has->ha_end_pfn += HA_CHUNK;
-
- if (total_pfn > HA_CHUNK) {
- processed_pfn = HA_CHUNK;
- total_pfn -= HA_CHUNK;
- } else {
- processed_pfn = total_pfn;
- total_pfn = 0;
- }
-
- has->covered_end_pfn += processed_pfn;
+ has->ha_end_pfn += ha_pages_in_chunk;
+ processed_pfn = umin(total_pfn, ha_pages_in_chunk);
+ total_pfn -= processed_pfn;
+ has->covered_end_pfn += processed_pfn;
}
reinit_completion(&dm_device.ol_waitevent);
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
- (HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE);
+ HA_BYTES_IN_CHUNK, MHP_MERGE_RESOURCE);
if (ret) {
pr_err("hot_add memory failed error is %d\n", ret);
@@ -760,7 +743,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
do_hot_add = false;
}
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
- has->ha_end_pfn -= HA_CHUNK;
+ has->ha_end_pfn -= ha_pages_in_chunk;
has->covered_end_pfn -= processed_pfn;
}
break;
@@ -787,8 +770,8 @@ static void hv_online_page(struct page *pg, unsigned int order)
guard(spinlock_irqsave)(&dm_device.ha_lock);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/* The page belongs to a different HAS. */
- if ((pfn < has->start_pfn) ||
- (pfn + (1UL << order) > has->end_pfn))
+ if (pfn < has->start_pfn ||
+ (pfn + (1UL << order) > has->end_pfn))
continue;
hv_bring_pgs_online(has, pfn, 1UL << order);
@@ -800,7 +783,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
struct hv_hotadd_state *has;
struct hv_hotadd_gap *gap;
- unsigned long residual, new_inc;
+ unsigned long residual;
int ret = 0;
guard(spinlock_irqsave)(&dm_device.ha_lock);
@@ -836,15 +819,9 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
* our current limit; extend it.
*/
if ((start_pfn + pfn_cnt) > has->end_pfn) {
+ /* Extend the region by multiples of ha_pages_in_chunk */
residual = (start_pfn + pfn_cnt - has->end_pfn);
- /*
- * Extend the region by multiples of HA_CHUNK.
- */
- new_inc = (residual / HA_CHUNK) * HA_CHUNK;
- if (residual % HA_CHUNK)
- new_inc += HA_CHUNK;
-
- has->end_pfn += new_inc;
+ has->end_pfn += ALIGN(residual, ha_pages_in_chunk);
}
ret = 1;
@@ -855,7 +832,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
}
static unsigned long handle_pg_range(unsigned long pg_start,
- unsigned long pg_count)
+ unsigned long pg_count)
{
unsigned long start_pfn = pg_start;
unsigned long pfn_cnt = pg_count;
@@ -866,7 +843,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
unsigned long res = 0, flags;
pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
- pg_start);
+ pg_start);
spin_lock_irqsave(&dm_device.ha_lock, flags);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
@@ -902,22 +879,19 @@ static unsigned long handle_pg_range(unsigned long pg_start,
if (start_pfn > has->start_pfn &&
online_section_nr(pfn_to_section_nr(start_pfn)))
hv_bring_pgs_online(has, start_pfn, pgs_ol);
-
}
- if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
+ if (has->ha_end_pfn < has->end_pfn && pfn_cnt > 0) {
/*
* We have some residual hot add range
* that needs to be hot added; hot add
* it now. Hot add a multiple of
- * HA_CHUNK that fully covers the pages
+ * ha_pages_in_chunk that fully covers the pages
* we have.
*/
size = (has->end_pfn - has->ha_end_pfn);
if (pfn_cnt <= size) {
- size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
- if (pfn_cnt % HA_CHUNK)
- size += HA_CHUNK;
+ size = ALIGN(pfn_cnt, ha_pages_in_chunk);
} else {
pfn_cnt = size;
}
@@ -1010,10 +984,7 @@ static void hot_add_req(struct work_struct *dummy)
rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
- if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
- unsigned long region_size;
- unsigned long region_start;
-
+ if (rg_start == 0 && !dm->host_specified_ha_region) {
/*
* The host has not specified the hot-add region.
* Based on the hot-add page range being specified,
@@ -1021,19 +992,13 @@ static void hot_add_req(struct work_struct *dummy)
* that need to be hot-added while ensuring the alignment
* and size requirements of Linux as it relates to hot-add.
*/
- region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
- if (pfn_cnt % HA_CHUNK)
- region_size += HA_CHUNK;
-
- region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
-
- rg_start = region_start;
- rg_sz = region_size;
+ rg_start = ALIGN_DOWN(pg_start, ha_pages_in_chunk);
+ rg_sz = ALIGN(pfn_cnt, ha_pages_in_chunk);
}
if (do_hot_add)
resp.page_count = process_hot_add(pg_start, pfn_cnt,
- rg_start, rg_sz);
+ rg_start, rg_sz);
dm->num_pages_added += resp.page_count;
#endif
@@ -1211,11 +1176,10 @@ static void post_status(struct hv_dynmem_device *dm)
sizeof(struct dm_status),
(unsigned long)NULL,
VM_PKT_DATA_INBAND, 0);
-
}
static void free_balloon_pages(struct hv_dynmem_device *dm,
- union dm_mem_page_range *range_array)
+ union dm_mem_page_range *range_array)
{
int num_pages = range_array->finfo.page_cnt;
__u64 start_frame = range_array->finfo.start_page;
@@ -1231,8 +1195,6 @@ static void free_balloon_pages(struct hv_dynmem_device *dm,
}
}
-
-
static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
unsigned int num_pages,
struct dm_balloon_response *bl_resp,
@@ -1278,7 +1240,6 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
page_to_pfn(pg);
bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
bl_resp->hdr.size += sizeof(union dm_mem_page_range);
-
}
return i * alloc_unit;
@@ -1332,7 +1293,7 @@ static void balloon_up(struct work_struct *dummy)
if (num_ballooned == 0 || num_ballooned == num_pages) {
pr_debug("Ballooned %u out of %u requested pages.\n",
- num_pages, dm_device.balloon_wrk.num_pages);
+ num_pages, dm_device.balloon_wrk.num_pages);
bl_resp->more_pages = 0;
done = true;
@@ -1366,16 +1327,15 @@ static void balloon_up(struct work_struct *dummy)
for (i = 0; i < bl_resp->range_count; i++)
free_balloon_pages(&dm_device,
- &bl_resp->range_array[i]);
+ &bl_resp->range_array[i]);
done = true;
}
}
-
}
static void balloon_down(struct hv_dynmem_device *dm,
- struct dm_unballoon_request *req)
+ struct dm_unballoon_request *req)
{
union dm_mem_page_range *range_array = req->range_array;
int range_count = req->range_count;
@@ -1389,7 +1349,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
}
pr_debug("Freed %u ballooned pages.\n",
- prev_pages_ballooned - dm->num_pages_ballooned);
+ prev_pages_ballooned - dm->num_pages_ballooned);
if (req->more_pages == 1)
return;
@@ -1414,8 +1374,7 @@ static int dm_thread_func(void *dm_dev)
struct hv_dynmem_device *dm = dm_dev;
while (!kthread_should_stop()) {
- wait_for_completion_interruptible_timeout(
- &dm_device.config_event, 1*HZ);
+ wait_for_completion_interruptible_timeout(&dm_device.config_event, 1 * HZ);
/*
* The host expects us to post information on the memory
* pressure every second.
@@ -1439,9 +1398,8 @@ static int dm_thread_func(void *dm_dev)
return 0;
}
-
static void version_resp(struct hv_dynmem_device *dm,
- struct dm_version_response *vresp)
+ struct dm_version_response *vresp)
{
struct dm_version_request version_req;
int ret;
@@ -1502,7 +1460,7 @@ version_error:
}
static void cap_resp(struct hv_dynmem_device *dm,
- struct dm_capabilities_resp_msg *cap_resp)
+ struct dm_capabilities_resp_msg *cap_resp)
{
if (!cap_resp->is_accepted) {
pr_err("Capabilities not accepted by host\n");
@@ -1535,7 +1493,7 @@ static void balloon_onchannelcallback(void *context)
switch (dm_hdr->type) {
case DM_VERSION_RESPONSE:
version_resp(dm,
- (struct dm_version_response *)dm_msg);
+ (struct dm_version_response *)dm_msg);
break;
case DM_CAPABILITIES_RESPONSE:
@@ -1565,7 +1523,7 @@ static void balloon_onchannelcallback(void *context)
dm->state = DM_BALLOON_DOWN;
balloon_down(dm,
- (struct dm_unballoon_request *)recv_buffer);
+ (struct dm_unballoon_request *)recv_buffer);
break;
case DM_MEM_HOT_ADD_REQUEST:
@@ -1603,17 +1561,15 @@ static void balloon_onchannelcallback(void *context)
default:
pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type);
-
}
}
-
}
#define HV_LARGE_REPORTING_ORDER 9
#define HV_LARGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << \
HV_LARGE_REPORTING_ORDER)
static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
- struct scatterlist *sgl, unsigned int nents)
+ struct scatterlist *sgl, unsigned int nents)
{
unsigned long flags;
struct hv_memory_hint *hint;
@@ -1648,7 +1604,7 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
*/
/* page reporting for pages 2MB or higher */
- if (order >= HV_LARGE_REPORTING_ORDER ) {
+ if (order >= HV_LARGE_REPORTING_ORDER) {
range->page.largepage = 1;
range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
range->base_large_pfn = page_to_hvpfn(
@@ -1662,23 +1618,21 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
range->page.additional_pages =
(sg->length / HV_HYP_PAGE_SIZE) - 1;
}
-
}
status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
hint, NULL);
local_irq_restore(flags);
if (!hv_result_success(status)) {
-
pr_err("Cold memory discard hypercall failed with status %llx\n",
- status);
+ status);
if (hv_hypercall_multi_failure > 0)
hv_hypercall_multi_failure++;
if (hv_result(status) == HV_STATUS_INVALID_PARAMETER) {
pr_err("Underlying Hyper-V does not support order less than 9. Hypercall failed\n");
pr_err("Defaulting to page_reporting_order %d\n",
- pageblock_order);
+ pageblock_order);
page_reporting_order = pageblock_order;
hv_hypercall_multi_failure++;
return -EINVAL;
@@ -1712,7 +1666,7 @@ static void enable_page_reporting(void)
pr_err("Failed to enable cold memory discard: %d\n", ret);
} else {
pr_info("Cold memory discard hint enabled with order %d\n",
- page_reporting_order);
+ page_reporting_order);
}
}
@@ -1795,7 +1749,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
if (ret)
goto out;
- t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
+ t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto out;
@@ -1831,10 +1785,13 @@ static int balloon_connect_vsp(struct hv_device *dev)
cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
/*
- * Specify our alignment requirements as it relates
- * memory hot-add. Specify 128MB alignment.
+ * Specify our alignment requirements for memory hot-add. The value is
+ * the log base 2 of the number of megabytes in a chunk. For example,
+ * with 256 MiB chunks, the value is 8. The number of MiB in a chunk
+ * must be a power of 2.
*/
- cap_msg.caps.cap_bits.hot_add_alignment = 7;
+ cap_msg.caps.cap_bits.hot_add_alignment =
+ ilog2(HA_BYTES_IN_CHUNK / SZ_1M);
/*
* Currently the host does not use these
@@ -1850,7 +1807,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
if (ret)
goto out;
- t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
+ t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto out;
@@ -1891,8 +1848,8 @@ static int hv_balloon_debug_show(struct seq_file *f, void *offset)
char *sname;
seq_printf(f, "%-22s: %u.%u\n", "host_version",
- DYNMEM_MAJOR_VERSION(dm->version),
- DYNMEM_MINOR_VERSION(dm->version));
+ DYNMEM_MAJOR_VERSION(dm->version),
+ DYNMEM_MINOR_VERSION(dm->version));
seq_printf(f, "%-22s:", "capabilities");
if (ballooning_enabled())
@@ -1941,10 +1898,10 @@ static int hv_balloon_debug_show(struct seq_file *f, void *offset)
seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned);
seq_printf(f, "%-22s: %lu\n", "total_pages_committed",
- get_pages_committed(dm));
+ get_pages_committed(dm));
seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count",
- dm->max_dynamic_page_count);
+ dm->max_dynamic_page_count);
return 0;
}
@@ -1954,7 +1911,7 @@ DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug);
static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
{
debugfs_create_file("hv-balloon", 0444, NULL, b,
- &hv_balloon_debug_fops);
+ &hv_balloon_debug_fops);
}
static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
@@ -1984,8 +1941,23 @@ static int balloon_probe(struct hv_device *dev,
hot_add = false;
#ifdef CONFIG_MEMORY_HOTPLUG
+ /*
+ * Hot-add must operate in chunks that are of size equal to the
+ * memory block size because that's what the core add_memory()
+ * interface requires. The Hyper-V interface requires that the memory
+ * block size be a power of 2, which is guaranteed by the check in
+ * memory_dev_init().
+ */
+ ha_pages_in_chunk = memory_block_size_bytes() / PAGE_SIZE;
do_hot_add = hot_add;
#else
+ /*
+ * Without MEMORY_HOTPLUG, the guest returns a failure status for all
+ * hot add requests from Hyper-V, and the chunk size is used only to
+ * specify alignment to Hyper-V as required by the host/guest protocol.
+ * Somewhat arbitrarily, use 128 MiB.
+ */
+ ha_pages_in_chunk = SZ_128M / PAGE_SIZE;
do_hot_add = false;
#endif
dm_device.dev = dev;
@@ -2097,7 +2069,6 @@ static int balloon_suspend(struct hv_device *hv_dev)
tasklet_enable(&hv_dev->channel->callback_event);
return 0;
-
}
static int balloon_resume(struct hv_device *dev)
@@ -2156,7 +2127,6 @@ static struct hv_driver balloon_drv = {
static int __init init_balloon_drv(void)
{
-
return vmbus_driver_register(&balloon_drv);
}
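
Note: the hv_balloon hunks above replace the fixed 128 MiB HA_CHUNK with a chunk size derived from the memory block size, and report the hot-add alignment as log2 of the chunk size in MiB. A standalone sketch of that arithmetic; the page size and chunk size below are example values, not read from a running system:

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define SZ_1M       (1024UL * 1024UL)
#define SZ_128M     (128UL * SZ_1M)

/* round x up/down to a multiple of a (a is a power of two here for simplicity) */
#define ALIGN_UP(x, a)   (((x) + (a) - 1) / (a) * (a))
#define ALIGN_DOWN(x, a) ((x) / (a) * (a))

static unsigned int ilog2_ul(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long ha_pages_in_chunk = SZ_128M / PAGE_SIZE;   /* example: 128 MiB chunks */
	unsigned long ha_bytes_in_chunk = ha_pages_in_chunk * PAGE_SIZE;
	unsigned long pfn_cnt = 40000, pg_start = 123456;

	/* extend a region by whole chunks, as pfn_covered()/hot_add_req() now do */
	unsigned long rg_sz = ALIGN_UP(pfn_cnt, ha_pages_in_chunk);
	unsigned long rg_start = ALIGN_DOWN(pg_start, ha_pages_in_chunk);

	/* alignment reported to the host: log2 of the chunk size in MiB (7 for 128 MiB) */
	unsigned int hot_add_alignment = ilog2_ul(ha_bytes_in_chunk / SZ_1M);

	printf("rg_start=%lu rg_sz=%lu alignment=%u\n", rg_start, rg_sz, hot_add_alignment);
	return 0;
}
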
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 3d65934f5eb4..78d0561339e5 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -29,8 +29,7 @@ obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o
obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o
obj-$(CONFIG_I2C_VIA) += i2c-via.o
obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o
-i2c-zhaoxin-objs := i2c-viai2c-zhaoxin.o i2c-viai2c-common.o
-obj-$(CONFIG_I2C_ZHAOXIN) += i2c-zhaoxin.o
+obj-$(CONFIG_I2C_ZHAOXIN) += i2c-viai2c-zhaoxin.o i2c-viai2c-common.o
# Mac SMBus host controller drivers
obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o
@@ -120,8 +119,7 @@ obj-$(CONFIG_I2C_TEGRA_BPMP) += i2c-tegra-bpmp.o
obj-$(CONFIG_I2C_UNIPHIER) += i2c-uniphier.o
obj-$(CONFIG_I2C_UNIPHIER_F) += i2c-uniphier-f.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
-i2c-wmt-objs := i2c-viai2c-wmt.o i2c-viai2c-common.o
-obj-$(CONFIG_I2C_WMT) += i2c-wmt.o
+obj-$(CONFIG_I2C_WMT) += i2c-viai2c-wmt.o i2c-viai2c-common.o
i2c-octeon-objs := i2c-octeon-core.o i2c-octeon-platdrv.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
i2c-thunderx-objs := i2c-octeon-core.o i2c-thunderx-pcidrv.o
diff --git a/drivers/i2c/busses/i2c-at91-slave.c b/drivers/i2c/busses/i2c-at91-slave.c
index d6eeea5166c0..131a67d9d4a6 100644
--- a/drivers/i2c/busses/i2c-at91-slave.c
+++ b/drivers/i2c/busses/i2c-at91-slave.c
@@ -106,8 +106,7 @@ static int at91_unreg_slave(struct i2c_client *slave)
static u32 at91_twi_func(struct i2c_adapter *adapter)
{
- return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
- | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
+ return I2C_FUNC_SLAVE;
}
static const struct i2c_algorithm at91_twi_algorithm_slave = {
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 2e079cf20bb5..78e2c47e3d7d 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -220,7 +220,7 @@ static const struct i2c_algorithm i2c_dw_algo = {
void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
{
- dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
+ dev->functionality = I2C_FUNC_SLAVE;
dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 56a4dabf5a38..4ad670a80a63 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -431,8 +431,8 @@ static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
/* Init the device */
- oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_EN);
+ oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-viai2c-common.c b/drivers/i2c/busses/i2c-viai2c-common.c
index 1844d13f1f79..162b31306cba 100644
--- a/drivers/i2c/busses/i2c-viai2c-common.c
+++ b/drivers/i2c/busses/i2c-viai2c-common.c
@@ -17,6 +17,7 @@ int viai2c_wait_bus_not_busy(struct viai2c *i2c)
return 0;
}
+EXPORT_SYMBOL_GPL(viai2c_wait_bus_not_busy);
static int viai2c_write(struct viai2c *i2c, struct i2c_msg *pmsg, int last)
{
@@ -121,6 +122,7 @@ int viai2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
return (ret < 0) ? ret : i;
}
+EXPORT_SYMBOL_GPL(viai2c_xfer);
/*
* Main process of the byte mode xfer
@@ -130,7 +132,7 @@ int viai2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
* 0: there is still data that needs to be transferred
* -EIO: error occurred
*/
-static int viai2c_irq_xfer(struct viai2c *i2c)
+int viai2c_irq_xfer(struct viai2c *i2c)
{
u16 val;
struct i2c_msg *msg = i2c->msg;
@@ -171,51 +173,11 @@ static int viai2c_irq_xfer(struct viai2c *i2c)
return i2c->xfered_len == msg->len;
}
-
-int __weak viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
-{
- return 0;
-}
-
-static irqreturn_t viai2c_isr(int irq, void *data)
-{
- struct viai2c *i2c = data;
- u8 status;
-
- /* save the status and write-clear it */
- status = readw(i2c->base + VIAI2C_REG_ISR);
- if (!status && i2c->platform == VIAI2C_PLAT_ZHAOXIN)
- return IRQ_NONE;
-
- writew(status, i2c->base + VIAI2C_REG_ISR);
-
- i2c->ret = 0;
- if (status & VIAI2C_ISR_NACK_ADDR)
- i2c->ret = -EIO;
-
- if (i2c->platform == VIAI2C_PLAT_WMT && (status & VIAI2C_ISR_SCL_TIMEOUT))
- i2c->ret = -ETIMEDOUT;
-
- if (!i2c->ret) {
- if (i2c->mode == VIAI2C_BYTE_MODE)
- i2c->ret = viai2c_irq_xfer(i2c);
- else
- i2c->ret = viai2c_fifo_irq_xfer(i2c, true);
- }
-
- /* All the data has been successfully transferred or error occurred */
- if (i2c->ret)
- complete(&i2c->complete);
-
- return IRQ_HANDLED;
-}
+EXPORT_SYMBOL_GPL(viai2c_irq_xfer);
int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat)
{
- int err;
- int irq_flags;
struct viai2c *i2c;
- struct device_node *np = pdev->dev.of_node;
i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
@@ -225,28 +187,8 @@ int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat)
if (IS_ERR(i2c->base))
return PTR_ERR(i2c->base);
- if (plat == VIAI2C_PLAT_WMT) {
- irq_flags = 0;
- i2c->irq = irq_of_parse_and_map(np, 0);
- if (!i2c->irq)
- return -EINVAL;
- } else if (plat == VIAI2C_PLAT_ZHAOXIN) {
- irq_flags = IRQF_SHARED;
- i2c->irq = platform_get_irq(pdev, 0);
- if (i2c->irq < 0)
- return i2c->irq;
- } else {
- return dev_err_probe(&pdev->dev, -EINVAL, "wrong platform type\n");
- }
-
i2c->platform = plat;
- err = devm_request_irq(&pdev->dev, i2c->irq, viai2c_isr,
- irq_flags, pdev->name, i2c);
- if (err)
- return dev_err_probe(&pdev->dev, err,
- "failed to request irq %i\n", i2c->irq);
-
i2c->dev = &pdev->dev;
init_completion(&i2c->complete);
platform_set_drvdata(pdev, i2c);
@@ -254,3 +196,8 @@ int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat)
*pi2c = i2c;
return 0;
}
+EXPORT_SYMBOL_GPL(viai2c_init);
+
+MODULE_DESCRIPTION("Via/Wondermedia/Zhaoxin I2C master-mode bus adapter");
+MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-viai2c-common.h b/drivers/i2c/busses/i2c-viai2c-common.h
index 81e827c54434..00f17733223c 100644
--- a/drivers/i2c/busses/i2c-viai2c-common.h
+++ b/drivers/i2c/busses/i2c-viai2c-common.h
@@ -80,6 +80,6 @@ struct viai2c {
int viai2c_wait_bus_not_busy(struct viai2c *i2c);
int viai2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num);
int viai2c_init(struct platform_device *pdev, struct viai2c **pi2c, int plat);
-int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq);
+int viai2c_irq_xfer(struct viai2c *i2c);
#endif
diff --git a/drivers/i2c/busses/i2c-viai2c-wmt.c b/drivers/i2c/busses/i2c-viai2c-wmt.c
index e1988f946026..420fd10fe3aa 100644
--- a/drivers/i2c/busses/i2c-viai2c-wmt.c
+++ b/drivers/i2c/busses/i2c-viai2c-wmt.c
@@ -72,6 +72,32 @@ static int wmt_i2c_reset_hardware(struct viai2c *i2c)
return 0;
}
+static irqreturn_t wmt_i2c_isr(int irq, void *data)
+{
+ struct viai2c *i2c = data;
+ u8 status;
+
+ /* save the status and write-clear it */
+ status = readw(i2c->base + VIAI2C_REG_ISR);
+ writew(status, i2c->base + VIAI2C_REG_ISR);
+
+ i2c->ret = 0;
+ if (status & VIAI2C_ISR_NACK_ADDR)
+ i2c->ret = -EIO;
+
+ if (status & VIAI2C_ISR_SCL_TIMEOUT)
+ i2c->ret = -ETIMEDOUT;
+
+ if (!i2c->ret)
+ i2c->ret = viai2c_irq_xfer(i2c);
+
+ /* All the data has been successfully transferred or an error occurred */
+ if (i2c->ret)
+ complete(&i2c->complete);
+
+ return IRQ_HANDLED;
+}
+
static int wmt_i2c_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -84,6 +110,16 @@ static int wmt_i2c_probe(struct platform_device *pdev)
if (err)
return err;
+ i2c->irq = platform_get_irq(pdev, 0);
+ if (i2c->irq < 0)
+ return i2c->irq;
+
+ err = devm_request_irq(&pdev->dev, i2c->irq, wmt_i2c_isr,
+ 0, pdev->name, i2c);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to request irq %i\n", i2c->irq);
+
i2c->clk = of_clk_get(np, 0);
if (IS_ERR(i2c->clk)) {
dev_err(&pdev->dev, "unable to request clock\n");
diff --git a/drivers/i2c/busses/i2c-viai2c-zhaoxin.c b/drivers/i2c/busses/i2c-viai2c-zhaoxin.c
index 7e3ac2a3e1fd..ab3e44e147e9 100644
--- a/drivers/i2c/busses/i2c-viai2c-zhaoxin.c
+++ b/drivers/i2c/busses/i2c-viai2c-zhaoxin.c
@@ -49,8 +49,7 @@ struct viai2c_zhaoxin {
u16 xfer_len;
};
-/* 'irq == true' means in interrupt context */
-int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
+static int viai2c_fifo_xfer(struct viai2c *i2c)
{
u16 i;
u8 tmp;
@@ -59,17 +58,6 @@ int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
bool read = !!(msg->flags & I2C_M_RD);
struct viai2c_zhaoxin *priv = i2c->pltfm_priv;
- if (irq) {
- /* get the received data */
- if (read)
- for (i = 0; i < priv->xfer_len; i++)
- msg->buf[i2c->xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR);
-
- i2c->xfered_len += priv->xfer_len;
- if (i2c->xfered_len == msg->len)
- return 1;
- }
-
/* reset fifo buffer */
tmp = ioread8(base + ZXI2C_REG_HCR);
iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR);
@@ -92,18 +80,59 @@ int viai2c_fifo_irq_xfer(struct viai2c *i2c, bool irq)
iowrite8(tmp, base + VIAI2C_REG_CR);
}
- if (irq) {
- /* continue transmission */
- tmp = ioread8(base + VIAI2C_REG_CR);
- iowrite8(tmp |= VIAI2C_CR_CPU_RDY, base + VIAI2C_REG_CR);
+ u16 tcr_val = i2c->tcr;
+
+ /* start transmission */
+ tcr_val |= read ? VIAI2C_TCR_READ : 0;
+ writew(tcr_val | msg->addr, base + VIAI2C_REG_TCR);
+
+ return 0;
+}
+
+static int viai2c_fifo_irq_xfer(struct viai2c *i2c)
+{
+ u16 i;
+ u8 tmp;
+ struct i2c_msg *msg = i2c->msg;
+ void __iomem *base = i2c->base;
+ bool read = !!(msg->flags & I2C_M_RD);
+ struct viai2c_zhaoxin *priv = i2c->pltfm_priv;
+
+ /* get the received data */
+ if (read)
+ for (i = 0; i < priv->xfer_len; i++)
+ msg->buf[i2c->xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR);
+
+ i2c->xfered_len += priv->xfer_len;
+ if (i2c->xfered_len == msg->len)
+ return 1;
+
+ /* reset fifo buffer */
+ tmp = ioread8(base + ZXI2C_REG_HCR);
+ iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR);
+
+ /* set xfer len */
+ priv->xfer_len = min_t(u16, msg->len - i2c->xfered_len, ZXI2C_FIFO_SIZE);
+ if (read) {
+ iowrite8(priv->xfer_len - 1, base + ZXI2C_REG_HRLR);
} else {
- u16 tcr_val = i2c->tcr;
+ iowrite8(priv->xfer_len - 1, base + ZXI2C_REG_HTLR);
+ /* set write data */
+ for (i = 0; i < priv->xfer_len; i++)
+ iowrite8(msg->buf[i2c->xfered_len + i], base + ZXI2C_REG_HTDR);
+ }
- /* start transmission */
- tcr_val |= read ? VIAI2C_TCR_READ : 0;
- writew(tcr_val | msg->addr, base + VIAI2C_REG_TCR);
+ /* prepare to stop transmission */
+ if (priv->hrv && msg->len == (i2c->xfered_len + priv->xfer_len)) {
+ tmp = ioread8(base + VIAI2C_REG_CR);
+ tmp |= read ? VIAI2C_CR_RX_END : VIAI2C_CR_TX_END;
+ iowrite8(tmp, base + VIAI2C_REG_CR);
}
+ /* continue transmission */
+ tmp = ioread8(base + VIAI2C_REG_CR);
+ iowrite8(tmp |= VIAI2C_CR_CPU_RDY, base + VIAI2C_REG_CR);
+
return 0;
}
@@ -135,7 +164,7 @@ static int zxi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int
priv->xfer_len = 0;
i2c->xfered_len = 0;
- viai2c_fifo_irq_xfer(i2c, 0);
+ viai2c_fifo_xfer(i2c);
if (!wait_for_completion_timeout(&i2c->complete, VIAI2C_TIMEOUT))
return -ETIMEDOUT;
@@ -228,6 +257,36 @@ static void zxi2c_get_bus_speed(struct viai2c *i2c)
dev_info(i2c->dev, "speed mode is %s\n", i2c_freq_mode_string(params[0]));
}
+static irqreturn_t zxi2c_isr(int irq, void *data)
+{
+ struct viai2c *i2c = data;
+ u8 status;
+
+ /* save the status and write-clear it */
+ status = readw(i2c->base + VIAI2C_REG_ISR);
+ if (!status)
+ return IRQ_NONE;
+
+ writew(status, i2c->base + VIAI2C_REG_ISR);
+
+ i2c->ret = 0;
+ if (status & VIAI2C_ISR_NACK_ADDR)
+ i2c->ret = -EIO;
+
+ if (!i2c->ret) {
+ if (i2c->mode == VIAI2C_BYTE_MODE)
+ i2c->ret = viai2c_irq_xfer(i2c);
+ else
+ i2c->ret = viai2c_fifo_irq_xfer(i2c);
+ }
+
+ /* All the data has been successfully transferred or an error occurred */
+ if (i2c->ret)
+ complete(&i2c->complete);
+
+ return IRQ_HANDLED;
+}
+
static int zxi2c_probe(struct platform_device *pdev)
{
int error;
@@ -239,6 +298,16 @@ static int zxi2c_probe(struct platform_device *pdev)
if (error)
return error;
+ i2c->irq = platform_get_irq(pdev, 0);
+ if (i2c->irq < 0)
+ return i2c->irq;
+
+ error = devm_request_irq(&pdev->dev, i2c->irq, zxi2c_isr,
+ IRQF_SHARED, pdev->name, i2c);
+ if (error)
+ return dev_err_probe(&pdev->dev, error,
+ "failed to request irq %i\n", i2c->irq);
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index a49642bbae4b..ca43e98cae1b 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -118,9 +118,12 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
queue_delayed_work(system_long_wq, &tu->worker,
msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY]));
}
- fallthrough;
+ break;
case I2C_SLAVE_WRITE_REQUESTED:
+ if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
+ return -EBUSY;
+
memset(tu->regs, 0, TU_NUM_REGS);
tu->reg_idx = 0;
break;
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index c2da5066e9a7..80b57d3ee3a7 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -330,6 +330,8 @@ config DMARD10
config FXLS8962AF
tristate
depends on I2C || !I2C # cannot be built-in for modular I2C
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
config FXLS8962AF_I2C
tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver"
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index a7826bba0852..b26d4575e256 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -145,6 +145,7 @@ struct ad7173_device_info {
unsigned int id;
char *name;
bool has_temp;
+ bool has_input_buf;
bool has_int_ref;
bool has_ref2;
u8 num_gpios;
@@ -212,18 +213,21 @@ static const struct ad7173_device_info ad7173_device_info[] = {
.num_configs = 4,
.num_gpios = 2,
.has_temp = true,
+ .has_input_buf = true,
.has_int_ref = true,
.clock = 2 * HZ_PER_MHZ,
.sinc5_data_rates = ad7173_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
},
[ID_AD7172_4] = {
+ .name = "ad7172-4",
.id = AD7172_4_ID,
.num_inputs = 9,
.num_channels = 8,
.num_configs = 8,
.num_gpios = 4,
.has_temp = false,
+ .has_input_buf = true,
.has_ref2 = true,
.clock = 2 * HZ_PER_MHZ,
.sinc5_data_rates = ad7173_sinc5_data_rates,
@@ -237,6 +241,7 @@ static const struct ad7173_device_info ad7173_device_info[] = {
.num_configs = 8,
.num_gpios = 4,
.has_temp = true,
+ .has_input_buf = true,
.has_int_ref = true,
.has_ref2 = true,
.clock = 2 * HZ_PER_MHZ,
@@ -251,18 +256,21 @@ static const struct ad7173_device_info ad7173_device_info[] = {
.num_configs = 4,
.num_gpios = 2,
.has_temp = true,
+ .has_input_buf = true,
.has_int_ref = true,
.clock = 16 * HZ_PER_MHZ,
.sinc5_data_rates = ad7175_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
},
[ID_AD7175_8] = {
+ .name = "ad7175-8",
.id = AD7175_8_ID,
.num_inputs = 17,
.num_channels = 16,
.num_configs = 8,
.num_gpios = 4,
.has_temp = true,
+ .has_input_buf = true,
.has_int_ref = true,
.has_ref2 = true,
.clock = 16 * HZ_PER_MHZ,
@@ -277,18 +285,21 @@ static const struct ad7173_device_info ad7173_device_info[] = {
.num_configs = 4,
.num_gpios = 2,
.has_temp = false,
+ .has_input_buf = false,
.has_int_ref = true,
.clock = 16 * HZ_PER_MHZ,
.sinc5_data_rates = ad7175_sinc5_data_rates,
.num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
},
[ID_AD7177_2] = {
+ .name = "ad7177-2",
.id = AD7177_ID,
.num_inputs = 5,
.num_channels = 4,
.num_configs = 4,
.num_gpios = 2,
.has_temp = true,
+ .has_input_buf = true,
.has_int_ref = true,
.clock = 16 * HZ_PER_MHZ,
.odr_start_value = AD7177_ODR_START_VALUE,
@@ -532,6 +543,7 @@ static int ad7173_append_status(struct ad_sigma_delta *sd, bool append)
unsigned int interface_mode = st->interface_mode;
int ret;
+ interface_mode &= ~AD7173_INTERFACE_DATA_STAT;
interface_mode |= AD7173_INTERFACE_DATA_STAT_EN(append);
ret = ad_sd_write_reg(&st->sd, AD7173_REG_INTERFACE_MODE, 2, interface_mode);
if (ret)
@@ -705,7 +717,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
{
struct ad7173_state *st = iio_priv(indio_dev);
struct ad7173_channel_config *cfg;
- unsigned int freq, i, reg;
+ unsigned int freq, i;
int ret;
ret = iio_device_claim_direct_mode(indio_dev);
@@ -721,16 +733,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
cfg = &st->channels[chan->address].cfg;
cfg->odr = i;
-
- if (!cfg->live)
- break;
-
- ret = ad_sd_read_reg(&st->sd, AD7173_REG_FILTER(cfg->cfg_slot), 2, &reg);
- if (ret)
- break;
- reg &= ~AD7173_FILTER_ODR0_MASK;
- reg |= FIELD_PREP(AD7173_FILTER_ODR0_MASK, i);
- ret = ad_sd_write_reg(&st->sd, AD7173_REG_FILTER(cfg->cfg_slot), 2, reg);
+ cfg->live = false;
break;
default:
@@ -792,8 +795,7 @@ static const struct iio_chan_spec ad7173_channel_template = {
.type = IIO_VOLTAGE,
.indexed = 1,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_type = {
.sign = 'u',
.realbits = 24,
@@ -804,12 +806,11 @@ static const struct iio_chan_spec ad7173_channel_template = {
static const struct iio_chan_spec ad7173_temp_iio_channel_template = {
.type = IIO_TEMP,
- .indexed = 1,
.channel = AD7173_AIN_TEMP_POS,
.channel2 = AD7173_AIN_TEMP_NEG,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_type = {
.sign = 'u',
.realbits = 24,
@@ -932,7 +933,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
AD7173_CH_ADDRESS(chan_arr[chan_index].channel,
chan_arr[chan_index].channel2);
chan_st_priv->cfg.bipolar = false;
- chan_st_priv->cfg.input_buf = true;
+ chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.ref_sel = AD7173_SETUP_REF_SEL_INT_REF;
st->adc_mode |= AD7173_ADC_MODE_REF_EN;
@@ -989,7 +990,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan_st_priv->ain = AD7173_CH_ADDRESS(ain[0], ain[1]);
chan_st_priv->chan_reg = chan_index;
- chan_st_priv->cfg.input_buf = true;
+ chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.odr = 0;
chan_st_priv->cfg.bipolar = fwnode_property_read_bool(child, "bipolar");
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 353a97f9c086..13ea8a1073d2 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -157,6 +157,8 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
ret = ad7266_read_single(st, val, chan->address);
iio_device_release_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
*val = (*val >> 2) & 0xfff;
if (chan->scan_type.sign == 's')
*val = sign_extend32(*val,
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index e85b763b9ffc..8f5b9c3f6e3d 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -243,11 +243,11 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index,
}
static const struct iio_chan_spec ad9434_channels[] = {
- AD9467_CHAN(0, 0, 12, 'S'),
+ AD9467_CHAN(0, 0, 12, 's'),
};
static const struct iio_chan_spec ad9467_channels[] = {
- AD9467_CHAN(0, 0, 16, 'S'),
+ AD9467_CHAN(0, 0, 16, 's'),
};
static const struct ad9467_chip_info ad9467_chip_tbl = {
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index f0b71a1220e0..f52abf759260 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -414,8 +414,12 @@ static void ams_enable_channel_sequence(struct iio_dev *indio_dev)
/* Run calibration of PS & PL as part of the sequence */
scan_mask = BIT(0) | BIT(AMS_PS_SEQ_MAX);
- for (i = 0; i < indio_dev->num_channels; i++)
- scan_mask |= BIT_ULL(indio_dev->channels[i].scan_index);
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ const struct iio_chan_spec *chan = &indio_dev->channels[i];
+
+ if (chan->scan_index < AMS_CTRL_SEQ_BASE)
+ scan_mask |= BIT_ULL(chan->scan_index);
+ }
if (ams->ps_base) {
/* put sysmon in a soft reset to change the sequence */
diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
index 4edc5d21cb9f..f959252a4fe6 100644
--- a/drivers/iio/chemical/bme680.h
+++ b/drivers/iio/chemical/bme680.h
@@ -54,7 +54,9 @@
#define BME680_NB_CONV_MASK GENMASK(3, 0)
#define BME680_REG_MEAS_STAT_0 0x1D
+#define BME680_NEW_DATA_BIT BIT(7)
#define BME680_GAS_MEAS_BIT BIT(6)
+#define BME680_MEAS_BIT BIT(5)
/* Calibration Parameters */
#define BME680_T2_LSB_REG 0x8A
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index ef5e0e46fd34..500f56834b01 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -10,6 +10,7 @@
*/
#include <linux/acpi.h>
#include <linux/bitfield.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/log2.h>
@@ -38,7 +39,7 @@ struct bme680_calib {
s8 par_h3;
s8 par_h4;
s8 par_h5;
- s8 par_h6;
+ u8 par_h6;
s8 par_h7;
s8 par_gh1;
s16 par_gh2;
@@ -342,10 +343,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
if (!calib->par_t2)
bme680_read_calib(data, calib);
- var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
+ var1 = (adc_temp >> 3) - ((s32)calib->par_t1 << 1);
var2 = (var1 * calib->par_t2) >> 11;
var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
- var3 = (var3 * (calib->par_t3 << 4)) >> 14;
+ var3 = (var3 * ((s32)calib->par_t3 << 4)) >> 14;
data->t_fine = var2 + var3;
calc_temp = (data->t_fine * 5 + 128) >> 8;
@@ -368,9 +369,9 @@ static u32 bme680_compensate_press(struct bme680_data *data,
var1 = (data->t_fine >> 1) - 64000;
var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2;
var2 = var2 + (var1 * calib->par_p5 << 1);
- var2 = (var2 >> 2) + (calib->par_p4 << 16);
+ var2 = (var2 >> 2) + ((s32)calib->par_p4 << 16);
var1 = (((((var1 >> 2) * (var1 >> 2)) >> 13) *
- (calib->par_p3 << 5)) >> 3) +
+ ((s32)calib->par_p3 << 5)) >> 3) +
((calib->par_p2 * var1) >> 1);
var1 = var1 >> 18;
var1 = ((32768 + var1) * calib->par_p1) >> 15;
@@ -388,7 +389,7 @@ static u32 bme680_compensate_press(struct bme680_data *data,
var3 = ((press_comp >> 8) * (press_comp >> 8) *
(press_comp >> 8) * calib->par_p10) >> 17;
- press_comp += (var1 + var2 + var3 + (calib->par_p7 << 7)) >> 4;
+ press_comp += (var1 + var2 + var3 + ((s32)calib->par_p7 << 7)) >> 4;
return press_comp;
}
@@ -414,7 +415,7 @@ static u32 bme680_compensate_humid(struct bme680_data *data,
(((temp_scaled * ((temp_scaled * calib->par_h5) / 100))
>> 6) / 100) + (1 << 14))) >> 10;
var3 = var1 * var2;
- var4 = calib->par_h6 << 7;
+ var4 = (s32)calib->par_h6 << 7;
var4 = (var4 + ((temp_scaled * calib->par_h7) / 100)) >> 4;
var5 = ((var3 >> 14) * (var3 >> 14)) >> 10;
var6 = (var4 * var5) >> 1;
@@ -532,6 +533,43 @@ static u8 bme680_oversampling_to_reg(u8 val)
return ilog2(val) + 1;
}
+/*
+ * Taken from Bosch BME680 API:
+ * https://github.com/boschsensortec/BME68x_SensorAPI/blob/v4.4.8/bme68x.c#L490
+ */
+static int bme680_wait_for_eoc(struct bme680_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int check;
+ int ret;
+ /*
+ * (Sum of oversampling ratios * time per oversampling) +
+ * TPH measurement + gas measurement + wait transition from forced mode
+ * + heater duration
+ */
+ int wait_eoc_us = ((data->oversampling_temp + data->oversampling_press +
+ data->oversampling_humid) * 1936) + (477 * 4) +
+ (477 * 5) + 1000 + (data->heater_dur * 1000);
+
+ usleep_range(wait_eoc_us, wait_eoc_us + 100);
+
+ ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
+ if (ret) {
+ dev_err(dev, "failed to read measurement status register.\n");
+ return ret;
+ }
+ if (check & BME680_MEAS_BIT) {
+ dev_err(dev, "Device measurement cycle incomplete.\n");
+ return -EBUSY;
+ }
+ if (!(check & BME680_NEW_DATA_BIT)) {
+ dev_err(dev, "No new data available from the device.\n");
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
static int bme680_chip_config(struct bme680_data *data)
{
struct device *dev = regmap_get_device(data->regmap);
@@ -622,6 +660,10 @@ static int bme680_read_temp(struct bme680_data *data, int *val)
if (ret < 0)
return ret;
+ ret = bme680_wait_for_eoc(data);
+ if (ret)
+ return ret;
+
ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
&tmp, 3);
if (ret < 0) {
@@ -678,7 +720,7 @@ static int bme680_read_press(struct bme680_data *data,
}
*val = bme680_compensate_press(data, adc_press);
- *val2 = 100;
+ *val2 = 1000;
return IIO_VAL_FRACTIONAL;
}
@@ -738,6 +780,10 @@ static int bme680_read_gas(struct bme680_data *data,
if (ret < 0)
return ret;
+ ret = bme680_wait_for_eoc(data);
+ if (ret)
+ return ret;
+
ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
if (check & BME680_GAS_MEAS_BIT) {
dev_err(dev, "gas measurement incomplete\n");
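
Note: the bme680 hunk above sleeps for a duration computed from the oversampling settings and heater duration before checking the end-of-conversion bits. A standalone sketch of that wait-time formula, using example oversampling ratios; the 1936/477/1000 constants come from the added comment and the Bosch API it cites:

#include <stdio.h>

/* microseconds to wait before checking the EOC/new-data bits */
static int bme680_wait_eoc_us(int osrs_temp, int osrs_press, int osrs_humid,
			      int heater_dur_ms)
{
	return ((osrs_temp + osrs_press + osrs_humid) * 1936) +
	       (477 * 4) +               /* TPH measurement */
	       (477 * 5) +               /* gas measurement */
	       1000 +                    /* forced-mode transition */
	       (heater_dur_ms * 1000);   /* heater duration */
}

int main(void)
{
	/* example settings: 2x/4x/1x oversampling, 150 ms heater duration */
	printf("wait %d us\n", bme680_wait_eoc_us(2, 4, 1, 150));
	return 0;
}
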
diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
index fa205f17bd90..f44458c380d9 100644
--- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
@@ -60,11 +60,15 @@ EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_init, IIO_INV_SENSORS_TIMESTAMP);
int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
uint32_t period, bool fifo)
{
+ uint32_t mult;
+
/* when FIFO is on, prevent odr change if one is already pending */
if (fifo && ts->new_mult != 0)
return -EAGAIN;
- ts->new_mult = period / ts->chip.clock_period;
+ mult = period / ts->chip.clock_period;
+ if (mult != ts->mult)
+ ts->new_mult = mult;
return 0;
}
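
Note: the inv_sensors hunk above only latches a new clock multiplier when it actually differs from the current one, so an unchanged ODR no longer counts as a pending change. A tiny sketch of that guard, with made-up period and clock values:

#include <stdint.h>
#include <stdio.h>

struct ts_state {
	uint32_t clock_period;   /* ns per clock tick */
	uint32_t mult;           /* current multiplier */
	uint32_t new_mult;       /* pending multiplier, 0 = none pending */
};

static int update_odr(struct ts_state *ts, uint32_t period, int fifo_on)
{
	uint32_t mult;

	/* with the FIFO running, refuse to stack a second pending change */
	if (fifo_on && ts->new_mult != 0)
		return -1;

	mult = period / ts->clock_period;
	if (mult != ts->mult)            /* the fix: ignore no-op updates */
		ts->new_mult = mult;
	return 0;
}

int main(void)
{
	struct ts_state ts = { .clock_period = 1000, .mult = 10, .new_mult = 0 };

	update_odr(&ts, 10000, 1);       /* same ODR: stays non-pending */
	printf("pending mult after no-op update: %u\n", ts.new_mult);
	update_odr(&ts, 20000, 1);       /* real change: becomes pending */
	printf("pending mult after real update: %u\n", ts.new_mult);
	return 0;
}
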
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 3c2bf620f00f..ee0d9798d8b4 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -133,7 +133,7 @@ config AD5624R_SPI
config AD9739A
tristate "Analog Devices AD9739A RF DAC spi driver"
- depends on SPI || COMPILE_TEST
+ depends on SPI
select REGMAP_SPI
select IIO_BACKEND
help
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 076bc9ecfb49..4763402dbcd6 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -415,7 +415,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
s64 tmp = *val * (3767897513LL / 25LL);
*val = div_s64_rem(tmp, 1000000000LL, val2);
- return IIO_VAL_INT_PLUS_MICRO;
+ return IIO_VAL_INT_PLUS_NANO;
}
mutex_lock(&st->lock);
diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c
index cdc4789213ba..a82dcc3da421 100644
--- a/drivers/iio/humidity/hdc3020.c
+++ b/drivers/iio/humidity/hdc3020.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm.h>
@@ -66,8 +67,10 @@
#define HDC3020_CRC8_POLYNOMIAL 0x31
-#define HDC3020_MIN_TEMP -40
-#define HDC3020_MAX_TEMP 125
+#define HDC3020_MIN_TEMP_MICRO -39872968
+#define HDC3020_MAX_TEMP_MICRO 124875639
+#define HDC3020_MAX_TEMP_HYST_MICRO 164748607
+#define HDC3020_MAX_HUM_MICRO 99220264
struct hdc3020_data {
struct i2c_client *client;
@@ -368,6 +371,105 @@ static int hdc3020_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int hdc3020_thresh_get_temp(u16 thresh)
+{
+ int temp;
+
+ /*
+ * Get the temperature threshold from 9 LSBs, shift them to get
+ * the truncated temperature threshold representation and
+ * calculate the threshold according to the formula in the
+ * datasheet. Result is degree celsius scaled by 65535.
+ */
+ temp = FIELD_GET(HDC3020_THRESH_TEMP_MASK, thresh) <<
+ HDC3020_THRESH_TEMP_TRUNC_SHIFT;
+
+ return -2949075 + (175 * temp);
+}
+
+static int hdc3020_thresh_get_hum(u16 thresh)
+{
+ int hum;
+
+ /*
+ * Get the humidity threshold from 7 MSBs, shift them to get the
+ * truncated humidity threshold representation and calculate the
+ * threshold according to the formula in the datasheet. Result is
+ * percent scaled by 65535.
+ */
+ hum = FIELD_GET(HDC3020_THRESH_HUM_MASK, thresh) <<
+ HDC3020_THRESH_HUM_TRUNC_SHIFT;
+
+ return hum * 100;
+}
+
+static u16 hdc3020_thresh_set_temp(int s_temp, u16 curr_thresh)
+{
+ u64 temp;
+ u16 thresh;
+
+ /*
+ * Calculate temperature threshold, shift it down to get the
+	 * truncated threshold representation in the 9 LSBs while keeping
+ * the current humidity threshold in the 7 MSBs.
+ */
+ temp = (u64)(s_temp + 45000000) * 65535ULL;
+ temp = div_u64(temp, 1000000 * 175) >> HDC3020_THRESH_TEMP_TRUNC_SHIFT;
+ thresh = FIELD_PREP(HDC3020_THRESH_TEMP_MASK, temp);
+ thresh |= (FIELD_GET(HDC3020_THRESH_HUM_MASK, curr_thresh) <<
+ HDC3020_THRESH_HUM_TRUNC_SHIFT);
+
+ return thresh;
+}
+
+static u16 hdc3020_thresh_set_hum(int s_hum, u16 curr_thresh)
+{
+ u64 hum;
+ u16 thresh;
+
+ /*
+ * Calculate humidity threshold, shift it down and up to get the
+	 * truncated threshold representation in the 7 MSBs while keeping
+ * the current temperature threshold in the 9 LSBs.
+ */
+ hum = (u64)(s_hum) * 65535ULL;
+ hum = div_u64(hum, 1000000 * 100) >> HDC3020_THRESH_HUM_TRUNC_SHIFT;
+ thresh = FIELD_PREP(HDC3020_THRESH_HUM_MASK, hum);
+ thresh |= FIELD_GET(HDC3020_THRESH_TEMP_MASK, curr_thresh);
+
+ return thresh;
+}
+
+static
+int hdc3020_thresh_clr(s64 s_thresh, s64 s_hyst, enum iio_event_direction dir)
+{
+ s64 s_clr;
+
+ /*
+	 * Include directions when calculating the clear value,
+ * since hysteresis is unsigned by definition and the
+ * clear value is an absolute value which is signed.
+ */
+ if (dir == IIO_EV_DIR_RISING)
+ s_clr = s_thresh - s_hyst;
+ else
+ s_clr = s_thresh + s_hyst;
+
+ /* Divide by 65535 to get units of micro */
+ return div_s64(s_clr, 65535);
+}
+
+static int _hdc3020_write_thresh(struct hdc3020_data *data, u16 reg, u16 val)
+{
+ u8 buf[5];
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be16(val, buf + 2);
+ buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
+
+ return hdc3020_write_bytes(data, buf, 5);
+}
+
static int hdc3020_write_thresh(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
@@ -376,67 +478,126 @@ static int hdc3020_write_thresh(struct iio_dev *indio_dev,
int val, int val2)
{
struct hdc3020_data *data = iio_priv(indio_dev);
- u8 buf[5];
- u64 tmp;
- u16 reg;
- int ret;
-
- /* Supported temperature range is from –40 to 125 degree celsius */
- if (val < HDC3020_MIN_TEMP || val > HDC3020_MAX_TEMP)
- return -EINVAL;
-
- /* Select threshold register */
- if (info == IIO_EV_INFO_VALUE) {
- if (dir == IIO_EV_DIR_RISING)
- reg = HDC3020_S_T_RH_THRESH_HIGH;
- else
- reg = HDC3020_S_T_RH_THRESH_LOW;
+ u16 reg, reg_val, reg_thresh_rd, reg_clr_rd, reg_thresh_wr, reg_clr_wr;
+ s64 s_thresh, s_hyst, s_clr;
+ int s_val, thresh, clr, ret;
+
+ /* Select threshold registers */
+ if (dir == IIO_EV_DIR_RISING) {
+ reg_thresh_rd = HDC3020_R_T_RH_THRESH_HIGH;
+ reg_thresh_wr = HDC3020_S_T_RH_THRESH_HIGH;
+ reg_clr_rd = HDC3020_R_T_RH_THRESH_HIGH_CLR;
+ reg_clr_wr = HDC3020_S_T_RH_THRESH_HIGH_CLR;
} else {
- if (dir == IIO_EV_DIR_RISING)
- reg = HDC3020_S_T_RH_THRESH_HIGH_CLR;
- else
- reg = HDC3020_S_T_RH_THRESH_LOW_CLR;
+ reg_thresh_rd = HDC3020_R_T_RH_THRESH_LOW;
+ reg_thresh_wr = HDC3020_S_T_RH_THRESH_LOW;
+ reg_clr_rd = HDC3020_R_T_RH_THRESH_LOW_CLR;
+ reg_clr_wr = HDC3020_S_T_RH_THRESH_LOW_CLR;
}
guard(mutex)(&data->lock);
- ret = hdc3020_read_be16(data, reg);
+ ret = hdc3020_read_be16(data, reg_thresh_rd);
+ if (ret < 0)
+ return ret;
+
+ thresh = ret;
+ ret = hdc3020_read_be16(data, reg_clr_rd);
if (ret < 0)
return ret;
+ clr = ret;
+ /* Scale value to include decimal part into calculations */
+ s_val = (val < 0) ? (val * 1000000 - val2) : (val * 1000000 + val2);
switch (chan->type) {
case IIO_TEMP:
- /*
- * Calculate temperature threshold, shift it down to get the
- * truncated threshold representation in the 9LSBs while keeping
- * the current humidity threshold in the 7 MSBs.
- */
- tmp = ((u64)(((val + 45) * MICRO) + val2)) * 65535ULL;
- tmp = div_u64(tmp, MICRO * 175);
- val = tmp >> HDC3020_THRESH_TEMP_TRUNC_SHIFT;
- val = FIELD_PREP(HDC3020_THRESH_TEMP_MASK, val);
- val |= (FIELD_GET(HDC3020_THRESH_HUM_MASK, ret) <<
- HDC3020_THRESH_HUM_TRUNC_SHIFT);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ s_val = max(s_val, HDC3020_MIN_TEMP_MICRO);
+ s_val = min(s_val, HDC3020_MAX_TEMP_MICRO);
+ reg = reg_thresh_wr;
+ reg_val = hdc3020_thresh_set_temp(s_val, thresh);
+ ret = _hdc3020_write_thresh(data, reg, reg_val);
+ if (ret < 0)
+ return ret;
+
+ /* Calculate old hysteresis */
+ s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000;
+ s_clr = (s64)hdc3020_thresh_get_temp(clr) * 1000000;
+ s_hyst = div_s64(abs(s_thresh - s_clr), 65535);
+ /* Set new threshold */
+ thresh = reg_val;
+ /* Set old hysteresis */
+ s_val = s_hyst;
+ fallthrough;
+ case IIO_EV_INFO_HYSTERESIS:
+ /*
+ * Function hdc3020_thresh_get_temp returns temperature
+			 * in degrees Celsius scaled by 65535. Scale by 1000000
+ * to be able to subtract scaled hysteresis value.
+ */
+ s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000;
+ /*
+			 * Units of s_val are in microdegrees Celsius, scale by
+ * 65535 to get same units as s_thresh.
+ */
+ s_val = min(abs(s_val), HDC3020_MAX_TEMP_HYST_MICRO);
+ s_hyst = (s64)s_val * 65535;
+ s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir);
+ s_clr = max(s_clr, HDC3020_MIN_TEMP_MICRO);
+ s_clr = min(s_clr, HDC3020_MAX_TEMP_MICRO);
+ reg = reg_clr_wr;
+ reg_val = hdc3020_thresh_set_temp(s_clr, clr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
break;
case IIO_HUMIDITYRELATIVE:
- /*
- * Calculate humidity threshold, shift it down and up to get the
- * truncated threshold representation in the 7MSBs while keeping
- * the current temperature threshold in the 9 LSBs.
- */
- tmp = ((u64)((val * MICRO) + val2)) * 65535ULL;
- tmp = div_u64(tmp, MICRO * 100);
- val = tmp >> HDC3020_THRESH_HUM_TRUNC_SHIFT;
- val = FIELD_PREP(HDC3020_THRESH_HUM_MASK, val);
- val |= FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret);
+ s_val = (s_val < 0) ? 0 : min(s_val, HDC3020_MAX_HUM_MICRO);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ reg = reg_thresh_wr;
+ reg_val = hdc3020_thresh_set_hum(s_val, thresh);
+ ret = _hdc3020_write_thresh(data, reg, reg_val);
+ if (ret < 0)
+ return ret;
+
+ /* Calculate old hysteresis */
+ s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000;
+ s_clr = (s64)hdc3020_thresh_get_hum(clr) * 1000000;
+ s_hyst = div_s64(abs(s_thresh - s_clr), 65535);
+ /* Set new threshold */
+ thresh = reg_val;
+ /* Try to set old hysteresis */
+ s_val = min(abs(s_hyst), HDC3020_MAX_HUM_MICRO);
+ fallthrough;
+ case IIO_EV_INFO_HYSTERESIS:
+ /*
+ * Function hdc3020_thresh_get_hum returns relative
+ * humidity in percent scaled by 65535. Scale by 1000000
+ * to be able to subtract scaled hysteresis value.
+ */
+ s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000;
+ /*
+ * Units of s_val are in micro percent, scale by 65535
+ * to get same units as s_thresh.
+ */
+ s_hyst = (s64)s_val * 65535;
+ s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir);
+ s_clr = max(s_clr, 0);
+ s_clr = min(s_clr, HDC3020_MAX_HUM_MICRO);
+ reg = reg_clr_wr;
+ reg_val = hdc3020_thresh_set_hum(s_clr, clr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
break;
default:
return -EOPNOTSUPP;
}
- put_unaligned_be16(reg, buf);
- put_unaligned_be16(val, buf + 2);
- buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
- return hdc3020_write_bytes(data, buf, 5);
+ return _hdc3020_write_thresh(data, reg, reg_val);
}
static int hdc3020_read_thresh(struct iio_dev *indio_dev,
@@ -447,48 +608,60 @@ static int hdc3020_read_thresh(struct iio_dev *indio_dev,
int *val, int *val2)
{
struct hdc3020_data *data = iio_priv(indio_dev);
- u16 reg;
- int ret;
+ u16 reg_thresh, reg_clr;
+ int thresh, clr, ret;
- /* Select threshold register */
- if (info == IIO_EV_INFO_VALUE) {
- if (dir == IIO_EV_DIR_RISING)
- reg = HDC3020_R_T_RH_THRESH_HIGH;
- else
- reg = HDC3020_R_T_RH_THRESH_LOW;
+ /* Select threshold registers */
+ if (dir == IIO_EV_DIR_RISING) {
+ reg_thresh = HDC3020_R_T_RH_THRESH_HIGH;
+ reg_clr = HDC3020_R_T_RH_THRESH_HIGH_CLR;
} else {
- if (dir == IIO_EV_DIR_RISING)
- reg = HDC3020_R_T_RH_THRESH_HIGH_CLR;
- else
- reg = HDC3020_R_T_RH_THRESH_LOW_CLR;
+ reg_thresh = HDC3020_R_T_RH_THRESH_LOW;
+ reg_clr = HDC3020_R_T_RH_THRESH_LOW_CLR;
}
guard(mutex)(&data->lock);
- ret = hdc3020_read_be16(data, reg);
+ ret = hdc3020_read_be16(data, reg_thresh);
if (ret < 0)
return ret;
switch (chan->type) {
case IIO_TEMP:
- /*
- * Get the temperature threshold from 9 LSBs, shift them to get
- * the truncated temperature threshold representation and
- * calculate the threshold according to the formula in the
- * datasheet.
- */
- *val = FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret);
- *val = *val << HDC3020_THRESH_TEMP_TRUNC_SHIFT;
- *val = -2949075 + (175 * (*val));
+ thresh = hdc3020_thresh_get_temp(ret);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = thresh;
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ ret = hdc3020_read_be16(data, reg_clr);
+ if (ret < 0)
+ return ret;
+
+ clr = hdc3020_thresh_get_temp(ret);
+ *val = abs(thresh - clr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
*val2 = 65535;
return IIO_VAL_FRACTIONAL;
case IIO_HUMIDITYRELATIVE:
- /*
- * Get the humidity threshold from 7 MSBs, shift them to get the
- * truncated humidity threshold representation and calculate the
- * threshold according to the formula in the datasheet.
- */
- *val = FIELD_GET(HDC3020_THRESH_HUM_MASK, ret);
- *val = (*val << HDC3020_THRESH_HUM_TRUNC_SHIFT) * 100;
+ thresh = hdc3020_thresh_get_hum(ret);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = thresh;
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ ret = hdc3020_read_be16(data, reg_clr);
+ if (ret < 0)
+ return ret;
+
+ clr = hdc3020_thresh_get_hum(ret);
+ *val = abs(thresh - clr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
*val2 = 65535;
return IIO_VAL_FRACTIONAL;
default:
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index 5d42ab9b176a..67d74a1a1b26 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -1391,7 +1391,7 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p)
&data->buffer.channels,
ARRAY_SIZE(data->buffer.channels));
if (ret)
- return IRQ_NONE;
+ goto out;
} else {
for_each_set_bit(bit, indio_dev->active_scan_mask,
BMI323_CHAN_MAX) {
@@ -1400,13 +1400,14 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p)
&data->buffer.channels[index++],
BMI323_BYTES_PER_SAMPLE);
if (ret)
- return IRQ_NONE;
+ goto out;
}
}
iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
iio_get_time_ns(indio_dev));
+out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index 83d8504ebfff..4b2566693614 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -130,10 +130,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
/* update data FIFO write */
inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
- if (ret)
- goto out_unlock;
-
- ret = inv_icm42600_buffer_update_watermark(st);
out_unlock:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
index 63b85ec88c13..a8cf74c84c3c 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
@@ -222,10 +222,15 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
latency_accel = period_accel * wm_accel;
/* 0 value for watermark means that the sensor is turned off */
+ if (wm_gyro == 0 && wm_accel == 0)
+ return 0;
+
if (latency_gyro == 0) {
watermark = wm_accel;
+ st->fifo.watermark.eff_accel = wm_accel;
} else if (latency_accel == 0) {
watermark = wm_gyro;
+ st->fifo.watermark.eff_gyro = wm_gyro;
} else {
/* compute the smallest latency that is a multiple of both */
if (latency_gyro <= latency_accel)
@@ -241,6 +246,13 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
watermark = latency / period;
if (watermark < 1)
watermark = 1;
+ /* update effective watermark */
+ st->fifo.watermark.eff_gyro = latency / period_gyro;
+ if (st->fifo.watermark.eff_gyro < 1)
+ st->fifo.watermark.eff_gyro = 1;
+ st->fifo.watermark.eff_accel = latency / period_accel;
+ if (st->fifo.watermark.eff_accel < 1)
+ st->fifo.watermark.eff_accel = 1;
}
/* compute watermark value in bytes */
@@ -514,7 +526,7 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st)
/* handle gyroscope timestamp and FIFO data parsing */
if (st->fifo.nb.gyro > 0) {
ts = &gyro_st->ts;
- inv_sensors_timestamp_interrupt(ts, st->fifo.nb.gyro,
+ inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_gyro,
st->timestamp.gyro);
ret = inv_icm42600_gyro_parse_fifo(st->indio_gyro);
if (ret)
@@ -524,7 +536,7 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st)
/* handle accelerometer timestamp and FIFO data parsing */
if (st->fifo.nb.accel > 0) {
ts = &accel_st->ts;
- inv_sensors_timestamp_interrupt(ts, st->fifo.nb.accel,
+ inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_accel,
st->timestamp.accel);
ret = inv_icm42600_accel_parse_fifo(st->indio_accel);
if (ret)
@@ -577,6 +589,9 @@ int inv_icm42600_buffer_init(struct inv_icm42600_state *st)
unsigned int val;
int ret;
+ st->fifo.watermark.eff_gyro = 1;
+ st->fifo.watermark.eff_accel = 1;
+
/*
* Default FIFO configuration (bits 7 to 5)
* - use invalid value
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
index 8b85ee333bf8..f6c85daf42b0 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h
@@ -32,6 +32,8 @@ struct inv_icm42600_fifo {
struct {
unsigned int gyro;
unsigned int accel;
+ unsigned int eff_gyro;
+ unsigned int eff_accel;
} watermark;
size_t count;
struct {
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index 96116a68ab29..62fdae530334 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -537,6 +537,7 @@ static int inv_icm42600_irq_init(struct inv_icm42600_state *st, int irq,
if (ret)
return ret;
+ irq_type |= IRQF_ONESHOT;
return devm_request_threaded_irq(dev, irq, inv_icm42600_irq_timestamp,
inv_icm42600_irq_handler, irq_type,
"inv_icm42600", st);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index e6f8de80128c..938af5b640b0 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -130,10 +130,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
/* update data FIFO write */
inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
- if (ret)
- goto out_unlock;
-
- ret = inv_icm42600_buffer_update_watermark(st);
out_unlock:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 0dc0f22a5582..3d3b27f28c9d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -100,8 +100,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
goto end_session;
/* Each FIFO data contains all sensors, so same number for FIFO and sensor data */
fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
- inv_sensors_timestamp_interrupt(&st->timestamp, nb, pf->timestamp);
- inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, nb, 0);
+ inv_sensors_timestamp_interrupt(&st->timestamp, 1, pf->timestamp);
+ inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, 1, 0);
/* clear internal data buffer for avoiding kernel data leak */
memset(data, 0, sizeof(data));
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 1b603567ccc8..84273660ca2e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -300,6 +300,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type)
if (!st->trig)
return -ENOMEM;
+ irq_type |= IRQF_ONESHOT;
ret = devm_request_threaded_irq(&indio_dev->dev, st->irq,
&inv_mpu6050_interrupt_timestamp,
&inv_mpu6050_interrupt_handle,
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 52d773261828..485e6fc44a04 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -721,7 +721,7 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
return ret;
*val *= scale;
- return 0;
+ return ret;
} else {
ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
if (ret < 0)
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 09f53d987c7d..221fa2c552ae 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -1394,12 +1394,12 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2)
/*
* Temperature is returned in Celsius degrees in fractional
- * form down 2^16. We rescale by x1000 to return milli Celsius
- * to respect IIO ABI.
+ * form down 2^16. We rescale by x1000 to return millidegrees
+ * Celsius to respect IIO ABI.
*/
- *val = raw_temp * 1000;
- *val2 = 16;
- return IIO_VAL_FRACTIONAL_LOG2;
+ raw_temp = sign_extend32(raw_temp, 23);
+ *val = ((s64)raw_temp * 1000) / (1 << 16);
+ return IIO_VAL_INT;
}
static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
diff --git a/drivers/iio/temperature/mlx90635.c b/drivers/iio/temperature/mlx90635.c
index 1f5c962c1818..f7f88498ba0e 100644
--- a/drivers/iio/temperature/mlx90635.c
+++ b/drivers/iio/temperature/mlx90635.c
@@ -947,9 +947,9 @@ static int mlx90635_probe(struct i2c_client *client)
"failed to allocate regmap\n");
regmap_ee = devm_regmap_init_i2c(client, &mlx90635_regmap_ee);
- if (IS_ERR(regmap))
- return dev_err_probe(&client->dev, PTR_ERR(regmap),
- "failed to allocate regmap\n");
+ if (IS_ERR(regmap_ee))
+ return dev_err_probe(&client->dev, PTR_ERR(regmap_ee),
+ "failed to allocate EEPROM regmap\n");
mlx90635 = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 9dca451ed522..6974922e5609 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -107,8 +107,6 @@ struct bnxt_re_gsi_context {
struct bnxt_re_sqp_entries *sqp_tbl;
};
-#define BNXT_RE_MIN_MSIX 2
-#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
#define BNXT_RE_GEN_P5_MAX_VF 64
@@ -168,7 +166,7 @@ struct bnxt_re_dev {
struct bnxt_qplib_rcfw rcfw;
/* NQ */
- struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
+ struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX];
/* Device Resources */
struct bnxt_qplib_dev_attr dev_attr;
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 4f13423ecdbd..887b09dd86e7 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -112,6 +112,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
"start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
start, iova, length, access_flags);
+ access_flags &= ~IB_ACCESS_OPTIONAL;
if (access_flags & ~VALID_MR_FLAGS)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 2366c46eebc8..43660c831b22 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3759,10 +3759,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
return 0;
-err:
- mlx5r_macsec_dealloc_gids(dev);
err_mp:
mlx5_ib_cleanup_multiport_master(dev);
+err:
+ mlx5r_macsec_dealloc_gids(dev);
return err;
}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ecc111ed5d86..d3c1f63791a2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -246,6 +246,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
MLX5_SET(mkc, mkc, access_mode_4_2,
(ent->rb_key.access_mode >> 2) & 0x7);
+ MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats);
MLX5_SET(mkc, mkc, translations_octword_size,
get_mkc_octo_size(ent->rb_key.access_mode,
@@ -641,10 +642,8 @@ static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
new = &((*new)->rb_left);
if (cmp < 0)
new = &((*new)->rb_right);
- if (cmp == 0) {
- mutex_unlock(&cache->rb_lock);
+ if (cmp == 0)
return -EEXIST;
- }
}
/* Add new node and rebalance tree. */
@@ -719,6 +718,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
}
mr->mmkey.cache_ent = ent;
mr->mmkey.type = MLX5_MKEY_MR;
+ mr->mmkey.rb_key = ent->rb_key;
+ mr->mmkey.cacheable = true;
init_waitqueue_head(&mr->mmkey.wait);
return mr;
}
@@ -1169,7 +1170,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
mr->ibmr.pd = pd;
mr->umem = umem;
mr->page_shift = order_base_2(page_size);
- mr->mmkey.cacheable = true;
set_mr_fields(dev, mr, umem->length, access_flags, iova);
return mr;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index a056ea835da5..84be0c3d5699 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
int err;
struct mlx5_srq_attr in = {};
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+ __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
+ sizeof(struct mlx5_wqe_data_seg);
if (init_attr->srq_type != IB_SRQT_BASIC &&
init_attr->srq_type != IB_SRQT_XRC &&
init_attr->srq_type != IB_SRQT_TM)
return -EOPNOTSUPP;
- /* Sanity check SRQ size before proceeding */
- if (init_attr->attr.max_wr >= max_srq_wqes) {
- mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
- init_attr->attr.max_wr,
- max_srq_wqes);
+ /* Sanity check SRQ and sge size before proceeding */
+ if (init_attr->attr.max_wr >= max_srq_wqes ||
+ init_attr->attr.max_sge > max_sge_sz) {
+ mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
+ init_attr->attr.max_wr, max_srq_wqes,
+ init_attr->attr.max_sge, max_sge_sz);
return -EINVAL;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index c6a7fa3054fa..6596a85723c9 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -344,6 +344,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
* receive buffer later. For rmda operations additional
* length checks are performed in check_rkey.
*/
+ if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
+ unsigned int payload = payload_size(pkt);
+ unsigned int recv_buffer_len = 0;
+ int i;
+
+ for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
+ recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
+ if (payload + 40 > recv_buffer_len) {
+ rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
+ return RESPST_ERR_LENGTH;
+ }
+ }
+
if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
(qp_type(qp) == IB_QPT_UC))) {
unsigned int mtu = qp->mtu;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index c7d4d8ab5a09..de6238ee4379 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -812,7 +812,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
int i;
for (i = 0; i < ibwr->num_sge; i++, sge++) {
- memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length);
+ memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
p += sge->length;
}
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 70f0654c58b6..2b8370ecf42a 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -209,6 +209,7 @@ static const struct xpad_device {
{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
{ 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
{ 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE },
+ { 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE },
{ 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
{ 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 4e38229404b4..b4723ea395eb 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1477,15 +1477,46 @@ static void elantech_disconnect(struct psmouse *psmouse)
}
/*
+ * Some hw_version 4 models fail to properly activate absolute mode on
+ * resume without going through a disable/enable cycle.
+ */
+static const struct dmi_system_id elantech_needs_reenable[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ /* Lenovo N24 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "81AF"),
+ },
+ },
+#endif
+ { }
+};
+
+/*
* Put the touchpad back into absolute mode when reconnecting
*/
static int elantech_reconnect(struct psmouse *psmouse)
{
+ int err;
+
psmouse_reset(psmouse);
if (elantech_detect(psmouse, 0))
return -1;
+ if (dmi_check_system(elantech_needs_reenable)) {
+ err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE);
+ if (err)
+ psmouse_warn(psmouse, "failed to deactivate mouse on %s: %d\n",
+ psmouse->ps2dev.serio->phys, err);
+
+ err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE);
+ if (err)
+ psmouse_warn(psmouse, "failed to reactivate mouse on %s: %d\n",
+ psmouse->ps2dev.serio->phys, err);
+ }
+
if (elantech_set_absolute_mode(psmouse)) {
psmouse_err(psmouse,
"failed to put touchpad back into absolute mode.\n");
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index dfc6c581873b..5b50475ec414 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -76,7 +76,7 @@ static inline void i8042_write_command(int val)
#define SERIO_QUIRK_PROBE_DEFER BIT(5)
#define SERIO_QUIRK_RESET_ALWAYS BIT(6)
#define SERIO_QUIRK_RESET_NEVER BIT(7)
-#define SERIO_QUIRK_DIECT BIT(8)
+#define SERIO_QUIRK_DIRECT BIT(8)
#define SERIO_QUIRK_DUMBKBD BIT(9)
#define SERIO_QUIRK_NOLOOP BIT(10)
#define SERIO_QUIRK_NOTIMEOUT BIT(11)
@@ -1332,6 +1332,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
+ {
+ /*
+		 * The Ayaneo Kun is a handheld device where some of the buttons
+ * are handled by an AT keyboard. The keyboard is usually
+ * detected as raw, but sometimes, usually after a cold boot,
+ * it is detected as translated. Make sure that the keyboard
+ * is always in raw mode.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "KUN"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_DIRECT)
+ },
{ }
};
@@ -1655,7 +1669,7 @@ static void __init i8042_check_quirks(void)
if (quirks & SERIO_QUIRK_RESET_NEVER)
i8042_reset = I8042_RESET_NEVER;
}
- if (quirks & SERIO_QUIRK_DIECT)
+ if (quirks & SERIO_QUIRK_DIRECT)
i8042_direct = true;
if (quirks & SERIO_QUIRK_DUMBKBD)
i8042_dumbkbd = true;
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index d2bbb436a77d..4d13db13b9e5 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1111,6 +1111,16 @@ static const struct of_device_id ads7846_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, ads7846_dt_ids);
+static const struct spi_device_id ads7846_spi_ids[] = {
+ { "tsc2046", 7846 },
+ { "ads7843", 7843 },
+ { "ads7845", 7845 },
+ { "ads7846", 7846 },
+ { "ads7873", 7873 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ads7846_spi_ids);
+
static const struct ads7846_platform_data *ads7846_get_props(struct device *dev)
{
struct ads7846_platform_data *pdata;
@@ -1386,10 +1396,10 @@ static struct spi_driver ads7846_driver = {
},
.probe = ads7846_probe,
.remove = ads7846_remove,
+ .id_table = ads7846_spi_ids,
};
module_spi_driver(ads7846_driver);
MODULE_DESCRIPTION("ADS7846 TouchScreen Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("spi:ads7846");
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 31ffdc2a93f3..79bdb2b10949 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -261,8 +261,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
if (!error && data[0] == 2) {
error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1,
ILI251X_DATA_SIZE2);
- if (error >= 0 && error != ILI251X_DATA_SIZE2)
- error = -EIO;
+ if (error >= 0)
+ error = error == ILI251X_DATA_SIZE2 ? 0 : -EIO;
}
return error;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 27e293727095..c89d85b54a1a 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2743,6 +2743,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
+ iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
@@ -3362,7 +3363,7 @@ int amd_iommu_reenable(int mode)
return 0;
}
-int __init amd_iommu_enable_faulting(unsigned int cpu)
+int amd_iommu_enable_faulting(unsigned int cpu)
{
/* We enable MSI later when PCI is initialized */
return 0;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index c2703599bb16..b19e8c0f48fa 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2061,6 +2061,12 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ /* Clear DTE and flush the entry */
+ amd_iommu_dev_update_dte(dev_data, false);
+
+ /* Flush IOTLB and wait for the flushes to finish */
+ amd_iommu_domain_flush_all(domain);
+
/* Clear GCR3 table */
if (pdom_is_sva_capable(domain))
destroy_gcr3_table(dev_data, domain);
@@ -2069,12 +2075,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
dev_data->domain = NULL;
list_del(&dev_data->list);
- /* Clear DTE and flush the entry */
- amd_iommu_dev_update_dte(dev_data, false);
-
- /* Flush IOTLB and wait for the flushes to finish */
- amd_iommu_domain_flush_all(domain);
-
/* decrease reference counters - needs to happen after the flushes */
domain->dev_iommu[iommu->index] -= 1;
domain->dev_cnt -= 1;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 2e9811bf2a4e..fd11a080380c 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2114,12 +2114,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (ret)
return ret;
- ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
- if (ret) {
- domain_detach_iommu(domain, iommu);
- return ret;
- }
-
info->domain = domain;
spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
@@ -2137,15 +2131,21 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
else
ret = intel_pasid_setup_second_level(iommu, domain, dev, IOMMU_NO_PASID);
- if (ret) {
- device_block_translation(dev);
- return ret;
- }
+ if (ret)
+ goto out_block_translation;
if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
iommu_enable_pci_caps(info);
+ ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
+ if (ret)
+ goto out_block_translation;
+
return 0;
+
+out_block_translation:
+ device_block_translation(dev);
+ return ret;
}
/**
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index c7ddebf312ad..b1f2080be2be 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -15,6 +15,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
+#include <asm/numa.h>
#define EIOINTC_REG_NODEMAP 0x14a0
#define EIOINTC_REG_IPMAP 0x14c0
@@ -339,7 +340,7 @@ static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
int node;
if (cpu_has_flatmode)
- node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
+ node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
else
node = eiointc_priv[nr_pics - 1]->node;
@@ -431,7 +432,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
goto out_free_handle;
if (cpu_has_flatmode)
- node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
+ node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
else
node = acpi_eiointc->node;
acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index e4b33aed1c97..7c4fe7ab4b83 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -28,7 +28,7 @@
#define LIOINTC_INTC_CHIP_START 0x20
-#define LIOINTC_REG_INTC_STATUS (LIOINTC_INTC_CHIP_START + 0x20)
+#define LIOINTC_REG_INTC_STATUS(core) (LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8)
#define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04)
#define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08)
#define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c)
@@ -217,7 +217,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
goto out_free_priv;
for (i = 0; i < LIOINTC_NUM_CORES; i++)
- priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
+ priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS(i);
for (i = 0; i < LIOINTC_NUM_PARENT; i++)
priv->handler[i].parent_int_map = parent_int_map[i];
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 24fcff682b24..ba1be15cfd8e 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -552,12 +552,6 @@ int led_classdev_register_ext(struct device *parent,
led_init_core(led_cdev);
#ifdef CONFIG_LEDS_TRIGGERS
- /*
- * If no default trigger was given and hw_control_trigger is set,
- * make it the default trigger.
- */
- if (!led_cdev->default_trigger && led_cdev->hw_control_trigger)
- led_cdev->default_trigger = led_cdev->hw_control_trigger;
led_trigger_set_default(led_cdev);
#endif
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index 1280696f65f2..e80fb4ebfda6 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -5152,7 +5152,7 @@ struct saa7134_board saa7134_boards[] = {
},
},
[SAA7134_BOARD_AVERMEDIA_STUDIO_507UA] = {
- /* Andy Shevchenko <andy@smile.org.ua> */
+ /* Andy Shevchenko <andy@kernel.org> */
.name = "Avermedia AVerTV Studio 507UA",
.audio_clock = 0x00187de7,
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* Should be MK5 */
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index f2c0f144c0fc..dacd3c96c9f5 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -210,6 +210,7 @@ static const struct regmap_access_table axp313a_volatile_table = {
static const struct regmap_range axp717_writeable_ranges[] = {
regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN),
+ regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE),
regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL),
};
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
index 32af2b14ff34..34c9be437432 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
@@ -69,8 +69,10 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]),
GFP_KERNEL);
- if (!aux_bus->aux_device_wrapper[1])
- return -ENOMEM;
+ if (!aux_bus->aux_device_wrapper[1]) {
+ retval = -ENOMEM;
+ goto err_aux_dev_add_0;
+ }
retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
if (retval < 0)
@@ -111,6 +113,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
err_aux_dev_add_1:
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
+ goto err_aux_dev_add_0;
err_aux_dev_init_1:
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id);
@@ -120,6 +123,7 @@ err_ida_alloc_1:
err_aux_dev_add_0:
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
+ goto err_ret;
err_aux_dev_init_0:
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id);
@@ -127,6 +131,7 @@ err_aux_dev_init_0:
err_ida_alloc_0:
kfree(aux_bus->aux_device_wrapper[0]);
+err_ret:
return retval;
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 79e6f3c1341f..40c3fe26f76d 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -329,7 +329,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
}
if (!mei_cl_is_connected(cl)) {
- cl_err(dev, cl, "is not connected");
+ cl_dbg(dev, cl, "is not connected");
rets = -ENODEV;
goto out;
}
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 7f59dd38c32f..6589635f8ba3 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -385,8 +385,10 @@ static int mei_me_pci_resume(struct device *device)
}
err = mei_restart(dev);
- if (err)
+ if (err) {
+ free_irq(pdev->irq, dev);
return err;
+ }
/* Start timer if stopped in suspend */
schedule_delayed_work(&dev->timer_work, HZ);
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
index b543e6b9f3cf..1ec65d87488a 100644
--- a/drivers/misc/mei/platform-vsc.c
+++ b/drivers/misc/mei/platform-vsc.c
@@ -399,41 +399,32 @@ static void mei_vsc_remove(struct platform_device *pdev)
static int mei_vsc_suspend(struct device *dev)
{
- struct mei_device *mei_dev = dev_get_drvdata(dev);
- struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ struct mei_device *mei_dev;
+ int ret = 0;
- mei_stop(mei_dev);
+ mei_dev = dev_get_drvdata(dev);
+ if (!mei_dev)
+ return -ENODEV;
- mei_disable_interrupts(mei_dev);
+ mutex_lock(&mei_dev->device_lock);
- vsc_tp_free_irq(hw->tp);
+ if (!mei_write_is_idle(mei_dev))
+ ret = -EAGAIN;
- return 0;
+ mutex_unlock(&mei_dev->device_lock);
+
+ return ret;
}
static int mei_vsc_resume(struct device *dev)
{
- struct mei_device *mei_dev = dev_get_drvdata(dev);
- struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
- int ret;
-
- ret = vsc_tp_request_irq(hw->tp);
- if (ret)
- return ret;
-
- ret = mei_restart(mei_dev);
- if (ret)
- goto err_free;
+ struct mei_device *mei_dev;
- /* start timer if stopped in suspend */
- schedule_delayed_work(&mei_dev->timer_work, HZ);
+ mei_dev = dev_get_drvdata(dev);
+ if (!mei_dev)
+ return -ENODEV;
return 0;
-
-err_free:
- vsc_tp_free_irq(hw->tp);
-
- return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c
index ffa4ccd96a10..596a9d695dfc 100644
--- a/drivers/misc/mei/vsc-fw-loader.c
+++ b/drivers/misc/mei/vsc-fw-loader.c
@@ -252,7 +252,7 @@ static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader,
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER };
union acpi_object obj = {
- .type = ACPI_TYPE_INTEGER,
+ .integer.type = ACPI_TYPE_INTEGER,
.integer.value = 1,
};
struct acpi_object_list arg_list = {
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 9a5f75163aca..8ede4ce93271 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -131,10 +131,12 @@ struct moxart_host {
struct dma_async_tx_descriptor *tx_desc;
struct mmc_host *mmc;
struct mmc_request *mrq;
+ struct scatterlist *cur_sg;
struct completion dma_complete;
struct completion pio_complete;
- struct sg_mapping_iter sg_miter;
+ u32 num_sg;
+ u32 data_remain;
u32 data_len;
u32 fifo_width;
u32 timeout;
@@ -146,6 +148,35 @@ struct moxart_host {
bool is_removed;
};
+static inline void moxart_init_sg(struct moxart_host *host,
+ struct mmc_data *data)
+{
+ host->cur_sg = data->sg;
+ host->num_sg = data->sg_len;
+ host->data_remain = host->cur_sg->length;
+
+ if (host->data_remain > host->data_len)
+ host->data_remain = host->data_len;
+}
+
+static inline int moxart_next_sg(struct moxart_host *host)
+{
+ int remain;
+ struct mmc_data *data = host->mrq->cmd->data;
+
+ host->cur_sg++;
+ host->num_sg--;
+
+ if (host->num_sg > 0) {
+ host->data_remain = host->cur_sg->length;
+ remain = host->data_len - data->bytes_xfered;
+ if (remain > 0 && remain < host->data_remain)
+ host->data_remain = remain;
+ }
+
+ return host->num_sg;
+}
+
static int moxart_wait_for_status(struct moxart_host *host,
u32 mask, u32 *status)
{
@@ -278,29 +309,14 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
static void moxart_transfer_pio(struct moxart_host *host)
{
- struct sg_mapping_iter *sgm = &host->sg_miter;
struct mmc_data *data = host->mrq->cmd->data;
u32 *sgp, len = 0, remain, status;
if (host->data_len == data->bytes_xfered)
return;
- /*
- * By updating sgm->consumes this will get a proper pointer into the
- * buffer at any time.
- */
- if (!sg_miter_next(sgm)) {
- /* This shold not happen */
- dev_err(mmc_dev(host->mmc), "ran out of scatterlist prematurely\n");
- data->error = -EINVAL;
- complete(&host->pio_complete);
- return;
- }
- sgp = sgm->addr;
- remain = sgm->length;
- if (remain > host->data_len)
- remain = host->data_len;
- sgm->consumed = 0;
+ sgp = sg_virt(host->cur_sg);
+ remain = host->data_remain;
if (data->flags & MMC_DATA_WRITE) {
while (remain > 0) {
@@ -315,7 +331,6 @@ static void moxart_transfer_pio(struct moxart_host *host)
sgp++;
len += 4;
}
- sgm->consumed += len;
remain -= len;
}
@@ -332,22 +347,22 @@ static void moxart_transfer_pio(struct moxart_host *host)
sgp++;
len += 4;
}
- sgm->consumed += len;
remain -= len;
}
}
- data->bytes_xfered += sgm->consumed;
- if (host->data_len == data->bytes_xfered) {
+ data->bytes_xfered += host->data_remain - remain;
+ host->data_remain = remain;
+
+ if (host->data_len != data->bytes_xfered)
+ moxart_next_sg(host);
+ else
complete(&host->pio_complete);
- return;
- }
}
static void moxart_prepare_data(struct moxart_host *host)
{
struct mmc_data *data = host->mrq->cmd->data;
- unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */
u32 datactrl;
int blksz_bits;
@@ -358,19 +373,15 @@ static void moxart_prepare_data(struct moxart_host *host)
blksz_bits = ffs(data->blksz) - 1;
BUG_ON(1 << blksz_bits != data->blksz);
+ moxart_init_sg(host, data);
+
datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);
- if (data->flags & MMC_DATA_WRITE) {
- flags |= SG_MITER_FROM_SG;
+ if (data->flags & MMC_DATA_WRITE)
datactrl |= DCR_DATA_WRITE;
- } else {
- flags |= SG_MITER_TO_SG;
- }
if (moxart_use_dma(host))
datactrl |= DCR_DMA_EN;
- else
- sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
@@ -443,9 +454,6 @@ static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
request_done:
- if (!moxart_use_dma(host))
- sg_miter_stop(&host->sg_miter);
-
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
}
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 9053526fa212..150fb477b7cc 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -24,6 +24,7 @@
#define BRCMSTB_MATCH_FLAGS_NO_64BIT BIT(0)
#define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT BIT(1)
#define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE BIT(2)
+#define BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY BIT(4)
#define BRCMSTB_PRIV_FLAGS_HAS_CQE BIT(0)
#define BRCMSTB_PRIV_FLAGS_GATE_CLOCK BIT(1)
@@ -384,6 +385,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ if (!(match_priv->flags & BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY))
+ host->mmc_host_ops.card_busy = NULL;
+
/* Change the base clock frequency if the DT property exists */
if (device_property_read_u32(&pdev->dev, "clock-frequency",
&priv->base_freq_hz) != 0)
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index ef89ec382bfe..23e6ba70144c 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1326,7 +1326,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
if (ret)
- return ret;
+ goto fail;
/*
* Turn PMOS on [bit 0], set over current detection to 2.4 V
@@ -1337,7 +1337,10 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
else
scratch &= ~0x47;
- return pci_write_config_byte(chip->pdev, 0xAE, scratch);
+ ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
+
+fail:
+ return pcibios_err_to_errno(ret);
}
static int jmicron_probe(struct sdhci_pci_chip *chip)
@@ -2202,7 +2205,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
if (ret)
- return ret;
+ return pcibios_err_to_errno(ret);
slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
@@ -2211,7 +2214,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
if (ret)
- return ret;
+ return pcibios_err_to_errno(ret);
first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index d4a02184784a..058bef1c7e41 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -823,7 +823,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -834,7 +834,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_CLKREQ, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x20;
pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
@@ -843,7 +843,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
*/
ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x01;
pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
@@ -856,7 +856,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_INF_MOD, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x08;
pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
@@ -864,7 +864,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@@ -875,7 +875,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -886,7 +886,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG0,
&scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 = ((scratch_32 & 0xFF000000) >> 24);
/* Check Whether subId is 0x11 or 0x12 */
@@ -898,7 +898,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG4,
&scratch_32);
if (ret)
- return ret;
+ goto read_fail;
/* Enable Base Clk setting change */
scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET;
@@ -921,7 +921,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CLK_SETTING, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 &= ~(0xFF00);
scratch_32 |= 0x07E0C800;
@@ -931,14 +931,14 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CLKREQ, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 |= 0x3;
pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32);
ret = pci_read_config_dword(chip->pdev,
O2_SD_PLL_SETTING, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 &= ~(0x1F3F070E);
scratch_32 |= 0x18270106;
@@ -949,7 +949,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CAP_REG2, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 &= ~(0xE0);
pci_write_config_dword(chip->pdev,
O2_SD_CAP_REG2, scratch_32);
@@ -961,7 +961,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@@ -971,7 +971,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -979,7 +979,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_PLL_SETTING, &scratch_32);
if (ret)
- return ret;
+ goto read_fail;
if ((scratch_32 & 0xff000000) == 0x01000000) {
scratch_32 &= 0x0000FFFF;
@@ -998,7 +998,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG4,
&scratch_32);
if (ret)
- return ret;
+ goto read_fail;
scratch_32 |= (1 << 22);
pci_write_config_dword(chip->pdev,
O2_SD_FUNC_REG4, scratch_32);
@@ -1017,7 +1017,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@@ -1028,7 +1028,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
/* UnLock WP */
ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@@ -1057,13 +1057,16 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
/* Lock WP */
ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
if (ret)
- return ret;
+ goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
}
return 0;
+
+read_fail:
+ return pcibios_err_to_errno(ret);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 746f4cf7ab03..112584aa0772 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2515,26 +2515,29 @@ EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
static int sdhci_check_ro(struct sdhci_host *host)
{
- unsigned long flags;
+ bool allow_invert = false;
int is_readonly;
- spin_lock_irqsave(&host->lock, flags);
-
- if (host->flags & SDHCI_DEVICE_DEAD)
+ if (host->flags & SDHCI_DEVICE_DEAD) {
is_readonly = 0;
- else if (host->ops->get_ro)
+ } else if (host->ops->get_ro) {
is_readonly = host->ops->get_ro(host);
- else if (mmc_can_gpio_ro(host->mmc))
+ } else if (mmc_can_gpio_ro(host->mmc)) {
is_readonly = mmc_gpio_get_ro(host->mmc);
- else
+ /* Do not invert twice */
+ allow_invert = !(host->mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
+ } else {
is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
& SDHCI_WRITE_PROTECT);
+ allow_invert = true;
+ }
- spin_unlock_irqrestore(&host->lock, flags);
+ if (is_readonly >= 0 &&
+ allow_invert &&
+ (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT))
+ is_readonly = !is_readonly;
- /* This quirk needs to be replaced by a callback-function later */
- return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
- !is_readonly : is_readonly;
+ return is_readonly;
}
#define SAMPLE_COUNT 5
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3c3fcce4acd4..d19aabf5d4fb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5773,6 +5773,9 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
if (real_dev) {
ret = ethtool_get_ts_info_by_layer(real_dev, info);
} else {
+ info->phc_index = -1;
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
/* Check if all slaves support software tx timestamping */
rcu_read_lock();
bond_for_each_slave_rcu(bond, slave, iter) {
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 1d9057dc44f2..bf1589aef1fc 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -1618,11 +1618,20 @@ static int mcp251xfd_open(struct net_device *ndev)
clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
can_rx_offload_enable(&priv->offload);
+ priv->wq = alloc_ordered_workqueue("%s-mcp251xfd_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ dev_name(&spi->dev));
+ if (!priv->wq) {
+ err = -ENOMEM;
+ goto out_can_rx_offload_disable;
+ }
+ INIT_WORK(&priv->tx_work, mcp251xfd_tx_obj_write_sync);
+
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
IRQF_SHARED | IRQF_ONESHOT,
dev_name(&spi->dev), priv);
if (err)
- goto out_can_rx_offload_disable;
+ goto out_destroy_workqueue;
err = mcp251xfd_chip_interrupts_enable(priv);
if (err)
@@ -1634,6 +1643,8 @@ static int mcp251xfd_open(struct net_device *ndev)
out_free_irq:
free_irq(spi->irq, priv);
+ out_destroy_workqueue:
+ destroy_workqueue(priv->wq);
out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
@@ -1661,6 +1672,7 @@ static int mcp251xfd_stop(struct net_device *ndev)
hrtimer_cancel(&priv->tx_irq_timer);
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
+ destroy_workqueue(priv->wq);
can_rx_offload_disable(&priv->offload);
mcp251xfd_timestamp_stop(priv);
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
index 160528d3cc26..b1de8052a45c 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
@@ -131,6 +131,39 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
tx_obj->xfer[0].len = len;
}
+static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_ring *tx_ring,
+ int err)
+{
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int frame_len = 0;
+ u8 tx_head;
+
+ tx_ring->head--;
+ stats->tx_dropped++;
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+ can_free_echo_skb(ndev, tx_head, &frame_len);
+ netdev_completed_queue(ndev, 1, frame_len);
+ netif_wake_queue(ndev);
+
+ if (net_ratelimit())
+ netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+}
+
+void mcp251xfd_tx_obj_write_sync(struct work_struct *work)
+{
+ struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv,
+ tx_work);
+ struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj;
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ int err;
+
+ err = spi_sync(priv->spi, &tx_obj->msg);
+ if (err)
+ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
+}
+
static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
struct mcp251xfd_tx_obj *tx_obj)
{
@@ -162,6 +195,11 @@ static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
return false;
}
+static bool mcp251xfd_work_busy(struct work_struct *work)
+{
+ return work_busy(work);
+}
+
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -175,7 +213,8 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
- if (mcp251xfd_tx_busy(priv, tx_ring))
+ if (mcp251xfd_tx_busy(priv, tx_ring) ||
+ mcp251xfd_work_busy(&priv->tx_work))
return NETDEV_TX_BUSY;
tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
@@ -193,13 +232,13 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
netdev_sent_queue(priv->ndev, frame_len);
err = mcp251xfd_tx_obj_write(priv, tx_obj);
- if (err)
- goto out_err;
-
- return NETDEV_TX_OK;
-
- out_err:
- netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+ if (err == -EBUSY) {
+ netif_stop_queue(ndev);
+ priv->tx_work_obj = tx_obj;
+ queue_work(priv->wq, &priv->tx_work);
+ } else if (err) {
+ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
+ }
return NETDEV_TX_OK;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 24510b3b8020..b35bfebd23f2 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -633,6 +633,10 @@ struct mcp251xfd_priv {
struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
+ struct workqueue_struct *wq;
+ struct work_struct tx_work;
+ struct mcp251xfd_tx_obj *tx_work_obj;
+
DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
u8 rx_ring_num;
@@ -952,6 +956,7 @@ void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+void mcp251xfd_tx_obj_write_sync(struct work_struct *work);
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev);
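
The mcp251xfd hunks above defer a busy TX attempt to an ordered, freezable workqueue: ndo_open allocates the queue with alloc_ordered_workqueue() and ndo_stop destroys it, while start_xmit parks the TX object and stops the netdev queue when the asynchronous SPI write returns -EBUSY, so the work item can retry with a blocking spi_sync(). A minimal sketch of that hand-off, assuming hypothetical foo_* names for everything the driver itself provides (the workqueue/netdev calls are the real kernel API):

/* Sketch only: foo_* symbols, including the two foo_hw_xmit_*() helpers,
 * are illustrative placeholders, not mcp251xfd code.
 */
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct net_device *ndev;
	struct workqueue_struct *wq;	/* ordered: one retry in flight */
	struct work_struct tx_work;
	struct sk_buff *tx_work_skb;
};

int foo_hw_xmit_async(struct foo_priv *priv, struct sk_buff *skb); /* may return -EBUSY */
int foo_hw_xmit_sync(struct foo_priv *priv, struct sk_buff *skb);  /* may sleep */

static void foo_tx_retry_work(struct work_struct *work)
{
	struct foo_priv *priv = container_of(work, struct foo_priv, tx_work);

	/* Process context: a blocking (spi_sync()-style) retry is allowed. */
	if (foo_hw_xmit_sync(priv, priv->tx_work_skb))
		priv->ndev->stats.tx_dropped++;
	netif_wake_queue(priv->ndev);
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);
	int err = foo_hw_xmit_async(priv, skb);

	if (err == -EBUSY) {
		/* Controller busy: stop the queue, hand the frame to the
		 * work item and let it wake the queue when done.
		 */
		netif_stop_queue(ndev);
		priv->tx_work_skb = skb;
		queue_work(priv->wq, &priv->tx_work);
	}
	return NETDEV_TX_OK;
}

Because the workqueue is ordered, at most one retry is pending at a time, which is why a single parked pointer (tx_work_obj in the driver, tx_work_skb in this sketch) is sufficient.
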
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 8faf8a462c05..7292c81fc0cd 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -294,7 +294,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
}
usb_free_urb(urb);
- return 0;
+ return err;
}
int kvaser_usb_can_rx_over_error(struct net_device *netdev)
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index f8ad7833f5d9..425e20daf1e9 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -355,10 +355,8 @@ int ksz9477_reset_switch(struct ksz_device *dev)
SPI_AUTO_EDGE_DETECTION, 0);
/* default configuration */
- ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
- data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
- SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
- ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+ ksz_write8(dev, REG_SW_LUE_CTRL_1,
+ SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);
/* disable interrupts */
ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
@@ -429,6 +427,57 @@ void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
mutex_unlock(&p->mib.cnt_mutex);
}
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
+{
+ u32 pmavbc;
+ u8 status;
+ u16 pqm;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
+ if (ret)
+ return ret;
+ if (!(FIELD_GET(PORT_INTF_SPEED_MASK, status) == PORT_INTF_SPEED_NONE) &&
+ !(status & PORT_INTF_FULL_DUPLEX)) {
+ /* Errata DS80000754 recommends monitoring potential faults in
+ * half-duplex mode. The switch might not be able to communicate anymore
+ * in these states.
+	 * If you see this message, please read the errata sheet for more information:
+	 * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
+	 * To work around this issue, half-duplex mode should be avoided.
+ * A software reset could be implemented to recover from this state.
+ */
+ dev_warn_once(dev->dev,
+ "Half-duplex detected on port %d, transmission halt may occur\n",
+ port);
+ if (tx_late_col != 0) {
+ /* Transmission halt with late collisions */
+ dev_crit_once(dev->dev,
+ "TX late collisions detected, transmission may be halted on port %d\n",
+ port);
+ }
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &status);
+ if (ret)
+ return ret;
+ if (status & SW_VLAN_ENABLE) {
+ ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
+ if (ret)
+ return ret;
+ ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
+ if (ret)
+ return ret;
+ if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
+ (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
+ /* Transmission halt with Half-Duplex and VLAN */
+ dev_crit_once(dev->dev,
+ "resources out of limits, transmission may be halted\n");
+ }
+ }
+ }
+ return ret;
+}
+
void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
@@ -1299,6 +1348,10 @@ int ksz9477_setup(struct dsa_switch *ds)
/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
+ /* Use collision based back pressure mode. */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE,
+ SW_BACK_PRESSURE_COLLISION);
+
/* Now we can configure default MTU value */
ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index ce1e656b800b..239a281da10b 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -36,6 +36,8 @@ int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
bool ingress, struct netlink_ext_ack *extack);
void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col);
void ksz9477_get_caps(struct ksz_device *dev, int port,
struct phylink_config *config);
int ksz9477_fdb_dump(struct ksz_device *dev, int port,
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index f3a205ee483f..d5354c600ea1 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -247,6 +247,7 @@
#define REG_SW_MAC_CTRL_1 0x0331
#define SW_BACK_PRESSURE BIT(5)
+#define SW_BACK_PRESSURE_COLLISION 0
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
#define SW_JUMBO_PACKET BIT(2)
@@ -842,8 +843,8 @@
#define REG_PORT_STATUS_0 0x0030
-#define PORT_INTF_SPEED_M 0x3
-#define PORT_INTF_SPEED_S 3
+#define PORT_INTF_SPEED_MASK GENMASK(4, 3)
+#define PORT_INTF_SPEED_NONE GENMASK(1, 0)
#define PORT_INTF_FULL_DUPLEX BIT(2)
#define PORT_TX_FLOW_CTRL BIT(1)
#define PORT_RX_FLOW_CTRL BIT(0)
@@ -1167,6 +1168,11 @@
#define PORT_RMII_CLK_SEL BIT(7)
#define PORT_MII_SEL_EDGE BIT(5)
+#define REG_PMAVBC 0x03AC
+
+#define PMAVBC_MASK GENMASK(26, 16)
+#define PMAVBC_MIN 0x580
+
/* 4 - MAC */
#define REG_PORT_MAC_CTRL_0 0x0400
@@ -1494,6 +1500,7 @@
#define PORT_QM_TX_CNT_USED_S 0
#define PORT_QM_TX_CNT_M (BIT(11) - 1)
+#define PORT_QM_TX_CNT_MAX 0x200
#define REG_PORT_QM_TX_CNT_1__4 0x0A14
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 2818e24e2a51..0580b2fee21c 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1382,6 +1382,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1416,6 +1417,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_ipms = 8,
.ops = &ksz9477_dev_ops,
.phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1450,6 +1452,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_ipms = 8,
.ops = &ksz9477_dev_ops,
.phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1540,6 +1543,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1820,6 +1824,7 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
struct rtnl_link_stats64 *stats;
struct ksz_stats_raw *raw;
struct ksz_port_mib *mib;
+ int ret;
mib = &dev->ports[port].mib;
stats = &mib->stats64;
@@ -1861,6 +1866,12 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
pstats->rx_pause_frames = raw->rx_pause;
spin_unlock(&mib->stats64_lock);
+
+ if (dev->info->phy_errata_9477) {
+ ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col);
+ if (ret)
+ dev_err(dev->dev, "Failed to monitor transmission halt\n");
+ }
}
void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port)
@@ -2185,7 +2196,7 @@ static void ksz_irq_bus_sync_unlock(struct irq_data *d)
struct ksz_device *dev = kirq->dev;
int ret;
- ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
+ ret = ksz_write8(dev, kirq->reg_mask, kirq->masked);
if (ret)
dev_err(dev->dev, "failed to change IRQ mask\n");
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index c784fd23a993..ee7db46e469d 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -66,6 +66,7 @@ struct ksz_chip_data {
bool tc_cbs_supported;
const struct ksz_dev_ops *ops;
const struct phylink_mac_ops *phylink_mac_ops;
+ bool phy_errata_9477;
bool ksz87xx_eee_link_erratum;
const struct ksz_mib_names *mib_names;
int mib_cnt;
diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
index 811ebeeff4ed..43ac68052baf 100644
--- a/drivers/net/dsa/qca/qca8k-leds.c
+++ b/drivers/net/dsa/qca/qca8k-leds.c
@@ -431,8 +431,11 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d",
priv->internal_mdio_bus->id,
port_num);
- if (!init_data.devicename)
+ if (!init_data.devicename) {
+ fwnode_handle_put(led);
+ fwnode_handle_put(leds);
return -ENOMEM;
+ }
ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
if (ret)
@@ -441,6 +444,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
kfree(init_data.devicename);
}
+ fwnode_handle_put(leds);
return 0;
}
@@ -471,9 +475,13 @@ qca8k_setup_led_ctrl(struct qca8k_priv *priv)
* the correct port for LED setup.
*/
ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
- if (ret)
+ if (ret) {
+ fwnode_handle_put(port);
+ fwnode_handle_put(ports);
return ret;
+ }
}
+ fwnode_handle_put(ports);
return 0;
}
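
The qca8k LED fix above follows the standard fwnode reference-counting rule: every handle obtained while walking child nodes must be dropped on every exit path, including early error returns, or the reference leaks. A generic sketch of the idiom, assuming a hypothetical foo_setup_one() helper (the property.h calls are the real API):

#include <linux/property.h>

static int foo_setup_one(struct device *dev, struct fwnode_handle *led);

static int foo_parse_leds(struct device *dev)
{
	struct fwnode_handle *leds, *led;
	int ret;

	leds = device_get_named_child_node(dev, "leds");
	if (!leds)
		return 0;

	fwnode_for_each_child_node(leds, led) {
		ret = foo_setup_one(dev, led);
		if (ret) {
			/* The iterator holds a reference on 'led'; drop it
			 * and the parent handle before bailing out.
			 */
			fwnode_handle_put(led);
			fwnode_handle_put(leds);
			return ret;
		}
	}

	fwnode_handle_put(leds);
	return 0;
}
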
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c437ca1c0fd3..a6d69a45fa01 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -732,9 +732,6 @@ tx_done:
return NETDEV_TX_OK;
tx_dma_error:
- if (BNXT_TX_PTP_IS_SET(lflags))
- atomic_inc(&bp->ptp_cfg->tx_avail);
-
last_frag = i;
/* start back at beginning and unmap skb */
@@ -756,6 +753,8 @@ tx_dma_error:
tx_free:
dev_kfree_skb_any(skb);
tx_kick_pending:
+ if (BNXT_TX_PTP_IS_SET(lflags))
+ atomic_inc(&bp->ptp_cfg->tx_avail);
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
@@ -8996,6 +8995,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
}
+ bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
hwrm_func_qcaps_exit:
hwrm_req_drop(bp, req);
@@ -15363,6 +15363,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->priv_flags |= IFF_UNICAST_FLT;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+ if (bp->tso_max_segs)
+ netif_set_tso_max_segs(dev, bp->tso_max_segs);
dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_RX_SG;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 656ab81c0272..9cf0acfa04e5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1434,6 +1434,57 @@ struct bnxt_l2_filter {
atomic_t refcnt;
};
+/* Compat version of hwrm_port_phy_qcfg_output capped at 96 bytes. The
+ * first 95 bytes are identical to hwrm_port_phy_qcfg_output in bnxt_hsi.h.
+ * The last valid byte in the compat version is different.
+ */
+struct hwrm_port_phy_qcfg_output_compat {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ u8 active_fec_signal_mode;
+ __le16 link_speed;
+ u8 duplex_cfg;
+ u8 pause;
+ __le16 support_speeds;
+ __le16 force_link_speed;
+ u8 auto_mode;
+ u8 auto_pause;
+ __le16 auto_link_speed;
+ __le16 auto_link_speed_mask;
+ u8 wirespeed;
+ u8 lpbk;
+ u8 force_pause;
+ u8 module_status;
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ u8 media_type;
+ u8 xcvr_pkg_type;
+ u8 eee_config_phy_addr;
+ u8 parallel_detect;
+ __le16 link_partner_adv_speeds;
+ u8 link_partner_adv_auto_mode;
+ u8 link_partner_adv_pause;
+ __le16 adv_eee_link_speed_mask;
+ __le16 link_partner_adv_eee_link_speed_mask;
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ __le16 fec_cfg;
+ u8 duplex_state;
+ u8 option_flags;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ __le16 force_pam4_link_speed;
+ __le16 auto_pam4_link_speed_mask;
+ u8 link_partner_pam4_adv_speeds;
+ u8 valid;
+};
+
struct bnxt_link_info {
u8 phy_type;
u8 media_type;
@@ -2267,6 +2318,7 @@ struct bnxt {
u8 rss_hash_key_updated:1;
u16 max_mtu;
+ u16 tso_max_segs;
u8 max_tc;
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 06ea86c80be1..f219709f9563 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2023 Broadcom Inc.
+ * Copyright (c) 2018-2024 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -500,7 +500,11 @@ struct cmd_nums {
#define HWRM_TFC_IF_TBL_GET 0x399UL
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
#define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x39cUL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x39dUL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x39eUL
#define HWRM_SV 0x400UL
+ #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
@@ -609,8 +613,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 39
-#define HWRM_VERSION_STR "1.10.3.39"
+#define HWRM_VERSION_RSVD 44
+#define HWRM_VERSION_STR "1.10.3.44"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -664,6 +668,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -843,7 +848,9 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
#define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
#define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1326,13 +1333,13 @@ struct hwrm_async_event_cmpl_error_report_base {
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
@@ -1814,6 +1821,9 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1828,7 +1838,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
u8 device_serial_number[8];
__le16 ctxs_per_partition;
- u8 unused_2[2];
+ __le16 max_tso_segs;
__le32 roce_vf_max_av;
__le32 roce_vf_max_cq;
__le32 roce_vf_max_mrw;
@@ -2449,6 +2459,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
#define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
#define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -3660,22 +3671,24 @@ struct hwrm_func_backing_store_cfg_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
@@ -3772,18 +3785,20 @@ struct hwrm_func_backing_store_qcfg_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
@@ -3876,22 +3891,24 @@ struct hwrm_func_backing_store_qcaps_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
@@ -3911,22 +3928,24 @@ struct hwrm_func_backing_store_qcaps_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
@@ -4202,7 +4221,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
__le16 auto_link_speeds2_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
@@ -4217,6 +4237,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
u8 unused_2[6];
};
@@ -4292,6 +4313,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
u8 duplex_cfg;
@@ -4451,7 +4473,13 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -5049,33 +5077,43 @@ struct hwrm_port_qstats_ext_output {
u8 valid;
};
-/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
struct hwrm_port_lpbk_qstats_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ __le16 lpbk_stat_size;
+ u8 flags;
+ #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 lpbk_stat_host_addr;
};
-/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
struct hwrm_port_lpbk_qstats_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
+ __le16 lpbk_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* port_lpbk_stats (size:640b/80B) */
+struct port_lpbk_stats {
__le64 lpbk_ucast_frames;
__le64 lpbk_mcast_frames;
__le64 lpbk_bcast_frames;
__le64 lpbk_ucast_bytes;
__le64 lpbk_mcast_bytes;
__le64 lpbk_bcast_bytes;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
- __le64 rx_stat_discard;
- __le64 rx_stat_error;
- u8 unused_0[7];
- u8 valid;
+ __le64 lpbk_tx_discards;
+ __le64 lpbk_tx_errors;
+ __le64 lpbk_rx_discards;
+ __le64 lpbk_rx_errors;
};
/* hwrm_port_ecn_qstats_input (size:256b/32B) */
@@ -5140,13 +5178,15 @@ struct hwrm_port_clr_stats_output {
u8 valid;
};
-/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
struct hwrm_port_lpbk_clr_stats_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
@@ -5287,10 +5327,11 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
__le16 flags2;
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
u8 internal_port_cnt;
u8 unused_0;
__le16 supported_speeds2_force_mode;
@@ -7443,17 +7484,17 @@ struct hwrm_cfa_l2_filter_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
#define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
#define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
#define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
@@ -8520,17 +8561,17 @@ struct hwrm_tunnel_dst_port_query_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
@@ -8576,17 +8617,17 @@ struct hwrm_tunnel_dst_port_alloc_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
@@ -8635,17 +8676,17 @@ struct hwrm_tunnel_dst_port_free_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
@@ -9109,6 +9150,7 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
#define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
+ #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
@@ -9758,6 +9800,9 @@ struct hwrm_dbg_coredump_initiate_input {
__le16 instance;
__le16 unused_0;
u8 seg_flags;
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
u8 unused_1[7];
};
@@ -10433,13 +10478,13 @@ struct hwrm_selftest_irq_output {
/* dbc_dbc (size:64b/8B) */
struct dbc_dbc {
- u32 index;
+ __le32 index;
#define DBC_DBC_INDEX_MASK 0xffffffUL
#define DBC_DBC_INDEX_SFT 0
#define DBC_DBC_EPOCH 0x1000000UL
#define DBC_DBC_TOGGLE_MASK 0x6000000UL
#define DBC_DBC_TOGGLE_SFT 25
- u32 type_path_xid;
+ __le32 type_path_xid;
#define DBC_DBC_XID_MASK 0xfffffUL
#define DBC_DBC_XID_SFT 0
#define DBC_DBC_PATH_MASK 0x3000000UL
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index 1df3d56cc4b5..d2fd2d04ed47 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -680,7 +680,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
req_type);
else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
- req_type, token->seq_id, rc);
+ req_type, le16_to_cpu(ctx->req->seq_id), rc);
rc = __hwrm_to_stderr(rc);
exit:
if (token)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 175192ebaa77..22898d3d088b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -950,8 +950,11 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_fwd_resp_input *req;
int rc;
- if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
+ if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
+ netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
+ msg_size);
return -EINVAL;
+ }
rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
if (!rc) {
@@ -1085,7 +1088,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
+ struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
@@ -1096,6 +1099,11 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
mutex_unlock(&bp->link_lock);
phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+ /* New SPEEDS2 fields are beyond the legacy structure, so
+ * clear the SPEEDS2_SUPPORTED flag.
+ */
+ phy_qcfg_resp.option_flags &=
+ ~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
phy_qcfg_resp.valid = 1;
if (vf->flags & BNXT_VF_LINK_UP) {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index 96c6ea12279f..989b4ddae342 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct,
pg_info->page_offset;
memcpy(skb->data, va, MIN_SKB_SIZE);
skb_put(skb, MIN_SKB_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset + MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
}
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- pg_info->page,
- pg_info->page_offset + MIN_SKB_SIZE,
- len - MIN_SKB_SIZE,
- LIO_RXBUFFER_SZ);
} else {
struct octeon_skb_page_info *pg_info =
((struct octeon_skb_page_info *)(skb->cb));
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index c1c912de59c7..1154c1d8f66f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -647,11 +647,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}
-static void gve_rx_free_skb(struct gve_rx_ring *rx)
+static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
{
if (!rx->ctx.skb_head)
return;
+ if (rx->ctx.skb_head == napi->skb)
+ napi->skb = NULL;
dev_kfree_skb_any(rx->ctx.skb_head);
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
@@ -950,7 +952,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
if (err < 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
if (err == -ENOMEM)
rx->rx_skb_alloc_fail++;
@@ -993,7 +995,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* gve_rx_complete_skb() will consume skb if successful */
if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
rx->rx_desc_err_dropped_pkt++;
u64_stats_update_end(&rx->statss);
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index fe1b26a4d736..0b3cca3fc792 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -555,28 +555,18 @@ static int gve_prep_tso(struct sk_buff *skb)
if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
return -1;
+ if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+ return -EINVAL;
+
/* Needed because we will modify header. */
err = skb_cow_head(skb, 0);
if (err < 0)
return err;
tcp = tcp_hdr(skb);
-
- /* Remove payload length from checksum. */
paylen = skb->len - skb_transport_offset(skb);
-
- switch (skb_shinfo(skb)->gso_type) {
- case SKB_GSO_TCPV4:
- case SKB_GSO_TCPV6:
- csum_replace_by_diff(&tcp->check,
- (__force __wsum)htonl(paylen));
-
- /* Compute length of segmentation header. */
- header_len = skb_tcp_all_headers(skb);
- break;
- default:
- return -EINVAL;
- }
+ csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
+ header_len = skb_tcp_all_headers(skb);
if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index ff71fb1eced9..a5fc0209d628 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3535,6 +3535,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
ret = hns3_alloc_and_attach_buffer(ring, i);
if (ret)
goto out_buffer_fail;
+
+ if (!(i % HNS3_RESCHED_BD_NUM))
+ cond_resched();
}
return 0;
@@ -5107,6 +5110,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
}
u64_stats_init(&priv->ring[i].syncp);
+ cond_resched();
}
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index acd756b0c7c9..d36c4ed16d8d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -214,6 +214,8 @@ enum hns3_nic_state {
#define HNS3_CQ_MODE_EQE 1U
#define HNS3_CQ_MODE_CQE 0U
+#define HNS3_RESCHED_BD_NUM 1024
+
enum hns3_pkt_l2t_type {
HNS3_L2_TYPE_UNICAST,
HNS3_L2_TYPE_MULTICAST,
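
The hns3 hunks above add periodic cond_resched() calls to long ring-buffer init loops (every HNS3_RESCHED_BD_NUM descriptors, and once per ring) so that initializing very large rings cannot monopolize the CPU and trip soft-lockup detection. The same pattern in generic form, with hypothetical foo_* names:

#include <linux/sched.h>

#define FOO_RESCHED_BATCH	1024	/* mirrors HNS3_RESCHED_BD_NUM above */

struct foo_ring;
int foo_alloc_one_buffer(struct foo_ring *ring, int i);	/* hypothetical */

static int foo_alloc_ring_buffers(struct foo_ring *ring, int desc_num)
{
	int i, ret;

	for (i = 0; i < desc_num; i++) {
		ret = foo_alloc_one_buffer(ring, i);
		if (ret)
			return ret;

		/* Yield periodically so huge rings don't hog the CPU. */
		if (!(i % FOO_RESCHED_BATCH))
			cond_resched();
	}
	return 0;
}
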
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 43cc6ee4d87d..82574ce0194f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3086,9 +3086,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev)
static void hclge_update_link_status(struct hclge_dev *hdev)
{
- struct hnae3_handle *rhandle = &hdev->vport[0].roce;
struct hnae3_handle *handle = &hdev->vport[0].nic;
- struct hnae3_client *rclient = hdev->roce_client;
struct hnae3_client *client = hdev->nic_client;
int state;
int ret;
@@ -3112,8 +3110,15 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
client->ops->link_status_change(handle, state);
hclge_config_mac_tnl_int(hdev, state);
- if (rclient && rclient->ops->link_status_change)
- rclient->ops->link_status_change(rhandle, state);
+
+ if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
+ struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+ struct hnae3_client *rclient = hdev->roce_client;
+
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle,
+ state);
+ }
hclge_push_link_status(hdev);
}
@@ -11319,6 +11324,12 @@ clear_roce:
return ret;
}
+static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
+{
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+}
+
static void hclge_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -11327,7 +11338,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (hdev->roce_client) {
clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
- while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ while (hclge_uninit_need_wait(hdev))
msleep(HCLGE_WAIT_RESET_DONE);
hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5e9a93bdb518..23ebeb143987 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2482,6 +2482,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
tx_buff = &tx_pool->tx_buff[bufidx];
+
+	/* Sanity-check our free map to make sure it points to an index
+	 * that is not occupied by another skb. If skb memory is not
+	 * freed, congestion control kicks in and halts TX.
+	 */
+ if (unlikely(tx_buff->skb)) {
+ dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n",
+ skb_is_gso(skb) ? "tso_pool" : "tx_pool",
+ queue_num, bufidx);
+ dev_kfree_skb_any(tx_buff->skb);
+ }
+
tx_buff->skb = skb;
tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
@@ -4061,6 +4073,12 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
adapter->num_active_tx_scrqs = 0;
}
+	/* Clean any remaining outstanding SKBs;
+	 * we freed the IRQ, so we won't be
+	 * hearing from them.
+	 */
+ clean_tx_pools(adapter);
+
if (adapter->rx_scrq) {
for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
if (!adapter->rx_scrq[i])
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index ce5034ed2b24..f182179529b7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1339,6 +1339,7 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
for (i = 0; i < count; i++) {
bool last = false;
+ int try_cnt = 0;
int status;
bh = (struct ice_buf_hdr *)(bufs + start + i);
@@ -1346,8 +1347,26 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
if (indicate_last)
last = ice_is_last_download_buffer(bh, i, count);
- status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
- &offset, &info, NULL);
+ while (1) {
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE,
+ last, &offset, &info,
+ NULL);
+ if (hw->adminq.sq_last_status != ICE_AQ_RC_ENOSEC &&
+ hw->adminq.sq_last_status != ICE_AQ_RC_EBADSIG)
+ break;
+
+ try_cnt++;
+
+ if (try_cnt == 5)
+ break;
+
+ msleep(20);
+ }
+
+ if (try_cnt)
+ dev_dbg(ice_hw_to_dev(hw),
+ "ice_aq_download_pkg number of retries: %d\n",
+ try_cnt);
/* Save AQ status from download package */
if (status) {
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 1b61ca3a6eb6..55a42aad92a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -805,6 +805,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_200GB:
+ speed = "200 G";
+ break;
case ICE_AQ_LINK_SPEED_100GB:
speed = "100 G";
break;
@@ -4136,7 +4139,7 @@ bool ice_is_wol_supported(struct ice_hw *hw)
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
{
struct ice_pf *pf = vsi->back;
- int err = 0, timeout = 50;
+ int i, err = 0, timeout = 50;
if (!new_rx && !new_tx)
return -EINVAL;
@@ -4162,6 +4165,14 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
ice_vsi_close(vsi);
ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+
+ ice_for_each_traffic_class(i) {
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ netdev_set_tc_queue(vsi->netdev,
+ vsi->tc_cfg.tc_info[i].netdev_tc,
+ vsi->tc_cfg.tc_info[i].qcount_tx,
+ vsi->tc_cfg.tc_info[i].qoffset);
+ }
ice_pf_dcb_recfg(pf, locked);
ice_vsi_open(vsi);
done:
@@ -5564,7 +5575,7 @@ static int ice_suspend(struct device *dev)
*/
disabled = ice_service_task_stop(pf);
- ice_unplug_aux_dev(pf);
+ ice_deinit_rdma(pf);
/* Already suspended?, then there is nothing to do */
if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
@@ -5644,6 +5655,11 @@ static int ice_resume(struct device *dev)
if (ret)
dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+ ret = ice_init_rdma(pf);
+ if (ret)
+ dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
+ ret);
+
clear_bit(ICE_DOWN, pf->state);
/* Now perform PF reset and rebuild */
reset_type = ICE_RESET_PFR;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 94d6670d0901..1191031b2a43 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1899,7 +1899,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
- lkup_type == ICE_SW_LKUP_DFLT) {
+ lkup_type == ICE_SW_LKUP_DFLT ||
+ lkup_type == ICE_SW_LKUP_LAST) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
if (opc == ice_aqc_opc_alloc_res)
@@ -2922,7 +2923,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
- lkup_type == ICE_SW_LKUP_DFLT)
+ lkup_type == ICE_SW_LKUP_DFLT ||
+ lkup_type == ICE_SW_LKUP_LAST)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 305e05294a26..87b655b839c1 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7032,6 +7032,8 @@ static int igc_probe(struct pci_dev *pdev,
device_set_wakeup_enable(&adapter->pdev->dev,
adapter->flags & IGC_FLAG_WOL_SUPPORTED);
+ igc_ptp_init(adapter);
+
igc_tsn_clear_schedule(adapter);
/* reset the hardware with the new settings */
@@ -7053,9 +7055,6 @@ static int igc_probe(struct pci_dev *pdev,
/* Check if Media Autosense is enabled */
adapter->ei = *ei;
- /* do hw tstamp init after resetting */
- igc_ptp_init(adapter);
-
/* print pcie link status and MAC address */
pcie_print_link_status(pdev);
netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index e91486c48de3..9adf4301c9b1 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4014,7 +4014,10 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
}
}
- skb = build_skb(data, frag_size);
+ if (frag_size)
+ skb = build_skb(data, frag_size);
+ else
+ skb = slab_build_skb(data);
if (!skb) {
netdev_warn(port->dev, "skb build failed\n");
goto err_drop_frame;
@@ -6904,6 +6907,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
/* 9704 == 9728 - 20 and rounding to 8 */
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
device_set_node(&dev->dev, port_fwnode);
+ dev->dev_port = port->id;
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
port->pcs_gmac.neg_mode = true;
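
The mvpp2 hunk above picks the skb constructor to match the buffer's allocator: build_skb() is only valid for page/frag backed buffers, so when frag_size is zero (a kmalloc'ed buffer) slab_build_skb() must be used instead. Restated as a small helper, with a hypothetical wrapper name (both constructors are real APIs):

#include <linux/skbuff.h>

/* frag_size == 0 conventionally means 'data' came from kmalloc(). */
static struct sk_buff *foo_build_rx_skb(void *data, unsigned int frag_size)
{
	if (frag_size)
		return build_skb(data, frag_size);

	return slab_build_skb(data);
}
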
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 5664f768cb0c..64a97a0a10ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -9,10 +9,9 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
otx2_devlink.o qos_sq.o qos.o
-rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+rvu_nicvf-y := otx2_vf.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
-rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index a85ac039d779..87d5776e3b88 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -648,14 +648,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
} else if (lvl == NIX_TXSCH_LVL_TL4) {
parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
@@ -670,11 +670,11 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
- req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
+ req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val;
if (lvl == hw->txschq_link_cfg_lvl) {
req->num_regs++;
@@ -698,7 +698,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->num_regs++;
req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
- req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+ req->regval[1] = hw->txschq_aggr_lvl_rr_prio << 1;
req->num_regs++;
req->reg[2] = NIX_AF_TL1X_CIR(schq);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 28fb643d2917..aa01110f04a3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -54,6 +54,7 @@ int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_config);
static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -122,6 +123,7 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_alloc);
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -260,6 +262,7 @@ update_sq_smq_map:
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_update);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
{
@@ -282,6 +285,7 @@ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_stop);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
@@ -321,6 +325,7 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_priority_flow_ctrl);
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
bool pfc_enable)
@@ -385,6 +390,7 @@ out:
"Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
qidx, err);
}
+EXPORT_SYMBOL(otx2_update_bpid_in_rqctx);
static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
@@ -472,3 +478,4 @@ int otx2_dcbnl_set_ops(struct net_device *dev)
return 0;
}
+EXPORT_SYMBOL(otx2_dcbnl_set_ops);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 99ddf31269d9..458d34a62e18 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -113,6 +113,7 @@ err_dl:
devlink_free(dl);
return err;
}
+EXPORT_SYMBOL(otx2_register_dl);
void otx2_unregister_dl(struct otx2_nic *pfvf)
{
@@ -124,3 +125,4 @@ void otx2_unregister_dl(struct otx2_nic *pfvf)
ARRAY_SIZE(otx2_dl_params));
devlink_free(dl);
}
+EXPORT_SYMBOL(otx2_unregister_dl);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 45a32e4b49d1..e3aee6e36215 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -139,33 +139,34 @@
#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
/* NIX AF transmit scheduler registers */
-#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
-#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
-#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
-#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
-#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
-#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
-#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16)
-#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16)
-#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16)
-#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
-#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
-#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16)
-#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16)
-#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16)
-#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16)
-#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
-#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
-#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16)
-#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16)
-#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
-#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16)
-#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
-#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16)
-#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16)
-#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16)
-#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
-#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16)
+#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16)
+#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16)
+#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16)
+#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16)
+#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16)
+#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16)
+#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16)
+#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16)
+#define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3)
/* LMT LF registers */
#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index a16e9f244117..3eb85949677a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -513,7 +513,7 @@ process_cqe:
static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
{
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = { 0 };
u64 rx_frames, rx_bytes;
u64 tx_frames, tx_bytes;
@@ -1174,8 +1174,11 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
/* Insert vlan tag before giving pkt to tso */
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
skb = __vlan_hwaccel_push_inside(skb);
+ if (!skb)
+ return true;
+ }
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index edac008099c0..0f844c14485a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -153,7 +153,6 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
num_regs++;
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
-
} else if (level == NIX_TXSCH_LVL_TL4) {
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
} else if (level == NIX_TXSCH_LVL_TL3) {
@@ -176,7 +175,7 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
/* check if node is root */
if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
- cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 |
+ cfg->regval[num_regs] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 |
mtu_to_dwrr_weight(pfvf,
pfvf->tx_max_pktlen);
num_regs++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index c53c99dde558..a605eae56685 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4875,7 +4875,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
/* Verify if UDP port is being offloaded by HW */
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
- return features;
+ return vxlan_features_check(skb, features);
#if IS_ENABLED(CONFIG_GENEVE)
/* Support Geneve offload for default UDP port */
@@ -4901,7 +4901,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct mlx5e_priv *priv = netdev_priv(netdev);
features = vlan_features_check(skb, features);
- features = vxlan_features_check(skb, features);
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index bf66d996e32e..c0ced4d315f3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1594,18 +1594,25 @@ static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
return -EBUSY;
}
-static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
+static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci,
+ bool pci_reset_sbr_supported)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
char mrsr_pl[MLXSW_REG_MRSR_LEN];
int err;
+ if (!pci_reset_sbr_supported) {
+ pci_dbg(pdev, "Performing PCI hot reset instead of \"all reset\"\n");
+ goto sbr;
+ }
+
mlxsw_reg_mrsr_pack(mrsr_pl,
MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE);
err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
if (err)
return err;
+sbr:
device_lock_assert(&pdev->dev);
pci_cfg_access_lock(pdev);
@@ -1633,6 +1640,7 @@ static int
mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
+ bool pci_reset_sbr_supported = false;
char mcam_pl[MLXSW_REG_MCAM_LEN];
bool pci_reset_supported = false;
u32 sys_status;
@@ -1652,13 +1660,17 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
- if (!err)
+ if (!err) {
mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
&pci_reset_supported);
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET_SBR,
+ &pci_reset_sbr_supported);
+ }
if (pci_reset_supported) {
pci_dbg(pdev, "Starting PCI reset flow\n");
- err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci);
+ err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci,
+ pci_reset_sbr_supported);
} else {
pci_dbg(pdev, "Starting software reset flow\n");
err = mlxsw_pci_reset_sw(mlxsw_pci);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 8adf86a6f5cc..3bb89045eaf5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -10671,6 +10671,8 @@ enum mlxsw_reg_mcam_mng_feature_cap_mask_bits {
MLXSW_REG_MCAM_MCIA_128B = 34,
/* If set, MRSR.command=6 is supported. */
MLXSW_REG_MCAM_PCI_RESET = 48,
+ /* If set, MRSR.command=6 is supported with Secondary Bus Reset. */
+ MLXSW_REG_MCAM_PCI_RESET_SBR = 67,
};
#define MLXSW_REG_BYTES_PER_DWORD 0x4
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index c9f1c79f3f9d..ba090262e27e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -1607,8 +1607,8 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
+ u16 local_port, local_port_1, first_local_port, last_local_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u16 local_port, local_port_1, last_local_port;
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
u8 masked_count, current_page = 0;
unsigned long cb_priv = 0;
@@ -1628,6 +1628,7 @@ next_batch:
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, false);
mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
@@ -1645,9 +1646,12 @@ next_batch:
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
- local_port, 1);
+ local_port - first_local_port,
+ 1);
}
- mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
+ local_port - first_local_port,
+ 1);
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
&bulk_list);
@@ -1684,7 +1688,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u16 local_port, last_local_port;
+ u16 local_port, first_local_port, last_local_port;
LIST_HEAD(bulk_list);
unsigned int masked_count;
u8 current_page = 0;
@@ -1702,6 +1706,7 @@ next_batch:
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, true);
mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
@@ -1719,9 +1724,12 @@ next_batch:
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
- local_port, 1);
+ local_port - first_local_port,
+ 1);
}
- mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
+ local_port - first_local_port,
+ 1);
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
&bulk_list);
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index d0f4ff4ee075..0d1740d64676 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1127,8 +1127,12 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
if (netdev->phydev)
phy_ethtool_get_wol(netdev->phydev, wol);
- wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
- WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+ if (wol->supported != adapter->phy_wol_supported)
+ netif_warn(adapter, drv, adapter->netdev,
+ "PHY changed its supported WOL! old=%x, new=%x\n",
+ adapter->phy_wol_supported, wol->supported);
+
+ wol->supported |= MAC_SUPPORTED_WAKES;
if (adapter->is_pci11x1x)
wol->supported |= WAKE_MAGICSECURE;
@@ -1143,7 +1147,39 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+	/* WAKE_MAGICSECURE is a modifier of and only valid together with
+ * WAKE_MAGIC
+ */
+ if ((wol->wolopts & WAKE_MAGICSECURE) && !(wol->wolopts & WAKE_MAGIC))
+ return -EINVAL;
+
+ if (netdev->phydev) {
+ struct ethtool_wolinfo phy_wol;
+ int ret;
+
+ phy_wol.wolopts = wol->wolopts & adapter->phy_wol_supported;
+
+ /* If WAKE_MAGICSECURE was requested, filter out WAKE_MAGIC
+ * for PHYs that do not support WAKE_MAGICSECURE
+ */
+ if (wol->wolopts & WAKE_MAGICSECURE &&
+ !(adapter->phy_wol_supported & WAKE_MAGICSECURE))
+ phy_wol.wolopts &= ~WAKE_MAGIC;
+
+ ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol);
+ if (ret && (ret != -EOPNOTSUPP))
+ return ret;
+
+ if (ret == -EOPNOTSUPP)
+ adapter->phy_wolopts = 0;
+ else
+ adapter->phy_wolopts = phy_wol.wolopts;
+ } else {
+ adapter->phy_wolopts = 0;
+ }
+
adapter->wolopts = 0;
+ wol->wolopts &= ~adapter->phy_wolopts;
if (wol->wolopts & WAKE_UCAST)
adapter->wolopts |= WAKE_UCAST;
if (wol->wolopts & WAKE_MCAST)
@@ -1164,10 +1200,10 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
}
+ wol->wolopts = adapter->wolopts | adapter->phy_wolopts;
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
- return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
- : -ENETDOWN;
+ return 0;
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 6be8a43c908a..e418539565b1 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -3118,6 +3118,17 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_tx;
}
+
+#ifdef CONFIG_PM
+ if (adapter->netdev->phydev) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phy_ethtool_get_wol(netdev->phydev, &wol);
+ adapter->phy_wol_supported = wol.supported;
+ adapter->phy_wolopts = wol.wolopts;
+ }
+#endif
+
return 0;
close_tx:
@@ -3575,7 +3586,7 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
/* clear wake settings */
pmtctl = lan743x_csr_read(adapter, PMT_CTL);
- pmtctl |= PMT_CTL_WUPS_MASK_;
+ pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_;
pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
@@ -3587,10 +3598,9 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
- if (adapter->wolopts & WAKE_PHY) {
- pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
+ if (adapter->phy_wolopts)
pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
- }
+
if (adapter->wolopts & WAKE_MAGIC) {
wucsr |= MAC_WUCSR_MPEN_;
macrx |= MAC_RX_RXEN_;
@@ -3686,7 +3696,7 @@ static int lan743x_pm_suspend(struct device *dev)
lan743x_csr_write(adapter, MAC_WUCSR2, 0);
lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
- if (adapter->wolopts)
+ if (adapter->wolopts || adapter->phy_wolopts)
lan743x_pm_set_wol(adapter);
if (adapter->is_pci11x1x) {
@@ -3710,6 +3720,7 @@ static int lan743x_pm_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 data;
int ret;
pci_set_power_state(pdev, PCI_D0);
@@ -3728,6 +3739,30 @@ static int lan743x_pm_resume(struct device *dev)
return ret;
}
+ ret = lan743x_csr_read(adapter, MAC_WK_SRC);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Wakeup source : 0x%08X\n", ret);
+
+ /* Clear the wol configuration and status bits. Note that
+ * the status bits are "Write One to Clear (W1C)"
+ */
+ data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ |
+ MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ |
+ MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_;
+ lan743x_csr_write(adapter, MAC_WUCSR, data);
+
+ data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ |
+ MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_;
+ lan743x_csr_write(adapter, MAC_WUCSR2, data);
+
+ data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ |
+ MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ |
+ MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ |
+ MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ |
+ MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ |
+ MAC_WK_SRC_WK_FR_SAVED_;
+ lan743x_csr_write(adapter, MAC_WK_SRC, data);
+
	/* open netdev when netdev is in the running state while resuming.
	 * For instance, it is true when the system wakes up after pm-suspend.
	 * However, it is false when the system wakes up from the suspend GUI menu
@@ -3736,9 +3771,6 @@ static int lan743x_pm_resume(struct device *dev)
lan743x_netdev_open(netdev);
netif_device_attach(netdev);
- ret = lan743x_csr_read(adapter, MAC_WK_SRC);
- netif_info(adapter, drv, adapter->netdev,
- "Wakeup source : 0x%08X\n", ret);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 645bc048e52e..3b2585a384e2 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -61,6 +61,7 @@
#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18)
#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15)
#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13)
+#define PMT_CTL_RES_CLR_WKP_MASK_ GENMASK(9, 8)
#define PMT_CTL_READY_ BIT(7)
#define PMT_CTL_ETH_PHY_RST_ BIT(4)
#define PMT_CTL_WOL_EN_ BIT(3)
@@ -227,12 +228,31 @@
#define MAC_WUCSR (0x140)
#define MAC_MP_SO_EN_ BIT(21)
#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
+#define MAC_WUCSR_EEE_TX_WAKE_ BIT(13)
+#define MAC_WUCSR_EEE_RX_WAKE_ BIT(11)
+#define MAC_WUCSR_RFE_WAKE_FR_ BIT(9)
+#define MAC_WUCSR_PFDA_FR_ BIT(7)
+#define MAC_WUCSR_WUFR_ BIT(6)
+#define MAC_WUCSR_MPR_ BIT(5)
+#define MAC_WUCSR_BCAST_FR_ BIT(4)
#define MAC_WUCSR_PFDA_EN_ BIT(3)
#define MAC_WUCSR_WAKE_EN_ BIT(2)
#define MAC_WUCSR_MPEN_ BIT(1)
#define MAC_WUCSR_BCST_EN_ BIT(0)
#define MAC_WK_SRC (0x144)
+#define MAC_WK_SRC_ETH_PHY_WK_ BIT(17)
+#define MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ BIT(16)
+#define MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ BIT(15)
+#define MAC_WK_SRC_EEE_TX_WK_ BIT(14)
+#define MAC_WK_SRC_EEE_RX_WK_ BIT(13)
+#define MAC_WK_SRC_RFE_FR_WK_ BIT(12)
+#define MAC_WK_SRC_PFDA_FR_WK_ BIT(11)
+#define MAC_WK_SRC_MP_FR_WK_ BIT(10)
+#define MAC_WK_SRC_BCAST_FR_WK_ BIT(9)
+#define MAC_WK_SRC_WU_FR_WK_ BIT(8)
+#define MAC_WK_SRC_WK_FR_SAVED_ BIT(7)
+
#define MAC_MP_SO_HI (0x148)
#define MAC_MP_SO_LO (0x14C)
@@ -295,6 +315,10 @@
#define RFE_INDX(index) (0x580 + (index << 2))
#define MAC_WUCSR2 (0x600)
+#define MAC_WUCSR2_NS_RCD_ BIT(7)
+#define MAC_WUCSR2_ARP_RCD_ BIT(6)
+#define MAC_WUCSR2_IPV6_TCPSYN_RCD_ BIT(5)
+#define MAC_WUCSR2_IPV4_TCPSYN_RCD_ BIT(4)
#define SGMII_ACC (0x720)
#define SGMII_ACC_SGMII_BZY_ BIT(31)
@@ -1018,6 +1042,8 @@ enum lan743x_sgmii_lsd {
LINK_2500_SLAVE
};
+#define MAC_SUPPORTED_WAKES (WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | \
+ WAKE_MAGIC | WAKE_ARP)
struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
@@ -1025,6 +1051,8 @@ struct lan743x_adapter {
#ifdef CONFIG_PM
u32 wolopts;
u8 sopass[SOPASS_MAX];
+ u32 phy_wolopts;
+ u32 phy_wol_supported;
#endif
struct pci_dev *pdev;
struct lan743x_csr csr;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index d087cf954f75..608ad31a9702 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -2798,6 +2798,8 @@ static int add_adev(struct gdma_dev *gd)
if (ret)
goto init_fail;
+ /* madev is owned by the auxiliary device */
+ madev = NULL;
ret = auxiliary_device_add(adev);
if (ret)
goto add_fail;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index f30eee4a5a80..b6c01a88098d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -375,7 +375,9 @@ typedef void (*ionic_cq_done_cb)(void *done_arg);
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg);
-unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do);
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do,
+ bool in_napi);
int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
struct ionic_queue *q, unsigned int index, const char *name,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 24870da3f484..1837a30ba08a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -304,10 +304,8 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
if (ret)
return ret;
- if (qcq->napi.poll)
- napi_enable(&qcq->napi);
-
if (qcq->flags & IONIC_QCQ_F_INTR) {
+ napi_enable(&qcq->napi);
irq_set_affinity_hint(qcq->intr.vector,
&qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
@@ -1191,7 +1189,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_rx_service, NULL, NULL);
if (lif->hwstamp_txq)
- tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget);
+ tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget, !!budget);
work_done = max(max(n_work, a_work), max(rx_work, tx_work));
if (work_done < budget && napi_complete_done(napi, work_done)) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 2427610f4306..9fdd7cd3ef19 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -23,7 +23,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_tx_desc_info *desc_info,
- struct ionic_txq_comp *comp);
+ struct ionic_txq_comp *comp,
+ bool in_napi);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
{
@@ -480,6 +481,20 @@ int ionic_xdp_xmit(struct net_device *netdev, int n,
return nxmit;
}
+static void ionic_xdp_rx_put_bufs(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info,
+ int nbufs)
+{
+ int i;
+
+ for (i = 0; i < nbufs; i++) {
+ dma_unmap_page(q->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+ buf_info->page = NULL;
+ buf_info++;
+ }
+}
+
static bool ionic_run_xdp(struct ionic_rx_stats *stats,
struct net_device *netdev,
struct bpf_prog *xdp_prog,
@@ -493,6 +508,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
struct netdev_queue *nq;
struct xdp_frame *xdpf;
int remain_len;
+ int nbufs = 1;
int frag_len;
int err = 0;
@@ -542,6 +558,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
if (page_is_pfmemalloc(bi->page))
xdp_buff_set_frag_pfmemalloc(&xdp_buf);
} while (remain_len > 0);
+ nbufs += sinfo->nr_frags;
}
xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);
@@ -574,9 +591,6 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
goto out_xdp_abort;
}
- dma_unmap_page(rxq->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-
err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
buf_info->page,
buf_info->page_offset,
@@ -586,23 +600,19 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
goto out_xdp_abort;
}
- buf_info->page = NULL;
+ ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
stats->xdp_tx++;
/* the Tx completion will free the buffers */
break;
case XDP_REDIRECT:
- /* unmap the pages before handing them to a different device */
- dma_unmap_page(rxq->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-
err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
if (err) {
netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
goto out_xdp_abort;
}
- buf_info->page = NULL;
+ ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
rxq->xdp_flush = true;
stats->xdp_redirect++;
break;
@@ -935,7 +945,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
u32 work_done = 0;
u32 flags = 0;
- work_done = ionic_tx_cq_service(cq, budget);
+ work_done = ionic_tx_cq_service(cq, budget, !!budget);
if (unlikely(!budget))
return budget;
@@ -1019,7 +1029,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
- tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT);
+ tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, !!budget);
if (unlikely(!budget))
return budget;
@@ -1152,7 +1162,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_tx_desc_info *desc_info,
- struct ionic_txq_comp *comp)
+ struct ionic_txq_comp *comp,
+ bool in_napi)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_qcq *qcq = q_to_qcq(q);
@@ -1204,11 +1215,13 @@ static void ionic_tx_clean(struct ionic_queue *q,
desc_info->bytes = skb->len;
stats->clean++;
- napi_consume_skb(skb, 1);
+ napi_consume_skb(skb, likely(in_napi) ? 1 : 0);
}
static bool ionic_tx_service(struct ionic_cq *cq,
- unsigned int *total_pkts, unsigned int *total_bytes)
+ unsigned int *total_pkts,
+ unsigned int *total_bytes,
+ bool in_napi)
{
struct ionic_tx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
@@ -1230,7 +1243,7 @@ static bool ionic_tx_service(struct ionic_cq *cq,
desc_info->bytes = 0;
index = q->tail_idx;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, comp);
+ ionic_tx_clean(q, desc_info, comp, in_napi);
if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
@@ -1244,7 +1257,9 @@ static bool ionic_tx_service(struct ionic_cq *cq,
return true;
}
-unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do,
+ bool in_napi)
{
unsigned int work_done = 0;
unsigned int bytes = 0;
@@ -1253,7 +1268,7 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
if (work_to_do == 0)
return 0;
- while (ionic_tx_service(cq, &pkts, &bytes)) {
+ while (ionic_tx_service(cq, &pkts, &bytes, in_napi)) {
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
@@ -1279,7 +1294,7 @@ void ionic_tx_flush(struct ionic_cq *cq)
{
u32 work_done;
- work_done = ionic_tx_cq_service(cq, cq->num_descs);
+ work_done = ionic_tx_cq_service(cq, cq->num_descs, false);
if (work_done)
ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index,
work_done, IONIC_INTR_CRED_RESET_COALESCE);
@@ -1296,7 +1311,7 @@ void ionic_tx_empty(struct ionic_queue *q)
desc_info = &q->tx_info[q->tail_idx];
desc_info->bytes = 0;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, NULL);
+ ionic_tx_clean(q, desc_info, NULL, false);
if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index ff3b89e9028e..ad06da0fdaa0 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -98,10 +98,8 @@ qcaspi_info_show(struct seq_file *s, void *what)
seq_printf(s, "IRQ : %d\n",
qca->spi_dev->irq);
- seq_printf(s, "INTR REQ : %u\n",
- qca->intr_req);
- seq_printf(s, "INTR SVC : %u\n",
- qca->intr_svc);
+ seq_printf(s, "INTR : %lx\n",
+ qca->intr);
seq_printf(s, "SPI max speed : %lu\n",
(unsigned long)qca->spi_dev->max_speed_hz);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5799ecc88a87..8f7ce6b51a1c 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -35,6 +35,8 @@
#define MAX_DMA_BURST_LEN 5000
+#define SPI_INTR 0
+
/* Modules parameters */
#define QCASPI_CLK_SPEED_MIN 1000000
#define QCASPI_CLK_SPEED_MAX 16000000
@@ -579,14 +581,14 @@ qcaspi_spi_thread(void *data)
continue;
}
- if ((qca->intr_req == qca->intr_svc) &&
+ if (!test_bit(SPI_INTR, &qca->intr) &&
!qca->txr.skb[qca->txr.head])
schedule();
set_current_state(TASK_RUNNING);
- netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n",
- qca->intr_req - qca->intr_svc,
+ netdev_dbg(qca->net_dev, "have work to do. int: %lu, tx_skb: %p\n",
+ qca->intr,
qca->txr.skb[qca->txr.head]);
qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);
@@ -600,8 +602,7 @@ qcaspi_spi_thread(void *data)
msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
}
- if (qca->intr_svc != qca->intr_req) {
- qca->intr_svc = qca->intr_req;
+ if (test_and_clear_bit(SPI_INTR, &qca->intr)) {
start_spi_intr_handling(qca, &intr_cause);
if (intr_cause & SPI_INT_CPU_ON) {
@@ -663,7 +664,7 @@ qcaspi_intr_handler(int irq, void *data)
{
struct qcaspi *qca = data;
- qca->intr_req++;
+ set_bit(SPI_INTR, &qca->intr);
if (qca->spi_thread)
wake_up_process(qca->spi_thread);
@@ -679,8 +680,7 @@ qcaspi_netdev_open(struct net_device *dev)
if (!qca)
return -EINVAL;
- qca->intr_req = 1;
- qca->intr_svc = 0;
+ set_bit(SPI_INTR, &qca->intr);
qca->sync = QCASPI_SYNC_UNKNOWN;
qcafrm_fsm_init_spi(&qca->frm_handle);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index d59cb2352cee..8f4808695e82 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -81,8 +81,7 @@ struct qcaspi {
struct qcafrm_handle frm_handle;
struct sk_buff *rx_skb;
- unsigned int intr_req;
- unsigned int intr_svc;
+ unsigned long intr;
u16 reset_count;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index e254b21fdb59..65d7370b47d5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -93,6 +93,7 @@ struct ethqos_emac_driver_data {
bool has_emac_ge_3;
const char *link_clk_name;
bool has_integrated_pcs;
+ u32 dma_addr_width;
struct dwmac4_addrs dwmac4_addrs;
};
@@ -276,6 +277,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
.has_emac_ge_3 = true,
.link_clk_name = "phyaux",
.has_integrated_pcs = true,
+ .dma_addr_width = 36,
.dwmac4_addrs = {
.dma_chan = 0x00008100,
.dma_chan_offset = 0x1000,
@@ -845,6 +847,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
if (data->has_integrated_pcs)
plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
+ if (data->dma_addr_width)
+ plat_dat->host_dma_width = data->dma_addr_width;
if (ethqos->serdes_phy) {
plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index f05bd757dfe5..5ef52ef2698f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -218,6 +218,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
{
u32 num_snapshot, ts_status, tsync_int;
struct ptp_clock_event event;
+ u32 acr_value, channel;
unsigned long flags;
u64 ptp_time;
int i;
@@ -243,12 +244,15 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
GMAC_TIMESTAMP_ATSNS_SHIFT;
+ acr_value = readl(priv->ptpaddr + PTP_ACR);
+ channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value));
+
for (i = 0; i < num_snapshot; i++) {
read_lock_irqsave(&priv->ptp_lock, flags);
get_ptptime(priv->ptpaddr, &ptp_time);
read_unlock_irqrestore(&priv->ptp_lock, flags);
event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
+ event.index = channel;
event.timestamp = ptp_time;
ptp_clock_event(priv->ptp_clock, &event);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 222540b55480..996f2bcd07a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -343,10 +343,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
struct tc_cbs_qopt_offload *qopt)
{
u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ s64 port_transmit_rate_kbps;
u32 queue = qopt->queue;
- u32 ptr, speed_div;
u32 mode_to_use;
u64 value;
+ u32 ptr;
int ret;
/* Queue 0 is not AVB capable */
@@ -355,30 +356,30 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
if (!priv->dma_cap.av)
return -EOPNOTSUPP;
- /* Port Transmit Rate and Speed Divider */
- switch (priv->speed) {
- case SPEED_10000:
- ptr = 32;
- speed_div = 10000000;
- break;
- case SPEED_5000:
- ptr = 32;
- speed_div = 5000000;
- break;
- case SPEED_2500:
- ptr = 8;
- speed_div = 2500000;
- break;
- case SPEED_1000:
- ptr = 8;
- speed_div = 1000000;
- break;
- case SPEED_100:
- ptr = 4;
- speed_div = 100000;
- break;
- default:
- return -EOPNOTSUPP;
+ port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
+
+ if (qopt->enable) {
+ /* Port Transmit Rate and Speed Divider */
+ switch (div_s64(port_transmit_rate_kbps, 1000)) {
+ case SPEED_10000:
+ case SPEED_5000:
+ ptr = 32;
+ break;
+ case SPEED_2500:
+ case SPEED_1000:
+ ptr = 8;
+ break;
+ case SPEED_100:
+ ptr = 4;
+ break;
+ default:
+ netdev_err(priv->dev,
+ "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
+ port_transmit_rate_kbps);
+ return -EINVAL;
+ }
+ } else {
+ ptr = 0;
}
mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
@@ -398,10 +399,10 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
}
/* Final adjustments for HW */
- value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+ value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
- value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
+ value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
value = qopt->hicredit * 1024ll * 8;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 51495cb4b9be..838e85ddec67 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -815,6 +815,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
+ bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
const struct ip_tunnel_key *key = &info->key;
@@ -826,7 +827,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!skb_vlan_inet_prepare(skb))
+ if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
if (!gs4)
@@ -908,7 +909,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
- geneve->cfg.inner_proto_inherit);
+ inner_proto_inherit);
if (unlikely(err))
return err;
@@ -925,6 +926,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
+ bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
const struct ip_tunnel_key *key = &info->key;
@@ -935,7 +937,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!skb_vlan_inet_prepare(skb))
+ if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
if (!gs6)
@@ -997,7 +999,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = ttl ? : ip6_dst_hoplimit(dst);
}
err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
- geneve->cfg.inner_proto_inherit);
+ inner_proto_inherit);
if (unlikely(err))
return err;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index c22897bf5509..017a6102be0a 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -324,7 +324,8 @@ static int nsim_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(nsim->peer);
- iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0;
+ iflink = peer ? READ_ONCE(peer->netdev->ifindex) :
+ READ_ONCE(dev->ifindex);
rcu_read_unlock();
return iflink;
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index 326c9770a6dc..c706429b225a 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -17,6 +17,11 @@
#define DP83TG720S_PHY_RESET 0x1f
#define DP83TG720S_HW_RESET BIT(15)
+#define DP83TG720S_LPS_CFG3 0x18c
+/* Power modes are documented as bit fields but used as values */
+/* Power Mode 0 is Normal mode */
+#define DP83TG720S_LPS_CFG3_PWR_MODE_0 BIT(0)
+
#define DP83TG720S_RGMII_DELAY_CTRL 0x602
/* In RGMII mode, Enable or disable the internal delay for RXD */
#define DP83TG720S_RGMII_RX_CLK_SEL BIT(1)
@@ -31,11 +36,20 @@
static int dp83tg720_config_aneg(struct phy_device *phydev)
{
+ int ret;
+
/* Autoneg is not supported and this PHY supports only one speed.
* We need to care only about master/slave configuration if it was
* changed by user.
*/
- return genphy_c45_pma_baset1_setup_master_slave(phydev);
+ ret = genphy_c45_pma_baset1_setup_master_slave(phydev);
+ if (ret)
+ return ret;
+
+ /* Re-read role configuration to make changes visible even if
+ * the link is in administrative down state.
+ */
+ return genphy_c45_pma_baset1_read_master_slave(phydev);
}
static int dp83tg720_read_status(struct phy_device *phydev)
@@ -64,6 +78,8 @@ static int dp83tg720_read_status(struct phy_device *phydev)
return ret;
/* After HW reset we need to restore master/slave configuration.
+ * genphy_c45_pma_baset1_read_master_slave() call will be done
+ * by the dp83tg720_config_aneg() function.
*/
ret = dp83tg720_config_aneg(phydev);
if (ret)
@@ -154,10 +170,24 @@ static int dp83tg720_config_init(struct phy_device *phydev)
*/
usleep_range(1000, 2000);
- if (phy_interface_is_rgmii(phydev))
- return dp83tg720_config_rgmii_delay(phydev);
+ if (phy_interface_is_rgmii(phydev)) {
+ ret = dp83tg720_config_rgmii_delay(phydev);
+ if (ret)
+ return ret;
+ }
+
+ /* In case the PHY is bootstrapped in managed mode, we need to
+ * wake it.
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_LPS_CFG3,
+ DP83TG720S_LPS_CFG3_PWR_MODE_0);
+ if (ret)
+ return ret;
- return 0;
+ /* Make role configuration visible for ethtool on init and after
+	 * reset.
+ */
+ return genphy_c45_pma_baset1_read_master_slave(phydev);
}
static struct phy_driver dp83tg720_driver[] = {
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 5aada7cf3da7..ebafedde0ab7 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -5607,6 +5607,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ9477, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8841, MICREL_PHY_ID_MASK },
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index b2d36a3a96f1..e5f8ac4b4604 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -107,6 +107,7 @@ struct gpy_priv {
u8 fw_major;
u8 fw_minor;
+ u32 wolopts;
/* It takes 3 seconds to fully switch out of loopback mode before
* it can safely re-enter loopback mode. Record the time when
@@ -221,6 +222,15 @@ static int gpy_hwmon_register(struct phy_device *phydev)
}
#endif
+static int gpy_ack_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Clear all pending interrupts */
+ ret = phy_read(phydev, PHY_ISTAT);
+ return ret < 0 ? ret : 0;
+}
+
static int gpy_mbox_read(struct phy_device *phydev, u32 addr)
{
struct gpy_priv *priv = phydev->priv;
@@ -262,16 +272,8 @@ out:
static int gpy_config_init(struct phy_device *phydev)
{
- int ret;
-
- /* Mask all interrupts */
- ret = phy_write(phydev, PHY_IMASK, 0);
- if (ret)
- return ret;
-
- /* Clear all pending interrupts */
- ret = phy_read(phydev, PHY_ISTAT);
- return ret < 0 ? ret : 0;
+ /* Nothing to configure. Configuration Requirement Placeholder */
+ return 0;
}
static int gpy21x_config_init(struct phy_device *phydev)
@@ -627,11 +629,23 @@ static int gpy_read_status(struct phy_device *phydev)
static int gpy_config_intr(struct phy_device *phydev)
{
+ struct gpy_priv *priv = phydev->priv;
u16 mask = 0;
+ int ret;
+
+ ret = gpy_ack_interrupt(phydev);
+ if (ret)
+ return ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
mask = PHY_IMASK_MASK;
+ if (priv->wolopts & WAKE_MAGIC)
+ mask |= PHY_IMASK_WOL;
+
+ if (priv->wolopts & WAKE_PHY)
+ mask |= PHY_IMASK_LSTC;
+
return phy_write(phydev, PHY_IMASK, mask);
}
@@ -678,6 +692,7 @@ static int gpy_set_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
struct net_device *attach_dev = phydev->attached_dev;
+ struct gpy_priv *priv = phydev->priv;
int ret;
if (wol->wolopts & WAKE_MAGIC) {
@@ -725,6 +740,8 @@ static int gpy_set_wol(struct phy_device *phydev,
ret = phy_read(phydev, PHY_ISTAT);
if (ret < 0)
return ret;
+
+ priv->wolopts |= WAKE_MAGIC;
} else {
/* Disable magic packet matching */
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
@@ -732,6 +749,13 @@ static int gpy_set_wol(struct phy_device *phydev,
WOL_EN);
if (ret < 0)
return ret;
+
+ /* Disable the WOL interrupt */
+ ret = phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_WOL);
+ if (ret < 0)
+ return ret;
+
+ priv->wolopts &= ~WAKE_MAGIC;
}
if (wol->wolopts & WAKE_PHY) {
@@ -748,9 +772,11 @@ static int gpy_set_wol(struct phy_device *phydev,
if (ret & (PHY_IMASK_MASK & ~PHY_IMASK_LSTC))
phy_trigger_machine(phydev);
+ priv->wolopts |= WAKE_PHY;
return 0;
}
+ priv->wolopts &= ~WAKE_PHY;
/* Disable the link state change interrupt */
return phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC);
}
@@ -758,18 +784,10 @@ static int gpy_set_wol(struct phy_device *phydev,
static void gpy_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
- int ret;
+ struct gpy_priv *priv = phydev->priv;
wol->supported = WAKE_MAGIC | WAKE_PHY;
- wol->wolopts = 0;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VPSPEC2_WOL_CTL);
- if (ret & WOL_EN)
- wol->wolopts |= WAKE_MAGIC;
-
- ret = phy_read(phydev, PHY_IMASK);
- if (ret & PHY_IMASK_LSTC)
- wol->wolopts |= WAKE_PHY;
+ wol->wolopts = priv->wolopts;
}
static int gpy_loopback(struct phy_device *phydev, bool enable)
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 3f9cbd797fd6..a5684ef5884b 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -2429,8 +2429,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
/* Handle remove event globally, it resets this state machine */
if (event == SFP_E_REMOVE) {
- if (sfp->sm_mod_state > SFP_MOD_PROBE)
- sfp_sm_mod_remove(sfp);
+ sfp_sm_mod_remove(sfp);
sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0);
return;
}
diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig
index 577ea904b3d9..7fab916a7f46 100644
--- a/drivers/net/pse-pd/Kconfig
+++ b/drivers/net/pse-pd/Kconfig
@@ -23,6 +23,7 @@ config PSE_REGULATOR
config PSE_PD692X0
tristate "PD692X0 PSE controller"
depends on I2C
+ select FW_LOADER
select FW_UPLOAD
help
This module provides support for PD692x0 regulator based Ethernet
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 51c295e1e823..b034ef8a73ea 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -174,7 +174,6 @@ struct ax88179_data {
u32 wol_supported;
u32 wolopts;
u8 disconnecting;
- u8 initialized;
};
struct ax88179_int_data {
@@ -327,7 +326,8 @@ static void ax88179_status(struct usbnet *dev, struct urb *urb)
if (netif_carrier_ok(dev->net) != link) {
usbnet_link_change(dev, link, 1);
- netdev_info(dev->net, "ax88179 - Link status is: %d\n", link);
+ if (!link)
+ netdev_info(dev->net, "ax88179 - Link status is: 0\n");
}
}
@@ -1543,6 +1543,7 @@ static int ax88179_link_reset(struct usbnet *dev)
GMII_PHY_PHYSR, 2, &tmp16);
if (!(tmp16 & GMII_PHY_PHYSR_LINK)) {
+ netdev_info(dev->net, "ax88179 - Link status is: 0\n");
return 0;
} else if (GMII_PHY_PHYSR_GIGA == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ;
@@ -1580,6 +1581,8 @@ static int ax88179_link_reset(struct usbnet *dev)
netif_carrier_on(dev->net);
+ netdev_info(dev->net, "ax88179 - Link status is: 1\n");
+
return 0;
}
@@ -1678,12 +1681,21 @@ static int ax88179_reset(struct usbnet *dev)
static int ax88179_net_reset(struct usbnet *dev)
{
- struct ax88179_data *ax179_data = dev->driver_priv;
+ u16 tmp16;
- if (ax179_data->initialized)
+ ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID, GMII_PHY_PHYSR,
+ 2, &tmp16);
+ if (tmp16) {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ if (!(tmp16 & AX_MEDIUM_RECEIVE_EN)) {
+ tmp16 |= AX_MEDIUM_RECEIVE_EN;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ }
+ } else {
ax88179_reset(dev);
- else
- ax179_data->initialized = 1;
+ }
return 0;
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 663e46348ce3..386d62769ded 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1372,6 +1372,8 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x3000, 0)}, /* Telit FN912 series */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x3001, 0)}, /* Telit FN912 series */
{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 97afd7335d86..01a3b2417a54 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -778,7 +778,8 @@ static int rtl8150_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ecmd)
{
rtl8150_t *dev = netdev_priv(netdev);
- short lpa, bmcr;
+ short lpa = 0;
+ short bmcr = 0;
u32 supported;
supported = (SUPPORTED_10baseT_Half |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 61a57d134544..ea10db9a09fa 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1360,6 +1360,10 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
+ /* Partially checksummed packets must be dropped. */
+ if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ goto err_xdp;
+
buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1677,6 +1681,10 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
if (unlikely(hdr->hdr.gso_type))
return NULL;
+ /* Partially checksummed packets must be dropped. */
+ if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return NULL;
+
/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
* with headroom may add hole in truesize, which
* make their length exceed PAGE_SIZE. So we disabled the
@@ -1943,6 +1951,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
struct net_device *dev = vi->dev;
struct sk_buff *skb;
struct virtio_net_common_hdr *hdr;
+ u8 flags;
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
@@ -1951,6 +1960,15 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
return;
}
+ /* 1. Save the flags early, as the XDP program might overwrite them.
+ * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
+ * stay valid after XDP processing.
+ * 2. XDP doesn't work with partially checksummed packets (refer to
+ * virtnet_xdp_set()), so packets marked as
+ * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
+ */
+ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
+
if (vi->mergeable_rx_bufs)
skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
stats);
@@ -1966,7 +1984,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
- if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
+ if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
@@ -5666,8 +5684,16 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
/* (!csum && gso) case will be fixed by register_netdev() */
}
- if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
- dev->features |= NETIF_F_RXCSUM;
+
+ /* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
+ * need to calculate checksums for partially checksummed packets,
+ * as they're considered valid by the upper layer.
+ * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
+ * receives fully checksummed packets. The device may assist in
+ * validating these packets' checksums, so the driver won't have to.
+ */
+ dev->features |= NETIF_F_RXCSUM;
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
dev->features |= NETIF_F_GRO_HW;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 567cb3faab70..ba59e92ab941 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2339,7 +2339,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_key *pkey;
struct ip_tunnel_key key;
struct vxlan_dev *vxlan = netdev_priv(dev);
- const struct iphdr *old_iph = ip_hdr(skb);
+ const struct iphdr *old_iph;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
unsigned int pkt_len = skb->len;
@@ -2353,8 +2353,15 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
bool use_cache;
bool udp_sum = false;
bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
+ bool no_eth_encap;
__be32 vni = 0;
+ no_eth_encap = flags & VXLAN_F_GPE && skb->protocol != htons(ETH_P_TEB);
+ if (!skb_vlan_inet_prepare(skb, no_eth_encap))
+ goto drop;
+
+ old_iph = ip_hdr(skb);
+
info = skb_tunnel_info(skb);
use_cache = ip_tunnel_dst_cache_usable(skb, info);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index de9f0b446545..18ce060df9b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -4795,7 +4795,7 @@ static int iwl_mvm_roc_station(struct iwl_mvm *mvm,
if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) {
ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration);
- } else if (fw_ver == 3) {
+ } else if (fw_ver >= 3) {
ret = iwl_mvm_roc_add_cmd(mvm, channel, vif, duration,
ROC_ACTIVITY_HOTSPOT);
} else {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index b5f664ae5a17..e975f5ff17b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1830,7 +1830,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
*/
if (!iwl_mvm_is_scan_fragmented(params->type)) {
if (!cfg80211_channel_is_psc(params->channels[i]) ||
- flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) {
+ psc_no_listen) {
if (unsolicited_probe_on_chan) {
max_s_ssids = 2;
max_bssids = 6;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 8ee4498f4245..31bc80cdcb7d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1238,6 +1238,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) {
IWL_DEBUG_TE(mvm,
"No remain on channel event\n");
+ mutex_unlock(&mvm->mutex);
return;
}
@@ -1253,6 +1254,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
te_data = iwl_mvm_get_roc_te(mvm);
if (!te_data) {
IWL_WARN(mvm, "No remain on channel event\n");
+ mutex_unlock(&mvm->mutex);
return;
}
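[Annotation] Both hunks in this file fix the same class of bug: an early return that leaves mvm->mutex held. A common way to keep such paths balanced is a single unlock label that every exit funnels through; the sketch below shows that structure with pthreads (pattern only, not the iwlwifi code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Every early exit goes through the unlock label, so no return path can
 * leak the mutex the way the unpatched function did. */
static int do_work(int have_event)
{
	int ret = 0;

	pthread_mutex_lock(&lock);

	if (!have_event) {
		ret = -1;
		goto out_unlock;
	}

	/* ... normal processing under the lock ... */

out_unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("%d %d\n", do_work(0), do_work(1));
	return 0;
}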
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index dd6ec0865141..0cfa39361d3b 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1602,4 +1602,5 @@ static struct platform_driver apple_nvme_driver = {
module_platform_driver(apple_nvme_driver);
MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple ANS NVM Express device driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ef00bfc6b840..6784fc6e9562 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1031,6 +1031,7 @@ void nvme_cleanup_cmd(struct request *req)
clear_bit_unlock(0, &ctrl->discard_page_busy);
else
kfree(bvec_virt(&req->special_vec));
+ req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
@@ -4014,12 +4015,13 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
mutex_lock(&ctrl->namespaces_lock);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
- if (ns->head->ns_id > nsid)
- list_splice_init_rcu(&ns->list, &rm_list,
- synchronize_rcu);
+ if (ns->head->ns_id > nsid) {
+ list_del_rcu(&ns->list);
+ synchronize_srcu(&ctrl->srcu);
+ list_add_tail_rcu(&ns->list, &rm_list);
+ }
}
mutex_unlock(&ctrl->namespaces_lock);
- synchronize_srcu(&ctrl->srcu);
list_for_each_entry_safe(ns, next, &rm_list, list)
nvme_ns_remove(ns);
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 9d9d2a127c4e..8b69427a4476 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -111,6 +111,13 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
return req;
}
+static void nvme_unmap_bio(struct bio *bio)
+{
+ if (bio_integrity(bio))
+ bio_integrity_unmap_free_user(bio);
+ blk_rq_unmap_user(bio);
+}
+
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
@@ -157,7 +164,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
out_unmap:
if (bio)
- blk_rq_unmap_user(bio);
+ nvme_unmap_bio(bio);
out:
blk_mq_free_request(req);
return ret;
@@ -195,7 +202,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
if (bio)
- blk_rq_unmap_user(bio);
+ nvme_unmap_bio(bio);
blk_mq_free_request(req);
if (effects)
@@ -406,7 +413,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
if (pdu->bio)
- blk_rq_unmap_user(pdu->bio);
+ nvme_unmap_bio(pdu->bio);
io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}
@@ -432,7 +439,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
*/
if (blk_rq_is_poll(req)) {
if (pdu->bio)
- blk_rq_unmap_user(pdu->bio);
+ nvme_unmap_bio(pdu->bio);
io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
} else {
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f3a41133ac3f..68b400f9c42d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -502,7 +502,7 @@ static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
enum nvme_ns_features {
NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
- NVME_NS_DEAC, /* DEAC bit in Write Zeores supported */
+	NVME_NS_DEAC = 1 << 2,		/* DEAC bit in Write Zeroes supported */
};
struct nvme_ns {
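[Annotation] The enum fix above is easy to miss: without an explicit initializer, an enumerator takes the previous value plus one, so NVME_NS_DEAC silently became 3 (bit 0 | bit 1) rather than a dedicated bit. A small standalone reproduction of the pitfall:

#include <stdio.h>

enum buggy_flags {
	F_A = 1 << 0,
	F_B = 1 << 1,
	F_C,		/* implicitly F_B + 1 == 3, i.e. F_A | F_B */
};

enum fixed_flags {
	G_A = 1 << 0,
	G_B = 1 << 1,
	G_C = 1 << 2,	/* a real, distinct bit */
};

int main(void)
{
	/* Testing for F_C with a bitwise AND matches whenever either of the
	 * other flags is set, which is how the unfixed value misbehaves. */
	printf("buggy F_C = %d, fixed G_C = %d\n", F_C, G_C);
	printf("F_A looks like F_C? %s\n", (F_A & F_C) ? "yes" : "no");
	return 0;
}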
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index bd87dfd173a4..685e89b35d33 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -410,7 +410,29 @@ static ssize_t nvmet_addr_tsas_show(struct config_item *item,
return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name);
}
}
- return sprintf(page, "reserved\n");
+ return sprintf(page, "\n");
+}
+
+static u8 nvmet_addr_tsas_rdma_store(const char *page)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
+ if (sysfs_streq(page, nvmet_addr_tsas_rdma[i].name))
+ return nvmet_addr_tsas_rdma[i].type;
+ }
+ return NVMF_RDMA_QPTYPE_INVALID;
+}
+
+static u8 nvmet_addr_tsas_tcp_store(const char *page)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
+ if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name))
+ return nvmet_addr_tsas_tcp[i].type;
+ }
+ return NVMF_TCP_SECTYPE_INVALID;
}
static ssize_t nvmet_addr_tsas_store(struct config_item *item,
@@ -418,20 +440,19 @@ static ssize_t nvmet_addr_tsas_store(struct config_item *item,
{
struct nvmet_port *port = to_nvmet_port(item);
u8 treq = nvmet_port_disc_addr_treq_mask(port);
- u8 sectype;
- int i;
+ u8 sectype, qptype;
if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- if (port->disc_addr.trtype != NVMF_TRTYPE_TCP)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
- if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) {
- sectype = nvmet_addr_tsas_tcp[i].type;
+ if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
+ qptype = nvmet_addr_tsas_rdma_store(page);
+ if (qptype == port->disc_addr.tsas.rdma.qptype)
+ return count;
+ } else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
+ sectype = nvmet_addr_tsas_tcp_store(page);
+ if (sectype != NVMF_TCP_SECTYPE_INVALID)
goto found;
- }
}
pr_err("Invalid value '%s' for tsas\n", page);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 06f0c587f343..4ff460ba2826 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -957,6 +957,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->metadata_sg_cnt = 0;
req->transfer_len = 0;
req->metadata_len = 0;
+ req->cqe->result.u64 = 0;
req->cqe->status = 0;
req->cqe->sq_head = 0;
req->ns = NULL;
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index d61b8c6ff3b2..cb34d644ed08 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -333,7 +333,6 @@ done:
pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
__func__, ctrl->cntlid, req->sq->qid,
status, req->error_loc);
- req->cqe->result.u64 = 0;
if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
@@ -516,8 +515,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
status = nvmet_copy_to_sgl(req, 0, d, al);
kfree(d);
done:
- req->cqe->result.u64 = 0;
-
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
nvmet_auth_sq_free(req->sq);
else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 042b379cbb36..69d77d34bec1 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -226,9 +226,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
if (status)
goto out;
- /* zero out initial completion result, assign values as needed */
- req->cqe->result.u32 = 0;
-
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
@@ -305,9 +302,6 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
if (status)
goto out;
- /* zero out initial completion result, assign values as needed */
- req->cqe->result.u32 = 0;
-
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 337ee1cb09ae..381b4394731f 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -148,7 +148,7 @@ struct nvmet_fc_tgt_queue {
struct workqueue_struct *work_q;
struct kref ref;
/* array of fcp_iods */
- struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize);
+ struct nvmet_fc_fcp_iod fod[] /* __counted_by(sqsize) */;
} __aligned(sizeof(unsigned long long));
struct nvmet_fc_hostport {
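[Annotation] Commenting out __counted_by(sqsize) here suggests the annotation was tripping bounds instrumentation, which is what happens when elements of the flexible array are touched before the counter field has been assigned. A hedged userspace sketch of the rule the attribute enforces; the macro guard keeps it compiling on toolchains without counted_by support, and the struct layout is illustrative only:

#include <stdio.h>
#include <stdlib.h>

#if defined(__has_attribute)
# if __has_attribute(counted_by)
#  define COUNTED_BY(m) __attribute__((counted_by(m)))
# endif
#endif
#ifndef COUNTED_BY
# define COUNTED_BY(m)
#endif

struct queue {
	unsigned int sqsize;
	int fod[] COUNTED_BY(sqsize);
};

int main(void)
{
	unsigned int n = 4;
	struct queue *q = malloc(sizeof(*q) + n * sizeof(int));

	if (!q)
		return 1;
	/* The counter must be written before any element access: with
	 * array-bounds sanitizers the check reads q->sqsize, so touching
	 * fod[] first looks like an out-of-bounds access. */
	q->sqsize = n;
	for (unsigned int i = 0; i < n; i++)
		q->fod[i] = 0;
	printf("initialised %u elements\n", q->sqsize);
	free(q);
	return 0;
}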
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index bb4a69d538fd..f003782d4ecf 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -226,13 +226,13 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
req->cmd->common.opcode == nvme_admin_identify) {
switch (req->cmd->identify.cns) {
case NVME_ID_CNS_CTRL:
- nvmet_passthru_override_id_ctrl(req);
+ status = nvmet_passthru_override_id_ctrl(req);
break;
case NVME_ID_CNS_NS:
- nvmet_passthru_override_id_ns(req);
+ status = nvmet_passthru_override_id_ns(req);
break;
case NVME_ID_CNS_NS_DESC_LIST:
- nvmet_passthru_override_id_descs(req);
+ status = nvmet_passthru_override_id_descs(req);
break;
}
} else if (status < 0)
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index e6dc857aac3f..e06c7b2aac5c 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -229,7 +229,13 @@ static void __exit amiga_parallel_remove(struct platform_device *pdev)
parport_put_port(port);
}
-static struct platform_driver amiga_parallel_driver = {
+/*
+ * amiga_parallel_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver amiga_parallel_driver __refdata = {
.remove_new = __exit_p(amiga_parallel_remove),
.driver = {
.name = "amiga-parallel",
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index c5625dd9bf49..3a45879d85db 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -352,7 +352,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
struct irq_affinity *affd)
{
struct irq_affinity_desc *masks = NULL;
- struct msi_desc *entry;
+ struct msi_desc *entry, desc;
int ret;
/* Reject multi-MSI early on irq domain enabled architectures */
@@ -377,6 +377,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
/* All MSIs are unmasked by default; mask them all */
entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
pci_msi_mask(entry, msi_multi_mask(entry));
+ /*
+ * Copy the MSI descriptor for the error path because
+ * pci_msi_setup_msi_irqs() will free it for the hierarchical
+ * interrupt domain case.
+ */
+ memcpy(&desc, entry, sizeof(desc));
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
@@ -396,7 +402,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
goto unlock;
err:
- pci_msi_unmask(entry, msi_multi_mask(entry));
+ pci_msi_unmask(&desc, msi_multi_mask(&desc));
pci_free_msi_irqs(dev);
fail:
dev->msi_enabled = 0;
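[Annotation] The memcpy added above exists because the setup call may free the descriptor that `entry` points at, so the error path must not dereference it; keeping a by-value copy sidesteps the use-after-free. A generic sketch of the copy-before-a-call-that-may-free pattern (all names are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct desc {
	unsigned int mask;
};

/* Stand-in for a setup call that consumes and may free the descriptor. */
static int setup_that_may_free(struct desc *d, int fail)
{
	free(d);
	return fail ? -1 : 0;
}

int main(void)
{
	struct desc *entry = calloc(1, sizeof(*entry));
	struct desc saved;

	if (!entry)
		return 1;
	entry->mask = 0xf;

	/* Copy what the error path needs before the pointer can go stale. */
	memcpy(&saved, entry, sizeof(saved));

	if (setup_that_may_free(entry, 1) < 0) {
		/* Use the copy, never the (possibly freed) original. */
		printf("undo with mask 0x%x\n", saved.mask);
		return 1;
	}
	return 0;
}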
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 7f999e8a433d..7b00945f7191 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -187,6 +187,31 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_TX_TRANSCEIVER_BIAS_EN,
};
+static const unsigned int qmp_v6_n4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V6_N4_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V6_N4_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V6_N4_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V6_N4_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V6_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V6_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+
+ [QPHY_COM_RESETSM_CNTRL] = QSERDES_V6_COM_RESETSM_CNTRL,
+ [QPHY_COM_C_READY_STATUS] = QSERDES_V6_COM_C_READY_STATUS,
+ [QPHY_COM_CMN_STATUS] = QSERDES_V6_COM_CMN_STATUS,
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V6_N4_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V6_N4_TX_TX_DRV_LVL,
+ [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V6_N4_TX_TX_EMP_POST1_LVL,
+ [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V6_N4_TX_HIGHZ_DRVR_EN,
+ [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN,
+};
+
static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
@@ -997,6 +1022,31 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f),
};
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SVS_MODE_CLK_SEL, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f),
+};
+
static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_TX_VMODE_CTRL1, 0x40),
QMP_PHY_INIT_CFG(QSERDES_V6_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
@@ -1011,6 +1061,19 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_TX_TX_BAND, 0x4),
};
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_VMODE_CTRL1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_INTERFACE_SELECT, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_CLKBUF_ENABLE, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RESET_TSYNC_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TX_BAND, 0x1),
+};
+
static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_rbr[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
@@ -1059,6 +1122,74 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_hbr3[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
};
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_rbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr2[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x02),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr3[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+};
+
static const struct qmp_phy_init_tbl sc8280xp_usb43dp_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
@@ -1273,20 +1404,20 @@ static const struct qmp_phy_init_tbl x1e80100_usb43dp_rx_tbl[] = {
};
static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG2, 0x89),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0x55),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG1, 0xd4),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG2, 0x30),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_SIGDET_LVL, 0x55),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_CONFIG, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1, 0xd4),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2, 0x30),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG5, 0x10),
};
static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_usb_tbl[] = {
@@ -1794,22 +1925,22 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = {
.pcs_usb_tbl = x1e80100_usb43dp_pcs_usb_tbl,
.pcs_usb_tbl_num = ARRAY_SIZE(x1e80100_usb43dp_pcs_usb_tbl),
- .dp_serdes_tbl = qmp_v6_dp_serdes_tbl,
- .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
- .dp_tx_tbl = qmp_v6_dp_tx_tbl,
- .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl),
+ .dp_serdes_tbl = qmp_v6_n4_dp_serdes_tbl,
+ .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl),
+ .dp_tx_tbl = qmp_v6_n4_dp_tx_tbl,
+ .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_tx_tbl),
- .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr,
- .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
- .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr,
- .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
- .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2,
- .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
- .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3,
- .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
+ .serdes_tbl_rbr = qmp_v6_n4_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v6_n4_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v6_n4_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v6_n4_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr3),
- .swing_hbr_rbr = &qmp_dp_v5_voltage_swing_hbr_rbr,
- .pre_emphasis_hbr_rbr = &qmp_dp_v5_pre_emphasis_hbr_rbr,
+ .swing_hbr_rbr = &qmp_dp_v6_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr,
.swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
.pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
@@ -1822,7 +1953,7 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = {
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v45_usb3phy_regs_layout,
+ .regs = qmp_v6_n4_usb3phy_regs_layout,
};
static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h
new file mode 100644
index 000000000000..b3024714dab4
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V6_N4_H_
+#define QCOM_PHY_QMP_PCS_V6_N4_H_
+
+/* Only for QMP V6 N4 PHY - USB/PCIe PCS registers */
+#define QPHY_V6_N4_PCS_SW_RESET 0x000
+#define QPHY_V6_N4_PCS_PCS_STATUS1 0x014
+#define QPHY_V6_N4_PCS_POWER_DOWN_CONTROL 0x040
+#define QPHY_V6_N4_PCS_START_CONTROL 0x044
+#define QPHY_V6_N4_PCS_POWER_STATE_CONFIG1 0x090
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1 0x0c4
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2 0x0c8
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3 0x0cc
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6 0x0d8
+#define QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V6_N4_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
+#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
+#define QPHY_V6_N4_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V6_N4_PCS_RX_CONFIG 0x1b0
+#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1 0x1c0
+#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2 0x1c4
+#define QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG 0x1d0
+#define QPHY_V6_N4_PCS_EQ_CONFIG1 0x1dc
+#define QPHY_V6_N4_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V6_N4_PCS_EQ_CONFIG5 0x1ec
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
index a814ad11af07..d37cc0d4fd36 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
@@ -6,11 +6,24 @@
#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
#define QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
+#define QSERDES_V6_N4_TX_CLKBUF_ENABLE 0x08
+#define QSERDES_V6_N4_TX_TX_EMP_POST1_LVL 0x0c
+#define QSERDES_V6_N4_TX_TX_DRV_LVL 0x14
+#define QSERDES_V6_N4_TX_RESET_TSYNC_EN 0x1c
+#define QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN 0x20
#define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX 0x30
#define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN 0x48
+#define QSERDES_V6_N4_TX_HIGHZ_DRVR_EN 0x4c
+#define QSERDES_V6_N4_TX_TX_POL_INV 0x50
+#define QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN 0x54
#define QSERDES_V6_N4_TX_LANE_MODE_1 0x78
#define QSERDES_V6_N4_TX_LANE_MODE_2 0x7c
#define QSERDES_V6_N4_TX_LANE_MODE_3 0x80
+#define QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN 0xac
+#define QSERDES_V6_N4_TX_TX_BAND 0xd8
+#define QSERDES_V6_N4_TX_INTERFACE_SELECT 0xe4
+#define QSERDES_V6_N4_TX_VMODE_CTRL1 0xb0
#define QSERDES_V6_N4_RX_UCDR_FO_GAIN_RATE2 0x8
#define QSERDES_V6_N4_RX_UCDR_SO_GAIN_RATE2 0x18
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index d10b8f653c4b..d0f41e4aaa85 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -46,6 +46,8 @@
#include "phy-qcom-qmp-pcs-v6.h"
+#include "phy-qcom-qmp-pcs-v6-n4.h"
+
#include "phy-qcom-qmp-pcs-v6_20.h"
#include "phy-qcom-qmp-pcs-v7.h"
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 7178a38475cc..27fd54795791 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -245,7 +245,7 @@ static const char * const irq_type_names[] = {
};
static bool persist_gpio_outputs;
-module_param(persist_gpio_outputs, bool, 0644);
+module_param(persist_gpio_outputs, bool, 0444);
MODULE_PARM_DESC(persist_gpio_outputs, "Enable GPIO_OUT persistence when pin is freed");
static inline u32 bcm2835_gpio_rd(struct bcm2835_pinctrl *pc, unsigned reg)
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index cffeb869130d..f424a57f0013 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1106,8 +1106,8 @@ static struct pinctrl *create_pinctrl(struct device *dev,
* an -EPROBE_DEFER later, as that is the worst case.
*/
if (ret == -EPROBE_DEFER) {
- pinctrl_free(p, false);
mutex_unlock(&pinctrl_maps_mutex);
+ pinctrl_free(p, false);
return ERR_PTR(ret);
}
}
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 3bedf36a0019..3f56991f5b89 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -634,23 +634,68 @@ static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = {
{
- .num = 2,
- .pin = 12,
- .reg = 0x24,
- .bit = 8,
- .mask = 0x3
- }, {
+ /* gpio2_b7_sel */
.num = 2,
.pin = 15,
.reg = 0x28,
.bit = 0,
.mask = 0x7
}, {
+ /* gpio2_c7_sel */
.num = 2,
.pin = 23,
.reg = 0x30,
.bit = 14,
.mask = 0x3
+ }, {
+ /* gpio3_b1_sel */
+ .num = 3,
+ .pin = 9,
+ .reg = 0x44,
+ .bit = 2,
+ .mask = 0x3
+ }, {
+ /* gpio3_b2_sel */
+ .num = 3,
+ .pin = 10,
+ .reg = 0x44,
+ .bit = 4,
+ .mask = 0x3
+ }, {
+ /* gpio3_b3_sel */
+ .num = 3,
+ .pin = 11,
+ .reg = 0x44,
+ .bit = 6,
+ .mask = 0x3
+ }, {
+ /* gpio3_b4_sel */
+ .num = 3,
+ .pin = 12,
+ .reg = 0x44,
+ .bit = 8,
+ .mask = 0x3
+ }, {
+ /* gpio3_b5_sel */
+ .num = 3,
+ .pin = 13,
+ .reg = 0x44,
+ .bit = 10,
+ .mask = 0x3
+ }, {
+ /* gpio3_b6_sel */
+ .num = 3,
+ .pin = 14,
+ .reg = 0x44,
+ .bit = 12,
+ .mask = 0x3
+ }, {
+ /* gpio3_b7_sel */
+ .num = 3,
+ .pin = 15,
+ .reg = 0x44,
+ .bit = 14,
+ .mask = 0x3
},
};
@@ -2433,6 +2478,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RK3188:
case RK3288:
case RK3308:
+ case RK3328:
case RK3368:
case RK3399:
case RK3568:
@@ -2491,6 +2537,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3188:
case RK3288:
case RK3308:
+ case RK3328:
case RK3368:
case RK3399:
case RK3568:
@@ -2704,8 +2751,10 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
if (ret) {
/* revert the already done pin settings */
- for (cnt--; cnt >= 0; cnt--)
+ for (cnt--; cnt >= 0; cnt--) {
+ bank = pin_to_bank(info, pins[cnt]);
rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0);
+ }
return ret;
}
@@ -2753,6 +2802,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3188:
case RK3288:
case RK3308:
+ case RK3328:
case RK3368:
case RK3399:
case RK3568:
@@ -3763,7 +3813,7 @@ static struct rockchip_pin_bank rk3328_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0),
PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0,
- IOMUX_WIDTH_3BIT,
+ 0,
IOMUX_WIDTH_3BIT,
0),
PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3",
@@ -3777,7 +3827,7 @@ static struct rockchip_pin_ctrl rk3328_pin_ctrl = {
.pin_banks = rk3328_pin_banks,
.nr_banks = ARRAY_SIZE(rk3328_pin_banks),
.label = "RK3328-GPIO",
- .type = RK3288,
+ .type = RK3328,
.grf_mux_offset = 0x0,
.iomux_recalced = rk3328_mux_recalced_data,
.niomux_recalced = ARRAY_SIZE(rk3328_mux_recalced_data),
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index 4759f336941e..849266f8b191 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -193,6 +193,7 @@ enum rockchip_pinctrl_type {
RK3188,
RK3288,
RK3308,
+ RK3328,
RK3368,
RK3399,
RK3568,
diff --git a/drivers/pinctrl/pinctrl-tps6594.c b/drivers/pinctrl/pinctrl-tps6594.c
index 085047320853..5e7c7cf93445 100644
--- a/drivers/pinctrl/pinctrl-tps6594.c
+++ b/drivers/pinctrl/pinctrl-tps6594.c
@@ -486,6 +486,7 @@ static int tps6594_pinctrl_probe(struct platform_device *pdev)
break;
case TPS6593:
case TPS6594:
+ case LP8764:
pctrl_desc->pins = tps6594_pins;
pctrl_desc->npins = ARRAY_SIZE(tps6594_pins);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 4e80c7204e5f..4abd6f18bbef 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1207,7 +1207,6 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm7550ba-gpio", .data = (void *) 8},
{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
- { .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
{ .compatible = "qcom,pm8019-gpio", .data = (void *) 6 },
/* pm8150 has 10 GPIOs with holes on 2, 5, 7 and 8 */
{ .compatible = "qcom,pm8150-gpio", .data = (void *) 10 },
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index c3256bfde502..60be78da9f52 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -2071,11 +2071,11 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
* This has to be atomically executed to protect against a concurrent
* interrupt.
*/
- raw_spin_lock_irqsave(&pctrl->lock.rlock, flags);
+ spin_lock_irqsave(&pctrl->lock, flags);
ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data));
if (!ret && !irqd_irq_disabled(data))
rzg2l_gpio_irq_enable(data);
- raw_spin_unlock_irqrestore(&pctrl->lock.rlock, flags);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
if (ret)
dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq);
diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c
index 3ef655591424..abe7be602f84 100644
--- a/drivers/platform/mellanox/nvsw-sn2201.c
+++ b/drivers/platform/mellanox/nvsw-sn2201.c
@@ -1198,6 +1198,7 @@ static int nvsw_sn2201_config_pre_init(struct nvsw_sn2201 *nvsw_sn2201)
static int nvsw_sn2201_probe(struct platform_device *pdev)
{
struct nvsw_sn2201 *nvsw_sn2201;
+ int ret;
nvsw_sn2201 = devm_kzalloc(&pdev->dev, sizeof(*nvsw_sn2201), GFP_KERNEL);
if (!nvsw_sn2201)
@@ -1205,8 +1206,10 @@ static int nvsw_sn2201_probe(struct platform_device *pdev)
nvsw_sn2201->dev = &pdev->dev;
platform_set_drvdata(pdev, nvsw_sn2201);
- platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
+ ret = platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
ARRAY_SIZE(nvsw_sn2201_lpc_io_resources));
+ if (ret)
+ return ret;
nvsw_sn2201->main_mux_deferred_nr = NVSW_SN2201_MAIN_MUX_DEFER_NR;
nvsw_sn2201->main_mux_devs = nvsw_sn2201_main_mux_brdinfo;
diff --git a/drivers/platform/x86/amilo-rfkill.c b/drivers/platform/x86/amilo-rfkill.c
index efcf909786a5..2423dc91debb 100644
--- a/drivers/platform/x86/amilo-rfkill.c
+++ b/drivers/platform/x86/amilo-rfkill.c
@@ -171,6 +171,7 @@ static void __exit amilo_rfkill_exit(void)
}
MODULE_AUTHOR("Ben Hutchings <ben@decadent.org.uk>");
+MODULE_DESCRIPTION("Fujitsu-Siemens Amilo rfkill support");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(dmi, amilo_rfkill_id_table);
diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c
index dd8240009565..182a07d8ae3d 100644
--- a/drivers/platform/x86/firmware_attributes_class.c
+++ b/drivers/platform/x86/firmware_attributes_class.c
@@ -49,4 +49,5 @@ int fw_attributes_class_put(void)
EXPORT_SYMBOL_GPL(fw_attributes_class_put);
MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
+MODULE_DESCRIPTION("Firmware attributes class helper module");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 1d4bbae115f1..231b37909801 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -29,6 +29,7 @@ static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Show debug output");
+MODULE_DESCRIPTION("IBM Premium Real Time Mode (PRTM) driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Keith Mannthey <kmmanth@us.ibm.com>");
MODULE_AUTHOR("Vernon Mauery <vernux@us.ibm.com>");
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index c7a827645864..10cd65497cc1 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -38,6 +38,7 @@ MODULE_PARM_DESC(enable_sw_tablet_mode,
/* When NOT in tablet mode, VGBS returns with the flag 0x40 */
#define TABLET_MODE_FLAG BIT(6)
+MODULE_DESCRIPTION("Intel HID Event hotkey driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
diff --git a/drivers/platform/x86/intel/pmc/pltdrv.c b/drivers/platform/x86/intel/pmc/pltdrv.c
index ddfba38c2104..f2cb87dc2d37 100644
--- a/drivers/platform/x86/intel/pmc/pltdrv.c
+++ b/drivers/platform/x86/intel/pmc/pltdrv.c
@@ -86,4 +86,5 @@ static void __exit pmc_core_platform_exit(void)
module_init(pmc_core_platform_init);
module_exit(pmc_core_platform_exit);
+MODULE_DESCRIPTION("Intel PMC Core platform driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/rst.c b/drivers/platform/x86/intel/rst.c
index 6bc9c4a603e0..f3a60e14d4c1 100644
--- a/drivers/platform/x86/intel/rst.c
+++ b/drivers/platform/x86/intel/rst.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/slab.h>
+MODULE_DESCRIPTION("Intel Rapid Start Technology Driver");
MODULE_LICENSE("GPL");
static ssize_t irst_show_wakeup_events(struct device *dev,
diff --git a/drivers/platform/x86/intel/smartconnect.c b/drivers/platform/x86/intel/smartconnect.c
index cd25d0585324..31019a1a6d5e 100644
--- a/drivers/platform/x86/intel/smartconnect.c
+++ b/drivers/platform/x86/intel/smartconnect.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
+MODULE_DESCRIPTION("Intel Smart Connect disabling driver");
MODULE_LICENSE("GPL");
static int smartconnect_acpi_init(struct acpi_device *acpi)
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 84c1353eb12b..9b7ce03ba085 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -24,6 +24,7 @@
#define VGBS_TABLET_MODE_FLAGS (VGBS_TABLET_MODE_FLAG | VGBS_TABLET_MODE_FLAG_ALT)
+MODULE_DESCRIPTION("Intel Virtual Button driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("AceLan Kao");
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index d0fee5d375d7..9c7857842caf 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -39,8 +39,6 @@ MODULE_LICENSE("GPL");
#define WMI_METHOD_WMBB "2B4F501A-BD3C-4394-8DCF-00A7D2BC8210"
#define WMI_EVENT_GUID WMI_EVENT_GUID0
-#define WMAB_METHOD "\\XINI.WMAB"
-#define WMBB_METHOD "\\XINI.WMBB"
#define SB_GGOV_METHOD "\\_SB.GGOV"
#define GOV_TLED 0x2020008
#define WM_GET 1
@@ -74,7 +72,7 @@ static u32 inited;
static int battery_limit_use_wmbb;
static struct led_classdev kbd_backlight;
-static enum led_brightness get_kbd_backlight_level(void);
+static enum led_brightness get_kbd_backlight_level(struct device *dev);
static const struct key_entry wmi_keymap[] = {
{KE_KEY, 0x70, {KEY_F15} }, /* LG control panel (F1) */
@@ -84,7 +82,6 @@ static const struct key_entry wmi_keymap[] = {
* this key both sends an event and
* changes backlight level.
*/
- {KE_KEY, 0x80, {KEY_RFKILL} },
{KE_END, 0}
};
@@ -128,11 +125,10 @@ static int ggov(u32 arg0)
return res;
}
-static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
+static union acpi_object *lg_wmab(struct device *dev, u32 method, u32 arg1, u32 arg2)
{
union acpi_object args[3];
acpi_status status;
- acpi_handle handle;
struct acpi_object_list arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -143,29 +139,22 @@ static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
args[2].type = ACPI_TYPE_INTEGER;
args[2].integer.value = arg2;
- status = acpi_get_handle(NULL, (acpi_string) WMAB_METHOD, &handle);
- if (ACPI_FAILURE(status)) {
- pr_err("Cannot get handle");
- return NULL;
- }
-
arg.count = 3;
arg.pointer = args;
- status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
+ status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMAB", &arg, &buffer);
if (ACPI_FAILURE(status)) {
- acpi_handle_err(handle, "WMAB: call failed.\n");
+ dev_err(dev, "WMAB: call failed.\n");
return NULL;
}
return buffer.pointer;
}
-static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
+static union acpi_object *lg_wmbb(struct device *dev, u32 method_id, u32 arg1, u32 arg2)
{
union acpi_object args[3];
acpi_status status;
- acpi_handle handle;
struct acpi_object_list arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
u8 buf[32];
@@ -181,18 +170,12 @@ static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
args[2].buffer.length = 32;
args[2].buffer.pointer = buf;
- status = acpi_get_handle(NULL, (acpi_string)WMBB_METHOD, &handle);
- if (ACPI_FAILURE(status)) {
- pr_err("Cannot get handle");
- return NULL;
- }
-
arg.count = 3;
arg.pointer = args;
- status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
+ status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMBB", &arg, &buffer);
if (ACPI_FAILURE(status)) {
- acpi_handle_err(handle, "WMAB: call failed.\n");
+ dev_err(dev, "WMBB: call failed.\n");
return NULL;
}
@@ -223,7 +206,7 @@ static void wmi_notify(u32 value, void *context)
if (eventcode == 0x10000000) {
led_classdev_notify_brightness_hw_changed(
- &kbd_backlight, get_kbd_backlight_level());
+ &kbd_backlight, get_kbd_backlight_level(kbd_backlight.dev->parent));
} else {
key = sparse_keymap_entry_from_scancode(
wmi_input_dev, eventcode);
@@ -272,14 +255,7 @@ static void wmi_input_setup(void)
static void acpi_notify(struct acpi_device *device, u32 event)
{
- struct key_entry *key;
-
acpi_handle_debug(device->handle, "notify: %d\n", event);
- if (inited & INIT_SPARSE_KEYMAP) {
- key = sparse_keymap_entry_from_scancode(wmi_input_dev, 0x80);
- if (key && key->type == KE_KEY)
- sparse_keymap_report_entry(wmi_input_dev, key, 1, true);
- }
}
static ssize_t fan_mode_store(struct device *dev,
@@ -295,7 +271,7 @@ static ssize_t fan_mode_store(struct device *dev,
if (ret)
return ret;
- r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
+ r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
if (!r)
return -EIO;
@@ -306,9 +282,9 @@ static ssize_t fan_mode_store(struct device *dev,
m = r->integer.value;
kfree(r);
- r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
+ r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
kfree(r);
- r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
+ r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
kfree(r);
return count;
@@ -320,7 +296,7 @@ static ssize_t fan_mode_show(struct device *dev,
unsigned int status;
union acpi_object *r;
- r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
+ r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
if (!r)
return -EIO;
@@ -347,7 +323,7 @@ static ssize_t usb_charge_store(struct device *dev,
if (ret)
return ret;
- r = lg_wmbb(WMBB_USB_CHARGE, WM_SET, value);
+ r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_SET, value);
if (!r)
return -EIO;
@@ -361,7 +337,7 @@ static ssize_t usb_charge_show(struct device *dev,
unsigned int status;
union acpi_object *r;
- r = lg_wmbb(WMBB_USB_CHARGE, WM_GET, 0);
+ r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_GET, 0);
if (!r)
return -EIO;
@@ -389,7 +365,7 @@ static ssize_t reader_mode_store(struct device *dev,
if (ret)
return ret;
- r = lg_wmab(WM_READER_MODE, WM_SET, value);
+ r = lg_wmab(dev, WM_READER_MODE, WM_SET, value);
if (!r)
return -EIO;
@@ -403,7 +379,7 @@ static ssize_t reader_mode_show(struct device *dev,
unsigned int status;
union acpi_object *r;
- r = lg_wmab(WM_READER_MODE, WM_GET, 0);
+ r = lg_wmab(dev, WM_READER_MODE, WM_GET, 0);
if (!r)
return -EIO;
@@ -431,7 +407,7 @@ static ssize_t fn_lock_store(struct device *dev,
if (ret)
return ret;
- r = lg_wmab(WM_FN_LOCK, WM_SET, value);
+ r = lg_wmab(dev, WM_FN_LOCK, WM_SET, value);
if (!r)
return -EIO;
@@ -445,7 +421,7 @@ static ssize_t fn_lock_show(struct device *dev,
unsigned int status;
union acpi_object *r;
- r = lg_wmab(WM_FN_LOCK, WM_GET, 0);
+ r = lg_wmab(dev, WM_FN_LOCK, WM_GET, 0);
if (!r)
return -EIO;
@@ -475,9 +451,9 @@ static ssize_t charge_control_end_threshold_store(struct device *dev,
union acpi_object *r;
if (battery_limit_use_wmbb)
- r = lg_wmbb(WMBB_BATT_LIMIT, WM_SET, value);
+ r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_SET, value);
else
- r = lg_wmab(WM_BATT_LIMIT, WM_SET, value);
+ r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_SET, value);
if (!r)
return -EIO;
@@ -496,7 +472,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
union acpi_object *r;
if (battery_limit_use_wmbb) {
- r = lg_wmbb(WMBB_BATT_LIMIT, WM_GET, 0);
+ r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_GET, 0);
if (!r)
return -EIO;
@@ -507,7 +483,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
status = r->buffer.pointer[0x10];
} else {
- r = lg_wmab(WM_BATT_LIMIT, WM_GET, 0);
+ r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_GET, 0);
if (!r)
return -EIO;
@@ -586,7 +562,7 @@ static void tpad_led_set(struct led_classdev *cdev,
{
union acpi_object *r;
- r = lg_wmab(WM_TLED, WM_SET, brightness > LED_OFF);
+ r = lg_wmab(cdev->dev->parent, WM_TLED, WM_SET, brightness > LED_OFF);
kfree(r);
}
@@ -608,16 +584,16 @@ static void kbd_backlight_set(struct led_classdev *cdev,
val = 0;
if (brightness >= LED_FULL)
val = 0x24;
- r = lg_wmab(WM_KEY_LIGHT, WM_SET, val);
+ r = lg_wmab(cdev->dev->parent, WM_KEY_LIGHT, WM_SET, val);
kfree(r);
}
-static enum led_brightness get_kbd_backlight_level(void)
+static enum led_brightness get_kbd_backlight_level(struct device *dev)
{
union acpi_object *r;
int val;
- r = lg_wmab(WM_KEY_LIGHT, WM_GET, 0);
+ r = lg_wmab(dev, WM_KEY_LIGHT, WM_GET, 0);
if (!r)
return LED_OFF;
@@ -645,7 +621,7 @@ static enum led_brightness get_kbd_backlight_level(void)
static enum led_brightness kbd_backlight_get(struct led_classdev *cdev)
{
- return get_kbd_backlight_level();
+ return get_kbd_backlight_level(cdev->dev->parent);
}
static LED_DEVICE(kbd_backlight, 255, LED_BRIGHT_HW_CHANGED);
@@ -672,6 +648,11 @@ static struct platform_driver pf_driver = {
static int acpi_add(struct acpi_device *device)
{
+ struct platform_device_info pdev_info = {
+ .fwnode = acpi_fwnode_handle(device),
+ .name = PLATFORM_NAME,
+ .id = PLATFORM_DEVID_NONE,
+ };
int ret;
const char *product;
int year = 2017;
@@ -683,9 +664,7 @@ static int acpi_add(struct acpi_device *device)
if (ret)
return ret;
- pf_device = platform_device_register_simple(PLATFORM_NAME,
- PLATFORM_DEVID_NONE,
- NULL, 0);
+ pf_device = platform_device_register_full(&pdev_info);
if (IS_ERR(pf_device)) {
ret = PTR_ERR(pf_device);
pf_device = NULL;
@@ -776,7 +755,7 @@ static void acpi_remove(struct acpi_device *device)
}
static const struct acpi_device_id device_ids[] = {
- {"LGEX0815", 0},
+ {"LGEX0820", 0},
{"", 0}
};
MODULE_DEVICE_TABLE(acpi, device_ids);
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c b/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c
index 31a139d87d9a..5edc294de6e4 100644
--- a/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c
@@ -45,6 +45,7 @@ static struct platform_driver simatic_ipc_batt_driver = {
module_platform_driver(simatic_ipc_batt_driver);
+MODULE_DESCRIPTION("CMOS Battery monitoring for Simatic IPCs based on Apollo Lake GPIO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_SOFTDEP("pre: simatic-ipc-batt platform:apollolake-pinctrl");
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c b/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c
index a7676f224075..e6a56d14b505 100644
--- a/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c
@@ -45,6 +45,7 @@ static struct platform_driver simatic_ipc_batt_driver = {
module_platform_driver(simatic_ipc_batt_driver);
+MODULE_DESCRIPTION("CMOS Battery monitoring for Simatic IPCs based on Elkhart Lake GPIO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_SOFTDEP("pre: simatic-ipc-batt platform:elkhartlake-pinctrl");
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c b/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
index 5e77e05fdb5d..f8849d0e48a8 100644
--- a/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
@@ -81,6 +81,7 @@ static struct platform_driver simatic_ipc_batt_driver = {
module_platform_driver(simatic_ipc_batt_driver);
+MODULE_DESCRIPTION("CMOS Battery monitoring for Simatic IPCs based on Nuvoton GPIO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_SOFTDEP("pre: simatic-ipc-batt gpio_f7188x platform:elkhartlake-pinctrl platform:alderlake-pinctrl");
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt.c b/drivers/platform/x86/siemens/simatic-ipc-batt.c
index c6dd263b4ee3..d9aff10608cf 100644
--- a/drivers/platform/x86/siemens/simatic-ipc-batt.c
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt.c
@@ -247,6 +247,7 @@ static struct platform_driver simatic_ipc_batt_driver = {
module_platform_driver(simatic_ipc_batt_driver);
+MODULE_DESCRIPTION("CMOS core battery driver for Siemens Simatic IPCs");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/platform/x86/siemens/simatic-ipc.c b/drivers/platform/x86/siemens/simatic-ipc.c
index 8ca6e277fa03..7039874d8f11 100644
--- a/drivers/platform/x86/siemens/simatic-ipc.c
+++ b/drivers/platform/x86/siemens/simatic-ipc.c
@@ -231,6 +231,7 @@ static void __exit simatic_ipc_exit_module(void)
module_init(simatic_ipc_init_module);
module_exit(simatic_ipc_exit_module);
+MODULE_DESCRIPTION("Siemens SIMATIC IPC platform driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Gerd Haeussler <gerd.haeussler.ext@siemens.com>");
MODULE_ALIAS("dmi:*:svnSIEMENSAG:*");
diff --git a/drivers/platform/x86/uv_sysfs.c b/drivers/platform/x86/uv_sysfs.c
index 37372d7cc54a..f6a0627f36db 100644
--- a/drivers/platform/x86/uv_sysfs.c
+++ b/drivers/platform/x86/uv_sysfs.c
@@ -929,4 +929,5 @@ module_init(uv_sysfs_init);
module_exit(uv_sysfs_exit);
MODULE_AUTHOR("Hewlett Packard Enterprise");
+MODULE_DESCRIPTION("Sysfs structure for HPE UV systems");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/wireless-hotkey.c b/drivers/platform/x86/wireless-hotkey.c
index e95cdbbfb708..a220fe4f9ef8 100644
--- a/drivers/platform/x86/wireless-hotkey.c
+++ b/drivers/platform/x86/wireless-hotkey.c
@@ -14,11 +14,13 @@
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
+MODULE_DESCRIPTION("Airplane mode button for AMD, HP & Xiaomi laptops");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
MODULE_ALIAS("acpi*:HPQ6001:*");
MODULE_ALIAS("acpi*:WSTADEF:*");
MODULE_ALIAS("acpi*:AMDI0051:*");
+MODULE_ALIAS("acpi*:LGEX0815:*");
struct wl_button {
struct input_dev *input_dev;
@@ -29,6 +31,7 @@ static const struct acpi_device_id wl_ids[] = {
{"HPQ6001", 0},
{"WSTADEF", 0},
{"AMDI0051", 0},
+ {"LGEX0815", 0},
{"", 0},
};
diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c
index e64d5646b4c7..5fe68296501c 100644
--- a/drivers/platform/x86/xo1-rfkill.c
+++ b/drivers/platform/x86/xo1-rfkill.c
@@ -74,5 +74,6 @@ static struct platform_driver xo1_rfkill_driver = {
module_platform_driver(xo1_rfkill_driver);
MODULE_AUTHOR("Daniel Drake <dsd@laptop.org>");
+MODULE_DESCRIPTION("OLPC XO-1 software RF kill switch");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:xo1-rfkill");
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index a15460aaa03b..6b1b8f57cd95 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -296,8 +296,7 @@ static ssize_t max_vclocks_store(struct device *dev,
if (max < ptp->n_vclocks)
goto out;
- size = sizeof(int) * max;
- vclock_index = kzalloc(size, GFP_KERNEL);
+ vclock_index = kcalloc(max, sizeof(int), GFP_KERNEL);
if (!vclock_index) {
err = -ENOMEM;
goto out;
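[Annotation] Switching to kcalloc() is not purely cosmetic: the two-argument form checks the count*size multiplication for overflow before allocating, whereas the open-coded `sizeof(int) * max` could wrap for a huge count. The same distinction exists in userspace between malloc(n * size) and calloc(n, size); a sketch of the overflow check the helper performs:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Rough equivalent of the overflow check kcalloc()/calloc() perform
 * before multiplying count by element size. */
static void *checked_alloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;	/* n * size would wrap */
	return calloc(n, size);
}

int main(void)
{
	int *ok = checked_alloc_array(1024, sizeof(int));
	void *bad = checked_alloc_array(SIZE_MAX / 2, 16);

	printf("ok=%p bad=%p\n", (void *)ok, bad);
	free(ok);
	return 0;
}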
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index a2f231d13a9f..8bae3fd2b330 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -321,22 +321,30 @@ static int stm32_pwm_config(struct stm32_pwm *priv, unsigned int ch,
* First we need to find the minimal value for prescaler such that
*
* period_ns * clkrate
- * ------------------------------
+ * ------------------------------ < max_arr + 1
* NSEC_PER_SEC * (prescaler + 1)
*
- * isn't bigger than max_arr.
+ * This equation is equivalent to
+ *
+ * period_ns * clkrate
+ * ---------------------------- < prescaler + 1
+ * NSEC_PER_SEC * (max_arr + 1)
+ *
+ * Using integer division and knowing that the right hand side is
+ * integer, this is further equivalent to
+ *
+ * (period_ns * clkrate) // (NSEC_PER_SEC * (max_arr + 1)) ≤ prescaler
*/
prescaler = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk),
- (u64)NSEC_PER_SEC * priv->max_arr);
- if (prescaler > 0)
- prescaler -= 1;
-
+ (u64)NSEC_PER_SEC * ((u64)priv->max_arr + 1));
if (prescaler > MAX_TIM_PSC)
return -EINVAL;
prd = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk),
(u64)NSEC_PER_SEC * (prescaler + 1));
+ if (!prd)
+ return -EINVAL;
/*
* All channels share the same prescaler and counter so when two
@@ -673,7 +681,8 @@ static int stm32_pwm_probe(struct platform_device *pdev)
* .apply() won't overflow.
*/
if (clk_get_rate(priv->clk) > 1000000000)
- return dev_err_probe(dev, -EINVAL, "Failed to lock clock\n");
+ return dev_err_probe(dev, -EINVAL, "Clock freq too high (%lu)\n",
+ clk_get_rate(priv->clk));
chip->ops = &stm32pwm_ops;
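[Annotation] The reworked comment earlier in this file derives the prescaler from the inequality period_ns * clkrate / (NSEC_PER_SEC * (prescaler + 1)) < max_arr + 1, which integer division turns into prescaler = (period_ns * clkrate) / (NSEC_PER_SEC * (max_arr + 1)). A quick numeric check of that math, using an assumed 208 MHz timer clock, a 1 ms period and a 16-bit auto-reload register (these numbers are illustrative, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t period_ns = 1000000;	/* 1 ms, i.e. 1 kHz PWM */
	uint64_t clkrate = 208000000;	/* assumed timer clock */
	uint64_t max_arr = 0xffff;	/* 16-bit auto-reload register */

	/* prescaler = (period_ns * clkrate) / (NSEC_PER_SEC * (max_arr + 1)) */
	uint64_t prescaler = (period_ns * clkrate) /
			     (NSEC_PER_SEC * (max_arr + 1));
	uint64_t prd = (period_ns * clkrate) /
		       (NSEC_PER_SEC * (prescaler + 1));

	/* Prints prescaler=3 arr=52000: 52000 fits below max_arr + 1, so the
	 * chosen prescaler is indeed the smallest one that works. */
	printf("prescaler=%llu arr=%llu\n",
	       (unsigned long long)prescaler, (unsigned long long)prd);
	return 0;
}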
diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
index 5de69e0bb0f9..196c1c8b578c 100644
--- a/drivers/ras/amd/atl/internal.h
+++ b/drivers/ras/amd/atl/internal.h
@@ -224,7 +224,7 @@ int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo);
int get_df_system_info(void);
int determine_node_id(struct addr_ctx *ctx, u8 socket_num, u8 die_num);
-int get_addr_hash_mi300(void);
+int get_umc_info_mi300(void);
int get_address_map(struct addr_ctx *ctx);
diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c
index 701349e84942..6979fa3d4fe2 100644
--- a/drivers/ras/amd/atl/system.c
+++ b/drivers/ras/amd/atl/system.c
@@ -127,7 +127,7 @@ static int df4_determine_df_rev(u32 reg)
if (reg == DF_FUNC0_ID_MI300) {
df_cfg.flags.heterogeneous = 1;
- if (get_addr_hash_mi300())
+ if (get_umc_info_mi300())
return -EINVAL;
}
diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
index 59b6169093f7..a1b4accf7b96 100644
--- a/drivers/ras/amd/atl/umc.c
+++ b/drivers/ras/amd/atl/umc.c
@@ -68,6 +68,8 @@ struct xor_bits {
};
#define NUM_BANK_BITS 4
+#define NUM_COL_BITS 5
+#define NUM_SID_BITS 2
static struct {
/* UMC::CH::AddrHashBank */
@@ -80,7 +82,22 @@ static struct {
u8 bank_xor;
} addr_hash;
+static struct {
+ u8 bank[NUM_BANK_BITS];
+ u8 col[NUM_COL_BITS];
+ u8 sid[NUM_SID_BITS];
+ u8 num_row_lo;
+ u8 num_row_hi;
+ u8 row_lo;
+ u8 row_hi;
+ u8 pc;
+} bit_shifts;
+
#define MI300_UMC_CH_BASE 0x90000
+#define MI300_ADDR_CFG (MI300_UMC_CH_BASE + 0x30)
+#define MI300_ADDR_SEL (MI300_UMC_CH_BASE + 0x40)
+#define MI300_COL_SEL_LO (MI300_UMC_CH_BASE + 0x50)
+#define MI300_ADDR_SEL_2 (MI300_UMC_CH_BASE + 0xA4)
#define MI300_ADDR_HASH_BANK0 (MI300_UMC_CH_BASE + 0xC8)
#define MI300_ADDR_HASH_PC (MI300_UMC_CH_BASE + 0xE0)
#define MI300_ADDR_HASH_PC2 (MI300_UMC_CH_BASE + 0xE4)
@@ -90,17 +107,42 @@ static struct {
#define ADDR_HASH_ROW_XOR GENMASK(31, 14)
#define ADDR_HASH_BANK_XOR GENMASK(5, 0)
+#define ADDR_CFG_NUM_ROW_LO GENMASK(11, 8)
+#define ADDR_CFG_NUM_ROW_HI GENMASK(15, 12)
+
+#define ADDR_SEL_BANK0 GENMASK(3, 0)
+#define ADDR_SEL_BANK1 GENMASK(7, 4)
+#define ADDR_SEL_BANK2 GENMASK(11, 8)
+#define ADDR_SEL_BANK3 GENMASK(15, 12)
+#define ADDR_SEL_BANK4 GENMASK(20, 16)
+#define ADDR_SEL_ROW_LO GENMASK(27, 24)
+#define ADDR_SEL_ROW_HI GENMASK(31, 28)
+
+#define COL_SEL_LO_COL0 GENMASK(3, 0)
+#define COL_SEL_LO_COL1 GENMASK(7, 4)
+#define COL_SEL_LO_COL2 GENMASK(11, 8)
+#define COL_SEL_LO_COL3 GENMASK(15, 12)
+#define COL_SEL_LO_COL4 GENMASK(19, 16)
+
+#define ADDR_SEL_2_BANK5 GENMASK(4, 0)
+#define ADDR_SEL_2_CHAN GENMASK(15, 12)
+
/*
* Read UMC::CH::AddrHash{Bank,PC,PC2} registers to get XOR bits used
- * for hashing. Do this during module init, since the values will not
- * change during run time.
+ * for hashing.
+ *
+ * Also, read UMC::CH::Addr{Cfg,Sel,Sel2} and UMC::CH::ColSelLo registers to
+ * get the values needed to reconstruct the normalized address. Apply additional
+ * offsets to the raw register values, as needed.
+ *
+ * Do this during module init, since the values will not change during run time.
*
* These registers are instantiated for each UMC across each AMD Node.
* However, they should be identically programmed due to the fixed hardware
* design of MI300 systems. So read the values from Node 0 UMC 0 and keep a
* single global structure for simplicity.
*/
-int get_addr_hash_mi300(void)
+int get_umc_info_mi300(void)
{
u32 temp;
int ret;
@@ -130,6 +172,44 @@ int get_addr_hash_mi300(void)
addr_hash.bank_xor = FIELD_GET(ADDR_HASH_BANK_XOR, temp);
+ ret = amd_smn_read(0, MI300_ADDR_CFG, &temp);
+ if (ret)
+ return ret;
+
+ bit_shifts.num_row_hi = FIELD_GET(ADDR_CFG_NUM_ROW_HI, temp);
+ bit_shifts.num_row_lo = 10 + FIELD_GET(ADDR_CFG_NUM_ROW_LO, temp);
+
+ ret = amd_smn_read(0, MI300_ADDR_SEL, &temp);
+ if (ret)
+ return ret;
+
+ bit_shifts.bank[0] = 5 + FIELD_GET(ADDR_SEL_BANK0, temp);
+ bit_shifts.bank[1] = 5 + FIELD_GET(ADDR_SEL_BANK1, temp);
+ bit_shifts.bank[2] = 5 + FIELD_GET(ADDR_SEL_BANK2, temp);
+ bit_shifts.bank[3] = 5 + FIELD_GET(ADDR_SEL_BANK3, temp);
+ /* Use BankBit4 for the SID0 position. */
+ bit_shifts.sid[0] = 5 + FIELD_GET(ADDR_SEL_BANK4, temp);
+ bit_shifts.row_lo = 12 + FIELD_GET(ADDR_SEL_ROW_LO, temp);
+ bit_shifts.row_hi = 24 + FIELD_GET(ADDR_SEL_ROW_HI, temp);
+
+ ret = amd_smn_read(0, MI300_COL_SEL_LO, &temp);
+ if (ret)
+ return ret;
+
+ bit_shifts.col[0] = 2 + FIELD_GET(COL_SEL_LO_COL0, temp);
+ bit_shifts.col[1] = 2 + FIELD_GET(COL_SEL_LO_COL1, temp);
+ bit_shifts.col[2] = 2 + FIELD_GET(COL_SEL_LO_COL2, temp);
+ bit_shifts.col[3] = 2 + FIELD_GET(COL_SEL_LO_COL3, temp);
+ bit_shifts.col[4] = 2 + FIELD_GET(COL_SEL_LO_COL4, temp);
+
+ ret = amd_smn_read(0, MI300_ADDR_SEL_2, &temp);
+ if (ret)
+ return ret;
+
+ /* Use BankBit5 for the SID1 position. */
+ bit_shifts.sid[1] = 5 + FIELD_GET(ADDR_SEL_2_BANK5, temp);
+ bit_shifts.pc = 5 + FIELD_GET(ADDR_SEL_2_CHAN, temp);
+
return 0;
}
@@ -146,9 +226,6 @@ int get_addr_hash_mi300(void)
* The MCA address format is as follows:
* MCA_ADDR[27:0] = {S[1:0], P[0], R[14:0], B[3:0], C[4:0], Z[0]}
*
- * The normalized address format is fixed in hardware and is as follows:
- * NA[30:0] = {S[1:0], R[13:0], C4, B[1:0], B[3:2], C[3:2], P, C[1:0], Z[4:0]}
- *
* Additionally, the PC and Bank bits may be hashed. This must be accounted for before
* reconstructing the normalized address.
*/
@@ -158,18 +235,10 @@ int get_addr_hash_mi300(void)
#define MI300_UMC_MCA_PC BIT(25)
#define MI300_UMC_MCA_SID GENMASK(27, 26)
-#define MI300_NA_COL_1_0 GENMASK(6, 5)
-#define MI300_NA_PC BIT(7)
-#define MI300_NA_COL_3_2 GENMASK(9, 8)
-#define MI300_NA_BANK_3_2 GENMASK(11, 10)
-#define MI300_NA_BANK_1_0 GENMASK(13, 12)
-#define MI300_NA_COL_4 BIT(14)
-#define MI300_NA_ROW GENMASK(28, 15)
-#define MI300_NA_SID GENMASK(30, 29)
-
static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
{
- u16 i, col, row, bank, pc, sid, temp;
+ u16 i, col, row, bank, pc, sid;
+ u32 temp;
col = FIELD_GET(MI300_UMC_MCA_COL, addr);
bank = FIELD_GET(MI300_UMC_MCA_BANK, addr);
@@ -189,49 +258,48 @@ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
/* Calculate hash for PC bit. */
if (addr_hash.pc.xor_enable) {
- /* Bits SID[1:0] act as Bank[6:5] for PC hash, so apply them here. */
- bank |= sid << 5;
-
temp = bitwise_xor_bits(col & addr_hash.pc.col_xor);
temp ^= bitwise_xor_bits(row & addr_hash.pc.row_xor);
- temp ^= bitwise_xor_bits(bank & addr_hash.bank_xor);
+ /* Bits SID[1:0] act as Bank[5:4] for PC hash, so apply them here. */
+ temp ^= bitwise_xor_bits((bank | sid << NUM_BANK_BITS) & addr_hash.bank_xor);
pc ^= temp;
-
- /* Drop SID bits for the sake of debug printing later. */
- bank &= 0x1F;
}
/* Reconstruct the normalized address starting with NA[4:0] = 0 */
addr = 0;
- /* NA[6:5] = Column[1:0] */
- temp = col & 0x3;
- addr |= FIELD_PREP(MI300_NA_COL_1_0, temp);
-
- /* NA[7] = PC */
- addr |= FIELD_PREP(MI300_NA_PC, pc);
-
- /* NA[9:8] = Column[3:2] */
- temp = (col >> 2) & 0x3;
- addr |= FIELD_PREP(MI300_NA_COL_3_2, temp);
+ /* Column bits */
+ for (i = 0; i < NUM_COL_BITS; i++) {
+ temp = (col >> i) & 0x1;
+ addr |= temp << bit_shifts.col[i];
+ }
- /* NA[11:10] = Bank[3:2] */
- temp = (bank >> 2) & 0x3;
- addr |= FIELD_PREP(MI300_NA_BANK_3_2, temp);
+ /* Bank bits */
+ for (i = 0; i < NUM_BANK_BITS; i++) {
+ temp = (bank >> i) & 0x1;
+ addr |= temp << bit_shifts.bank[i];
+ }
- /* NA[13:12] = Bank[1:0] */
- temp = bank & 0x3;
- addr |= FIELD_PREP(MI300_NA_BANK_1_0, temp);
+ /* Row lo bits */
+ for (i = 0; i < bit_shifts.num_row_lo; i++) {
+ temp = (row >> i) & 0x1;
+ addr |= temp << (i + bit_shifts.row_lo);
+ }
- /* NA[14] = Column[4] */
- temp = (col >> 4) & 0x1;
- addr |= FIELD_PREP(MI300_NA_COL_4, temp);
+ /* Row hi bits */
+ for (i = 0; i < bit_shifts.num_row_hi; i++) {
+ temp = (row >> (i + bit_shifts.num_row_lo)) & 0x1;
+ addr |= temp << (i + bit_shifts.row_hi);
+ }
- /* NA[28:15] = Row[13:0] */
- addr |= FIELD_PREP(MI300_NA_ROW, row);
+ /* PC bit */
+ addr |= pc << bit_shifts.pc;
- /* NA[30:29] = SID[1:0] */
- addr |= FIELD_PREP(MI300_NA_SID, sid);
+ /* SID bits */
+ for (i = 0; i < NUM_SID_BITS; i++) {
+ temp = (sid >> i) & 0x1;
+ addr |= temp << bit_shifts.sid[i];
+ }
pr_debug("Addr=0x%016lx", addr);
pr_debug("Bank=%u Row=%u Column=%u PC=%u SID=%u", bank, row, col, pc, sid);
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 34fcdd82b2ea..f3c447ecdc3b 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -140,7 +140,7 @@
#define AXP717_DCDC1_NUM_VOLTAGES 88
#define AXP717_DCDC2_NUM_VOLTAGES 107
-#define AXP717_DCDC3_NUM_VOLTAGES 104
+#define AXP717_DCDC3_NUM_VOLTAGES 103
#define AXP717_DCDC_V_OUT_MASK GENMASK(6, 0)
#define AXP717_LDO_V_OUT_MASK GENMASK(4, 0)
@@ -763,10 +763,15 @@ static const struct linear_range axp717_dcdc1_ranges[] = {
REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000),
};
+/*
+ * The manual says that the last voltage is 3.4V, encoded as 0b1101011 (107),
+ * but every other method proves that this is wrong, so it's really 106 that
+ * programs the final 3.4V.
+ */
static const struct linear_range axp717_dcdc2_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 0, 70, 10000),
REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000),
- REGULATOR_LINEAR_RANGE(1600000, 88, 107, 100000),
+ REGULATOR_LINEAR_RANGE(1600000, 88, 106, 100000),
};
static const struct linear_range axp717_dcdc3_ranges[] = {
@@ -790,40 +795,40 @@ static const struct regulator_desc axp717_regulators[] = {
AXP_DESC(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100,
AXP717_DCDC4_CONTROL, AXP717_DCDC_V_OUT_MASK,
AXP717_DCDC_OUTPUT_CONTROL, BIT(3)),
- AXP_DESC(AXP717, ALDO1, "aldo1", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, ALDO1, "aldo1", "aldoin", 500, 3500, 100,
AXP717_ALDO1_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(0)),
- AXP_DESC(AXP717, ALDO2, "aldo2", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, ALDO2, "aldo2", "aldoin", 500, 3500, 100,
AXP717_ALDO2_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(1)),
- AXP_DESC(AXP717, ALDO3, "aldo3", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, ALDO3, "aldo3", "aldoin", 500, 3500, 100,
AXP717_ALDO3_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(2)),
- AXP_DESC(AXP717, ALDO4, "aldo4", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, ALDO4, "aldo4", "aldoin", 500, 3500, 100,
AXP717_ALDO4_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(3)),
- AXP_DESC(AXP717, BLDO1, "bldo1", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, BLDO1, "bldo1", "bldoin", 500, 3500, 100,
AXP717_BLDO1_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(4)),
- AXP_DESC(AXP717, BLDO2, "bldo2", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, BLDO2, "bldo2", "bldoin", 500, 3500, 100,
AXP717_BLDO2_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(5)),
- AXP_DESC(AXP717, BLDO3, "bldo3", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, BLDO3, "bldo3", "bldoin", 500, 3500, 100,
AXP717_BLDO3_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(6)),
- AXP_DESC(AXP717, BLDO4, "bldo4", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, BLDO4, "bldo4", "bldoin", 500, 3500, 100,
AXP717_BLDO4_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(7)),
- AXP_DESC(AXP717, CLDO1, "cldo1", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, CLDO1, "cldo1", "cldoin", 500, 3500, 100,
AXP717_CLDO1_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO1_OUTPUT_CONTROL, BIT(0)),
- AXP_DESC(AXP717, CLDO2, "cldo2", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, CLDO2, "cldo2", "cldoin", 500, 3500, 100,
AXP717_CLDO2_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO1_OUTPUT_CONTROL, BIT(1)),
- AXP_DESC(AXP717, CLDO3, "cldo3", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, CLDO3, "cldo3", "cldoin", 500, 3500, 100,
AXP717_CLDO3_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO1_OUTPUT_CONTROL, BIT(2)),
- AXP_DESC(AXP717, CLDO4, "cldo4", "vin1", 500, 3500, 100,
+ AXP_DESC(AXP717, CLDO4, "cldo4", "cldoin", 500, 3500, 100,
AXP717_CLDO4_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO1_OUTPUT_CONTROL, BIT(3)),
AXP_DESC(AXP717, CPUSLDO, "cpusldo", "vin1", 500, 1400, 50,
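As a quick sanity check of the DCDC2 change above, assuming the usual linear-range formula voltage = min_uV + (selector - min_sel) * step_uV, selector 106 already lands on the documented 3.4 V maximum while 107 would overshoot it:

#include <stdio.h>

/* Standard linear-range mapping: voltage = min_uV + (sel - min_sel) * step_uV */
static unsigned int range_to_uv(unsigned int min_uv, unsigned int min_sel,
				unsigned int step_uv, unsigned int sel)
{
	return min_uv + (sel - min_sel) * step_uv;
}

int main(void)
{
	/* Third DCDC2 range above: REGULATOR_LINEAR_RANGE(1600000, 88, 106, 100000) */
	printf("sel 106 -> %u uV\n", range_to_uv(1600000, 88, 100000, 106)); /* 3400000 */
	printf("sel 107 -> %u uV\n", range_to_uv(1600000, 88, 100000, 107)); /* 3500000 */
	return 0;
}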
diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
index 26192d55a685..79fbb45297f6 100644
--- a/drivers/regulator/bd71815-regulator.c
+++ b/drivers/regulator/bd71815-regulator.c
@@ -256,7 +256,7 @@ static int buck12_set_hw_dvs_levels(struct device_node *np,
* 10: 2.50mV/usec 10mV 4uS
* 11: 1.25mV/usec 10mV 8uS
*/
-static const unsigned int bd7181x_ramp_table[] = { 1250, 2500, 5000, 10000 };
+static const unsigned int bd7181x_ramp_table[] = { 10000, 5000, 2500, 1250 };
static int bd7181x_led_set_current_limit(struct regulator_dev *rdev,
int min_uA, int max_uA)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 5794f4e9dd52..844e9587a880 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3347,6 +3347,7 @@ struct regmap *regulator_get_regmap(struct regulator *regulator)
return map ? map : ERR_PTR(-EOPNOTSUPP);
}
+EXPORT_SYMBOL_GPL(regulator_get_regmap);
/**
* regulator_get_hardware_vsel_register - get the HW voltage selector register
diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c
index 4a859f4c0f83..ac53792e3fed 100644
--- a/drivers/regulator/tps6594-regulator.c
+++ b/drivers/regulator/tps6594-regulator.c
@@ -653,18 +653,14 @@ static int tps6594_regulator_probe(struct platform_device *pdev)
}
}
- if (tps->chip_id == LP8764) {
- nr_buck = ARRAY_SIZE(buck_regs);
- nr_ldo = 0;
- nr_types = REGS_INT_NB;
- } else if (tps->chip_id == TPS65224) {
+ if (tps->chip_id == TPS65224) {
nr_buck = ARRAY_SIZE(tps65224_buck_regs);
nr_ldo = ARRAY_SIZE(tps65224_ldo_regs);
- nr_types = REGS_INT_NB;
+ nr_types = TPS65224_REGS_INT_NB;
} else {
nr_buck = ARRAY_SIZE(buck_regs);
- nr_ldo = ARRAY_SIZE(tps6594_ldo_regs);
- nr_types = TPS65224_REGS_INT_NB;
+ nr_ldo = (tps->chip_id == LP8764) ? 0 : ARRAY_SIZE(tps6594_ldo_regs);
+ nr_types = REGS_INT_NB;
}
reg_irq_nb = nr_types * (nr_buck + nr_ldo);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index d53ee34d398f..fbe29cabcbb8 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1293,6 +1293,7 @@ sclp_init(void)
fail_unregister_reboot_notifier:
unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
+ list_del(&sclp_state_change_event.list);
sclp_init_state = sclp_init_state_uninitialized;
free_page((unsigned long) sclp_read_sccb);
free_page((unsigned long) sclp_init_sccb);
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index d7569f395559..d6491fc84e8c 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -698,6 +698,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
dma64_t *indicatorp = NULL;
int ret, i, queue_idx = 0;
struct ccw1 *ccw;
+ dma32_t indicatorp_dma = 0;
ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
@@ -725,7 +726,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
*/
indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
sizeof(*indicatorp),
- &ccw->cda);
+ &indicatorp_dma);
if (!indicatorp)
goto out;
*indicatorp = indicators_dma(vcdev);
@@ -735,6 +736,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
/* no error, just fall back to legacy interrupts */
vcdev->is_thinint = false;
}
+ ccw->cda = indicatorp_dma;
if (!vcdev->is_thinint) {
/* Register queue indicators with host. */
*indicators(vcdev) = 0;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 4c69fc63c119..cbbe43d8ef87 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -610,15 +610,15 @@ int sas_ata_init(struct domain_device *found_dev)
rc = ata_sas_tport_add(ata_host->dev, ap);
if (rc)
- goto destroy_port;
+ goto free_port;
found_dev->sata_dev.ata_host = ata_host;
found_dev->sata_dev.ap = ap;
return 0;
-destroy_port:
- kfree(ap);
+free_port:
+ ata_port_free(ap);
free_host:
ata_host_put(ata_host);
return rc;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 8fb7c41c0962..48d975c6dbf2 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -301,7 +301,7 @@ void sas_free_device(struct kref *kref)
if (dev_is_sata(dev) && dev->sata_dev.ap) {
ata_sas_tport_delete(dev->sata_dev.ap);
- kfree(dev->sata_dev.ap);
+ ata_port_free(dev->sata_dev.ap);
ata_host_put(dev->sata_dev.ata_host);
dev->sata_dev.ata_host = NULL;
dev->sata_dev.ap = NULL;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 1638109a68a0..cd261b48eb46 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -2163,10 +2163,72 @@ persistent_id_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(persistent_id);
+/**
+ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_supported attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' sdev attribute, only works with SATA devices
+ */
+static ssize_t
+sas_ncq_prio_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
+}
+static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+
+/**
+ * sas_ncq_prio_enable_show - send prioritized io commands to device
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_enable attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' sdev attribute, only works with SATA devices
+ */
+static ssize_t
+sas_ncq_prio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+
+ if (!sdev_priv_data)
+ return 0;
+
+ return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable);
+}
+
+static ssize_t
+sas_ncq_prio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+ bool ncq_prio_enable = 0;
+
+ if (kstrtobool(buf, &ncq_prio_enable))
+ return -EINVAL;
+
+ if (!sas_ata_ncq_prio_supported(sdev))
+ return -EINVAL;
+
+ sdev_priv_data->ncq_prio_enable = ncq_prio_enable;
+
+ return strlen(buf);
+}
+static DEVICE_ATTR_RW(sas_ncq_prio_enable);
+
static struct attribute *mpi3mr_dev_attrs[] = {
&dev_attr_sas_address.attr,
&dev_attr_device_handle.attr,
&dev_attr_persistent_id.attr,
+ &dev_attr_sas_ncq_prio_supported.attr,
+ &dev_attr_sas_ncq_prio_enable.attr,
NULL,
};
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 1320e06727df..b2bcf4a27ddc 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -8512,6 +8512,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
if (ioc->facts.MaxDevHandle % 8)
ioc->pd_handles_sz++;
+ /*
+ * pd_handles_sz should have, at least, the minimal room for
+ * set_bit()/test_bit(), otherwise out-of-memory touch may occur.
+ */
+ ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long));
+
ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
GFP_KERNEL);
if (!ioc->pd_handles) {
@@ -8529,6 +8535,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
if (ioc->facts.MaxDevHandle % 8)
ioc->pend_os_device_add_sz++;
+
+ /*
+ * pend_os_device_add_sz should have, at least, the minimal room for
+ * set_bit()/test_bit(), otherwise out-of-memory may occur.
+ */
+ ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz,
+ sizeof(unsigned long));
ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
GFP_KERNEL);
if (!ioc->pend_os_device_add) {
@@ -8820,6 +8833,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
if (ioc->facts.MaxDevHandle % 8)
pd_handles_sz++;
+ /*
+ * pd_handles should have, at least, the minimal room for
+ * set_bit()/test_bit(), otherwise out-of-memory touch may
+ * occur.
+ */
+ pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long));
pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
GFP_KERNEL);
if (!pd_handles) {
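The sizing comments above boil down to rounding the byte count up to a whole number of unsigned longs, so the bitmap helpers never touch memory past the allocation. A small worked example (the MaxDevHandle value is illustrative) for a 64-bit build where sizeof(unsigned long) == 8:

#include <stdio.h>

/* Same rounding the kernel's ALIGN() does for power-of-two alignments. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long max_dev_handle = 20;	/* illustrative firmware value */
	unsigned long sz = max_dev_handle / 8;

	if (max_dev_handle % 8)
		sz++;

	/* A 3-byte buffer would let set_bit(20, ...) touch past the allocation
	 * on a 64-bit system, so round up to a whole unsigned long (8 bytes). */
	printf("raw size: %lu bytes, aligned: %lu bytes\n",
	       sz, ALIGN(sz, sizeof(unsigned long)));
	return 0;
}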
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index bf100a4ebfc3..fe1e96fda284 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -2048,9 +2048,6 @@ void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
-/* NCQ Prio Handling Check */
-bool scsih_ncq_prio_supp(struct scsi_device *sdev);
-
void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_init_debugfs(void);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 1c9fd26195b8..87784c96249a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -4088,7 +4088,7 @@ sas_ncq_prio_supported_show(struct device *dev,
{
struct scsi_device *sdev = to_scsi_device(dev);
- return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
+ return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
}
static DEVICE_ATTR_RO(sas_ncq_prio_supported);
@@ -4123,7 +4123,7 @@ sas_ncq_prio_enable_store(struct device *dev,
if (kstrtobool(buf, &ncq_prio_enable))
return -EINVAL;
- if (!scsih_ncq_prio_supp(sdev))
+ if (!sas_ata_ncq_prio_supported(sdev))
return -EINVAL;
sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b050aedc9d43..97c2472cd434 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -12565,29 +12565,6 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-/**
- * scsih_ncq_prio_supp - Check for NCQ command priority support
- * @sdev: scsi device struct
- *
- * This is called when a user indicates they would like to enable
- * ncq command priorities. This works only on SATA devices.
- */
-bool scsih_ncq_prio_supp(struct scsi_device *sdev)
-{
- struct scsi_vpd *vpd;
- bool ncq_prio_supp = false;
-
- rcu_read_lock();
- vpd = rcu_dereference(sdev->vpd_pg89);
- if (!vpd || vpd->len < 214)
- goto out;
-
- ncq_prio_supp = (vpd->data[213] >> 4) & 1;
-out:
- rcu_read_unlock();
-
- return ncq_prio_supp;
-}
/*
* The pci device ids are defined in mpi/mpi2_cnfg.h.
*/
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index f0464db3f9de..ee69bd35889d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -673,6 +673,13 @@ void scsi_cdl_check(struct scsi_device *sdev)
sdev->use_10_for_rw = 0;
sdev->cdl_supported = 1;
+
+ /*
+ * If the device supports CDL, make sure that the current drive
+ * feature status is consistent with the user controlled
+ * cdl_enable state.
+ */
+ scsi_cdl_enable(sdev, sdev->cdl_enable);
} else {
sdev->cdl_supported = 0;
}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 424a89513814..4e33f1661e4c 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -416,6 +416,29 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
}
EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);
+/**
+ * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support
+ * @sdev: SCSI device
+ *
+ * Check if an ATA device supports NCQ priority using VPD page 89h (ATA
+ * Information). Since this VPD page is implemented only for ATA devices,
+ * this function always returns false for SCSI devices.
+ */
+bool sas_ata_ncq_prio_supported(struct scsi_device *sdev)
+{
+ struct scsi_vpd *vpd;
+ bool ncq_prio_supported = false;
+
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg89);
+ if (vpd && vpd->len >= 214)
+ ncq_prio_supported = (vpd->data[213] >> 4) & 1;
+ rcu_read_unlock();
+
+ return ncq_prio_supported;
+}
+EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported);
+
/*
* SAS Phy attributes
*/
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 525f48c97f5e..979795dad62b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -63,6 +63,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
@@ -3230,6 +3231,9 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
struct scsi_mode_data data;
int res;
+ if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS)
+ return;
+
res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a,
/*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
sdkp->max_retries, &data, &sshdr);
@@ -3662,16 +3666,23 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
static void sd_read_block_zero(struct scsi_disk *sdkp)
{
- unsigned int buf_len = sdkp->device->sector_size;
- char *buffer, cmd[10] = { };
+ struct scsi_device *sdev = sdkp->device;
+ unsigned int buf_len = sdev->sector_size;
+ u8 *buffer, cmd[16] = { };
buffer = kmalloc(buf_len, GFP_KERNEL);
if (!buffer)
return;
- cmd[0] = READ_10;
- put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
- put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+ if (sdev->use_16_for_rw) {
+ cmd[0] = READ_16;
+ put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */
+ } else {
+ cmd[0] = READ_10;
+ put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+ }
scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
SD_TIMEOUT, sdkp->max_retries, NULL);
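For readers unfamiliar with the CDB layout being built above: READ(10) carries a 32-bit LBA at bytes 2-5 and a 16-bit transfer length at bytes 7-8, while READ(16) carries a 64-bit LBA at bytes 2-9 and a 32-bit length at bytes 10-13. A standalone sketch of the same packing, with local helpers standing in for put_unaligned_be{16,32,64}() (command layout only; nothing is sent to a device):

#include <stdint.h>
#include <stdio.h>

static void put_be16(uint16_t v, uint8_t *p)
{
	p[0] = v >> 8; p[1] = v;
}

static void put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void put_be64(uint64_t v, uint8_t *p)
{
	put_be32(v >> 32, p);
	put_be32(v & 0xffffffff, p + 4);
}

int main(void)
{
	uint8_t cmd[16] = { 0 };
	int use_16 = 1, i;

	if (use_16) {
		cmd[0] = 0x88;			/* READ_16 */
		put_be64(0, &cmd[2]);		/* logical block address 0 */
		put_be32(1, &cmd[10]);		/* transfer 1 logical block */
	} else {
		cmd[0] = 0x28;			/* READ_10 */
		put_be32(0, &cmd[2]);		/* logical block address 0 */
		put_be16(1, &cmd[7]);		/* transfer 1 logical block */
	}

	for (i = 0; i < 16; i++)
		printf("%02x ", cmd[i]);
	printf("\n");
	return 0;
}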
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index b6bfd6729df3..d27667283846 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -127,8 +127,8 @@ static void tegra_fuse_print_sku_info(struct tegra_sku_info *tegra_sku_info)
static int tegra_fuse_add_lookups(struct tegra_fuse *fuse)
{
- fuse->lookups = kmemdup_array(fuse->soc->lookups, sizeof(*fuse->lookups),
- fuse->soc->num_lookups, GFP_KERNEL);
+ fuse->lookups = kmemdup_array(fuse->soc->lookups, fuse->soc->num_lookups,
+ sizeof(*fuse->lookups), GFP_KERNEL);
if (!fuse->lookups)
return -ENOMEM;
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 20d94bcfc9b4..795e223f7e5c 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -571,6 +571,9 @@ static int sdw_master_read_amd_prop(struct sdw_bus *bus)
amd_manager->wake_en_mask = wake_en_mask;
fwnode_property_read_u32(link, "amd-sdw-power-mode", &power_mode_mask);
amd_manager->power_mode_mask = power_mode_mask;
+
+ fwnode_handle_put(link);
+
return 0;
}
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index 17cf27e6ea73..18517121cc89 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -155,8 +155,10 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;
intel_prop = devm_kzalloc(bus->dev, sizeof(*intel_prop), GFP_KERNEL);
- if (!intel_prop)
+ if (!intel_prop) {
+ fwnode_handle_put(link);
return -ENOMEM;
+ }
/* initialize with hardware defaults, in case the properties are not found */
intel_prop->doaise = 0x1;
@@ -184,6 +186,8 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
intel_prop->dodse,
intel_prop->dods);
+ fwnode_handle_put(link);
+
return 0;
}
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
index 55a9c51c84c1..e5d9df26d4dc 100644
--- a/drivers/soundwire/mipi_disco.c
+++ b/drivers/soundwire/mipi_disco.c
@@ -66,8 +66,10 @@ int sdw_master_read_prop(struct sdw_bus *bus)
prop->clk_freq = devm_kcalloc(bus->dev, prop->num_clk_freq,
sizeof(*prop->clk_freq),
GFP_KERNEL);
- if (!prop->clk_freq)
+ if (!prop->clk_freq) {
+ fwnode_handle_put(link);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(link,
"mipi-sdw-clock-frequencies-supported",
@@ -92,8 +94,10 @@ int sdw_master_read_prop(struct sdw_bus *bus)
prop->clk_gears = devm_kcalloc(bus->dev, prop->num_clk_gears,
sizeof(*prop->clk_gears),
GFP_KERNEL);
- if (!prop->clk_gears)
+ if (!prop->clk_gears) {
+ fwnode_handle_put(link);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(link,
"mipi-sdw-supported-clock-gears",
@@ -116,6 +120,8 @@ int sdw_master_read_prop(struct sdw_bus *bus)
fwnode_property_read_u32(link, "mipi-sdw-command-error-threshold",
&prop->err_threshold);
+ fwnode_handle_put(link);
+
return 0;
}
EXPORT_SYMBOL(sdw_master_read_prop);
@@ -197,8 +203,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
dpn[i].num_words,
sizeof(*dpn[i].words),
GFP_KERNEL);
- if (!dpn[i].words)
+ if (!dpn[i].words) {
+ fwnode_handle_put(node);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(node,
"mipi-sdw-port-wordlength-configs",
@@ -236,8 +244,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
dpn[i].num_channels,
sizeof(*dpn[i].channels),
GFP_KERNEL);
- if (!dpn[i].channels)
+ if (!dpn[i].channels) {
+ fwnode_handle_put(node);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(node,
"mipi-sdw-channel-number-list",
@@ -251,8 +261,10 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
dpn[i].num_ch_combinations,
sizeof(*dpn[i].ch_combinations),
GFP_KERNEL);
- if (!dpn[i].ch_combinations)
+ if (!dpn[i].ch_combinations) {
+ fwnode_handle_put(node);
return -ENOMEM;
+ }
fwnode_property_read_u32_array(node,
"mipi-sdw-channel-combination-list",
@@ -274,6 +286,8 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
/* TODO: Read audio mode */
+ fwnode_handle_put(node);
+
i++;
}
@@ -348,10 +362,14 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
prop->dp0_prop = devm_kzalloc(&slave->dev,
sizeof(*prop->dp0_prop),
GFP_KERNEL);
- if (!prop->dp0_prop)
+ if (!prop->dp0_prop) {
+ fwnode_handle_put(port);
return -ENOMEM;
+ }
sdw_slave_read_dp0(slave, port, prop->dp0_prop);
+
+ fwnode_handle_put(port);
}
/*
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index 9d747ea69926..8b618ef0f711 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -26,7 +26,7 @@
#include <linux/units.h>
#define CS42L43_FIFO_SIZE 16
-#define CS42L43_SPI_ROOT_HZ (40 * HZ_PER_MHZ)
+#define CS42L43_SPI_ROOT_HZ 49152000
#define CS42L43_SPI_MAX_LENGTH 65532
enum cs42l43_spi_cmd {
@@ -54,7 +54,7 @@ static const struct software_node ampr = {
static struct spi_board_info ampl_info = {
.modalias = "cs35l56",
- .max_speed_hz = 20 * HZ_PER_MHZ,
+ .max_speed_hz = 11 * HZ_PER_MHZ,
.chip_select = 0,
.mode = SPI_MODE_0,
.swnode = &ampl,
@@ -62,7 +62,7 @@ static struct spi_board_info ampl_info = {
static struct spi_board_info ampr_info = {
.modalias = "cs35l56",
- .max_speed_hz = 20 * HZ_PER_MHZ,
+ .max_speed_hz = 11 * HZ_PER_MHZ,
.chip_select = 1,
.mode = SPI_MODE_0,
.swnode = &ampr,
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index f4006c82f867..33164ebdb583 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -660,18 +660,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
ctrl |= (spi_imx->target_burst * 8 - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
else {
- if (spi_imx->usedma) {
- ctrl |= (spi_imx->bits_per_word - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
- } else {
- if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST)
- ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
- else
- ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
- BITS_PER_BYTE) * spi_imx->bits_per_word - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
- }
+ ctrl |= (spi_imx->bits_per_word - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
}
/* set clock speed */
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index f1e922fd362a..955c920c4b63 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -349,7 +349,7 @@ static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi)
static int stm32_qspi_get_mode(u8 buswidth)
{
- if (buswidth == 4)
+ if (buswidth >= 4)
return CCR_BUSWIDTH_4;
return buswidth;
@@ -653,9 +653,7 @@ static int stm32_qspi_setup(struct spi_device *spi)
return -EINVAL;
mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL);
- if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) ||
- ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) &&
- gpiod_count(qspi->dev, "cs") == -ENOENT)) {
+ if (mode && gpiod_count(qspi->dev, "cs") == -ENOENT) {
dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n");
dev_err(qspi->dev, "configuration not supported\n");
@@ -676,10 +674,10 @@ static int stm32_qspi_setup(struct spi_device *spi)
qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
/*
- * Dual flash mode is only enable in case SPI_TX_OCTAL and SPI_TX_OCTAL
- * are both set in spi->mode and "cs-gpios" properties is found in DT
+ * Dual flash mode is only enabled in case SPI_TX_OCTAL or SPI_RX_OCTAL
+ * is set in spi->mode and the "cs-gpios" property is found in DT
*/
- if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) {
+ if (mode) {
qspi->cr_reg |= CR_DFM;
dev_dbg(qspi->dev, "Dual flash mode enable");
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 9bc9fd10d538..fc13fa192189 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -689,10 +689,12 @@ static int __spi_add_device(struct spi_device *spi)
* Make sure that multiple logical CS doesn't map to the same physical CS.
* For example, spi->chip_select[0] != spi->chip_select[1] and so on.
*/
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
- status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
- if (status)
- return status;
+ if (!spi_controller_is_target(ctlr)) {
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
+ if (status)
+ return status;
+ }
}
/* Set the bus ID string */
@@ -4156,7 +4158,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return -EINVAL;
if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
xfer->tx_nbits != SPI_NBITS_DUAL &&
- xfer->tx_nbits != SPI_NBITS_QUAD)
+ xfer->tx_nbits != SPI_NBITS_QUAD &&
+ xfer->tx_nbits != SPI_NBITS_OCTAL)
return -EINVAL;
if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
@@ -4171,7 +4174,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return -EINVAL;
if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
xfer->rx_nbits != SPI_NBITS_DUAL &&
- xfer->rx_nbits != SPI_NBITS_QUAD)
+ xfer->rx_nbits != SPI_NBITS_QUAD &&
+ xfer->rx_nbits != SPI_NBITS_OCTAL)
return -EINVAL;
if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 297af1d80b12..5f518e5a9273 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -707,7 +707,7 @@ int vchiq_initialise(struct vchiq_state *state, struct vchiq_instance **instance
* block forever.
*/
for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
- if (state)
+ if (vchiq_remote_initialised(state))
break;
usleep_range(500, 600);
}
@@ -1202,7 +1202,7 @@ void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f
{
int i;
- if (!state)
+ if (!vchiq_remote_initialised(state))
return;
/*
@@ -1759,7 +1759,7 @@ static int vchiq_probe(struct platform_device *pdev)
if (err)
goto failed_platform_init;
- vchiq_debugfs_init();
+ vchiq_debugfs_init(&mgmt->state);
dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
VCHIQ_VERSION, VCHIQ_VERSION_MIN);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 8af209e34fb2..382ec08f6a14 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -413,6 +413,11 @@ struct vchiq_state {
struct opaque_platform_state *platform_state;
};
+static inline bool vchiq_remote_initialised(const struct vchiq_state *state)
+{
+ return state->remote && state->remote->initialised;
+}
+
struct bulk_waiter {
struct vchiq_bulk *bulk;
struct completion event;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index 54e7bf029d9a..d5f7f61c5626 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -42,9 +42,9 @@ static int debugfs_trace_show(struct seq_file *f, void *offset)
static int vchiq_dump_show(struct seq_file *f, void *offset)
{
- struct vchiq_instance *instance = f->private;
+ struct vchiq_state *state = f->private;
- vchiq_dump_state(f, instance->state);
+ vchiq_dump_state(f, state);
return 0;
}
@@ -121,12 +121,12 @@ void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
debugfs_remove_recursive(node->dentry);
}
-void vchiq_debugfs_init(void)
+void vchiq_debugfs_init(struct vchiq_state *state)
{
vchiq_dbg_dir = debugfs_create_dir("vchiq", NULL);
vchiq_dbg_clients = debugfs_create_dir("clients", vchiq_dbg_dir);
- debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, NULL,
+ debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, state,
&vchiq_dump_fops);
}
@@ -138,7 +138,7 @@ void vchiq_debugfs_deinit(void)
#else /* CONFIG_DEBUG_FS */
-void vchiq_debugfs_init(void)
+void vchiq_debugfs_init(struct vchiq_state *state)
{
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
index e9bf055a4ca9..fabffd81b1ec 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
@@ -10,7 +10,7 @@ struct vchiq_debugfs_node {
struct dentry *dentry;
};
-void vchiq_debugfs_init(void);
+void vchiq_debugfs_init(struct vchiq_state *state);
void vchiq_debugfs_deinit(void);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index 3c63347d2d08..430f2ed2ccd3 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -1170,6 +1170,11 @@ static int vchiq_open(struct inode *inode, struct file *file)
dev_dbg(state->dev, "arm: vchiq open\n");
+ if (!vchiq_remote_initialised(state)) {
+ dev_dbg(state->dev, "arm: vchiq has no connection to VideoCore\n");
+ return -ENOTCONN;
+ }
+
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
if (!instance)
return -ENOMEM;
@@ -1200,7 +1205,7 @@ static int vchiq_release(struct inode *inode, struct file *file)
dev_dbg(state->dev, "arm: instance=%p\n", instance);
- if (!state) {
+ if (!vchiq_remote_initialised(state)) {
ret = -EPERM;
goto out;
}
diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
index e0fdc497bfcc..fd5527188cf9 100644
--- a/drivers/thermal/gov_step_wise.c
+++ b/drivers/thermal/gov_step_wise.c
@@ -55,7 +55,11 @@ static unsigned long get_target_state(struct thermal_instance *instance,
if (cur_state <= instance->lower)
return THERMAL_NO_TARGET;
- return clamp(cur_state - 1, instance->lower, instance->upper);
+ /*
+ * If 'throttle' is false, no mitigation is necessary, so
+ * request the lower state for this instance.
+ */
+ return instance->lower;
}
return instance->target;
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 14e34eabc419..4a1bfebb1b8e 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -150,7 +150,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
{
struct proc_thermal_pci *pci_info = devid;
struct proc_thermal_device *proc_priv;
- int ret = IRQ_HANDLED;
+ int ret = IRQ_NONE;
u32 status;
proc_priv = pci_info->proc_priv;
@@ -175,6 +175,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
/* Disable enable interrupt flag */
proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
pkg_thermal_schedule_work(&pci_info->work);
+ ret = IRQ_HANDLED;
}
pci_write_config_byte(pci_info->pdev, 0xdc, 0x01);
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index 0bb3a495b56e..819ed0110f3e 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -769,7 +769,11 @@ static int lvts_golden_temp_init(struct device *dev, u8 *calib,
*/
gt = (((u32 *)calib)[0] >> lvts_data->gt_calib_bit_offset) & 0xff;
- if (gt && gt < LVTS_GOLDEN_TEMP_MAX)
+ /* A zero value for gt means that device has invalid efuse data */
+ if (!gt)
+ return -ENODATA;
+
+ if (gt < LVTS_GOLDEN_TEMP_MAX)
golden_temp = gt;
golden_temp_offset = golden_temp * 500 + lvts_data->temp_offset;
@@ -1458,7 +1462,6 @@ static const struct lvts_ctrl_data mt8188_lvts_mcu_data_ctrl[] = {
},
VALID_SENSOR_MAP(1, 1, 1, 1),
.offset = 0x0,
- .mode = LVTS_MSR_FILTERED_MODE,
},
{
.lvts_sensor = {
@@ -1469,7 +1472,6 @@ static const struct lvts_ctrl_data mt8188_lvts_mcu_data_ctrl[] = {
},
VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x100,
- .mode = LVTS_MSR_FILTERED_MODE,
}
};
@@ -1483,7 +1485,6 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = {
},
VALID_SENSOR_MAP(0, 1, 0, 0),
.offset = 0x0,
- .mode = LVTS_MSR_FILTERED_MODE,
},
{
.lvts_sensor = {
@@ -1496,7 +1497,6 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = {
},
VALID_SENSOR_MAP(1, 1, 1, 0),
.offset = 0x100,
- .mode = LVTS_MSR_FILTERED_MODE,
},
{
.lvts_sensor = {
@@ -1507,7 +1507,6 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = {
},
VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x200,
- .mode = LVTS_MSR_FILTERED_MODE,
},
{
.lvts_sensor = {
@@ -1518,7 +1517,6 @@ static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = {
},
VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x300,
- .mode = LVTS_MSR_FILTERED_MODE,
}
};
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 30567b499455..1b0ab2790860 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -999,9 +999,17 @@ __thermal_cooling_device_register(struct device_node *np,
if (ret)
goto out_cdev_type;
+ /*
+ * The cooling device's current state is only needed for debug
+ * initialization below, so a failure to get it does not cause
+ * the entire cooling device initialization to fail. However,
+ * the debug will not work for the device if its initial state
+ * cannot be determined and drivers are responsible for ensuring
+ * that this will not happen.
+ */
ret = cdev->ops->get_cur_state(cdev, &current_state);
if (ret)
- goto out_cdev_type;
+ current_state = ULONG_MAX;
thermal_cooling_device_setup_sysfs(cdev);
@@ -1016,7 +1024,8 @@ __thermal_cooling_device_register(struct device_node *np,
return ERR_PTR(ret);
}
- thermal_debug_cdev_add(cdev, current_state);
+ if (current_state <= cdev->max_state)
+ thermal_debug_cdev_add(cdev, current_state);
/* Add 'this' new cdev to the global cdev list */
mutex_lock(&thermal_list_lock);
@@ -1397,6 +1406,7 @@ thermal_zone_device_register_with_trips(const char *type,
ida_init(&tz->ida);
mutex_init(&tz->lock);
init_completion(&tz->removal);
+ init_completion(&tz->resume);
id = ida_alloc(&thermal_tz_ida, GFP_KERNEL);
if (id < 0) {
result = id;
@@ -1642,6 +1652,9 @@ static void thermal_zone_device_resume(struct work_struct *work)
thermal_zone_device_init(tz);
__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+ complete(&tz->resume);
+ tz->resuming = false;
+
mutex_unlock(&tz->lock);
}
@@ -1659,6 +1672,20 @@ static int thermal_pm_notify(struct notifier_block *nb,
list_for_each_entry(tz, &thermal_tz_list, node) {
mutex_lock(&tz->lock);
+ if (tz->resuming) {
+ /*
+ * thermal_zone_device_resume() queued up for
+ * this zone has not acquired the lock yet, so
+ * release it to let the function run and wait
+ * until it has done the work.
+ */
+ mutex_unlock(&tz->lock);
+
+ wait_for_completion(&tz->resume);
+
+ mutex_lock(&tz->lock);
+ }
+
tz->suspended = true;
mutex_unlock(&tz->lock);
@@ -1676,6 +1703,9 @@ static int thermal_pm_notify(struct notifier_block *nb,
cancel_delayed_work(&tz->poll_queue);
+ reinit_completion(&tz->resume);
+ tz->resuming = true;
+
/*
* Replace the work function with the resume one, which
* will restore the original work function and schedule
@@ -1700,6 +1730,12 @@ static int thermal_pm_notify(struct notifier_block *nb,
static struct notifier_block thermal_pm_nb = {
.notifier_call = thermal_pm_notify,
+ /*
+ * Run at the lowest priority to avoid interference between the thermal
+ * zone resume work items spawned by thermal_pm_notify() and the other
+ * PM notifiers.
+ */
+ .priority = INT_MIN,
};
static int __init thermal_init(void)
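The suspend-side change above implements a simple handshake: if a resume work item is still pending for the zone (tz->resuming), drop the zone lock, wait on the tz->resume completion, then retake the lock before marking the zone suspended. A rough user-space analog of that pattern, using a condition variable in place of the kernel completion (all names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct zone {
	pthread_mutex_t lock;
	pthread_cond_t resume_done;	/* plays the role of tz->resume */
	bool resuming;			/* plays the role of tz->resuming */
	bool suspended;
};

static void *resume_worker(void *arg)
{
	struct zone *z = arg;

	sleep(1);			/* pretend the zone update takes a while */
	pthread_mutex_lock(&z->lock);
	z->resuming = false;
	pthread_cond_signal(&z->resume_done);	/* complete(&tz->resume) */
	pthread_mutex_unlock(&z->lock);
	return NULL;
}

static void pm_suspend(struct zone *z)
{
	pthread_mutex_lock(&z->lock);
	while (z->resuming)		/* wait_for_completion(&tz->resume) */
		pthread_cond_wait(&z->resume_done, &z->lock);
	z->suspended = true;
	pthread_mutex_unlock(&z->lock);
	printf("zone suspended only after resume finished\n");
}

int main(void)
{
	struct zone z = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.resume_done = PTHREAD_COND_INITIALIZER,
		.resuming = true,
	};
	pthread_t t;

	pthread_create(&t, NULL, resume_worker, &z);
	pm_suspend(&z);
	pthread_join(&t, NULL);
	return 0;
}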
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 20e7b45673d6..66f67e54e0c8 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -55,6 +55,7 @@ struct thermal_governor {
* @type: the thermal zone device type
* @device: &struct device for this thermal zone
* @removal: removal completion
+ * @resume: resume completion
* @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
* @trip_type_attrs: attributes for trip points for sysfs: trip type
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
@@ -89,6 +90,7 @@ struct thermal_governor {
* @poll_queue: delayed work for polling
* @notify_event: Last notification event
* @suspended: thermal zone suspend indicator
+ * @resuming: indicates whether or not thermal zone resume is in progress
* @trips: array of struct thermal_trip objects
*/
struct thermal_zone_device {
@@ -96,6 +98,7 @@ struct thermal_zone_device {
char type[THERMAL_NAME_LENGTH];
struct device device;
struct completion removal;
+ struct completion resume;
struct attribute_group trips_attribute_group;
struct thermal_attr *trip_temp_attrs;
struct thermal_attr *trip_type_attrs;
@@ -123,6 +126,7 @@ struct thermal_zone_device {
struct delayed_work poll_queue;
enum thermal_notify_event notify_event;
bool suspended;
+ bool resuming;
#ifdef CONFIG_THERMAL_DEBUGFS
struct thermal_debugfs *debugfs;
#endif
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index 193e9dfc983b..70b52aac3d97 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -943,8 +943,9 @@ static void margining_port_init(struct tb_port *port)
debugfs_create_file("run", 0600, dir, port, &margining_run_fops);
debugfs_create_file("results", 0600, dir, port, &margining_results_fops);
debugfs_create_file("test", 0600, dir, port, &margining_test_fops);
- if (independent_voltage_margins(usb4) ||
- (supports_time(usb4) && independent_time_margins(usb4)))
+ if (independent_voltage_margins(usb4) == USB4_MARGIN_CAP_0_VOLTAGE_HL ||
+ (supports_time(usb4) &&
+ independent_time_margins(usb4) == USB4_MARGIN_CAP_1_TIME_LR))
debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops);
}
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 458bb1280ebf..5b97e420a95f 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -288,7 +288,7 @@ struct mxser_board {
enum mxser_must_hwid must_hwid;
speed_t max_baud;
- struct mxser_port ports[] __counted_by(nports);
+ struct mxser_port ports[] /* __counted_by(nports) */;
};
static DECLARE_BITMAP(mxser_boards, MXSER_BOARDS);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index f252d0b5a434..5e9ca4376d68 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1619,15 +1619,25 @@ static void __receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp,
else if (ldata->raw || (L_EXTPROC(tty) && !preops))
n_tty_receive_buf_raw(tty, cp, fp, count);
else if (tty->closing && !L_EXTPROC(tty)) {
- if (la_count > 0)
+ if (la_count > 0) {
n_tty_receive_buf_closing(tty, cp, fp, la_count, true);
- if (count > la_count)
- n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false);
+ cp += la_count;
+ if (fp)
+ fp += la_count;
+ count -= la_count;
+ }
+ if (count > 0)
+ n_tty_receive_buf_closing(tty, cp, fp, count, false);
} else {
- if (la_count > 0)
+ if (la_count > 0) {
n_tty_receive_buf_standard(tty, cp, fp, la_count, true);
- if (count > la_count)
- n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false);
+ cp += la_count;
+ if (fp)
+ fp += la_count;
+ count -= la_count;
+ }
+ if (count > 0)
+ n_tty_receive_buf_standard(tty, cp, fp, count, false);
flush_echoes(tty);
if (tty->ops->flush_chars)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index ff15022369e4..b0adafc44747 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -15,7 +15,6 @@
*/
#include <linux/acpi.h>
-#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
@@ -42,8 +41,6 @@
#include <asm/irq.h>
-#include "../serial_base.h" /* For serial_base_add_isa_preferred_console() */
-
#include "8250.h"
/*
@@ -563,8 +560,6 @@ static void __init serial8250_isa_init_ports(void)
port->irqflags |= irqflag;
if (serial8250_isa_config != NULL)
serial8250_isa_config(i, &up->port, &up->capabilities);
-
- serial_base_add_isa_preferred_console(serial8250_reg.dev_name, i);
}
}
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index ba9f4dc4e71d..fb809e32c6ae 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -55,6 +55,34 @@
#define DW_UART_QUIRK_SKIP_SET_RATE BIT(2)
#define DW_UART_QUIRK_IS_DMA_FC BIT(3)
#define DW_UART_QUIRK_APMC0D08 BIT(4)
+#define DW_UART_QUIRK_CPR_VALUE BIT(5)
+
+struct dw8250_platform_data {
+ u8 usr_reg;
+ u32 cpr_value;
+ unsigned int quirks;
+};
+
+struct dw8250_data {
+ struct dw8250_port_data data;
+ const struct dw8250_platform_data *pdata;
+
+ int msr_mask_on;
+ int msr_mask_off;
+ struct clk *clk;
+ struct clk *pclk;
+ struct notifier_block clk_notifier;
+ struct work_struct clk_work;
+ struct reset_control *rst;
+
+ unsigned int skip_autocfg:1;
+ unsigned int uart_16550_compatible:1;
+};
+
+static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
+{
+ return container_of(data, struct dw8250_data, data);
+}
static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb)
{
@@ -432,6 +460,10 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p)
static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
{
unsigned int quirks = data->pdata ? data->pdata->quirks : 0;
+ u32 cpr_value = data->pdata ? data->pdata->cpr_value : 0;
+
+ if (quirks & DW_UART_QUIRK_CPR_VALUE)
+ data->data.cpr_value = cpr_value;
#ifdef CONFIG_64BIT
if (quirks & DW_UART_QUIRK_OCTEON) {
@@ -714,8 +746,8 @@ static const struct dw8250_platform_data dw8250_armada_38x_data = {
static const struct dw8250_platform_data dw8250_renesas_rzn1_data = {
.usr_reg = DW_UART_USR,
- .cpr_val = 0x00012f32,
- .quirks = DW_UART_QUIRK_IS_DMA_FC,
+ .cpr_value = 0x00012f32,
+ .quirks = DW_UART_QUIRK_CPR_VALUE | DW_UART_QUIRK_IS_DMA_FC,
};
static const struct dw8250_platform_data dw8250_starfive_jh7100_data = {
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index 3e33ddf7bc80..5a2520943dfd 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -242,7 +242,6 @@ static const struct serial_rs485 dw8250_rs485_supported = {
void dw8250_setup_port(struct uart_port *p)
{
struct dw8250_port_data *pd = p->private_data;
- struct dw8250_data *data = to_dw8250_data(pd);
struct uart_8250_port *up = up_to_u8250p(p);
u32 reg, old_dlf;
@@ -278,7 +277,7 @@ void dw8250_setup_port(struct uart_port *p)
reg = dw8250_readl_ext(p, DW_UART_CPR);
if (!reg) {
- reg = data->pdata->cpr_val;
+ reg = pd->cpr_value;
dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg);
}
if (!reg)
diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h
index f13e91f2cace..7dd2a8e7b780 100644
--- a/drivers/tty/serial/8250/8250_dwlib.h
+++ b/drivers/tty/serial/8250/8250_dwlib.h
@@ -2,15 +2,10 @@
/* Synopsys DesignWare 8250 library header file. */
#include <linux/io.h>
-#include <linux/notifier.h>
#include <linux/types.h>
-#include <linux/workqueue.h>
#include "8250.h"
-struct clk;
-struct reset_control;
-
struct dw8250_port_data {
/* Port properties */
int line;
@@ -19,42 +14,16 @@ struct dw8250_port_data {
struct uart_8250_dma dma;
/* Hardware configuration */
+ u32 cpr_value;
u8 dlf_size;
/* RS485 variables */
bool hw_rs485_support;
};
-struct dw8250_platform_data {
- u8 usr_reg;
- u32 cpr_val;
- unsigned int quirks;
-};
-
-struct dw8250_data {
- struct dw8250_port_data data;
- const struct dw8250_platform_data *pdata;
-
- int msr_mask_on;
- int msr_mask_off;
- struct clk *clk;
- struct clk *pclk;
- struct notifier_block clk_notifier;
- struct work_struct clk_work;
- struct reset_control *rst;
-
- unsigned int skip_autocfg:1;
- unsigned int uart_16550_compatible:1;
-};
-
void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, const struct ktermios *old);
void dw8250_setup_port(struct uart_port *p);
-static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
-{
- return container_of(data, struct dw8250_data, data);
-}
-
static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
{
if (p->iotype == UPIO_MEM32BE)
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 170639d12b2a..ddac0a13cf84 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -115,6 +115,10 @@
/* RX FIFO occupancy indicator */
#define UART_OMAP_RX_LVL 0x19
+/* Timeout low and High */
+#define UART_OMAP_TO_L 0x26
+#define UART_OMAP_TO_H 0x27
+
/*
* Copy of the genpd flags for the console.
* Only used if console suspend is disabled
@@ -663,13 +667,24 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
/*
* On K3 SoCs, it is observed that RX TIMEOUT is signalled after
- * FIFO has been drained, in which case a dummy read of RX FIFO
- * is required to clear RX TIMEOUT condition.
+ * the FIFO has been drained, or is signalled erroneously.
+ * So apply the workaround for Errata i2310 as described in
+ * https://www.ti.com/lit/pdf/sprz536
*/
if (priv->habit & UART_RX_TIMEOUT_QUIRK &&
- (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT &&
- serial_port_in(port, UART_OMAP_RX_LVL) == 0) {
- serial_port_in(port, UART_RX);
+ (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT) {
+ unsigned char efr2, timeout_h, timeout_l;
+
+ efr2 = serial_in(up, UART_OMAP_EFR2);
+ timeout_h = serial_in(up, UART_OMAP_TO_H);
+ timeout_l = serial_in(up, UART_OMAP_TO_L);
+ serial_out(up, UART_OMAP_TO_H, 0xFF);
+ serial_out(up, UART_OMAP_TO_L, 0xFF);
+ serial_out(up, UART_OMAP_EFR2, UART_OMAP_EFR2_TIMEOUT_BEHAVE);
+ serial_in(up, UART_IIR);
+ serial_out(up, UART_OMAP_EFR2, efr2);
+ serial_out(up, UART_OMAP_TO_H, timeout_h);
+ serial_out(up, UART_OMAP_TO_L, timeout_l);
}
/* Stop processing interrupts on input overrun */
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 40af74b55933..e1d7aa2fa347 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1985,6 +1985,17 @@ enum {
MOXA_SUPP_RS485 = BIT(2),
};
+static unsigned short moxa_get_nports(unsigned short device)
+{
+ switch (device) {
+ case PCI_DEVICE_ID_MOXA_CP116E_A_A:
+ case PCI_DEVICE_ID_MOXA_CP116E_A_B:
+ return 8;
+ }
+
+ return FIELD_GET(0x00F0, device);
+}
+
static bool pci_moxa_is_mini_pcie(unsigned short device)
{
if (device == PCI_DEVICE_ID_MOXA_CP102N ||
@@ -2038,7 +2049,7 @@ static int pci_moxa_init(struct pci_dev *dev)
{
unsigned short device = dev->device;
resource_size_t iobar_addr = pci_resource_start(dev, 2);
- unsigned int num_ports = (device & 0x00F0) >> 4, i;
+ unsigned int i, num_ports = moxa_get_nports(device);
u8 val, init_mode = MOXA_RS232;
if (!(pci_moxa_supported_rs(dev) & MOXA_SUPP_RS232)) {
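The FIELD_GET(0x00F0, device) fallback above simply decodes the port count from bits [7:4] of the PCI device ID; the CP116E IDs are special-cased to 8 ports because they do not follow that encoding. A tiny sketch of the decode (the device ID below is hypothetical):

#include <stdio.h>

/* Port count lives in bits [7:4] of most Moxa device IDs. */
static unsigned int moxa_nports(unsigned short device)
{
	return (device & 0x00F0) >> 4;
}

int main(void)
{
	unsigned short device = 0x1141;	/* hypothetical 4-port device ID */

	printf("device 0x%04x -> %u ports\n", device, moxa_nports(device));
	return 0;
}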
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index f1a51b00b1b9..ba96fa913e7f 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -125,6 +125,7 @@ static int serial_pxa_probe(struct platform_device *pdev)
uart.port.iotype = UPIO_MEM32;
uart.port.regshift = 2;
uart.port.fifosize = 64;
+ uart.tx_loadsz = 32;
uart.dl_write = serial_pxa_dl_write;
ret = serial8250_register_8250_port(&uart);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 4fdd7857ef4d..28e4beeabf8f 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1023,8 +1023,9 @@ config SERIAL_SCCNXP_CONSOLE
help
Support for console on SCCNXP serial ports.
-config SERIAL_SC16IS7XX_CORE
+config SERIAL_SC16IS7XX
tristate "NXP SC16IS7xx UART support"
+ depends on SPI_MASTER || I2C
select SERIAL_CORE
select SERIAL_SC16IS7XX_SPI if SPI_MASTER
select SERIAL_SC16IS7XX_I2C if I2C
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index faa45f2b8bb0..6ff74f0a9530 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -75,7 +75,7 @@ obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o
obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o
-obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o
+obj-$(CONFIG_SERIAL_SC16IS7XX) += sc16is7xx.o
obj-$(CONFIG_SERIAL_SC16IS7XX_SPI) += sc16is7xx_spi.o
obj-$(CONFIG_SERIAL_SC16IS7XX_I2C) += sc16is7xx_i2c.o
obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 34801a6f300b..b88cc28c94e3 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -308,8 +308,8 @@ static void bcm_uart_do_tx(struct uart_port *port)
val = bcm_uart_readl(port, UART_MCTL_REG);
val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT;
-
- pending = uart_port_tx_limited(port, ch, port->fifosize - val,
+ pending = uart_port_tx_limited_flags(port, ch, UART_TX_NOSTOP,
+ port->fifosize - val,
true,
bcm_uart_writel(port, ch, UART_FIFO_REG),
({}));
@@ -320,6 +320,9 @@ static void bcm_uart_do_tx(struct uart_port *port)
val = bcm_uart_readl(port, UART_IR_REG);
val &= ~UART_TX_INT_MASK;
bcm_uart_writel(port, val, UART_IR_REG);
+
+ if (uart_tx_stopped(port))
+ bcm_uart_stop_tx(port);
}
/*
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 2eb22594960f..f4f40c9373c2 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1952,8 +1952,10 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
/* Make sure Rx is enabled in case Tx is active with Rx disabled */
if (!(rs485conf->flags & SER_RS485_ENABLED) ||
- rs485conf->flags & SER_RS485_RX_DURING_TX)
+ rs485conf->flags & SER_RS485_RX_DURING_TX) {
+ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
imx_uart_start_rx(port);
+ }
return 0;
}
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index b0604d6da025..58858dd352c5 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -462,7 +462,7 @@ static const struct uart_ops mcf_uart_ops = {
.verify_port = mcf_verify_port,
};
-static struct mcf_uart mcf_ports[4];
+static struct mcf_uart mcf_ports[10];
#define MCF_MAXPORTS ARRAY_SIZE(mcf_ports)
diff --git a/drivers/tty/serial/serial_base.h b/drivers/tty/serial/serial_base.h
index 743a72ac34f3..b6c38d2edfd4 100644
--- a/drivers/tty/serial/serial_base.h
+++ b/drivers/tty/serial/serial_base.h
@@ -49,33 +49,3 @@ void serial_ctrl_unregister_port(struct uart_driver *drv, struct uart_port *port
int serial_core_register_port(struct uart_driver *drv, struct uart_port *port);
void serial_core_unregister_port(struct uart_driver *drv, struct uart_port *port);
-
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
-
-int serial_base_add_preferred_console(struct uart_driver *drv,
- struct uart_port *port);
-
-#else
-
-static inline
-int serial_base_add_preferred_console(struct uart_driver *drv,
- struct uart_port *port)
-{
- return 0;
-}
-
-#endif
-
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-
-int serial_base_add_isa_preferred_console(const char *name, int idx);
-
-#else
-
-static inline
-int serial_base_add_isa_preferred_console(const char *name, int idx)
-{
- return 0;
-}
-
-#endif
diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c
index 73c6ee540c83..4df2a4b10445 100644
--- a/drivers/tty/serial/serial_base_bus.c
+++ b/drivers/tty/serial/serial_base_bus.c
@@ -8,7 +8,6 @@
* The serial core bus manages the serial core controller instances.
*/
-#include <linux/cleanup.h>
#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/idr.h>
@@ -205,134 +204,6 @@ void serial_base_port_device_remove(struct serial_port_device *port_dev)
put_device(&port_dev->dev);
}
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
-
-static int serial_base_add_one_prefcon(const char *match, const char *dev_name,
- int port_id)
-{
- int ret;
-
- ret = add_preferred_console_match(match, dev_name, port_id);
- if (ret == -ENOENT)
- return 0;
-
- return ret;
-}
-
-#ifdef __sparc__
-
-/* Handle Sparc ttya and ttyb options as done in console_setup() */
-static int serial_base_add_sparc_console(const char *dev_name, int idx)
-{
- const char *name;
-
- switch (idx) {
- case 0:
- name = "ttya";
- break;
- case 1:
- name = "ttyb";
- break;
- default:
- return 0;
- }
-
- return serial_base_add_one_prefcon(name, dev_name, idx);
-}
-
-#else
-
-static inline int serial_base_add_sparc_console(const char *dev_name, int idx)
-{
- return 0;
-}
-
-#endif
-
-static int serial_base_add_prefcon(const char *name, int idx)
-{
- const char *char_match __free(kfree) = NULL;
- const char *nmbr_match __free(kfree) = NULL;
- int ret;
-
- /* Handle ttyS specific options */
- if (strstarts(name, "ttyS")) {
- /* No name, just a number */
- nmbr_match = kasprintf(GFP_KERNEL, "%i", idx);
- if (!nmbr_match)
- return -ENODEV;
-
- ret = serial_base_add_one_prefcon(nmbr_match, name, idx);
- if (ret)
- return ret;
-
- /* Sparc ttya and ttyb */
- ret = serial_base_add_sparc_console(name, idx);
- if (ret)
- return ret;
- }
-
- /* Handle the traditional character device name style console=ttyS0 */
- char_match = kasprintf(GFP_KERNEL, "%s%i", name, idx);
- if (!char_match)
- return -ENOMEM;
-
- return serial_base_add_one_prefcon(char_match, name, idx);
-}
-
-/**
- * serial_base_add_preferred_console - Adds a preferred console
- * @drv: Serial port device driver
- * @port: Serial port instance
- *
- * Tries to add a preferred console for a serial port if specified in the
- * kernel command line. Supports both the traditional character device such
- * as console=ttyS0, and a hardware addressing based console=DEVNAME:0.0
- * style name.
- *
- * Translates the kernel command line option using a hardware based addressing
- * console=DEVNAME:0.0 to the serial port character device such as ttyS0.
- * Cannot be called early for ISA ports, depends on struct device.
- *
- * Note that duplicates are ignored by add_preferred_console().
- *
- * Return: 0 on success, negative error code on failure.
- */
-int serial_base_add_preferred_console(struct uart_driver *drv,
- struct uart_port *port)
-{
- const char *port_match __free(kfree) = NULL;
- int ret;
-
- ret = serial_base_add_prefcon(drv->dev_name, port->line);
- if (ret)
- return ret;
-
- port_match = kasprintf(GFP_KERNEL, "%s:%i.%i", dev_name(port->dev),
- port->ctrl_id, port->port_id);
- if (!port_match)
- return -ENOMEM;
-
- /* Translate a hardware addressing style console=DEVNAME:0.0 */
- return serial_base_add_one_prefcon(port_match, drv->dev_name, port->line);
-}
-
-#endif
-
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-
-/*
- * Early ISA ports initialize the console before there is no struct device.
- * This should be only called from serial8250_isa_init_preferred_console(),
- * other callers are likely wrong and should rely on earlycon instead.
- */
-int serial_base_add_isa_preferred_console(const char *name, int idx)
-{
- return serial_base_add_prefcon(name, idx);
-}
-
-#endif
-
static int serial_base_init(void)
{
int ret;
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 2c1a0254d3f4..2a8006e3d687 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -622,7 +622,7 @@ static ssize_t uart_write(struct tty_struct *tty, const u8 *buf, size_t count)
return -EL3HLT;
port = uart_port_lock(state, flags);
- if (WARN_ON_ONCE(!state->port.xmit_buf)) {
+ if (!state->port.xmit_buf) {
uart_port_unlock(port, flags);
return 0;
}
@@ -3422,10 +3422,6 @@ int serial_core_register_port(struct uart_driver *drv, struct uart_port *port)
if (ret)
goto err_unregister_ctrl_dev;
- ret = serial_base_add_preferred_console(drv, port);
- if (ret)
- goto err_unregister_port_dev;
-
ret = serial_core_add_one_port(drv, port);
if (ret)
goto err_unregister_port_dev;
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 91a338d3cb34..d35f1d24156c 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -64,6 +64,13 @@ static int serial_port_runtime_suspend(struct device *dev)
if (port->flags & UPF_DEAD)
return 0;
+ /*
+ * Nothing to do on pm_runtime_force_suspend(), see
+ * DEFINE_RUNTIME_DEV_PM_OPS.
+ */
+ if (!pm_runtime_enabled(dev))
+ return 0;
+
uart_port_lock_irqsave(port, &flags);
if (!port_dev->tx_enabled) {
uart_port_unlock_irqrestore(port, flags);
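
The hunk above makes the runtime-suspend callback a no-op when runtime PM is disabled, which is the path pm_runtime_force_suspend() takes for drivers wired up through DEFINE_RUNTIME_DEV_PM_OPS. A minimal sketch of that guard, assuming a hypothetical example_* driver (not part of the patch):

    #include <linux/pm_runtime.h>

    static int example_runtime_suspend(struct device *dev)
    {
            /*
             * pm_runtime_force_suspend() can invoke this callback with
             * runtime PM already disabled; nothing needs saving then.
             */
            if (!pm_runtime_enabled(dev))
                    return 0;

            /* ... normal runtime-suspend work for the device ... */
            return 0;
    }

    static DEFINE_RUNTIME_DEV_PM_OPS(example_dev_pm_ops,
                                     example_runtime_suspend, NULL, NULL);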
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index b7957a431589..9ccd88d7640b 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1366,7 +1366,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
- ufshcd_scsi_block_requests(hba);
+ blk_mq_quiesce_tagset(&hba->host->tag_set);
mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
@@ -1375,7 +1375,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
- ufshcd_scsi_unblock_requests(hba);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
goto out;
}
@@ -1396,7 +1396,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
mutex_unlock(&hba->wb_mutex);
- ufshcd_scsi_unblock_requests(hba);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
ufshcd_release(hba);
}
@@ -8789,6 +8789,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
(hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
/* Reset the device and controller before doing reinit */
ufshcd_device_reset(hba);
+ ufs_put_device_desc(hba);
ufshcd_hba_stop(hba);
ufshcd_vops_reinit_notify(hba);
ret = ufshcd_hba_enable(hba);
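
The ufshcd hunks above replace blocking the SCSI host with quiescing the whole tag set, so no new requests are dispatched to any of the HBA's queues while clock scaling reprograms the link; the surrounding code still waits for outstanding requests before scaling, as the context comment notes. A minimal sketch of the quiesce/unquiesce bracket, with a hypothetical reconfigure step (illustrative only, not the ufshcd implementation):

    #include <linux/blk-mq.h>

    static int example_reconfigure(struct blk_mq_tag_set *set)
    {
            int ret;

            /* Stop dispatching new requests on every queue of this tag set. */
            blk_mq_quiesce_tagset(set);

            /* ... wait for outstanding requests, then reprogram hardware ... */
            ret = 0;

            /* Always resume dispatch, even if reprogramming failed. */
            blk_mq_unquiesce_tagset(set);
            return ret;
    }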
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 3a9a0dd4be70..949eca0adebe 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_USB_R8A66597_HCD) += host/
obj-$(CONFIG_USB_FSL_USB2) += host/
obj-$(CONFIG_USB_FOTG210_HCD) += host/
obj-$(CONFIG_USB_MAX3421_HCD) += host/
+obj-$(CONFIG_USB_XEN_HCD) += host/
obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 4ce7cba2b48a..8f3b9a0a38e1 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1131,6 +1131,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
struct cxacru_data *instance;
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
+ struct usb_endpoint_descriptor *in, *out;
int ret;
/* instance init */
@@ -1177,6 +1178,19 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
goto fail;
}
+ if (usb_endpoint_xfer_int(&cmd_ep->desc))
+ ret = usb_find_common_endpoints(intf->cur_altsetting,
+ NULL, NULL, &in, &out);
+ else
+ ret = usb_find_common_endpoints(intf->cur_altsetting,
+ &in, &out, NULL, NULL);
+
+ if (ret) {
+ usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_INT) {
usb_fill_int_urb(instance->rcv_urb,
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index bada13f704b6..835bf2428dc6 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1084,6 +1084,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -ENODEV;
}
+ ret = ci_ulpi_init(ci);
+ if (ret)
+ return ret;
+
if (ci->platdata->phy) {
ci->phy = ci->platdata->phy;
} else if (ci->platdata->usb_phy) {
@@ -1138,10 +1142,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
goto ulpi_exit;
}
- ret = ci_ulpi_init(ci);
- if (ret)
- return ret;
-
ci->hw_bank.phys = res->start;
ci->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/usb/chipidea/ulpi.c b/drivers/usb/chipidea/ulpi.c
index 89fb51e2c3de..dfec07e8ae1d 100644
--- a/drivers/usb/chipidea/ulpi.c
+++ b/drivers/usb/chipidea/ulpi.c
@@ -68,6 +68,11 @@ int ci_ulpi_init(struct ci_hdrc *ci)
if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
return 0;
+ /*
+ * Set PORTSC correctly so we can read/write ULPI registers for
+ * identification purposes
+ */
+ hw_phymode_configure(ci);
ci->ulpi_ops.read = ci_ulpi_read;
ci->ulpi_ops.write = ci_ulpi_write;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index c553decb5461..6830be4419e2 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -266,14 +266,14 @@ static void wdm_int_callback(struct urb *urb)
dev_err(&desc->intf->dev, "Stall on int endpoint\n");
goto sw; /* halt is cleared in work */
default:
- dev_err(&desc->intf->dev,
+ dev_err_ratelimited(&desc->intf->dev,
"nonzero urb status received: %d\n", status);
break;
}
}
if (urb->actual_length < sizeof(struct usb_cdc_notification)) {
- dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
+ dev_err_ratelimited(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
urb->actual_length);
goto exit;
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index e3366f4d82b9..1ff7d901fede 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1623,6 +1623,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
struct usb_anchor *anchor = urb->anchor;
int status = urb->unlinked;
+ unsigned long flags;
urb->hcpriv = NULL;
if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
@@ -1640,13 +1641,14 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
/* pass ownership to the completion handler */
urb->status = status;
/*
- * This function can be called in task context inside another remote
- * coverage collection section, but kcov doesn't support that kind of
- * recursion yet. Only collect coverage in softirq context for now.
+ * Only collect coverage in the softirq context and disable interrupts
+ * to avoid scenarios with nested remote coverage collection sections
+ * that KCOV does not support.
+ * See the comment next to kcov_remote_start_usb_softirq() for details.
*/
- kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
+ flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
urb->complete(urb);
- kcov_remote_stop_softirq();
+ kcov_remote_stop_softirq(flags);
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 7ee61a89520b..cb82557678dd 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -957,12 +957,16 @@ static bool dwc3_core_is_valid(struct dwc3 *dwc)
static void dwc3_core_setup_global_control(struct dwc3 *dwc)
{
+ unsigned int power_opt;
+ unsigned int hw_mode;
u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+ power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
- switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
+ switch (power_opt) {
case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
/**
* WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
@@ -995,6 +999,20 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
break;
}
+ /*
+ * This is a workaround for STAR#4846132, which only affects
+ * DWC_usb31 version 2.00a operating in host mode.
+ *
+ * There is a problem in DWC_usb31 version 2.00a operating
+ * in host mode that would cause a CSR read timeout when a CSR
+ * read coincides with RAM Clock Gating Entry. Disabling Clock
+ * Gating avoids this, at the cost of higher power consumption
+ * during normal operation.
+ */
+ if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO &&
+ hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A))
+ reg |= DWC3_GCTL_DSBLCLKGTNG;
+
/* check if current dwc3 is on simulation board */
if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
dev_info(dwc->dev, "Running with FPGA optimizations\n");
@@ -2250,7 +2268,6 @@ assert_reset:
static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
{
- unsigned long flags;
u32 reg;
int i;
@@ -2293,9 +2310,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
break;
if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
- spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_suspend(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
synchronize_irq(dwc->irq_gadget);
}
@@ -2312,7 +2327,6 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
{
- unsigned long flags;
int ret;
u32 reg;
int i;
@@ -2366,9 +2380,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
dwc3_otg_host_init(dwc);
} else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
- spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_resume(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
}
break;
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index ba7d180cc9e6..44e20c6c36d3 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -213,6 +213,7 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
struct usb_endpoint_descriptor *ss)
{
switch (gadget->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
return ss;
case USB_SPEED_HIGH:
@@ -449,11 +450,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
- if (dev->interface < 0) {
- spin_unlock_irqrestore(&dev->lock, flags);
- mutex_unlock(&dev->lock_printer_io);
- return -ENODEV;
- }
+ if (dev->interface < 0)
+ goto out_disabled;
/* We will use this flag later to check if a printer reset happened
* after we turn interrupts back on.
@@ -461,6 +459,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
dev->reset_printer = 0;
setup_rx_reqs(dev);
+ /* this dropped the lock - need to retest */
+ if (dev->interface < 0)
+ goto out_disabled;
bytes_copied = 0;
current_rx_req = dev->current_rx_req;
@@ -494,6 +495,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
wait_event_interruptible(dev->rx_wait,
(likely(!list_empty(&dev->rx_buffers))));
spin_lock_irqsave(&dev->lock, flags);
+ if (dev->interface < 0)
+ goto out_disabled;
}
/* We have data to return then copy it to the caller's buffer.*/
@@ -537,6 +540,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
return -EAGAIN;
}
+ if (dev->interface < 0)
+ goto out_disabled;
+
/* If we are not returning all the data left in this RX request
* buffer then adjust the amount of data left in the buffer.
* Otherwise, if we are done with this RX request buffer then
@@ -566,6 +572,11 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
return bytes_copied;
else
return -EAGAIN;
+
+out_disabled:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock_printer_io);
+ return -ENODEV;
}
static ssize_t
@@ -586,11 +597,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
- if (dev->interface < 0) {
- spin_unlock_irqrestore(&dev->lock, flags);
- mutex_unlock(&dev->lock_printer_io);
- return -ENODEV;
- }
+ if (dev->interface < 0)
+ goto out_disabled;
/* Check if a printer reset happens while we have interrupts on */
dev->reset_printer = 0;
@@ -613,6 +621,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
wait_event_interruptible(dev->tx_wait,
(likely(!list_empty(&dev->tx_reqs))));
spin_lock_irqsave(&dev->lock, flags);
+ if (dev->interface < 0)
+ goto out_disabled;
}
while (likely(!list_empty(&dev->tx_reqs)) && len) {
@@ -662,6 +672,9 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
return -EAGAIN;
}
+ if (dev->interface < 0)
+ goto out_disabled;
+
list_add(&req->list, &dev->tx_reqs_active);
/* here, we unlock, and only unlock, to avoid deadlock. */
@@ -674,6 +687,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
+ if (dev->interface < 0)
+ goto out_disabled;
}
spin_unlock_irqrestore(&dev->lock, flags);
@@ -685,6 +700,11 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
return bytes_copied;
else
return -EAGAIN;
+
+out_disabled:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock_printer_io);
+ return -ENODEV;
}
static int
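
The printer_read()/printer_write() changes above re-check dev->interface every time dev->lock has been dropped (inside setup_rx_reqs(), around wait_event_interruptible(), and after queuing requests), and funnel the disabled case through a single out_disabled unlock path. A self-contained sketch of that check-sleep-recheck pattern, using hypothetical ex_* names rather than the gadget code itself:

    #include <linux/spinlock.h>
    #include <linux/wait.h>
    #include <linux/list.h>
    #include <linux/errno.h>

    struct ex_dev {
            spinlock_t lock;
            int interface;                  /* becomes negative on unbind */
            wait_queue_head_t rx_wait;
            struct list_head rx_buffers;
    };

    static int ex_wait_for_data(struct ex_dev *dev)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->lock, flags);
            if (dev->interface < 0)
                    goto out_disabled;

            while (list_empty(&dev->rx_buffers)) {
                    spin_unlock_irqrestore(&dev->lock, flags);
                    if (wait_event_interruptible(dev->rx_wait,
                                                 !list_empty(&dev->rx_buffers)))
                            return -ERESTARTSYS;
                    spin_lock_irqsave(&dev->lock, flags);
                    /* The function may have been disabled while we slept. */
                    if (dev->interface < 0)
                            goto out_disabled;
            }

            spin_unlock_irqrestore(&dev->lock, flags);
            return 0;               /* data available, safe to consume */

    out_disabled:
            spin_unlock_irqrestore(&dev->lock, flags);
            return -ENODEV;
    }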
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 11dd0b9e847f..95191083b455 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -1163,8 +1163,6 @@ struct net_device *gether_connect(struct gether *link)
if (netif_running(dev->net))
eth_start(dev, GFP_ATOMIC);
- netif_device_attach(dev->net);
-
/* on error, disable any endpoints */
} else {
(void) usb_ep_disable(link->out_ep);
@@ -1202,7 +1200,7 @@ void gether_disconnect(struct gether *link)
DBG(dev, "%s\n", __func__);
- netif_device_detach(dev->net);
+ netif_stop_queue(dev->net);
netif_carrier_off(dev->net);
/* disable endpoints, forcing (synchronous) completion
diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
index 3916c8e2ba01..821a6ab5da56 100644
--- a/drivers/usb/gadget/udc/aspeed_udc.c
+++ b/drivers/usb/gadget/udc/aspeed_udc.c
@@ -66,8 +66,8 @@
#define USB_UPSTREAM_EN BIT(0)
/* Main config reg */
-#define UDC_CFG_SET_ADDR(x) ((x) & 0x3f)
-#define UDC_CFG_ADDR_MASK (0x3f)
+#define UDC_CFG_SET_ADDR(x) ((x) & UDC_CFG_ADDR_MASK)
+#define UDC_CFG_ADDR_MASK GENMASK(6, 0)
/* Interrupt ctrl & status reg */
#define UDC_IRQ_EP_POOL_NAK BIT(17)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c040d816e626..05881153883e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -36,6 +36,7 @@
#define PCI_VENDOR_ID_ETRON 0x1b6f
#define PCI_DEVICE_ID_EJ168 0x7023
+#define PCI_DEVICE_ID_EJ188 0x7052
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
@@ -395,6 +396,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_RESET_ON_RESUME;
xhci->quirks |= XHCI_BROKEN_STREAMS;
}
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_EJ188) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
+ }
+
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0014) {
xhci->quirks |= XHCI_ZERO_64B_REGS;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9e90d2952760..fd0cde3d1569 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1031,13 +1031,27 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
break;
case TD_DIRTY: /* TD is cached, clear it */
case TD_HALTED:
+ case TD_CLEARING_CACHE_DEFERRED:
+ if (cached_td) {
+ if (cached_td->urb->stream_id != td->urb->stream_id) {
+ /* Multiple streams case, defer move dq */
+ xhci_dbg(xhci,
+ "Move dq deferred: stream %u URB %p\n",
+ td->urb->stream_id, td->urb);
+ td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
+ break;
+ }
+
+ /* Should never happen, but clear the TD if it does */
+ xhci_warn(xhci,
+ "Found multiple active URBs %p and %p in stream %u?\n",
+ td->urb, cached_td->urb,
+ td->urb->stream_id);
+ td_to_noop(xhci, ring, cached_td, false);
+ cached_td->cancel_status = TD_CLEARED;
+ }
+
td->cancel_status = TD_CLEARING_CACHE;
- if (cached_td)
- /* FIXME stream case, several stopped rings */
- xhci_dbg(xhci,
- "Move dq past stream %u URB %p instead of stream %u URB %p\n",
- td->urb->stream_id, td->urb,
- cached_td->urb->stream_id, cached_td->urb);
cached_td = td;
break;
}
@@ -1057,10 +1071,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
if (err) {
/* Failed to move past cached td, just set cached TDs to no-op */
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
- if (td->cancel_status != TD_CLEARING_CACHE)
+ /*
+ * Deferred TDs need to have the deq pointer set after the above command
+ * completes, so if that failed we just give up on all of them (and
+ * complain loudly since this could cause issues due to caching).
+ */
+ if (td->cancel_status != TD_CLEARING_CACHE &&
+ td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
continue;
- xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
- td->urb);
+ xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
+ td->urb);
td_to_noop(xhci, ring, td, false);
td->cancel_status = TD_CLEARED;
}
@@ -1346,6 +1366,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_td *td, *tmp_td;
+ bool deferred = false;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
@@ -1432,6 +1453,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
__func__, td->urb);
xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+ } else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) {
+ deferred = true;
} else {
xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
__func__, td->urb, td->cancel_status);
@@ -1441,8 +1464,17 @@ cleanup:
ep->ep_state &= ~SET_DEQ_PENDING;
ep->queued_deq_seg = NULL;
ep->queued_deq_ptr = NULL;
- /* Restart any rings with pending URBs */
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+
+ if (deferred) {
+ /* We have more streams to clear */
+ xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
+ __func__);
+ xhci_invalidate_cancelled_tds(ep);
+ } else {
+ /* Restart any rings with pending URBs */
+ xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ }
}
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
@@ -2524,9 +2556,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
goto finish_td;
case COMP_STOPPED_LENGTH_INVALID:
/* stopped on ep trb with invalid length, exclude it */
- ep_trb_len = 0;
- remaining = 0;
- break;
+ td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb);
+ goto finish_td;
case COMP_USB_TRANSACTION_ERROR:
if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
(ep->err_count++ > MAX_SOFT_RETRY) ||
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 30415158ed3c..78d014c4d884 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1276,6 +1276,7 @@ enum xhci_cancelled_td_status {
TD_DIRTY = 0,
TD_HALTED,
TD_CLEARING_CACHE,
+ TD_CLEARING_CACHE_DEFERRED,
TD_CLEARED,
};
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 8abf3a567e30..108d9a593a80 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -556,7 +556,7 @@ static int da8xx_probe(struct platform_device *pdev)
ret = of_platform_populate(pdev->dev.of_node, NULL,
da8xx_auxdata_lookup, &pdev->dev);
if (ret)
- return ret;
+ goto err_unregister_phy;
pinfo = da8xx_dev_info;
pinfo.parent = &pdev->dev;
@@ -571,9 +571,13 @@ static int da8xx_probe(struct platform_device *pdev)
ret = PTR_ERR_OR_ZERO(glue->musb);
if (ret) {
dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
- usb_phy_generic_unregister(glue->usb_phy);
+ goto err_unregister_phy;
}
+ return 0;
+
+err_unregister_phy:
+ usb_phy_generic_unregister(glue->usb_phy);
return ret;
}
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 115f05a6201a..40d34cc28344 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -105,6 +105,8 @@ struct alauda_info {
unsigned char sense_key;
unsigned long sense_asc; /* additional sense code */
unsigned long sense_ascq; /* additional sense code qualifier */
+
+ bool media_initialized;
};
#define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) )
@@ -476,11 +478,12 @@ static int alauda_check_media(struct us_data *us)
}
/* Check for media change */
- if (status[0] & 0x08) {
+ if (status[0] & 0x08 || !info->media_initialized) {
usb_stor_dbg(us, "Media change detected\n");
alauda_free_maps(&MEDIA_INFO(us));
- alauda_init_media(us);
-
+ rc = alauda_init_media(us);
+ if (rc == USB_STOR_TRANSPORT_GOOD)
+ info->media_initialized = true;
info->sense_key = UNIT_ATTENTION;
info->sense_asc = 0x28;
info->sense_ascq = 0x00;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index b31464740f6c..8c8b5e6041cc 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -79,6 +79,12 @@ static int slave_alloc (struct scsi_device *sdev)
if (us->protocol == USB_PR_BULK && us->max_lun > 0)
sdev->sdev_bflags |= BLIST_FORCELUN;
+ /*
+ * Some USB storage devices reset if the IO advice hints grouping mode
+ * page is queried. Hence skip that mode page.
+ */
+ sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS;
+
return 0;
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index a48870a87a29..b610a2de4ae5 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -21,6 +21,7 @@
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
@@ -820,6 +821,12 @@ static int uas_slave_alloc(struct scsi_device *sdev)
struct uas_dev_info *devinfo =
(struct uas_dev_info *)sdev->host->hostdata;
+ /*
+ * Some USB storage devices reset if the IO advice hints grouping mode
+ * page is queried. Hence skip that mode page.
+ */
+ sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS;
+
sdev->hostdata = devinfo;
return 0;
}
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 8a1af08f71b6..5d4da962acc8 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -3014,8 +3014,10 @@ static int tcpm_register_source_caps(struct tcpm_port *port)
memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
caps.role = TYPEC_SOURCE;
- if (cap)
+ if (cap) {
usb_power_delivery_unregister_capabilities(cap);
+ port->partner_source_caps = NULL;
+ }
cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
if (IS_ERR(cap))
@@ -6172,6 +6174,7 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
port->tcpc->set_bist_data(port->tcpc, false);
switch (port->state) {
+ case TOGGLING:
case ERROR_RECOVERY:
case PORT_RESET:
case PORT_RESET_WAIT_OFF:
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index cb52e7b0a2c5..2cc7aedd490f 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -153,8 +153,13 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
}
if (cci & UCSI_CCI_ERROR) {
- if (cmd == UCSI_GET_ERROR_STATUS)
+ if (cmd == UCSI_GET_ERROR_STATUS) {
+ ret = ucsi_acknowledge(ucsi, false);
+ if (ret)
+ return ret;
+
return -EIO;
+ }
return ucsi_read_error(ucsi);
}
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 8d112c3edae5..adf32ca0f761 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -25,6 +25,7 @@ struct ucsi_acpi {
unsigned long flags;
#define UCSI_ACPI_COMMAND_PENDING 1
#define UCSI_ACPI_ACK_PENDING 2
+#define UCSI_ACPI_CHECK_BOGUS_EVENT 3
guid_t guid;
u64 cmd;
};
@@ -128,6 +129,58 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
.async_write = ucsi_acpi_async_write
};
+static int ucsi_gram_read(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len)
+{
+ u16 bogus_change = UCSI_CONSTAT_POWER_LEVEL_CHANGE |
+ UCSI_CONSTAT_PDOS_CHANGE;
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ struct ucsi_connector_status *status;
+ int ret;
+
+ ret = ucsi_acpi_read(ucsi, offset, val, val_len);
+ if (ret < 0)
+ return ret;
+
+ if (UCSI_COMMAND(ua->cmd) == UCSI_GET_CONNECTOR_STATUS &&
+ test_bit(UCSI_ACPI_CHECK_BOGUS_EVENT, &ua->flags) &&
+ offset == UCSI_MESSAGE_IN) {
+ status = (struct ucsi_connector_status *)val;
+
+ /* Clear the bogus change */
+ if (status->change == bogus_change)
+ status->change = 0;
+
+ clear_bit(UCSI_ACPI_CHECK_BOGUS_EVENT, &ua->flags);
+ }
+
+ return ret;
+}
+
+static int ucsi_gram_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);
+ if (ret < 0)
+ return ret;
+
+ if (UCSI_COMMAND(ua->cmd) == UCSI_GET_PDOS &&
+ ua->cmd & UCSI_GET_PDOS_PARTNER_PDO(1) &&
+ ua->cmd & UCSI_GET_PDOS_SRC_PDOS)
+ set_bit(UCSI_ACPI_CHECK_BOGUS_EVENT, &ua->flags);
+
+ return ret;
+}
+
+static const struct ucsi_operations ucsi_gram_ops = {
+ .read = ucsi_gram_read,
+ .sync_write = ucsi_gram_sync_write,
+ .async_write = ucsi_acpi_async_write
+};
+
static const struct dmi_system_id ucsi_acpi_quirks[] = {
{
.matches = {
@@ -136,6 +189,14 @@ static const struct dmi_system_id ucsi_acpi_quirks[] = {
},
.driver_data = (void *)&ucsi_zenbook_ops,
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "LG gram PC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "90Q"),
+ },
+ .driver_data = (void *)&ucsi_gram_ops,
+ },
{ }
};
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index f7546bb488c3..2fa973afe4e6 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -14,7 +14,7 @@
#include <linux/soc/qcom/pmic_glink.h>
#include "ucsi.h"
-#define PMIC_GLINK_MAX_PORTS 2
+#define PMIC_GLINK_MAX_PORTS 3
#define UCSI_BUF_SIZE 48
@@ -372,6 +372,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
ret = fwnode_property_read_u32(fwnode, "reg", &port);
if (ret < 0) {
dev_err(dev, "missing reg property of %pOFn\n", fwnode);
+ fwnode_handle_put(fwnode);
return ret;
}
@@ -386,9 +387,11 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
if (!desc)
continue;
- if (IS_ERR(desc))
+ if (IS_ERR(desc)) {
+ fwnode_handle_put(fwnode);
return dev_err_probe(dev, PTR_ERR(desc),
"unable to acquire orientation gpio\n");
+ }
ucsi->port_orientation[port] = desc;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
index ac48b7763114..ac69288e8bb0 100644
--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
@@ -65,6 +65,7 @@ struct ucsi_stm32g0 {
struct device *dev;
unsigned long flags;
#define COMMAND_PENDING 1
+#define ACK_PENDING 2
const char *fw_name;
struct ucsi *ucsi;
bool suspended;
@@ -396,9 +397,13 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const
size_t len)
{
struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
+ bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI;
int ret;
- set_bit(COMMAND_PENDING, &g0->flags);
+ if (ack)
+ set_bit(ACK_PENDING, &g0->flags);
+ else
+ set_bit(COMMAND_PENDING, &g0->flags);
ret = ucsi_stm32g0_async_write(ucsi, offset, val, len);
if (ret)
@@ -406,9 +411,14 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const
if (!wait_for_completion_timeout(&g0->complete, msecs_to_jiffies(5000)))
ret = -ETIMEDOUT;
+ else
+ return 0;
out_clear_bit:
- clear_bit(COMMAND_PENDING, &g0->flags);
+ if (ack)
+ clear_bit(ACK_PENDING, &g0->flags);
+ else
+ clear_bit(COMMAND_PENDING, &g0->flags);
return ret;
}
@@ -429,8 +439,9 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
if (UCSI_CCI_CONNECTOR(cci))
ucsi_connector_change(g0->ucsi, UCSI_CCI_CONNECTOR(cci));
- if (test_bit(COMMAND_PENDING, &g0->flags) &&
- cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ if (cci & UCSI_CCI_ACK_COMPLETE && test_and_clear_bit(ACK_PENDING, &g0->flags))
+ complete(&g0->complete);
+ if (cci & UCSI_CCI_COMMAND_COMPLETE && test_and_clear_bit(COMMAND_PENDING, &g0->flags))
complete(&g0->complete);
return IRQ_HANDLED;
diff --git a/drivers/vfio/device_cdev.c b/drivers/vfio/device_cdev.c
index e75da0a70d1f..bb1817bd4ff3 100644
--- a/drivers/vfio/device_cdev.c
+++ b/drivers/vfio/device_cdev.c
@@ -39,6 +39,13 @@ int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
filep->private_data = df;
+ /*
+ * Use the pseudo fs inode on the device to link all mmaps
+ * to the same address space, allowing us to unmap all vmas
+ * associated to this device using unmap_mapping_range().
+ */
+ filep->f_mapping = device->inode->i_mapping;
+
return 0;
err_put_registration:
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index 610a429c6191..ded364588d29 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -286,6 +286,13 @@ static struct file *vfio_device_open_file(struct vfio_device *device)
*/
filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
+ /*
+ * Use the pseudo fs inode on the device to link all mmaps
+ * to the same address space, allowing us to unmap all vmas
+ * associated to this device using unmap_mapping_range().
+ */
+ filep->f_mapping = device->inode->i_mapping;
+
if (device->group->type == VFIO_NO_IOMMU)
dev_warn(device->dev, "vfio-noiommu device opened by user "
"(%s:%d)\n", current->comm, task_pid_nr(current));
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 80cae87fff36..987c7921affa 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1610,100 +1610,20 @@ ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *bu
}
EXPORT_SYMBOL_GPL(vfio_pci_core_write);
-/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
-static int vfio_pci_zap_and_vma_lock(struct vfio_pci_core_device *vdev, bool try)
+static void vfio_pci_zap_bars(struct vfio_pci_core_device *vdev)
{
- struct vfio_pci_mmap_vma *mmap_vma, *tmp;
+ struct vfio_device *core_vdev = &vdev->vdev;
+ loff_t start = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX);
+ loff_t end = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_ROM_REGION_INDEX);
+ loff_t len = end - start;
- /*
- * Lock ordering:
- * vma_lock is nested under mmap_lock for vm_ops callback paths.
- * The memory_lock semaphore is used by both code paths calling
- * into this function to zap vmas and the vm_ops.fault callback
- * to protect the memory enable state of the device.
- *
- * When zapping vmas we need to maintain the mmap_lock => vma_lock
- * ordering, which requires using vma_lock to walk vma_list to
- * acquire an mm, then dropping vma_lock to get the mmap_lock and
- * reacquiring vma_lock. This logic is derived from similar
- * requirements in uverbs_user_mmap_disassociate().
- *
- * mmap_lock must always be the top-level lock when it is taken.
- * Therefore we can only hold the memory_lock write lock when
- * vma_list is empty, as we'd need to take mmap_lock to clear
- * entries. vma_list can only be guaranteed empty when holding
- * vma_lock, thus memory_lock is nested under vma_lock.
- *
- * This enables the vm_ops.fault callback to acquire vma_lock,
- * followed by memory_lock read lock, while already holding
- * mmap_lock without risk of deadlock.
- */
- while (1) {
- struct mm_struct *mm = NULL;
-
- if (try) {
- if (!mutex_trylock(&vdev->vma_lock))
- return 0;
- } else {
- mutex_lock(&vdev->vma_lock);
- }
- while (!list_empty(&vdev->vma_list)) {
- mmap_vma = list_first_entry(&vdev->vma_list,
- struct vfio_pci_mmap_vma,
- vma_next);
- mm = mmap_vma->vma->vm_mm;
- if (mmget_not_zero(mm))
- break;
-
- list_del(&mmap_vma->vma_next);
- kfree(mmap_vma);
- mm = NULL;
- }
- if (!mm)
- return 1;
- mutex_unlock(&vdev->vma_lock);
-
- if (try) {
- if (!mmap_read_trylock(mm)) {
- mmput(mm);
- return 0;
- }
- } else {
- mmap_read_lock(mm);
- }
- if (try) {
- if (!mutex_trylock(&vdev->vma_lock)) {
- mmap_read_unlock(mm);
- mmput(mm);
- return 0;
- }
- } else {
- mutex_lock(&vdev->vma_lock);
- }
- list_for_each_entry_safe(mmap_vma, tmp,
- &vdev->vma_list, vma_next) {
- struct vm_area_struct *vma = mmap_vma->vma;
-
- if (vma->vm_mm != mm)
- continue;
-
- list_del(&mmap_vma->vma_next);
- kfree(mmap_vma);
-
- zap_vma_ptes(vma, vma->vm_start,
- vma->vm_end - vma->vm_start);
- }
- mutex_unlock(&vdev->vma_lock);
- mmap_read_unlock(mm);
- mmput(mm);
- }
+ unmap_mapping_range(core_vdev->inode->i_mapping, start, len, true);
}
void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev)
{
- vfio_pci_zap_and_vma_lock(vdev, false);
down_write(&vdev->memory_lock);
- mutex_unlock(&vdev->vma_lock);
+ vfio_pci_zap_bars(vdev);
}
u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev)
@@ -1725,99 +1645,56 @@ void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 c
up_write(&vdev->memory_lock);
}
-/* Caller holds vma_lock */
-static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev,
- struct vm_area_struct *vma)
-{
- struct vfio_pci_mmap_vma *mmap_vma;
-
- mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL_ACCOUNT);
- if (!mmap_vma)
- return -ENOMEM;
-
- mmap_vma->vma = vma;
- list_add(&mmap_vma->vma_next, &vdev->vma_list);
-
- return 0;
-}
-
-/*
- * Zap mmaps on open so that we can fault them in on access and therefore
- * our vma_list only tracks mappings accessed since last zap.
- */
-static void vfio_pci_mmap_open(struct vm_area_struct *vma)
-{
- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-}
-
-static void vfio_pci_mmap_close(struct vm_area_struct *vma)
+static unsigned long vma_to_pfn(struct vm_area_struct *vma)
{
struct vfio_pci_core_device *vdev = vma->vm_private_data;
- struct vfio_pci_mmap_vma *mmap_vma;
+ int index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ u64 pgoff;
- mutex_lock(&vdev->vma_lock);
- list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
- if (mmap_vma->vma == vma) {
- list_del(&mmap_vma->vma_next);
- kfree(mmap_vma);
- break;
- }
- }
- mutex_unlock(&vdev->vma_lock);
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+
+ return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
}
static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct vfio_pci_core_device *vdev = vma->vm_private_data;
- struct vfio_pci_mmap_vma *mmap_vma;
- vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
+ unsigned long addr = vma->vm_start;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
+
+ pfn = vma_to_pfn(vma);
- mutex_lock(&vdev->vma_lock);
down_read(&vdev->memory_lock);
- /*
- * Memory region cannot be accessed if the low power feature is engaged
- * or memory access is disabled.
- */
- if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) {
- ret = VM_FAULT_SIGBUS;
- goto up_out;
- }
+ if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
+ goto out_unlock;
+
+ ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+ if (ret & VM_FAULT_ERROR)
+ goto out_unlock;
/*
- * We populate the whole vma on fault, so we need to test whether
- * the vma has already been mapped, such as for concurrent faults
- * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if
- * we ask it to fill the same range again.
+ * Pre-fault the remainder of the vma, abort further insertions and
+ * suppress errors if a fault is encountered during pre-fault.
*/
- list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
- if (mmap_vma->vma == vma)
- goto up_out;
- }
-
- if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot)) {
- ret = VM_FAULT_SIGBUS;
- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
- goto up_out;
- }
+ for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
+ if (addr == vmf->address)
+ continue;
- if (__vfio_pci_add_vma(vdev, vma)) {
- ret = VM_FAULT_OOM;
- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+ if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
+ break;
}
-up_out:
+out_unlock:
up_read(&vdev->memory_lock);
- mutex_unlock(&vdev->vma_lock);
+
return ret;
}
static const struct vm_operations_struct vfio_pci_mmap_ops = {
- .open = vfio_pci_mmap_open,
- .close = vfio_pci_mmap_close,
.fault = vfio_pci_mmap_fault,
};
@@ -1880,11 +1757,12 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma
vma->vm_private_data = vdev;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
/*
- * See remap_pfn_range(), called from vfio_pci_fault() but we can't
- * change vm_flags within the fault handler. Set them now.
+ * Set vm_flags now, they should not be changed in the fault handler.
+ * We want the same flags and page protection (decrypted above) as
+ * io_remap_pfn_range() would set.
*
* VM_ALLOW_ANY_UNCACHED: The VMA flag is implemented for ARM64,
* allowing KVM stage 2 device mapping attributes to use Normal-NC
@@ -2202,8 +2080,6 @@ int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
mutex_init(&vdev->ioeventfds_lock);
INIT_LIST_HEAD(&vdev->dummy_resources_list);
INIT_LIST_HEAD(&vdev->ioeventfds_list);
- mutex_init(&vdev->vma_lock);
- INIT_LIST_HEAD(&vdev->vma_list);
INIT_LIST_HEAD(&vdev->sriov_pfs_item);
init_rwsem(&vdev->memory_lock);
xa_init(&vdev->ctx);
@@ -2219,7 +2095,6 @@ void vfio_pci_core_release_dev(struct vfio_device *core_vdev)
mutex_destroy(&vdev->igate);
mutex_destroy(&vdev->ioeventfds_lock);
- mutex_destroy(&vdev->vma_lock);
kfree(vdev->region);
kfree(vdev->pm_save);
}
@@ -2497,26 +2372,15 @@ unwind:
return ret;
}
-/*
- * We need to get memory_lock for each device, but devices can share mmap_lock,
- * therefore we need to zap and hold the vma_lock for each device, and only then
- * get each memory_lock.
- */
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
struct vfio_pci_group_info *groups,
struct iommufd_ctx *iommufd_ctx)
{
- struct vfio_pci_core_device *cur_mem;
- struct vfio_pci_core_device *cur_vma;
- struct vfio_pci_core_device *cur;
+ struct vfio_pci_core_device *vdev;
struct pci_dev *pdev;
- bool is_mem = true;
int ret;
mutex_lock(&dev_set->lock);
- cur_mem = list_first_entry(&dev_set->device_list,
- struct vfio_pci_core_device,
- vdev.dev_set_list);
pdev = vfio_pci_dev_set_resettable(dev_set);
if (!pdev) {
@@ -2533,7 +2397,7 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
if (ret)
goto err_unlock;
- list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
+ list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) {
bool owned;
/*
@@ -2557,38 +2421,38 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
* Otherwise, reset is not allowed.
*/
if (iommufd_ctx) {
- int devid = vfio_iommufd_get_dev_id(&cur_vma->vdev,
+ int devid = vfio_iommufd_get_dev_id(&vdev->vdev,
iommufd_ctx);
owned = (devid > 0 || devid == -ENOENT);
} else {
- owned = vfio_dev_in_groups(&cur_vma->vdev, groups);
+ owned = vfio_dev_in_groups(&vdev->vdev, groups);
}
if (!owned) {
ret = -EINVAL;
- goto err_undo;
+ break;
}
/*
- * Locking multiple devices is prone to deadlock, runaway and
- * unwind if we hit contention.
+ * Take the memory write lock for each device and zap BAR
+ * mappings to prevent the user accessing the device while in
+ * reset. Locking multiple devices is prone to deadlock,
+ * runaway and unwind if we hit contention.
*/
- if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) {
+ if (!down_write_trylock(&vdev->memory_lock)) {
ret = -EBUSY;
- goto err_undo;
+ break;
}
+
+ vfio_pci_zap_bars(vdev);
}
- cur_vma = NULL;
- list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) {
- if (!down_write_trylock(&cur_mem->memory_lock)) {
- ret = -EBUSY;
- goto err_undo;
- }
- mutex_unlock(&cur_mem->vma_lock);
+ if (!list_entry_is_head(vdev,
+ &dev_set->device_list, vdev.dev_set_list)) {
+ vdev = list_prev_entry(vdev, vdev.dev_set_list);
+ goto err_undo;
}
- cur_mem = NULL;
/*
* The pci_reset_bus() will reset all the devices in the bus.
@@ -2599,25 +2463,22 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
* cause the PCI config space reset without restoring the original
* state (saved locally in 'vdev->pm_save').
*/
- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
- vfio_pci_set_power_state(cur, PCI_D0);
+ list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
+ vfio_pci_set_power_state(vdev, PCI_D0);
ret = pci_reset_bus(pdev);
+ vdev = list_last_entry(&dev_set->device_list,
+ struct vfio_pci_core_device, vdev.dev_set_list);
+
err_undo:
- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
- if (cur == cur_mem)
- is_mem = false;
- if (cur == cur_vma)
- break;
- if (is_mem)
- up_write(&cur->memory_lock);
- else
- mutex_unlock(&cur->vma_lock);
- }
+ list_for_each_entry_from_reverse(vdev, &dev_set->device_list,
+ vdev.dev_set_list)
+ up_write(&vdev->memory_lock);
+
+ list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
+ pm_runtime_put(&vdev->pdev->dev);
- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
- pm_runtime_put(&cur->pdev->dev);
err_unlock:
mutex_unlock(&dev_set->lock);
return ret;
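
With the vma list gone, the fault handler above maps the faulting page with vmf_insert_pfn() under memory_lock and then opportunistically pre-faults the rest of the VMA, ignoring errors during pre-fault so the untouched pages can fault in later. A simplified sketch of that shape, assuming the backing PFN range was stashed at mmap time and the VMA uses VM_PFNMAP (example_* names are hypothetical, not the vfio code):

    #include <linux/mm.h>

    static vm_fault_t example_mmap_fault(struct vm_fault *vmf)
    {
            struct vm_area_struct *vma = vmf->vma;
            /* Hypothetical: pfn backing vma->vm_start, stored at mmap time. */
            unsigned long base_pfn = (unsigned long)vma->vm_private_data;
            unsigned long pfn = base_pfn +
                    ((vmf->address - vma->vm_start) >> PAGE_SHIFT);
            unsigned long addr;
            vm_fault_t ret;

            /* Map the page that actually faulted first. */
            ret = vmf_insert_pfn(vma, vmf->address, pfn);
            if (ret & VM_FAULT_ERROR)
                    return ret;

            /*
             * Pre-fault the remainder of the VMA; a failure here is not
             * fatal, the remaining pages will simply fault in later.
             */
            for (addr = vma->vm_start, pfn = base_pfn;
                 addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
                    if (addr == vmf->address)
                            continue;
                    if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
                            break;
            }

            return ret;
    }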
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index e97d796a54fb..a5a62d9d963f 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -22,8 +22,10 @@
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
+#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/pseudo_fs.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -43,9 +45,13 @@
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC "VFIO - User Level meta-driver"
+#define VFIO_MAGIC 0x5646494f /* "VFIO" */
+
static struct vfio {
struct class *device_class;
struct ida device_ida;
+ struct vfsmount *vfs_mount;
+ int fs_count;
} vfio;
#ifdef CONFIG_VFIO_NOIOMMU
@@ -186,6 +192,8 @@ static void vfio_device_release(struct device *dev)
if (device->ops->release)
device->ops->release(device);
+ iput(device->inode);
+ simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
kvfree(device);
}
@@ -228,6 +236,34 @@ out_free:
}
EXPORT_SYMBOL_GPL(_vfio_alloc_device);
+static int vfio_fs_init_fs_context(struct fs_context *fc)
+{
+ return init_pseudo(fc, VFIO_MAGIC) ? 0 : -ENOMEM;
+}
+
+static struct file_system_type vfio_fs_type = {
+ .name = "vfio",
+ .owner = THIS_MODULE,
+ .init_fs_context = vfio_fs_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
+static struct inode *vfio_fs_inode_new(void)
+{
+ struct inode *inode;
+ int ret;
+
+ ret = simple_pin_fs(&vfio_fs_type, &vfio.vfs_mount, &vfio.fs_count);
+ if (ret)
+ return ERR_PTR(ret);
+
+ inode = alloc_anon_inode(vfio.vfs_mount->mnt_sb);
+ if (IS_ERR(inode))
+ simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
+
+ return inode;
+}
+
/*
* Initialize a vfio_device so it can be registered to vfio core.
*/
@@ -246,6 +282,11 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev,
init_completion(&device->comp);
device->dev = dev;
device->ops = ops;
+ device->inode = vfio_fs_inode_new();
+ if (IS_ERR(device->inode)) {
+ ret = PTR_ERR(device->inode);
+ goto out_inode;
+ }
if (ops->init) {
ret = ops->init(device);
@@ -260,6 +301,9 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev,
return 0;
out_uninit:
+ iput(device->inode);
+ simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
+out_inode:
vfio_release_device_set(device);
ida_free(&vfio.device_ida, device->index);
return ret;
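
The vfio_main.c changes give every vfio_device an inode from a private "vfio" pseudo filesystem, and the cdev/group open paths point filep->f_mapping at that inode's address_space; vfio_pci_zap_bars() can then revoke every user mapping of the device with a single unmap_mapping_range() call instead of walking a vma list. A minimal sketch of the pattern, with hypothetical example_* names rather than the vfio types:

    #include <linux/fs.h>
    #include <linux/mm.h>

    struct example_device {
            struct inode *inode;    /* anon inode shared by all opens */
    };

    /* Called from the driver's ->open(), after private_data is set up. */
    static void example_link_mapping(struct example_device *edev,
                                     struct file *filep)
    {
            /*
             * Route all mmaps of every file descriptor through one
             * address_space so they can be torn down together.
             */
            filep->f_mapping = edev->inode->i_mapping;
    }

    /* Revoke access: zap every VMA that maps this device. */
    static void example_revoke_mappings(struct example_device *edev)
    {
            unmap_mapping_range(edev->inode->i_mapping, 0, 0, true);
    }

After such a zap, any later user access faults back into the driver's ->fault handler (as sketched after the vfio_pci_core.c diff above), which can refuse the mapping while the device is disabled or in reset.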
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 85eea38dbdf4..2882944d23cc 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -257,6 +257,7 @@ config GPIO_WATCHDOG_ARCH_INITCALL
config LENOVO_SE10_WDT
tristate "Lenovo SE10 Watchdog"
depends on (X86 && DMI) || COMPILE_TEST
+ depends on HAS_IOPORT
select WATCHDOG_CORE
help
If you say yes here you get support for the watchdog
diff --git a/drivers/watchdog/menz69_wdt.c b/drivers/watchdog/menz69_wdt.c
index c7de30270043..0508a65acfa6 100644
--- a/drivers/watchdog/menz69_wdt.c
+++ b/drivers/watchdog/menz69_wdt.c
@@ -161,6 +161,7 @@ static struct mcb_driver men_z069_driver = {
module_mcb_driver(men_z069_driver);
MODULE_AUTHOR("Johannes Thumshirn <jth@kernel.org>");
+MODULE_DESCRIPTION("Watchdog driver for the MEN z069 IP-Core");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z069");
MODULE_IMPORT_NS(MCB);
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index a7a12f2fe9de..b6e0236509bb 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -370,5 +370,6 @@ static struct platform_driver omap_wdt_driver = {
module_platform_driver(omap_wdt_driver);
MODULE_AUTHOR("George G. Davis");
+MODULE_DESCRIPTION("Driver for the TI OMAP 16xx/24xx/34xx 32KHz (non-secure) watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap_wdt");
diff --git a/drivers/watchdog/simatic-ipc-wdt.c b/drivers/watchdog/simatic-ipc-wdt.c
index cdc1a2e15180..1e91f0a560ff 100644
--- a/drivers/watchdog/simatic-ipc-wdt.c
+++ b/drivers/watchdog/simatic-ipc-wdt.c
@@ -227,6 +227,7 @@ static struct platform_driver simatic_ipc_wdt_driver = {
module_platform_driver(simatic_ipc_wdt_driver);
+MODULE_DESCRIPTION("Siemens SIMATIC IPC driver for Watchdogs");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("Gerd Haeussler <gerd.haeussler.ext@siemens.com>");
diff --git a/drivers/watchdog/ts4800_wdt.c b/drivers/watchdog/ts4800_wdt.c
index 0099403f4992..24b1ad52102e 100644
--- a/drivers/watchdog/ts4800_wdt.c
+++ b/drivers/watchdog/ts4800_wdt.c
@@ -200,5 +200,6 @@ static struct platform_driver ts4800_wdt_driver = {
module_platform_driver(ts4800_wdt_driver);
MODULE_AUTHOR("Damien Riegel <damien.riegel@savoirfairelinux.com>");
+MODULE_DESCRIPTION("Watchdog driver for TS-4800 based boards");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ts4800_wdt");
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 09d17e20f4a7..8c80d04811e4 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -118,6 +118,7 @@ static struct platform_driver twl4030_wdt_driver = {
module_platform_driver(twl4030_wdt_driver);
MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("TWL4030 Watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:twl4030_wdt");