Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/acpi_video.c | 17
-rw-r--r-- drivers/acpi/resource.c | 32
-rw-r--r-- drivers/acpi/video_detect.c | 23
-rw-r--r-- drivers/acpi/x86/s2idle.c | 87
-rw-r--r-- drivers/ata/ahci.c | 32
-rw-r--r-- drivers/block/Kconfig | 43
-rw-r--r-- drivers/block/Makefile | 1
-rw-r--r-- drivers/block/drbd/drbd_req.c | 2
-rw-r--r-- drivers/block/pktcdvd.c | 2944
-rw-r--r-- drivers/block/ps3vram.c | 2
-rw-r--r-- drivers/block/ublk_drv.c | 3
-rw-r--r-- drivers/block/virtio_blk.c | 35
-rw-r--r-- drivers/block/xen-blkback/xenbus.c | 4
-rw-r--r-- drivers/block/xen-blkfront.c | 3
-rw-r--r-- drivers/char/tpm/tpm-interface.c | 4
-rw-r--r-- drivers/char/tpm/xen-tpmfront.c | 3
-rw-r--r-- drivers/crypto/atmel-ecc.c | 4
-rw-r--r-- drivers/crypto/atmel-i2c.c | 4
-rw-r--r-- drivers/crypto/atmel-i2c.h | 2
-rw-r--r-- drivers/crypto/caam/blob_gen.c | 2
-rw-r--r-- drivers/crypto/virtio/virtio_crypto_skcipher_algs.c | 3
-rw-r--r-- drivers/dma-buf/dma-buf-sysfs-stats.c | 7
-rw-r--r-- drivers/dma-buf/dma-buf-sysfs-stats.h | 4
-rw-r--r-- drivers/dma-buf/dma-buf.c | 82
-rw-r--r-- drivers/gpio/gpio-eic-sprd.c | 23
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 3
-rw-r--r-- drivers/gpio/gpio-pmic-eic-sprd.c | 29
-rw-r--r-- drivers/gpio/gpio-sifive.c | 1
-rw-r--r-- drivers/gpio/gpio-sprd.c | 9
-rw-r--r-- drivers/gpio/gpiolib.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 94
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 59
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 42
-rw-r--r-- drivers/gpu/drm/i915/gvt/debugfs.c | 36
-rw-r--r-- drivers/gpu/drm/i915/gvt/dmabuf.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c | 21
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.h | 15
-rw-r--r-- drivers/gpu/drm/i915/gvt/interrupt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 35
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c | 12
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 37
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 4
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-plane.c | 14
-rw-r--r-- drivers/gpu/drm/meson/meson_viu.c | 5
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_drv.c | 27
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gem.c | 16
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gem.h | 5
-rw-r--r-- drivers/gpu/drm/scheduler/sched_entity.c | 2
-rw-r--r-- drivers/gpu/drm/scheduler/sched_main.c | 4
-rw-r--r-- drivers/gpu/drm/tests/Makefile | 2
-rw-r--r-- drivers/gpu/drm/tests/drm_mm_test.c | 6
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_object.c | 6
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front.c | 3
-rw-r--r-- drivers/infiniband/hw/mlx5/counters.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 49
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.h | 8
-rw-r--r-- drivers/input/misc/xen-kbdfront.c | 5
-rw-r--r-- drivers/md/dm.c | 2
-rw-r--r-- drivers/md/md.c | 2
-rw-r--r-- drivers/mtd/parsers/scpart.c | 2
-rw-r--r-- drivers/mtd/parsers/tplink_safeloader.c | 4
-rw-r--r-- drivers/mtd/spi-nor/core.c | 1
-rw-r--r-- drivers/net/bonding/bond_3ad.c | 1
-rw-r--r-- drivers/net/bonding/bond_main.c | 8
-rw-r--r-- drivers/net/dsa/mv88e6xxx/Kconfig | 4
-rw-r--r-- drivers/net/dsa/qca/qca8k-8xxx.c | 164
-rw-r--r-- drivers/net/dsa/qca/qca8k.h | 5
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_com.c | 29
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 83
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.h | 17
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 3
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-i2c.c | 4
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 31
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 20
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 6
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_ierb.c | 6
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 10
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 132
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 71
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 5
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_gnss.c | 24
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_defines.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_ptp.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 14
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 1
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 30
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 42
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_port.c | 2
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c | 3
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c | 3
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net.h | 7
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 61
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.c | 28
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h | 10
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 8
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 63
-rw-r--r-- drivers/net/ethernet/renesas/rswitch.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 26
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 5
-rw-r--r-- drivers/net/ipa/data/ipa_data-v4.7.c | 2
-rw-r--r-- drivers/net/ipa/ipa_interrupt.c | 3
-rw-r--r-- drivers/net/phy/xilinx_gmii2rgmii.c | 1
-rw-r--r-- drivers/net/usb/cdc_ether.c | 6
-rw-r--r-- drivers/net/usb/r8152.c | 1
-rw-r--r-- drivers/net/usb/rndis_host.c | 3
-rw-r--r-- drivers/net/veth.c | 5
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 8
-rw-r--r-- drivers/net/vrf.c | 6
-rw-r--r-- drivers/net/vxlan/vxlan_core.c | 19
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc.h | 14
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c | 5
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 5
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/Kconfig | 1
-rw-r--r-- drivers/net/wireless/ti/Makefile | 3
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 3
-rw-r--r-- drivers/net/xen-netfront.c | 4
-rw-r--r-- drivers/nfc/pn533/usb.c | 44
-rw-r--r-- drivers/nvme/host/auth.c | 2
-rw-r--r-- drivers/nvme/host/core.c | 34
-rw-r--r-- drivers/nvme/host/ioctl.c | 28
-rw-r--r-- drivers/nvme/host/multipath.c | 2
-rw-r--r-- drivers/nvme/host/nvme.h | 2
-rw-r--r-- drivers/nvme/host/pci.c | 46
-rw-r--r-- drivers/nvme/target/admin-cmd.c | 37
-rw-r--r-- drivers/nvme/target/passthru.c | 11
-rw-r--r-- drivers/of/fdt.c | 60
-rw-r--r-- drivers/pci/xen-pcifront.c | 4
-rw-r--r-- drivers/regulator/da9211-regulator.c | 11
-rw-r--r-- drivers/regulator/qcom-rpmh-regulator.c | 2
-rw-r--r-- drivers/s390/block/dcssblk.c | 2
-rw-r--r-- drivers/s390/net/qeth_core_sys.c | 12
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 2
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 3
-rw-r--r-- drivers/scsi/mpi3mr/Makefile | 2
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_fw.c | 3
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 3
-rw-r--r-- drivers/scsi/scsi_debug.c | 2
-rw-r--r-- drivers/scsi/scsi_error.c | 5
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 50
-rw-r--r-- drivers/scsi/storvsc_drv.c | 3
-rw-r--r-- drivers/scsi/xen-scsifront.c | 4
-rw-r--r-- drivers/spi/spi-cadence-xspi.c | 5
-rw-r--r-- drivers/spi/spi-mt65xx.c | 12
-rw-r--r-- drivers/spi/spi.c | 2
-rw-r--r-- drivers/spi/spidev.c | 36
-rw-r--r-- drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c | 4
-rw-r--r-- drivers/tty/hvc/hvc_xen.c | 50
-rw-r--r-- drivers/ufs/core/ufshcd.c | 26
-rw-r--r-- drivers/usb/common/ulpi.c | 2
-rw-r--r-- drivers/usb/dwc3/dwc3-xilinx.c | 1
-rw-r--r-- drivers/usb/dwc3/gadget.c | 5
-rw-r--r-- drivers/usb/fotg210/fotg210-core.c | 5
-rw-r--r-- drivers/usb/fotg210/fotg210-udc.c | 2
-rw-r--r-- drivers/usb/host/xen-hcd.c | 4
-rw-r--r-- drivers/vdpa/mlx5/core/mlx5_vdpa.h | 5
-rw-r--r-- drivers/vdpa/mlx5/core/mr.c | 46
-rw-r--r-- drivers/vdpa/mlx5/net/mlx5_vnet.c | 78
-rw-r--r-- drivers/vdpa/vdpa.c | 11
-rw-r--r-- drivers/vdpa/vdpa_sim/vdpa_sim.c | 7
-rw-r--r-- drivers/vdpa/vdpa_sim/vdpa_sim_blk.c | 4
-rw-r--r-- drivers/vdpa/vdpa_sim/vdpa_sim_net.c | 7
-rw-r--r-- drivers/vdpa/vdpa_user/vduse_dev.c | 3
-rw-r--r-- drivers/vdpa/virtio_pci/vp_vdpa.c | 2
-rw-r--r-- drivers/vhost/vdpa.c | 52
-rw-r--r-- drivers/vhost/vhost.c | 4
-rw-r--r-- drivers/vhost/vringh.c | 5
-rw-r--r-- drivers/vhost/vsock.c | 9
-rw-r--r-- drivers/video/fbdev/Kconfig | 4
-rw-r--r-- drivers/video/fbdev/aty/atyfb_base.c | 3
-rw-r--r-- drivers/video/fbdev/matrox/matroxfb_base.c | 4
-rw-r--r-- drivers/video/fbdev/omap/omapfb_main.c | 5
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dsi.c | 28
-rw-r--r-- drivers/video/fbdev/xen-fbfront.c | 6
-rw-r--r-- drivers/virtio/virtio.c | 12
-rw-r--r-- drivers/virtio/virtio_pci_modern.c | 4
-rw-r--r-- drivers/virtio/virtio_ring.c | 2
-rw-r--r-- drivers/xen/pvcalls-back.c | 3
-rw-r--r-- drivers/xen/pvcalls-front.c | 7
-rw-r--r-- drivers/xen/xen-pciback/xenbus.c | 4
-rw-r--r-- drivers/xen/xen-scsiback.c | 4
234 files changed, 5038 insertions(+), 1159 deletions(-)
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 30d8fd03fec7..97b711e57bff 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -70,11 +70,7 @@ module_param(device_id_scheme, bool, 0444);
static int only_lcd = -1;
module_param(only_lcd, int, 0444);
-/*
- * Display probing is known to take up to 5 seconds, so delay the fallback
- * backlight registration by 5 seconds + 3 seconds for some extra margin.
- */
-static int register_backlight_delay = 8;
+static int register_backlight_delay;
module_param(register_backlight_delay, int, 0444);
MODULE_PARM_DESC(register_backlight_delay,
"Delay in seconds before doing fallback (non GPU driver triggered) "
@@ -2176,6 +2172,17 @@ static bool should_check_lcd_flag(void)
return false;
}
+/*
+ * At least one graphics driver has reported that no LCD is connected
+ * via the native interface, cancel the registration for fallback acpi_video0.
+ * If another driver still deems this necessary, it can explicitly register it.
+ */
+void acpi_video_report_nolcd(void)
+{
+ cancel_delayed_work(&video_bus_register_backlight_work);
+}
+EXPORT_SYMBOL(acpi_video_report_nolcd);
+
int acpi_video_register(void)
{
int ret = 0;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index f27914aedbd5..16dcd31d124f 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -432,10 +432,24 @@ static const struct dmi_system_id asus_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
},
},
+ {
+ .ident = "Asus ExpertBook B2502",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
+ },
+ },
{ }
};
-static const struct dmi_system_id lenovo_82ra[] = {
+static const struct dmi_system_id lenovo_laptop[] = {
+ {
+ .ident = "LENOVO IdeaPad Flex 5 14ALC7",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82R9"),
+ },
+ },
{
.ident = "LENOVO IdeaPad Flex 5 16ALC7",
.matches = {
@@ -446,6 +460,17 @@ static const struct dmi_system_id lenovo_82ra[] = {
{ }
};
+static const struct dmi_system_id schenker_gm_rg[] = {
+ {
+ .ident = "XMG CORE 15 (M22)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+ DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+ },
+ },
+ { }
+};
+
struct irq_override_cmp {
const struct dmi_system_id *system;
unsigned char irq;
@@ -458,8 +483,9 @@ struct irq_override_cmp {
static const struct irq_override_cmp override_table[] = {
{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
- { lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
- { lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+ { lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+ { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+ { schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
};
static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index a934bbc9dd37..1b78c7434492 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
+#include <linux/pnp.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <acpi/video.h>
@@ -105,6 +106,26 @@ static bool nvidia_wmi_ec_supported(void)
}
#endif
+static bool apple_gmux_backlight_present(void)
+{
+ struct acpi_device *adev;
+ struct device *dev;
+
+ adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
+ if (!adev)
+ return false;
+
+ dev = acpi_get_first_physical_node(adev);
+ if (!dev)
+ return false;
+
+ /*
+ * drivers/platform/x86/apple-gmux.c only supports old style
+ * Apple GMUX with an IO-resource.
+ */
+ return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL;
+}
+
/* Force to use vendor driver when the ACPI device is known to be
* buggy */
static int video_detect_force_vendor(const struct dmi_system_id *d)
@@ -767,7 +788,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
if (nvidia_wmi_ec_present)
return acpi_backlight_nvidia_wmi_ec;
- if (apple_gmux_present())
+ if (apple_gmux_backlight_present())
return acpi_backlight_apple_gmux;
/* Use ACPI video if available, except when native should be preferred. */
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 5350c73564b6..c7afce465a07 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -28,10 +28,6 @@ static bool sleep_no_lps0 __read_mostly;
module_param(sleep_no_lps0, bool, 0644);
MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
-static bool prefer_microsoft_dsm_guid __read_mostly;
-module_param(prefer_microsoft_dsm_guid, bool, 0644);
-MODULE_PARM_DESC(prefer_microsoft_dsm_guid, "Prefer using Microsoft GUID in LPS0 device _DSM evaluation");
-
static const struct acpi_device_id lps0_device_ids[] = {
{"PNP0D80", },
{"", },
@@ -369,27 +365,15 @@ out:
}
struct amd_lps0_hid_device_data {
- const unsigned int rev_id;
const bool check_off_by_one;
- const bool prefer_amd_guid;
};
static const struct amd_lps0_hid_device_data amd_picasso = {
- .rev_id = 0,
.check_off_by_one = true,
- .prefer_amd_guid = false,
};
static const struct amd_lps0_hid_device_data amd_cezanne = {
- .rev_id = 0,
- .check_off_by_one = false,
- .prefer_amd_guid = false,
-};
-
-static const struct amd_lps0_hid_device_data amd_rembrandt = {
- .rev_id = 2,
.check_off_by_one = false,
- .prefer_amd_guid = true,
};
static const struct acpi_device_id amd_hid_ids[] = {
@@ -397,69 +381,27 @@ static const struct acpi_device_id amd_hid_ids[] = {
{"AMD0005", (kernel_ulong_t)&amd_picasso, },
{"AMDI0005", (kernel_ulong_t)&amd_picasso, },
{"AMDI0006", (kernel_ulong_t)&amd_cezanne, },
- {"AMDI0007", (kernel_ulong_t)&amd_rembrandt, },
{}
};
-static int lps0_prefer_microsoft(const struct dmi_system_id *id)
+static int lps0_prefer_amd(const struct dmi_system_id *id)
{
- pr_debug("Preferring Microsoft GUID.\n");
- prefer_microsoft_dsm_guid = true;
+ pr_debug("Using AMD GUID w/ _REV 2.\n");
+ rev_id = 2;
return 0;
}
-
static const struct dmi_system_id s2idle_dmi_table[] __initconst = {
{
/*
- * ASUS TUF Gaming A17 FA707RE
- * https://bugzilla.kernel.org/show_bug.cgi?id=216101
- */
- .callback = lps0_prefer_microsoft,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASUS TUF Gaming A17"),
- },
- },
- {
- /* ASUS ROG Zephyrus G14 (2022) */
- .callback = lps0_prefer_microsoft,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus G14 GA402"),
- },
- },
- {
- /*
- * Lenovo Yoga Slim 7 Pro X 14ARH7
- * https://bugzilla.kernel.org/show_bug.cgi?id=216473 : 82V2
- * https://bugzilla.kernel.org/show_bug.cgi?id=216438 : 82TL
- */
- .callback = lps0_prefer_microsoft,
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "82"),
- },
- },
- {
- /*
- * ASUSTeK COMPUTER INC. ROG Flow X13 GV301RE_GV301RE
- * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
+ * AMD Rembrandt based HP EliteBook 835/845/865 G9
+ * Contains specialized AML in AMD/_REV 2 path to avoid
+ * triggering a bug in Qualcomm WLAN firmware. This may be
+ * removed in the future if that firmware is fixed.
*/
- .callback = lps0_prefer_microsoft,
+ .callback = lps0_prefer_amd,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X13 GV301"),
- },
- },
- {
- /*
- * ASUSTeK COMPUTER INC. ROG Flow X16 GV601RW_GV601RW
- * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
- */
- .callback = lps0_prefer_microsoft,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X16 GV601"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ DMI_MATCH(DMI_BOARD_NAME, "8990"),
},
},
{}
@@ -484,16 +426,14 @@ static int lps0_device_attach(struct acpi_device *adev,
if (dev_id->id[0])
data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data;
else
- data = &amd_rembrandt;
- rev_id = data->rev_id;
+ data = &amd_cezanne;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) {
lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
- } else if (lps0_dsm_func_mask_microsoft > 0 && data->prefer_amd_guid &&
- !prefer_microsoft_dsm_guid) {
+ } else if (lps0_dsm_func_mask_microsoft > 0 && rev_id) {
lps0_dsm_func_mask_microsoft = -EINVAL;
acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
}
@@ -501,8 +441,7 @@ static int lps0_device_attach(struct acpi_device *adev,
rev_id = 1;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
- if (!prefer_microsoft_dsm_guid)
- lps0_dsm_func_mask_microsoft = -EINVAL;
+ lps0_dsm_func_mask_microsoft = -EINVAL;
}
if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 0cfd0ec6229b..14a1c0d14916 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -83,6 +83,7 @@ enum board_ids {
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ahci_remove_one(struct pci_dev *dev);
static void ahci_shutdown_one(struct pci_dev *dev);
+static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -676,6 +677,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
ahci_save_initial_config(&pdev->dev, hpriv);
}
+static int ahci_pci_reset_controller(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ /*
+ * If platform firmware failed to enable ports, try to enable
+ * them here.
+ */
+ ahci_intel_pcs_quirk(pdev, hpriv);
+
+ return 0;
+}
+
static void ahci_pci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
@@ -870,7 +890,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
ahci_pci_init_controller(host);
@@ -906,7 +926,7 @@ static int ahci_pci_device_resume(struct device *dev)
ahci_mcp89_apple_enable(pdev);
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
@@ -1784,12 +1804,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
- /*
- * If platform firmware failed to enable ports, try to enable
- * them here.
- */
- ahci_intel_pcs_quirk(pdev, hpriv);
-
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ) {
pi.flags |= ATA_FLAG_NCQ;
@@ -1899,7 +1913,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index a2184b428493..a41145d52de9 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -285,6 +285,49 @@ config BLK_DEV_RAM_SIZE
The default value is 4096 kilobytes. Only change this if you know
what you are doing.
+config CDROM_PKTCDVD
+ tristate "Packet writing on CD/DVD media (DEPRECATED)"
+ depends on !UML
+ depends on SCSI
+ select CDROM
+ help
+ Note: This driver is deprecated and will be removed from the
+ kernel in the near future!
+
+ If you have a CDROM/DVD drive that supports packet writing, say
+ Y to include support. It should work with any MMC/Mt Fuji
+ compliant ATAPI or SCSI drive, which is just about any newer
+ DVD/CD writer.
+
+ Currently only writing to CD-RW, DVD-RW, DVD+RW and DVDRAM discs
+ is possible.
+ DVD-RW disks must be in restricted overwrite mode.
+
+ See the file <file:Documentation/cdrom/packet-writing.rst>
+ for further information on the use of this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pktcdvd.
+
+config CDROM_PKTCDVD_BUFFERS
+ int "Free buffers for data gathering"
+ depends on CDROM_PKTCDVD
+ default "8"
+ help
+ This controls the maximum number of active concurrent packets. More
+ concurrent packets can increase write performance, but also require
+ more memory. Each concurrent packet will require approximately 64Kb
+ of non-swappable kernel memory, memory which will be allocated when
+ a disc is opened for writing.
+
+config CDROM_PKTCDVD_WCACHE
+ bool "Enable write caching"
+ depends on CDROM_PKTCDVD
+ help
+ If enabled, write caching will be set for the CD-R/W device. For now
+ this option is dangerous unless the CD-RW media is known good, as we
+ don't do deferred write error handling yet.
+
config ATA_OVER_ETH
tristate "ATA over Ethernet support"
depends on NET
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 962ee65d8ca3..101612cba303 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_N64CART) += n64cart.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
+obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index eb14ec8ec04c..e36216d50753 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1607,6 +1607,8 @@ void drbd_submit_bio(struct bio *bio)
struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
bio = bio_split_to_limits(bio);
+ if (!bio)
+ return;
/*
* what we "blindly" assume:
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
new file mode 100644
index 000000000000..4cea3b08087e
--- /dev/null
+++ b/drivers/block/pktcdvd.c
@@ -0,0 +1,2944 @@
+/*
+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
+ * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License. See linux/COPYING for more information.
+ *
+ * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
+ * DVD-RAM devices.
+ *
+ * Theory of operation:
+ *
+ * At the lowest level, there is the standard driver for the CD/DVD device,
+ * such as drivers/scsi/sr.c. This driver can handle read and write requests,
+ * but it doesn't know anything about the special restrictions that apply to
+ * packet writing. One restriction is that write requests must be aligned to
+ * packet boundaries on the physical media, and the size of a write request
+ * must be equal to the packet size. Another restriction is that a
+ * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
+ * command, if the previous command was a write.
+ *
+ * The purpose of the packet writing driver is to hide these restrictions from
+ * higher layers, such as file systems, and present a block device that can be
+ * randomly read and written using 2kB-sized blocks.
+ *
+ * The lowest layer in the packet writing driver is the packet I/O scheduler.
+ * Its data is defined by the struct packet_iosched and includes two bio
+ * queues with pending read and write requests. These queues are processed
+ * by the pkt_iosched_process_queue() function. The write requests in this
+ * queue are already properly aligned and sized. This layer is responsible for
+ * issuing the flush cache commands and scheduling the I/O in a good order.
+ *
+ * The next layer transforms unaligned write requests to aligned writes. This
+ * transformation requires reading missing pieces of data from the underlying
+ * block device, assembling the pieces to full packets and queuing them to the
+ * packet I/O scheduler.
+ *
+ * At the top layer there is a custom ->submit_bio function that forwards
+ * read requests directly to the iosched queue and puts write requests in the
+ * unaligned write queue. A kernel thread performs the necessary read
+ * gathering to convert the unaligned writes to aligned writes and then feeds
+ * them to the packet I/O scheduler.
+ *
+ *************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pktcdvd.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/compat.h>
+#include <linux/kthread.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/file.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/freezer.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/backing-dev.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/nospec.h>
+#include <linux/uaccess.h>
+
+#define DRIVER_NAME "pktcdvd"
+
+#define pkt_err(pd, fmt, ...) \
+ pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_notice(pd, fmt, ...) \
+ pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_info(pd, fmt, ...) \
+ pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
+
+#define pkt_dbg(level, pd, fmt, ...) \
+do { \
+ if (level == 2 && PACKET_DEBUG >= 2) \
+ pr_notice("%s: %s():" fmt, \
+ pd->name, __func__, ##__VA_ARGS__); \
+ else if (level == 1 && PACKET_DEBUG >= 1) \
+ pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \
+} while (0)
+
+#define MAX_SPEED 0xffff
+
+static DEFINE_MUTEX(pktcdvd_mutex);
+static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
+static struct proc_dir_entry *pkt_proc;
+static int pktdev_major;
+static int write_congestion_on = PKT_WRITE_CONGESTION_ON;
+static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
+static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
+static mempool_t psd_pool;
+static struct bio_set pkt_bio_set;
+
+static struct class *class_pktcdvd = NULL; /* /sys/class/pktcdvd */
+static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
+
+/* forward declaration */
+static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
+static int pkt_remove_dev(dev_t pkt_dev);
+static int pkt_seq_show(struct seq_file *m, void *p);
+
+static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
+{
+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
+}
+
+/**********************************************************
+ * sysfs interface for pktcdvd
+ * by (C) 2006 Thomas Maier <balagi@justmail.de>
+
+ /sys/class/pktcdvd/pktcdvd[0-7]/
+ stat/reset
+ stat/packets_started
+ stat/packets_finished
+ stat/kb_written
+ stat/kb_read
+ stat/kb_read_gather
+ write_queue/size
+ write_queue/congestion_off
+ write_queue/congestion_on
+ **********************************************************/
+
+static ssize_t packets_started_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
+}
+static DEVICE_ATTR_RO(packets_started);
+
+static ssize_t packets_finished_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
+}
+static DEVICE_ATTR_RO(packets_finished);
+
+static ssize_t kb_written_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
+}
+static DEVICE_ATTR_RO(kb_written);
+
+static ssize_t kb_read_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
+}
+static DEVICE_ATTR_RO(kb_read);
+
+static ssize_t kb_read_gather_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
+}
+static DEVICE_ATTR_RO(kb_read_gather);
+
+static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+ if (len > 0) {
+ pd->stats.pkt_started = 0;
+ pd->stats.pkt_ended = 0;
+ pd->stats.secs_w = 0;
+ pd->stats.secs_rg = 0;
+ pd->stats.secs_r = 0;
+ }
+ return len;
+}
+static DEVICE_ATTR_WO(reset);
+
+static struct attribute *pkt_stat_attrs[] = {
+ &dev_attr_packets_finished.attr,
+ &dev_attr_packets_started.attr,
+ &dev_attr_kb_read.attr,
+ &dev_attr_kb_written.attr,
+ &dev_attr_kb_read_gather.attr,
+ &dev_attr_reset.attr,
+ NULL,
+};
+
+static const struct attribute_group pkt_stat_group = {
+ .name = "stat",
+ .attrs = pkt_stat_attrs,
+};
+
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int n;
+
+ spin_lock(&pd->lock);
+ n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
+ spin_unlock(&pd->lock);
+ return n;
+}
+static DEVICE_ATTR_RO(size);
+
+static void init_write_congestion_marks(int* lo, int* hi)
+{
+ if (*hi > 0) {
+ *hi = max(*hi, 500);
+ *hi = min(*hi, 1000000);
+ if (*lo <= 0)
+ *lo = *hi - 100;
+ else {
+ *lo = min(*lo, *hi - 100);
+ *lo = max(*lo, 100);
+ }
+ } else {
+ *hi = -1;
+ *lo = -1;
+ }
+}
+
+static ssize_t congestion_off_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int n;
+
+ spin_lock(&pd->lock);
+ n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
+ spin_unlock(&pd->lock);
+ return n;
+}
+
+static ssize_t congestion_off_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int val;
+
+ if (sscanf(buf, "%d", &val) == 1) {
+ spin_lock(&pd->lock);
+ pd->write_congestion_off = val;
+ init_write_congestion_marks(&pd->write_congestion_off,
+ &pd->write_congestion_on);
+ spin_unlock(&pd->lock);
+ }
+ return len;
+}
+static DEVICE_ATTR_RW(congestion_off);
+
+static ssize_t congestion_on_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int n;
+
+ spin_lock(&pd->lock);
+ n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
+ spin_unlock(&pd->lock);
+ return n;
+}
+
+static ssize_t congestion_on_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int val;
+
+ if (sscanf(buf, "%d", &val) == 1) {
+ spin_lock(&pd->lock);
+ pd->write_congestion_on = val;
+ init_write_congestion_marks(&pd->write_congestion_off,
+ &pd->write_congestion_on);
+ spin_unlock(&pd->lock);
+ }
+ return len;
+}
+static DEVICE_ATTR_RW(congestion_on);
+
+static struct attribute *pkt_wq_attrs[] = {
+ &dev_attr_congestion_on.attr,
+ &dev_attr_congestion_off.attr,
+ &dev_attr_size.attr,
+ NULL,
+};
+
+static const struct attribute_group pkt_wq_group = {
+ .name = "write_queue",
+ .attrs = pkt_wq_attrs,
+};
+
+static const struct attribute_group *pkt_groups[] = {
+ &pkt_stat_group,
+ &pkt_wq_group,
+ NULL,
+};
+
+static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
+{
+ if (class_pktcdvd) {
+ pd->dev = device_create_with_groups(class_pktcdvd, NULL,
+ MKDEV(0, 0), pd, pkt_groups,
+ "%s", pd->name);
+ if (IS_ERR(pd->dev))
+ pd->dev = NULL;
+ }
+}
+
+static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
+{
+ if (class_pktcdvd)
+ device_unregister(pd->dev);
+}
+
+
+/********************************************************************
+ /sys/class/pktcdvd/
+ add map block device
+ remove unmap packet dev
+ device_map show mappings
+ *******************************************************************/
+
+static void class_pktcdvd_release(struct class *cls)
+{
+ kfree(cls);
+}
+
+static ssize_t device_map_show(struct class *c, struct class_attribute *attr,
+ char *data)
+{
+ int n = 0;
+ int idx;
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ for (idx = 0; idx < MAX_WRITERS; idx++) {
+ struct pktcdvd_device *pd = pkt_devs[idx];
+ if (!pd)
+ continue;
+ n += sprintf(data+n, "%s %u:%u %u:%u\n",
+ pd->name,
+ MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
+ MAJOR(pd->bdev->bd_dev),
+ MINOR(pd->bdev->bd_dev));
+ }
+ mutex_unlock(&ctl_mutex);
+ return n;
+}
+static CLASS_ATTR_RO(device_map);
+
+static ssize_t add_store(struct class *c, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int major, minor;
+
+ if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
+ /* pkt_setup_dev() expects caller to hold reference to self */
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ pkt_setup_dev(MKDEV(major, minor), NULL);
+
+ module_put(THIS_MODULE);
+
+ return count;
+ }
+
+ return -EINVAL;
+}
+static CLASS_ATTR_WO(add);
+
+static ssize_t remove_store(struct class *c, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int major, minor;
+ if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
+ pkt_remove_dev(MKDEV(major, minor));
+ return count;
+ }
+ return -EINVAL;
+}
+static CLASS_ATTR_WO(remove);
+
+static struct attribute *class_pktcdvd_attrs[] = {
+ &class_attr_add.attr,
+ &class_attr_remove.attr,
+ &class_attr_device_map.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(class_pktcdvd);
+
+static int pkt_sysfs_init(void)
+{
+ int ret = 0;
+
+ /*
+ * create control files in sysfs
+ * /sys/class/pktcdvd/...
+ */
+ class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
+ if (!class_pktcdvd)
+ return -ENOMEM;
+ class_pktcdvd->name = DRIVER_NAME;
+ class_pktcdvd->owner = THIS_MODULE;
+ class_pktcdvd->class_release = class_pktcdvd_release;
+ class_pktcdvd->class_groups = class_pktcdvd_groups;
+ ret = class_register(class_pktcdvd);
+ if (ret) {
+ kfree(class_pktcdvd);
+ class_pktcdvd = NULL;
+ pr_err("failed to create class pktcdvd\n");
+ return ret;
+ }
+ return 0;
+}
+
+static void pkt_sysfs_cleanup(void)
+{
+ if (class_pktcdvd)
+ class_destroy(class_pktcdvd);
+ class_pktcdvd = NULL;
+}
+
+/********************************************************************
+ entries in debugfs
+
+ /sys/kernel/debug/pktcdvd[0-7]/
+ info
+
+ *******************************************************************/
+
+static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
+{
+ return pkt_seq_show(m, p);
+}
+
+static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pkt_debugfs_seq_show, inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+ .open = pkt_debugfs_fops_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
+{
+ if (!pkt_debugfs_root)
+ return;
+ pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
+ if (!pd->dfs_d_root)
+ return;
+
+ pd->dfs_f_info = debugfs_create_file("info", 0444,
+ pd->dfs_d_root, pd, &debug_fops);
+}
+
+static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
+{
+ if (!pkt_debugfs_root)
+ return;
+ debugfs_remove(pd->dfs_f_info);
+ debugfs_remove(pd->dfs_d_root);
+ pd->dfs_f_info = NULL;
+ pd->dfs_d_root = NULL;
+}
+
+static void pkt_debugfs_init(void)
+{
+ pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
+}
+
+static void pkt_debugfs_cleanup(void)
+{
+ debugfs_remove(pkt_debugfs_root);
+ pkt_debugfs_root = NULL;
+}
+
+/* ----------------------------------------------------------*/
+
+
+static void pkt_bio_finished(struct pktcdvd_device *pd)
+{
+ BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
+ if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
+ pkt_dbg(2, pd, "queue empty\n");
+ atomic_set(&pd->iosched.attention, 1);
+ wake_up(&pd->wqueue);
+ }
+}
+
+/*
+ * Allocate a packet_data struct
+ */
+static struct packet_data *pkt_alloc_packet_data(int frames)
+{
+ int i;
+ struct packet_data *pkt;
+
+ pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
+ if (!pkt)
+ goto no_pkt;
+
+ pkt->frames = frames;
+ pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
+ if (!pkt->w_bio)
+ goto no_bio;
+
+ for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
+ pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (!pkt->pages[i])
+ goto no_page;
+ }
+
+ spin_lock_init(&pkt->lock);
+ bio_list_init(&pkt->orig_bios);
+
+ for (i = 0; i < frames; i++) {
+ pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
+ if (!pkt->r_bios[i])
+ goto no_rd_bio;
+ }
+
+ return pkt;
+
+no_rd_bio:
+ for (i = 0; i < frames; i++)
+ kfree(pkt->r_bios[i]);
+no_page:
+ for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
+ if (pkt->pages[i])
+ __free_page(pkt->pages[i]);
+ kfree(pkt->w_bio);
+no_bio:
+ kfree(pkt);
+no_pkt:
+ return NULL;
+}
+
+/*
+ * Free a packet_data struct
+ */
+static void pkt_free_packet_data(struct packet_data *pkt)
+{
+ int i;
+
+ for (i = 0; i < pkt->frames; i++)
+ kfree(pkt->r_bios[i]);
+ for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
+ __free_page(pkt->pages[i]);
+ kfree(pkt->w_bio);
+ kfree(pkt);
+}
+
+static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
+{
+ struct packet_data *pkt, *next;
+
+ BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
+
+ list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
+ pkt_free_packet_data(pkt);
+ }
+ INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
+}
+
+static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
+{
+ struct packet_data *pkt;
+
+ BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
+
+ while (nr_packets > 0) {
+ pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
+ if (!pkt) {
+ pkt_shrink_pktlist(pd);
+ return 0;
+ }
+ pkt->id = nr_packets;
+ pkt->pd = pd;
+ list_add(&pkt->list, &pd->cdrw.pkt_free_list);
+ nr_packets--;
+ }
+ return 1;
+}
+
+static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
+{
+ struct rb_node *n = rb_next(&node->rb_node);
+ if (!n)
+ return NULL;
+ return rb_entry(n, struct pkt_rb_node, rb_node);
+}
+
+static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
+{
+ rb_erase(&node->rb_node, &pd->bio_queue);
+ mempool_free(node, &pd->rb_pool);
+ pd->bio_queue_size--;
+ BUG_ON(pd->bio_queue_size < 0);
+}
+
+/*
+ * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
+ */
+static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
+{
+ struct rb_node *n = pd->bio_queue.rb_node;
+ struct rb_node *next;
+ struct pkt_rb_node *tmp;
+
+ if (!n) {
+ BUG_ON(pd->bio_queue_size > 0);
+ return NULL;
+ }
+
+ for (;;) {
+ tmp = rb_entry(n, struct pkt_rb_node, rb_node);
+ if (s <= tmp->bio->bi_iter.bi_sector)
+ next = n->rb_left;
+ else
+ next = n->rb_right;
+ if (!next)
+ break;
+ n = next;
+ }
+
+ if (s > tmp->bio->bi_iter.bi_sector) {
+ tmp = pkt_rbtree_next(tmp);
+ if (!tmp)
+ return NULL;
+ }
+ BUG_ON(s > tmp->bio->bi_iter.bi_sector);
+ return tmp;
+}
+
+/*
+ * Insert a node into the pd->bio_queue rb tree.
+ */
+static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
+{
+ struct rb_node **p = &pd->bio_queue.rb_node;
+ struct rb_node *parent = NULL;
+ sector_t s = node->bio->bi_iter.bi_sector;
+ struct pkt_rb_node *tmp;
+
+ while (*p) {
+ parent = *p;
+ tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
+ if (s < tmp->bio->bi_iter.bi_sector)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&node->rb_node, parent, p);
+ rb_insert_color(&node->rb_node, &pd->bio_queue);
+ pd->bio_queue_size++;
+}
+
+/*
+ * Send a packet_command to the underlying block device and
+ * wait for completion.
+ */
+static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
+{
+ struct request_queue *q = bdev_get_queue(pd->bdev);
+ struct scsi_cmnd *scmd;
+ struct request *rq;
+ int ret = 0;
+
+ rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
+
+ if (cgc->buflen) {
+ ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
+ GFP_NOIO);
+ if (ret)
+ goto out;
+ }
+
+ scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
+ memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);
+
+ rq->timeout = 60*HZ;
+ if (cgc->quiet)
+ rq->rq_flags |= RQF_QUIET;
+
+ blk_execute_rq(rq, false);
+ if (scmd->result)
+ ret = -EIO;
+out:
+ blk_mq_free_request(rq);
+ return ret;
+}
+
+static const char *sense_key_string(__u8 index)
+{
+ static const char * const info[] = {
+ "No sense", "Recovered error", "Not ready",
+ "Medium error", "Hardware error", "Illegal request",
+ "Unit attention", "Data protect", "Blank check",
+ };
+
+ return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
+}
+
+/*
+ * A generic sense dump / resolve mechanism should be implemented across
+ * all ATAPI + SCSI devices.
+ */
+static void pkt_dump_sense(struct pktcdvd_device *pd,
+ struct packet_command *cgc)
+{
+ struct scsi_sense_hdr *sshdr = cgc->sshdr;
+
+ if (sshdr)
+ pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
+ CDROM_PACKET_SIZE, cgc->cmd,
+ sshdr->sense_key, sshdr->asc, sshdr->ascq,
+ sense_key_string(sshdr->sense_key));
+ else
+ pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
+}
+
+/*
+ * flush the drive cache to media
+ */
+static int pkt_flush_cache(struct pktcdvd_device *pd)
+{
+ struct packet_command cgc;
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_FLUSH_CACHE;
+ cgc.quiet = 1;
+
+ /*
+ * the IMMED bit -- we default to not setting it, although that
+ * would allow a much faster close, this is safer
+ */
+#if 0
+ cgc.cmd[1] = 1 << 1;
+#endif
+ return pkt_generic_packet(pd, &cgc);
+}
+
+/*
+ * speed is given as the normal factor, e.g. 4 for 4x
+ */
+static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
+ unsigned write_speed, unsigned read_speed)
+{
+ struct packet_command cgc;
+ struct scsi_sense_hdr sshdr;
+ int ret;
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.sshdr = &sshdr;
+ cgc.cmd[0] = GPCMD_SET_SPEED;
+ cgc.cmd[2] = (read_speed >> 8) & 0xff;
+ cgc.cmd[3] = read_speed & 0xff;
+ cgc.cmd[4] = (write_speed >> 8) & 0xff;
+ cgc.cmd[5] = write_speed & 0xff;
+
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
+ pkt_dump_sense(pd, &cgc);
+
+ return ret;
+}
+
+/*
+ * Queue a bio for processing by the low-level CD device. Must be called
+ * from process context.
+ */
+static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
+{
+ spin_lock(&pd->iosched.lock);
+ if (bio_data_dir(bio) == READ)
+ bio_list_add(&pd->iosched.read_queue, bio);
+ else
+ bio_list_add(&pd->iosched.write_queue, bio);
+ spin_unlock(&pd->iosched.lock);
+
+ atomic_set(&pd->iosched.attention, 1);
+ wake_up(&pd->wqueue);
+}
+
+/*
+ * Process the queued read/write requests. This function handles special
+ * requirements for CDRW drives:
+ * - A cache flush command must be inserted before a read request if the
+ * previous request was a write.
+ * - Switching between reading and writing is slow, so don't do it more often
+ * than necessary.
+ * - Optimize for throughput at the expense of latency. This means that streaming
+ * writes will never be interrupted by a read, but if the drive has to seek
+ * before the next write, switch to reading instead if there are any pending
+ * read requests.
+ * - Set the read speed according to current usage pattern. When only reading
+ * from the device, it's best to use the highest possible read speed, but
+ * when switching often between reading and writing, it's better to have the
+ * same read and write speeds.
+ */
+static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
+{
+
+ if (atomic_read(&pd->iosched.attention) == 0)
+ return;
+ atomic_set(&pd->iosched.attention, 0);
+
+ for (;;) {
+ struct bio *bio;
+ int reads_queued, writes_queued;
+
+ spin_lock(&pd->iosched.lock);
+ reads_queued = !bio_list_empty(&pd->iosched.read_queue);
+ writes_queued = !bio_list_empty(&pd->iosched.write_queue);
+ spin_unlock(&pd->iosched.lock);
+
+ if (!reads_queued && !writes_queued)
+ break;
+
+ if (pd->iosched.writing) {
+ int need_write_seek = 1;
+ spin_lock(&pd->iosched.lock);
+ bio = bio_list_peek(&pd->iosched.write_queue);
+ spin_unlock(&pd->iosched.lock);
+ if (bio && (bio->bi_iter.bi_sector ==
+ pd->iosched.last_write))
+ need_write_seek = 0;
+ if (need_write_seek && reads_queued) {
+ if (atomic_read(&pd->cdrw.pending_bios) > 0) {
+ pkt_dbg(2, pd, "write, waiting\n");
+ break;
+ }
+ pkt_flush_cache(pd);
+ pd->iosched.writing = 0;
+ }
+ } else {
+ if (!reads_queued && writes_queued) {
+ if (atomic_read(&pd->cdrw.pending_bios) > 0) {
+ pkt_dbg(2, pd, "read, waiting\n");
+ break;
+ }
+ pd->iosched.writing = 1;
+ }
+ }
+
+ spin_lock(&pd->iosched.lock);
+ if (pd->iosched.writing)
+ bio = bio_list_pop(&pd->iosched.write_queue);
+ else
+ bio = bio_list_pop(&pd->iosched.read_queue);
+ spin_unlock(&pd->iosched.lock);
+
+ if (!bio)
+ continue;
+
+ if (bio_data_dir(bio) == READ)
+ pd->iosched.successive_reads +=
+ bio->bi_iter.bi_size >> 10;
+ else {
+ pd->iosched.successive_reads = 0;
+ pd->iosched.last_write = bio_end_sector(bio);
+ }
+ if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
+ if (pd->read_speed == pd->write_speed) {
+ pd->read_speed = MAX_SPEED;
+ pkt_set_speed(pd, pd->write_speed, pd->read_speed);
+ }
+ } else {
+ if (pd->read_speed != pd->write_speed) {
+ pd->read_speed = pd->write_speed;
+ pkt_set_speed(pd, pd->write_speed, pd->read_speed);
+ }
+ }
+
+ atomic_inc(&pd->cdrw.pending_bios);
+ submit_bio_noacct(bio);
+ }
+}
+
+/*
+ * Special care is needed if the underlying block device has a small
+ * max_phys_segments value.
+ */
+static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
+{
+ if ((pd->settings.size << 9) / CD_FRAMESIZE
+ <= queue_max_segments(q)) {
+ /*
+ * The cdrom device can handle one segment/frame
+ */
+ clear_bit(PACKET_MERGE_SEGS, &pd->flags);
+ return 0;
+ } else if ((pd->settings.size << 9) / PAGE_SIZE
+ <= queue_max_segments(q)) {
+ /*
+ * We can handle this case at the expense of some extra memory
+ * copies during write operations
+ */
+ set_bit(PACKET_MERGE_SEGS, &pd->flags);
+ return 0;
+ } else {
+ pkt_err(pd, "cdrom max_phys_segments too small\n");
+ return -EIO;
+ }
+}
+
+static void pkt_end_io_read(struct bio *bio)
+{
+ struct packet_data *pkt = bio->bi_private;
+ struct pktcdvd_device *pd = pkt->pd;
+ BUG_ON(!pd);
+
+ pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
+ bio, (unsigned long long)pkt->sector,
+ (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);
+
+ if (bio->bi_status)
+ atomic_inc(&pkt->io_errors);
+ bio_uninit(bio);
+ if (atomic_dec_and_test(&pkt->io_wait)) {
+ atomic_inc(&pkt->run_sm);
+ wake_up(&pd->wqueue);
+ }
+ pkt_bio_finished(pd);
+}
+
+static void pkt_end_io_packet_write(struct bio *bio)
+{
+ struct packet_data *pkt = bio->bi_private;
+ struct pktcdvd_device *pd = pkt->pd;
+ BUG_ON(!pd);
+
+ pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);
+
+ pd->stats.pkt_ended++;
+
+ bio_uninit(bio);
+ pkt_bio_finished(pd);
+ atomic_dec(&pkt->io_wait);
+ atomic_inc(&pkt->run_sm);
+ wake_up(&pd->wqueue);
+}
+
+/*
+ * Schedule reads for the holes in a packet
+ */
+static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+ int frames_read = 0;
+ struct bio *bio;
+ int f;
+ char written[PACKET_MAX_SIZE];
+
+ BUG_ON(bio_list_empty(&pkt->orig_bios));
+
+ atomic_set(&pkt->io_wait, 0);
+ atomic_set(&pkt->io_errors, 0);
+
+ /*
+ * Figure out which frames we need to read before we can write.
+ */
+ memset(written, 0, sizeof(written));
+ spin_lock(&pkt->lock);
+ bio_list_for_each(bio, &pkt->orig_bios) {
+ int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+ (CD_FRAMESIZE >> 9);
+ int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
+ pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
+ BUG_ON(first_frame < 0);
+ BUG_ON(first_frame + num_frames > pkt->frames);
+ for (f = first_frame; f < first_frame + num_frames; f++)
+ written[f] = 1;
+ }
+ spin_unlock(&pkt->lock);
+
+ if (pkt->cache_valid) {
+ pkt_dbg(2, pd, "zone %llx cached\n",
+ (unsigned long long)pkt->sector);
+ goto out_account;
+ }
+
+ /*
+ * Schedule reads for missing parts of the packet.
+ */
+ for (f = 0; f < pkt->frames; f++) {
+ int p, offset;
+
+ if (written[f])
+ continue;
+
+ bio = pkt->r_bios[f];
+ bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
+ bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+ bio->bi_end_io = pkt_end_io_read;
+ bio->bi_private = pkt;
+
+ p = (f * CD_FRAMESIZE) / PAGE_SIZE;
+ offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+ pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
+ f, pkt->pages[p], offset);
+ if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
+ BUG();
+
+ atomic_inc(&pkt->io_wait);
+ pkt_queue_bio(pd, bio);
+ frames_read++;
+ }
+
+out_account:
+ pkt_dbg(2, pd, "need %d frames for zone %llx\n",
+ frames_read, (unsigned long long)pkt->sector);
+ pd->stats.pkt_started++;
+ pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
+}
+
+/*
+ * Find a packet matching zone, or the least recently used packet if
+ * there is no match.
+ */
+static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
+{
+ struct packet_data *pkt;
+
+ list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
+ if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
+ list_del_init(&pkt->list);
+ if (pkt->sector != zone)
+ pkt->cache_valid = 0;
+ return pkt;
+ }
+ }
+ BUG();
+ return NULL;
+}
+
+static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+ if (pkt->cache_valid) {
+ list_add(&pkt->list, &pd->cdrw.pkt_free_list);
+ } else {
+ list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
+ }
+}
+
+static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
+{
+#if PACKET_DEBUG > 1
+ static const char *state_name[] = {
+ "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
+ };
+ enum packet_data_state old_state = pkt->state;
+ pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
+ pkt->id, (unsigned long long)pkt->sector,
+ state_name[old_state], state_name[state]);
+#endif
+ pkt->state = state;
+}
+
+/*
+ * Scan the work queue to see if we can start a new packet.
+ * returns non-zero if any work was done.
+ */
+static int pkt_handle_queue(struct pktcdvd_device *pd)
+{
+ struct packet_data *pkt, *p;
+ struct bio *bio = NULL;
+ sector_t zone = 0; /* Suppress gcc warning */
+ struct pkt_rb_node *node, *first_node;
+ struct rb_node *n;
+
+ atomic_set(&pd->scan_queue, 0);
+
+ if (list_empty(&pd->cdrw.pkt_free_list)) {
+ pkt_dbg(2, pd, "no pkt\n");
+ return 0;
+ }
+
+ /*
+ * Try to find a zone we are not already working on.
+ */
+ spin_lock(&pd->lock);
+ first_node = pkt_rbtree_find(pd, pd->current_sector);
+ if (!first_node) {
+ n = rb_first(&pd->bio_queue);
+ if (n)
+ first_node = rb_entry(n, struct pkt_rb_node, rb_node);
+ }
+ node = first_node;
+ while (node) {
+ bio = node->bio;
+ zone = get_zone(bio->bi_iter.bi_sector, pd);
+ list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
+ if (p->sector == zone) {
+ bio = NULL;
+ goto try_next_bio;
+ }
+ }
+ break;
+try_next_bio:
+ node = pkt_rbtree_next(node);
+ if (!node) {
+ n = rb_first(&pd->bio_queue);
+ if (n)
+ node = rb_entry(n, struct pkt_rb_node, rb_node);
+ }
+ if (node == first_node)
+ node = NULL;
+ }
+ spin_unlock(&pd->lock);
+ if (!bio) {
+ pkt_dbg(2, pd, "no bio\n");
+ return 0;
+ }
+
+ pkt = pkt_get_packet_data(pd, zone);
+
+ pd->current_sector = zone + pd->settings.size;
+ pkt->sector = zone;
+ BUG_ON(pkt->frames != pd->settings.size >> 2);
+ pkt->write_size = 0;
+
+ /*
+ * Scan work queue for bios in the same zone and link them
+ * to this packet.
+ */
+ spin_lock(&pd->lock);
+ pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
+ while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
+ bio = node->bio;
+ pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+ get_zone(bio->bi_iter.bi_sector, pd));
+ if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
+ break;
+ pkt_rbtree_erase(pd, node);
+ spin_lock(&pkt->lock);
+ bio_list_add(&pkt->orig_bios, bio);
+ pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
+ spin_unlock(&pkt->lock);
+ }
+ /* check write congestion marks, and if bio_queue_size is
+ * below, wake up any waiters
+ */
+ if (pd->congested &&
+ pd->bio_queue_size <= pd->write_congestion_off) {
+ pd->congested = false;
+ wake_up_var(&pd->congested);
+ }
+ spin_unlock(&pd->lock);
+
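+	/*
+	 * Let the packet linger in WAITING state for a while, so that more
+	 * bios for the same zone can be merged before the read-gather and
+	 * write cycle starts.
+	 */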
+ pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
+ pkt_set_state(pkt, PACKET_WAITING_STATE);
+ atomic_set(&pkt->run_sm, 1);
+
+ spin_lock(&pd->cdrw.active_list_lock);
+ list_add(&pkt->list, &pd->cdrw.pkt_active_list);
+ spin_unlock(&pd->cdrw.active_list_lock);
+
+ return 1;
+}
+
+/**
+ * bio_list_copy_data - copy contents of data buffers from one chain of bios to
+ * another
+ * @dst: destination bio list
+ * @src: source bio list
+ *
+ * Stops when it reaches the end of either the @src list or @dst list - that is,
+ * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
+ * bios).
+ */
+static void bio_list_copy_data(struct bio *dst, struct bio *src)
+{
+ struct bvec_iter src_iter = src->bi_iter;
+ struct bvec_iter dst_iter = dst->bi_iter;
+
+ while (1) {
+ if (!src_iter.bi_size) {
+ src = src->bi_next;
+ if (!src)
+ break;
+
+ src_iter = src->bi_iter;
+ }
+
+ if (!dst_iter.bi_size) {
+ dst = dst->bi_next;
+ if (!dst)
+ break;
+
+ dst_iter = dst->bi_iter;
+ }
+
+ bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
+ }
+}
+
+/*
+ * Assemble a bio to write one packet and queue the bio for processing
+ * by the underlying block device.
+ */
+static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+ int f;
+
+ bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames,
+ REQ_OP_WRITE);
+ pkt->w_bio->bi_iter.bi_sector = pkt->sector;
+ pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
+ pkt->w_bio->bi_private = pkt;
+
+ /* XXX: locking? */
+ for (f = 0; f < pkt->frames; f++) {
+ struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
+ unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+
+ if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
+ BUG();
+ }
+ pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
+
+ /*
+ * Fill-in bvec with data from orig_bios.
+ */
+ spin_lock(&pkt->lock);
+ bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
+
+ pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
+ spin_unlock(&pkt->lock);
+
+ pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
+ pkt->write_size, (unsigned long long)pkt->sector);
+
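+	/*
+	 * Keep the cached frames when the packet was only partially filled
+	 * with new data, so that a later write to the same zone can reuse
+	 * them without re-reading the disc.
+	 */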
+ if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
+ pkt->cache_valid = 1;
+ else
+ pkt->cache_valid = 0;
+
+ /* Start the write request */
+ atomic_set(&pkt->io_wait, 1);
+ pkt_queue_bio(pd, pkt->w_bio);
+}
+
+static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
+{
+ struct bio *bio;
+
+ if (status)
+ pkt->cache_valid = 0;
+
+ /* Finish all bios corresponding to this packet */
+ while ((bio = bio_list_pop(&pkt->orig_bios))) {
+ bio->bi_status = status;
+ bio_endio(bio);
+ }
+}
+
+static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+ pkt_dbg(2, pd, "pkt %d\n", pkt->id);
+
+ for (;;) {
+ switch (pkt->state) {
+ case PACKET_WAITING_STATE:
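+			/* Wait until the packet is full or its timer has expired */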
+ if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
+ return;
+
+ pkt->sleep_time = 0;
+ pkt_gather_data(pd, pkt);
+ pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
+ break;
+
+ case PACKET_READ_WAIT_STATE:
+ if (atomic_read(&pkt->io_wait) > 0)
+ return;
+
+ if (atomic_read(&pkt->io_errors) > 0) {
+ pkt_set_state(pkt, PACKET_RECOVERY_STATE);
+ } else {
+ pkt_start_write(pd, pkt);
+ }
+ break;
+
+ case PACKET_WRITE_WAIT_STATE:
+ if (atomic_read(&pkt->io_wait) > 0)
+ return;
+
+ if (!pkt->w_bio->bi_status) {
+ pkt_set_state(pkt, PACKET_FINISHED_STATE);
+ } else {
+ pkt_set_state(pkt, PACKET_RECOVERY_STATE);
+ }
+ break;
+
+ case PACKET_RECOVERY_STATE:
+ pkt_dbg(2, pd, "No recovery possible\n");
+ pkt_set_state(pkt, PACKET_FINISHED_STATE);
+ break;
+
+ case PACKET_FINISHED_STATE:
+ pkt_finish_packet(pkt, pkt->w_bio->bi_status);
+ return;
+
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+static void pkt_handle_packets(struct pktcdvd_device *pd)
+{
+ struct packet_data *pkt, *next;
+
+ /*
+ * Run state machine for active packets
+ */
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ if (atomic_read(&pkt->run_sm) > 0) {
+ atomic_set(&pkt->run_sm, 0);
+ pkt_run_state_machine(pd, pkt);
+ }
+ }
+
+ /*
+ * Move no longer active packets to the free list
+ */
+ spin_lock(&pd->cdrw.active_list_lock);
+ list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
+ if (pkt->state == PACKET_FINISHED_STATE) {
+ list_del(&pkt->list);
+ pkt_put_packet_data(pd, pkt);
+ pkt_set_state(pkt, PACKET_IDLE_STATE);
+ atomic_set(&pd->scan_queue, 1);
+ }
+ }
+ spin_unlock(&pd->cdrw.active_list_lock);
+}
+
+static void pkt_count_states(struct pktcdvd_device *pd, int *states)
+{
+ struct packet_data *pkt;
+ int i;
+
+ for (i = 0; i < PACKET_NUM_STATES; i++)
+ states[i] = 0;
+
+ spin_lock(&pd->cdrw.active_list_lock);
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ states[pkt->state]++;
+ }
+ spin_unlock(&pd->cdrw.active_list_lock);
+}
+
+/*
+ * kcdrwd is woken up when writes have been queued for one of our
+ * registered devices
+ */
+static int kcdrwd(void *foobar)
+{
+ struct pktcdvd_device *pd = foobar;
+ struct packet_data *pkt;
+ long min_sleep_time, residue;
+
+ set_user_nice(current, MIN_NICE);
+ set_freezable();
+
+ for (;;) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ /*
+ * Wait until there is something to do
+ */
+ add_wait_queue(&pd->wqueue, &wait);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* Check if we need to run pkt_handle_queue */
+ if (atomic_read(&pd->scan_queue) > 0)
+ goto work_to_do;
+
+ /* Check if we need to run the state machine for some packet */
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ if (atomic_read(&pkt->run_sm) > 0)
+ goto work_to_do;
+ }
+
+ /* Check if we need to process the iosched queues */
+ if (atomic_read(&pd->iosched.attention) != 0)
+ goto work_to_do;
+
+ /* Otherwise, go to sleep */
+ if (PACKET_DEBUG > 1) {
+ int states[PACKET_NUM_STATES];
+ pkt_count_states(pd, states);
+ pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+ states[0], states[1], states[2],
+ states[3], states[4], states[5]);
+ }
+
+ min_sleep_time = MAX_SCHEDULE_TIMEOUT;
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
+ min_sleep_time = pkt->sleep_time;
+ }
+
+ pkt_dbg(2, pd, "sleeping\n");
+ residue = schedule_timeout(min_sleep_time);
+ pkt_dbg(2, pd, "wake up\n");
+
+ /* make swsusp happy with our thread */
+ try_to_freeze();
+
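+			/* Charge the time actually slept against each waiting packet's timeout */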
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ if (!pkt->sleep_time)
+ continue;
+ pkt->sleep_time -= min_sleep_time - residue;
+ if (pkt->sleep_time <= 0) {
+ pkt->sleep_time = 0;
+ atomic_inc(&pkt->run_sm);
+ }
+ }
+
+ if (kthread_should_stop())
+ break;
+ }
+work_to_do:
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&pd->wqueue, &wait);
+
+ if (kthread_should_stop())
+ break;
+
+ /*
+ * if pkt_handle_queue returns true, we can queue
+ * another request.
+ */
+ while (pkt_handle_queue(pd))
+ ;
+
+ /*
+ * Handle packet state machine
+ */
+ pkt_handle_packets(pd);
+
+ /*
+ * Handle iosched queues
+ */
+ pkt_iosched_process_queue(pd);
+ }
+
+ return 0;
+}
+
+static void pkt_print_settings(struct pktcdvd_device *pd)
+{
+ pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
+ pd->settings.fp ? "Fixed" : "Variable",
+ pd->settings.size >> 2,
+ pd->settings.block_mode == 8 ? '1' : '2');
+}
+
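+/*
+ * Send a MODE SENSE(10) for the given page code; the reply, including the
+ * 8-byte mode parameter header, lands in cgc->buffer.
+ */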
+static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
+{
+ memset(cgc->cmd, 0, sizeof(cgc->cmd));
+
+ cgc->cmd[0] = GPCMD_MODE_SENSE_10;
+ cgc->cmd[2] = page_code | (page_control << 6);
+ cgc->cmd[7] = cgc->buflen >> 8;
+ cgc->cmd[8] = cgc->buflen & 0xff;
+ cgc->data_direction = CGC_DATA_READ;
+ return pkt_generic_packet(pd, cgc);
+}
+
+static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
+{
+ memset(cgc->cmd, 0, sizeof(cgc->cmd));
+ memset(cgc->buffer, 0, 2);
+ cgc->cmd[0] = GPCMD_MODE_SELECT_10;
+ cgc->cmd[1] = 0x10; /* PF */
+ cgc->cmd[7] = cgc->buflen >> 8;
+ cgc->cmd[8] = cgc->buflen & 0xff;
+ cgc->data_direction = CGC_DATA_WRITE;
+ return pkt_generic_packet(pd, cgc);
+}
+
+static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
+{
+ struct packet_command cgc;
+ int ret;
+
+ /* set up command and get the disc info */
+ init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_READ_DISC_INFO;
+ cgc.cmd[8] = cgc.buflen = 2;
+ cgc.quiet = 1;
+
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
+ return ret;
+
+	/* not all drives have the same disc_info length, so re-issue the
+	 * command with the length the drive tells us it can supply
+ */
+ cgc.buflen = be16_to_cpu(di->disc_information_length) +
+ sizeof(di->disc_information_length);
+
+ if (cgc.buflen > sizeof(disc_information))
+ cgc.buflen = sizeof(disc_information);
+
+ cgc.cmd[8] = cgc.buflen;
+ return pkt_generic_packet(pd, &cgc);
+}
+
+static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
+{
+ struct packet_command cgc;
+ int ret;
+
+ init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
+ cgc.cmd[1] = type & 3;
+ cgc.cmd[4] = (track & 0xff00) >> 8;
+ cgc.cmd[5] = track & 0xff;
+ cgc.cmd[8] = 8;
+ cgc.quiet = 1;
+
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
+ return ret;
+
+ cgc.buflen = be16_to_cpu(ti->track_information_length) +
+ sizeof(ti->track_information_length);
+
+ if (cgc.buflen > sizeof(track_information))
+ cgc.buflen = sizeof(track_information);
+
+ cgc.cmd[8] = cgc.buflen;
+ return pkt_generic_packet(pd, &cgc);
+}
+
+static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
+ long *last_written)
+{
+ disc_information di;
+ track_information ti;
+ __u32 last_track;
+ int ret;
+
+ ret = pkt_get_disc_info(pd, &di);
+ if (ret)
+ return ret;
+
+ last_track = (di.last_track_msb << 8) | di.last_track_lsb;
+ ret = pkt_get_track_info(pd, last_track, 1, &ti);
+ if (ret)
+ return ret;
+
+ /* if this track is blank, try the previous. */
+ if (ti.blank) {
+ last_track--;
+ ret = pkt_get_track_info(pd, last_track, 1, &ti);
+ if (ret)
+ return ret;
+ }
+
+ /* if last recorded field is valid, return it. */
+ if (ti.lra_v) {
+ *last_written = be32_to_cpu(ti.last_rec_address);
+ } else {
+ /* make it up instead */
+ *last_written = be32_to_cpu(ti.track_start) +
+ be32_to_cpu(ti.track_size);
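+		/*
+		 * Subtract the unused part of the track; the extra 7 blocks
+		 * presumably cover the packet run-in/link area.
+		 */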
+ if (ti.free_blocks)
+ *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
+ }
+ return 0;
+}
+
+/*
+ * write mode select package based on pd->settings
+ */
+static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
+{
+ struct packet_command cgc;
+ struct scsi_sense_hdr sshdr;
+ write_param_page *wp;
+ char buffer[128];
+ int ret, size;
+
+ /* doesn't apply to DVD+RW or DVD-RAM */
+ if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
+ return 0;
+
+ memset(buffer, 0, sizeof(buffer));
+ init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
+ cgc.sshdr = &sshdr;
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
+ if (ret) {
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+ }
+
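+	/* total mode data: the 2-byte length field plus the length it reports */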
+ size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
+ pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
+ if (size > sizeof(buffer))
+ size = sizeof(buffer);
+
+ /*
+ * now get it all
+ */
+ init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
+ cgc.sshdr = &sshdr;
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
+ if (ret) {
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+ }
+
+ /*
+ * write page is offset header + block descriptor length
+ */
+ wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
+
+ wp->fp = pd->settings.fp;
+ wp->track_mode = pd->settings.track_mode;
+ wp->write_type = pd->settings.write_type;
+ wp->data_block_type = pd->settings.block_mode;
+
+ wp->multi_session = 0;
+
+#ifdef PACKET_USE_LS
+ wp->link_size = 7;
+ wp->ls_v = 1;
+#endif
+
+ if (wp->data_block_type == PACKET_BLOCK_MODE1) {
+ wp->session_format = 0;
+ wp->subhdr2 = 0x20;
+ } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
+ wp->session_format = 0x20;
+ wp->subhdr2 = 8;
+#if 0
+ wp->mcn[0] = 0x80;
+ memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
+#endif
+ } else {
+ /*
+ * paranoia
+ */
+ pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
+ return 1;
+ }
+ wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
+
+ cgc.buflen = cgc.cmd[8] = size;
+ ret = pkt_mode_select(pd, &cgc);
+ if (ret) {
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+ }
+
+ pkt_print_settings(pd);
+ return 0;
+}
+
+/*
+ * 1 -- we can write to this track, 0 -- we can't
+ */
+static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
+{
+ switch (pd->mmc3_profile) {
+ case 0x1a: /* DVD+RW */
+ case 0x12: /* DVD-RAM */
+ /* The track is always writable on DVD+RW/DVD-RAM */
+ return 1;
+ default:
+ break;
+ }
+
+ if (!ti->packet || !ti->fp)
+ return 0;
+
+ /*
+ * "good" settings as per Mt Fuji.
+ */
+ if (ti->rt == 0 && ti->blank == 0)
+ return 1;
+
+ if (ti->rt == 0 && ti->blank == 1)
+ return 1;
+
+ if (ti->rt == 1 && ti->blank == 0)
+ return 1;
+
+ pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
+ return 0;
+}
+
+/*
+ * 1 -- we can write to this disc, 0 -- we can't
+ */
+static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
+{
+ switch (pd->mmc3_profile) {
+ case 0x0a: /* CD-RW */
+ case 0xffff: /* MMC3 not supported */
+ break;
+ case 0x1a: /* DVD+RW */
+ case 0x13: /* DVD-RW */
+ case 0x12: /* DVD-RAM */
+ return 1;
+ default:
+ pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
+ pd->mmc3_profile);
+ return 0;
+ }
+
+ /*
+	 * for disc type 0xff we should probably reserve a new track,
+	 * but I'm not sure; should we leave this to user apps? Probably.
+ */
+ if (di->disc_type == 0xff) {
+ pkt_notice(pd, "unknown disc - no track?\n");
+ return 0;
+ }
+
+ if (di->disc_type != 0x20 && di->disc_type != 0) {
+ pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
+ return 0;
+ }
+
+ if (di->erasable == 0) {
+ pkt_notice(pd, "disc not erasable\n");
+ return 0;
+ }
+
+ if (di->border_status == PACKET_SESSION_RESERVED) {
+ pkt_err(pd, "can't write to last track (reserved)\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
+{
+ struct packet_command cgc;
+ unsigned char buf[12];
+ disc_information di;
+ track_information ti;
+ int ret, track;
+
+ init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
+ cgc.cmd[8] = 8;
+ ret = pkt_generic_packet(pd, &cgc);
+ pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];
+
+ memset(&di, 0, sizeof(disc_information));
+ memset(&ti, 0, sizeof(track_information));
+
+ ret = pkt_get_disc_info(pd, &di);
+ if (ret) {
+ pkt_err(pd, "failed get_disc\n");
+ return ret;
+ }
+
+ if (!pkt_writable_disc(pd, &di))
+ return -EROFS;
+
+ pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
+
+ track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
+ ret = pkt_get_track_info(pd, track, 1, &ti);
+ if (ret) {
+ pkt_err(pd, "failed get_track\n");
+ return ret;
+ }
+
+ if (!pkt_writable_track(pd, &ti)) {
+ pkt_err(pd, "can't write to this track\n");
+ return -EROFS;
+ }
+
+ /*
+	 * we keep the packet size in 512-byte units: that makes request
+	 * calculations easier to deal with.
+ */
+ pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
+ if (pd->settings.size == 0) {
+ pkt_notice(pd, "detected zero packet size!\n");
+ return -ENXIO;
+ }
+ if (pd->settings.size > PACKET_MAX_SECTORS) {
+ pkt_err(pd, "packet size is too big\n");
+ return -EROFS;
+ }
+ pd->settings.fp = ti.fp;
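+	/* track start offset within a zone; the mask assumes the packet
+	 * size is a power of two
+	 */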
+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
+
+ if (ti.nwa_v) {
+ pd->nwa = be32_to_cpu(ti.next_writable);
+ set_bit(PACKET_NWA_VALID, &pd->flags);
+ }
+
+ /*
+	 * in theory we could use the LRA on -RW media as well and just zero
+	 * blocks that haven't been written yet, but in practice that
+	 * is just a no-go; we'll use it for -R, naturally.
+ */
+ if (ti.lra_v) {
+ pd->lra = be32_to_cpu(ti.last_rec_address);
+ set_bit(PACKET_LRA_VALID, &pd->flags);
+ } else {
+ pd->lra = 0xffffffff;
+ set_bit(PACKET_LRA_VALID, &pd->flags);
+ }
+
+ /*
+ * fine for now
+ */
+ pd->settings.link_loss = 7;
+ pd->settings.write_type = 0; /* packet */
+ pd->settings.track_mode = ti.track_mode;
+
+ /*
+ * mode1 or mode2 disc
+ */
+ switch (ti.data_mode) {
+ case PACKET_MODE1:
+ pd->settings.block_mode = PACKET_BLOCK_MODE1;
+ break;
+ case PACKET_MODE2:
+ pd->settings.block_mode = PACKET_BLOCK_MODE2;
+ break;
+ default:
+ pkt_err(pd, "unknown data mode\n");
+ return -EROFS;
+ }
+ return 0;
+}
+
+/*
+ * enable/disable write caching on drive
+ */
+static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
+ int set)
+{
+ struct packet_command cgc;
+ struct scsi_sense_hdr sshdr;
+ unsigned char buf[64];
+ int ret;
+
+ init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
+ cgc.sshdr = &sshdr;
+ cgc.buflen = pd->mode_offset + 12;
+
+ /*
+ * caching mode page might not be there, so quiet this command
+ */
+ cgc.quiet = 1;
+
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
+ if (ret)
+ return ret;
+
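+	/* WCE is bit 2 of byte 2 of the caching mode page */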
+ buf[pd->mode_offset + 10] |= (!!set << 2);
+
+ cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
+ ret = pkt_mode_select(pd, &cgc);
+ if (ret) {
+ pkt_err(pd, "write caching control failed\n");
+ pkt_dump_sense(pd, &cgc);
+ } else if (!ret && set)
+ pkt_notice(pd, "enabled write caching\n");
+ return ret;
+}
+
+static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
+{
+ struct packet_command cgc;
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
+ cgc.cmd[4] = lockflag ? 1 : 0;
+ return pkt_generic_packet(pd, &cgc);
+}
+
+/*
+ * Returns drive maximum write speed
+ */
+static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
+ unsigned *write_speed)
+{
+ struct packet_command cgc;
+ struct scsi_sense_hdr sshdr;
+ unsigned char buf[256+18];
+ unsigned char *cap_buf;
+ int ret, offset;
+
+ cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
+ init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
+ cgc.sshdr = &sshdr;
+
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
+ if (ret) {
+ cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
+ sizeof(struct mode_page_header);
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
+ if (ret) {
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+ }
+ }
+
+ offset = 20; /* Obsoleted field, used by older drives */
+ if (cap_buf[1] >= 28)
+ offset = 28; /* Current write speed selected */
+ if (cap_buf[1] >= 30) {
+ /* If the drive reports at least one "Logical Unit Write
+ * Speed Performance Descriptor Block", use the information
+ * in the first block. (contains the highest speed)
+ */
+ int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
+ if (num_spdb > 0)
+ offset = 34;
+ }
+
+ *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
+ return 0;
+}
+
+/* These tables are from cdrecord - I don't have the Orange Book */
+/* standard speed CD-RW (1-4x) */
+static char clv_to_speed[16] = {
+ /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
+ 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+/* high speed CD-RW (-10x) */
+static char hs_clv_to_speed[16] = {
+ /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
+ 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+/* ultra high speed CD-RW */
+static char us_clv_to_speed[16] = {
+ /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
+ 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
+};
+
+/*
+ * reads the maximum media speed from ATIP
+ */
+static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
+ unsigned *speed)
+{
+ struct packet_command cgc;
+ struct scsi_sense_hdr sshdr;
+ unsigned char buf[64];
+ unsigned int size, st, sp;
+ int ret;
+
+ init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
+ cgc.sshdr = &sshdr;
+ cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+ cgc.cmd[1] = 2;
+ cgc.cmd[2] = 4; /* READ ATIP */
+ cgc.cmd[8] = 2;
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret) {
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+ }
+ size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
+ if (size > sizeof(buf))
+ size = sizeof(buf);
+
+ init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
+ cgc.sshdr = &sshdr;
+ cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+ cgc.cmd[1] = 2;
+ cgc.cmd[2] = 4;
+ cgc.cmd[8] = size;
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret) {
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+ }
+
+ if (!(buf[6] & 0x40)) {
+ pkt_notice(pd, "disc type is not CD-RW\n");
+ return 1;
+ }
+ if (!(buf[6] & 0x4)) {
+ pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
+ return 1;
+ }
+
+ st = (buf[6] >> 3) & 0x7; /* disc sub-type */
+
+ sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
+
+ /* Info from cdrecord */
+ switch (st) {
+ case 0: /* standard speed */
+ *speed = clv_to_speed[sp];
+ break;
+ case 1: /* high speed */
+ *speed = hs_clv_to_speed[sp];
+ break;
+ case 2: /* ultra high speed */
+ *speed = us_clv_to_speed[sp];
+ break;
+ default:
+ pkt_notice(pd, "unknown disc sub-type %d\n", st);
+ return 1;
+ }
+ if (*speed) {
+ pkt_info(pd, "maximum media speed: %d\n", *speed);
+ return 0;
+ } else {
+ pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
+ return 1;
+ }
+}
+
+static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
+{
+ struct packet_command cgc;
+ struct scsi_sense_hdr sshdr;
+ int ret;
+
+ pkt_dbg(2, pd, "Performing OPC\n");
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.sshdr = &sshdr;
+ cgc.timeout = 60*HZ;
+ cgc.cmd[0] = GPCMD_SEND_OPC;
+ cgc.cmd[1] = 1;
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
+ pkt_dump_sense(pd, &cgc);
+ return ret;
+}
+
+static int pkt_open_write(struct pktcdvd_device *pd)
+{
+ int ret;
+ unsigned int write_speed, media_write_speed, read_speed;
+
+ ret = pkt_probe_settings(pd);
+ if (ret) {
+ pkt_dbg(2, pd, "failed probe\n");
+ return ret;
+ }
+
+ ret = pkt_set_write_settings(pd);
+ if (ret) {
+ pkt_dbg(1, pd, "failed saving write settings\n");
+ return -EIO;
+ }
+
+ pkt_write_caching(pd, USE_WCACHING);
+
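+	/* Query the drive's maximum write speed; fall back to 16x if it will
+	 * not say (1x CD is roughly 177 kB/s).
+	 */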
+ ret = pkt_get_max_speed(pd, &write_speed);
+ if (ret)
+ write_speed = 16 * 177;
+ switch (pd->mmc3_profile) {
+ case 0x13: /* DVD-RW */
+ case 0x1a: /* DVD+RW */
+ case 0x12: /* DVD-RAM */
+ pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
+ break;
+ default:
+ ret = pkt_media_speed(pd, &media_write_speed);
+ if (ret)
+ media_write_speed = 16;
+ write_speed = min(write_speed, media_write_speed * 177);
+ pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
+ break;
+ }
+ read_speed = write_speed;
+
+ ret = pkt_set_speed(pd, write_speed, read_speed);
+ if (ret) {
+ pkt_dbg(1, pd, "couldn't set write speed\n");
+ return -EIO;
+ }
+ pd->write_speed = write_speed;
+ pd->read_speed = read_speed;
+
+ ret = pkt_perform_opc(pd);
+ if (ret) {
+ pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
+ }
+
+ return 0;
+}
+
+/*
+ * called at open time.
+ */
+static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
+{
+ int ret;
+ long lba;
+ struct request_queue *q;
+ struct block_device *bdev;
+
+ /*
+ * We need to re-open the cdrom device without O_NONBLOCK to be able
+ * to read/write from/to it. It is already opened in O_NONBLOCK mode
+ * so open should not fail.
+ */
+ bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
+ if (IS_ERR(bdev)) {
+ ret = PTR_ERR(bdev);
+ goto out;
+ }
+
+ ret = pkt_get_last_written(pd, &lba);
+ if (ret) {
+ pkt_err(pd, "pkt_get_last_written failed\n");
+ goto out_putdev;
+ }
+
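+	/* lba counts 2048-byte CD frames; gendisk capacity is in 512-byte sectors */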
+ set_capacity(pd->disk, lba << 2);
+ set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);
+
+ q = bdev_get_queue(pd->bdev);
+ if (write) {
+ ret = pkt_open_write(pd);
+ if (ret)
+ goto out_putdev;
+ /*
+		 * Some CD-RW drives cannot handle writes larger than one packet,
+ * even if the size is a multiple of the packet size.
+ */
+ blk_queue_max_hw_sectors(q, pd->settings.size);
+ set_bit(PACKET_WRITABLE, &pd->flags);
+ } else {
+ pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
+ clear_bit(PACKET_WRITABLE, &pd->flags);
+ }
+
+ ret = pkt_set_segment_merging(pd, q);
+ if (ret)
+ goto out_putdev;
+
+ if (write) {
+ if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
+ pkt_err(pd, "not enough memory for buffers\n");
+ ret = -ENOMEM;
+ goto out_putdev;
+ }
+ pkt_info(pd, "%lukB available on disc\n", lba << 1);
+ }
+
+ return 0;
+
+out_putdev:
+ blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
+out:
+ return ret;
+}
+
+/*
+ * called when the device is closed. makes sure that the device flushes
+ * the internal cache before we close.
+ */
+static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
+{
+ if (flush && pkt_flush_cache(pd))
+ pkt_dbg(1, pd, "not flushing cache\n");
+
+ pkt_lock_door(pd, 0);
+
+ pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
+ blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
+
+ pkt_shrink_pktlist(pd);
+}
+
+static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
+{
+ if (dev_minor >= MAX_WRITERS)
+ return NULL;
+
+ dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
+ return pkt_devs[dev_minor];
+}
+
+static int pkt_open(struct block_device *bdev, fmode_t mode)
+{
+ struct pktcdvd_device *pd = NULL;
+ int ret;
+
+ mutex_lock(&pktcdvd_mutex);
+ mutex_lock(&ctl_mutex);
+ pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
+ if (!pd) {
+ ret = -ENODEV;
+ goto out;
+ }
+ BUG_ON(pd->refcnt < 0);
+
+ pd->refcnt++;
+ if (pd->refcnt > 1) {
+ if ((mode & FMODE_WRITE) &&
+ !test_bit(PACKET_WRITABLE, &pd->flags)) {
+ ret = -EBUSY;
+ goto out_dec;
+ }
+ } else {
+ ret = pkt_open_dev(pd, mode & FMODE_WRITE);
+ if (ret)
+ goto out_dec;
+ /*
+ * needed here as well, since ext2 (among others) may change
+ * the blocksize at mount time
+ */
+ set_blocksize(bdev, CD_FRAMESIZE);
+ }
+
+ mutex_unlock(&ctl_mutex);
+ mutex_unlock(&pktcdvd_mutex);
+ return 0;
+
+out_dec:
+ pd->refcnt--;
+out:
+ mutex_unlock(&ctl_mutex);
+ mutex_unlock(&pktcdvd_mutex);
+ return ret;
+}
+
+static void pkt_close(struct gendisk *disk, fmode_t mode)
+{
+ struct pktcdvd_device *pd = disk->private_data;
+
+ mutex_lock(&pktcdvd_mutex);
+ mutex_lock(&ctl_mutex);
+ pd->refcnt--;
+ BUG_ON(pd->refcnt < 0);
+ if (pd->refcnt == 0) {
+ int flush = test_bit(PACKET_WRITABLE, &pd->flags);
+ pkt_release_dev(pd, flush);
+ }
+ mutex_unlock(&ctl_mutex);
+ mutex_unlock(&pktcdvd_mutex);
+}
+
+
+static void pkt_end_io_read_cloned(struct bio *bio)
+{
+ struct packet_stacked_data *psd = bio->bi_private;
+ struct pktcdvd_device *pd = psd->pd;
+
+ psd->bio->bi_status = bio->bi_status;
+ bio_put(bio);
+ bio_endio(psd->bio);
+ mempool_free(psd, &psd_pool);
+ pkt_bio_finished(pd);
+}
+
+static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
+{
+ struct bio *cloned_bio =
+ bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
+ struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
+
+ psd->pd = pd;
+ psd->bio = bio;
+ cloned_bio->bi_private = psd;
+ cloned_bio->bi_end_io = pkt_end_io_read_cloned;
+ pd->stats.secs_r += bio_sectors(bio);
+ pkt_queue_bio(pd, cloned_bio);
+}
+
+static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
+{
+ struct pktcdvd_device *pd = q->queuedata;
+ sector_t zone;
+ struct packet_data *pkt;
+ int was_empty, blocked_bio;
+ struct pkt_rb_node *node;
+
+ zone = get_zone(bio->bi_iter.bi_sector, pd);
+
+ /*
+ * If we find a matching packet in state WAITING or READ_WAIT, we can
+ * just append this bio to that packet.
+ */
+ spin_lock(&pd->cdrw.active_list_lock);
+ blocked_bio = 0;
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ if (pkt->sector == zone) {
+ spin_lock(&pkt->lock);
+ if ((pkt->state == PACKET_WAITING_STATE) ||
+ (pkt->state == PACKET_READ_WAIT_STATE)) {
+ bio_list_add(&pkt->orig_bios, bio);
+ pkt->write_size +=
+ bio->bi_iter.bi_size / CD_FRAMESIZE;
+ if ((pkt->write_size >= pkt->frames) &&
+ (pkt->state == PACKET_WAITING_STATE)) {
+ atomic_inc(&pkt->run_sm);
+ wake_up(&pd->wqueue);
+ }
+ spin_unlock(&pkt->lock);
+ spin_unlock(&pd->cdrw.active_list_lock);
+ return;
+ } else {
+ blocked_bio = 1;
+ }
+ spin_unlock(&pkt->lock);
+ }
+ }
+ spin_unlock(&pd->cdrw.active_list_lock);
+
+ /*
+	 * Test if the bio work queue is congested (queue size >= the
+	 * congestion "on" mark). If it is, wait until the work queue size
+	 * drops below the congestion "off" mark.
+ */
+ spin_lock(&pd->lock);
+ if (pd->write_congestion_on > 0
+ && pd->bio_queue_size >= pd->write_congestion_on) {
+ struct wait_bit_queue_entry wqe;
+
+ init_wait_var_entry(&wqe, &pd->congested, 0);
+ for (;;) {
+ prepare_to_wait_event(__var_waitqueue(&pd->congested),
+ &wqe.wq_entry,
+ TASK_UNINTERRUPTIBLE);
+ if (pd->bio_queue_size <= pd->write_congestion_off)
+ break;
+ pd->congested = true;
+ spin_unlock(&pd->lock);
+ schedule();
+ spin_lock(&pd->lock);
+ }
+ }
+ spin_unlock(&pd->lock);
+
+ /*
+ * No matching packet found. Store the bio in the work queue.
+ */
+ node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
+ node->bio = bio;
+ spin_lock(&pd->lock);
+ BUG_ON(pd->bio_queue_size < 0);
+ was_empty = (pd->bio_queue_size == 0);
+ pkt_rbtree_insert(pd, node);
+ spin_unlock(&pd->lock);
+
+ /*
+ * Wake up the worker thread.
+ */
+ atomic_set(&pd->scan_queue, 1);
+ if (was_empty) {
+ /* This wake_up is required for correct operation */
+ wake_up(&pd->wqueue);
+ } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
+ /*
+ * This wake up is not required for correct operation,
+ * but improves performance in some cases.
+ */
+ wake_up(&pd->wqueue);
+ }
+}
+
+static void pkt_submit_bio(struct bio *bio)
+{
+ struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
+ struct bio *split;
+
+ bio = bio_split_to_limits(bio);
+
+ pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
+ (unsigned long long)bio->bi_iter.bi_sector,
+ (unsigned long long)bio_end_sector(bio));
+
+ /*
+ * Clone READ bios so we can have our own bi_end_io callback.
+ */
+ if (bio_data_dir(bio) == READ) {
+ pkt_make_request_read(pd, bio);
+ return;
+ }
+
+ if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
+ pkt_notice(pd, "WRITE for ro device (%llu)\n",
+ (unsigned long long)bio->bi_iter.bi_sector);
+ goto end_io;
+ }
+
+ if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
+ pkt_err(pd, "wrong bio size\n");
+ goto end_io;
+ }
+
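+	/*
+	 * Split the bio at zone boundaries, so that each part passed to
+	 * pkt_make_request_write() maps to exactly one packet zone.
+	 */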
+ do {
+ sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
+ sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
+
+ if (last_zone != zone) {
+ BUG_ON(last_zone != zone + pd->settings.size);
+
+ split = bio_split(bio, last_zone -
+ bio->bi_iter.bi_sector,
+ GFP_NOIO, &pkt_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
+
+ pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
+ } while (split != bio);
+
+ return;
+end_io:
+ bio_io_error(bio);
+}
+
+static void pkt_init_queue(struct pktcdvd_device *pd)
+{
+ struct request_queue *q = pd->disk->queue;
+
+ blk_queue_logical_block_size(q, CD_FRAMESIZE);
+ blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
+ q->queuedata = pd;
+}
+
+static int pkt_seq_show(struct seq_file *m, void *p)
+{
+ struct pktcdvd_device *pd = m->private;
+ char *msg;
+ int states[PACKET_NUM_STATES];
+
+ seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev);
+
+ seq_printf(m, "\nSettings:\n");
+ seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
+
+ if (pd->settings.write_type == 0)
+ msg = "Packet";
+ else
+ msg = "Unknown";
+ seq_printf(m, "\twrite type:\t\t%s\n", msg);
+
+ seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
+ seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
+
+ seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
+
+ if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
+ msg = "Mode 1";
+ else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
+ msg = "Mode 2";
+ else
+ msg = "Unknown";
+ seq_printf(m, "\tblock mode:\t\t%s\n", msg);
+
+ seq_printf(m, "\nStatistics:\n");
+ seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
+ seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
+ seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
+ seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
+ seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
+
+ seq_printf(m, "\nMisc:\n");
+ seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
+ seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
+ seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
+ seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
+ seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
+ seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
+
+ seq_printf(m, "\nQueue state:\n");
+ seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
+ seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
+ seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
+
+ pkt_count_states(pd, states);
+ seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+ states[0], states[1], states[2], states[3], states[4], states[5]);
+
+ seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
+ pd->write_congestion_off,
+ pd->write_congestion_on);
+ return 0;
+}
+
+static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
+{
+ int i;
+ struct block_device *bdev;
+ struct scsi_device *sdev;
+
+ if (pd->pkt_dev == dev) {
+ pkt_err(pd, "recursive setup not allowed\n");
+ return -EBUSY;
+ }
+ for (i = 0; i < MAX_WRITERS; i++) {
+ struct pktcdvd_device *pd2 = pkt_devs[i];
+ if (!pd2)
+ continue;
+ if (pd2->bdev->bd_dev == dev) {
+ pkt_err(pd, "%pg already setup\n", pd2->bdev);
+ return -EBUSY;
+ }
+ if (pd2->pkt_dev == dev) {
+ pkt_err(pd, "can't chain pktcdvd devices\n");
+ return -EBUSY;
+ }
+ }
+
+ bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
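+	/* The underlying drive must be a SCSI (MMC) device, since we drive
+	 * it with generic packet commands.
+	 */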
+ sdev = scsi_device_from_queue(bdev->bd_disk->queue);
+ if (!sdev) {
+ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
+ return -EINVAL;
+ }
+ put_device(&sdev->sdev_gendev);
+
+ /* This is safe, since we have a reference from open(). */
+ __module_get(THIS_MODULE);
+
+ pd->bdev = bdev;
+ set_blocksize(bdev, CD_FRAMESIZE);
+
+ pkt_init_queue(pd);
+
+ atomic_set(&pd->cdrw.pending_bios, 0);
+ pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
+ if (IS_ERR(pd->cdrw.thread)) {
+ pkt_err(pd, "can't start kernel thread\n");
+ goto out_mem;
+ }
+
+ proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
+ pkt_dbg(1, pd, "writer mapped to %pg\n", bdev);
+ return 0;
+
+out_mem:
+ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
+ return -ENOMEM;
+}
+
+static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
+{
+ struct pktcdvd_device *pd = bdev->bd_disk->private_data;
+ int ret;
+
+ pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
+ cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+
+ mutex_lock(&pktcdvd_mutex);
+ switch (cmd) {
+ case CDROMEJECT:
+ /*
+ * The door gets locked when the device is opened, so we
+ * have to unlock it or else the eject command fails.
+ */
+ if (pd->refcnt == 1)
+ pkt_lock_door(pd, 0);
+ fallthrough;
+ /*
+ * forward selected CDROM ioctls to CD-ROM, for UDF
+ */
+ case CDROMMULTISESSION:
+ case CDROMREADTOCENTRY:
+ case CDROM_LAST_WRITTEN:
+ case CDROM_SEND_PACKET:
+ case SCSI_IOCTL_SEND_COMMAND:
+ if (!bdev->bd_disk->fops->ioctl)
+ ret = -ENOTTY;
+ else
+ ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
+ break;
+ default:
+ pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
+ ret = -ENOTTY;
+ }
+ mutex_unlock(&pktcdvd_mutex);
+
+ return ret;
+}
+
+static unsigned int pkt_check_events(struct gendisk *disk,
+ unsigned int clearing)
+{
+ struct pktcdvd_device *pd = disk->private_data;
+ struct gendisk *attached_disk;
+
+ if (!pd)
+ return 0;
+ if (!pd->bdev)
+ return 0;
+ attached_disk = pd->bdev->bd_disk;
+ if (!attached_disk || !attached_disk->fops->check_events)
+ return 0;
+ return attached_disk->fops->check_events(attached_disk, clearing);
+}
+
+static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
+}
+
+static const struct block_device_operations pktcdvd_ops = {
+ .owner = THIS_MODULE,
+ .submit_bio = pkt_submit_bio,
+ .open = pkt_open,
+ .release = pkt_close,
+ .ioctl = pkt_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
+ .check_events = pkt_check_events,
+ .devnode = pkt_devnode,
+};
+
+/*
+ * Set up mapping from pktcdvd device to CD-ROM device.
+ */
+static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
+{
+ int idx;
+ int ret = -ENOMEM;
+ struct pktcdvd_device *pd;
+ struct gendisk *disk;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ for (idx = 0; idx < MAX_WRITERS; idx++)
+ if (!pkt_devs[idx])
+ break;
+ if (idx == MAX_WRITERS) {
+ pr_err("max %d writers supported\n", MAX_WRITERS);
+ ret = -EBUSY;
+ goto out_mutex;
+ }
+
+ pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
+ if (!pd)
+ goto out_mutex;
+
+ ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
+ sizeof(struct pkt_rb_node));
+ if (ret)
+ goto out_mem;
+
+ INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
+ INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
+ spin_lock_init(&pd->cdrw.active_list_lock);
+
+ spin_lock_init(&pd->lock);
+ spin_lock_init(&pd->iosched.lock);
+ bio_list_init(&pd->iosched.read_queue);
+ bio_list_init(&pd->iosched.write_queue);
+ sprintf(pd->name, DRIVER_NAME"%d", idx);
+ init_waitqueue_head(&pd->wqueue);
+ pd->bio_queue = RB_ROOT;
+
+ pd->write_congestion_on = write_congestion_on;
+ pd->write_congestion_off = write_congestion_off;
+
+ ret = -ENOMEM;
+ disk = blk_alloc_disk(NUMA_NO_NODE);
+ if (!disk)
+ goto out_mem;
+ pd->disk = disk;
+ disk->major = pktdev_major;
+ disk->first_minor = idx;
+ disk->minors = 1;
+ disk->fops = &pktcdvd_ops;
+ disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
+ strcpy(disk->disk_name, pd->name);
+ disk->private_data = pd;
+
+ pd->pkt_dev = MKDEV(pktdev_major, idx);
+ ret = pkt_new_dev(pd, dev);
+ if (ret)
+ goto out_mem2;
+
+ /* inherit events of the host device */
+ disk->events = pd->bdev->bd_disk->events;
+
+ ret = add_disk(disk);
+ if (ret)
+ goto out_mem2;
+
+ pkt_sysfs_dev_new(pd);
+ pkt_debugfs_dev_new(pd);
+
+ pkt_devs[idx] = pd;
+ if (pkt_dev)
+ *pkt_dev = pd->pkt_dev;
+
+ mutex_unlock(&ctl_mutex);
+ return 0;
+
+out_mem2:
+ put_disk(disk);
+out_mem:
+ mempool_exit(&pd->rb_pool);
+ kfree(pd);
+out_mutex:
+ mutex_unlock(&ctl_mutex);
+ pr_err("setup of pktcdvd device failed\n");
+ return ret;
+}
+
+/*
+ * Tear down mapping from pktcdvd device to CD-ROM device.
+ */
+static int pkt_remove_dev(dev_t pkt_dev)
+{
+ struct pktcdvd_device *pd;
+ int idx;
+ int ret = 0;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ for (idx = 0; idx < MAX_WRITERS; idx++) {
+ pd = pkt_devs[idx];
+ if (pd && (pd->pkt_dev == pkt_dev))
+ break;
+ }
+ if (idx == MAX_WRITERS) {
+ pr_debug("dev not setup\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (pd->refcnt > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+ if (!IS_ERR(pd->cdrw.thread))
+ kthread_stop(pd->cdrw.thread);
+
+ pkt_devs[idx] = NULL;
+
+ pkt_debugfs_dev_remove(pd);
+ pkt_sysfs_dev_remove(pd);
+
+ blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
+
+ remove_proc_entry(pd->name, pkt_proc);
+ pkt_dbg(1, pd, "writer unmapped\n");
+
+ del_gendisk(pd->disk);
+ put_disk(pd->disk);
+
+ mempool_exit(&pd->rb_pool);
+ kfree(pd);
+
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
+
+out:
+ mutex_unlock(&ctl_mutex);
+ return ret;
+}
+
+static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
+{
+ struct pktcdvd_device *pd;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
+ if (pd) {
+ ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
+ ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
+ } else {
+ ctrl_cmd->dev = 0;
+ ctrl_cmd->pkt_dev = 0;
+ }
+ ctrl_cmd->num_devices = MAX_WRITERS;
+
+ mutex_unlock(&ctl_mutex);
+}
+
+static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct pkt_ctrl_command ctrl_cmd;
+ int ret = 0;
+ dev_t pkt_dev = 0;
+
+ if (cmd != PACKET_CTRL_CMD)
+ return -ENOTTY;
+
+ if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
+ return -EFAULT;
+
+ switch (ctrl_cmd.command) {
+ case PKT_CTRL_CMD_SETUP:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
+ ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
+ break;
+ case PKT_CTRL_CMD_TEARDOWN:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
+ break;
+ case PKT_CTRL_CMD_STATUS:
+ pkt_get_status(&ctrl_cmd);
+ break;
+ default:
+ return -ENOTTY;
+ }
+
+ if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
+ return -EFAULT;
+ return ret;
+}
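+
+/*
+ * Userspace sets up a mapping by opening /dev/pktcdvd/control and issuing
+ * PACKET_CTRL_CMD with command set to PKT_CTRL_CMD_SETUP and dev holding the
+ * encoded dev_t of the CD drive; pktsetup(8) from udftools wraps this.
+ */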
+
+#ifdef CONFIG_COMPAT
+static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations pkt_ctl_fops = {
+ .open = nonseekable_open,
+ .unlocked_ioctl = pkt_ctl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pkt_ctl_compat_ioctl,
+#endif
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice pkt_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = DRIVER_NAME,
+ .nodename = "pktcdvd/control",
+ .fops = &pkt_ctl_fops
+};
+
+static int __init pkt_init(void)
+{
+ int ret;
+
+ mutex_init(&ctl_mutex);
+
+ ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
+ sizeof(struct packet_stacked_data));
+ if (ret)
+ return ret;
+ ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
+ if (ret) {
+ mempool_exit(&psd_pool);
+ return ret;
+ }
+
+ ret = register_blkdev(pktdev_major, DRIVER_NAME);
+ if (ret < 0) {
+ pr_err("unable to register block device\n");
+ goto out2;
+ }
+ if (!pktdev_major)
+ pktdev_major = ret;
+
+ ret = pkt_sysfs_init();
+ if (ret)
+ goto out;
+
+ pkt_debugfs_init();
+
+ ret = misc_register(&pkt_misc);
+ if (ret) {
+ pr_err("unable to register misc device\n");
+ goto out_misc;
+ }
+
+ pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
+
+ return 0;
+
+out_misc:
+ pkt_debugfs_cleanup();
+ pkt_sysfs_cleanup();
+out:
+ unregister_blkdev(pktdev_major, DRIVER_NAME);
+out2:
+ mempool_exit(&psd_pool);
+ bioset_exit(&pkt_bio_set);
+ return ret;
+}
+
+static void __exit pkt_exit(void)
+{
+ remove_proc_entry("driver/"DRIVER_NAME, NULL);
+ misc_deregister(&pkt_misc);
+
+ pkt_debugfs_cleanup();
+ pkt_sysfs_cleanup();
+
+ unregister_blkdev(pktdev_major, DRIVER_NAME);
+ mempool_exit(&psd_pool);
+ bioset_exit(&pkt_bio_set);
+}
+
+MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
+MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
+MODULE_LICENSE("GPL");
+
+module_init(pkt_init);
+module_exit(pkt_exit);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index c76e0148eada..574e470b220b 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -587,6 +587,8 @@ static void ps3vram_submit_bio(struct bio *bio)
dev_dbg(&dev->core, "%s\n", __func__);
bio = bio_split_to_limits(bio);
+ if (!bio)
+ return;
spin_lock_irq(&priv->lock);
busy = !bio_list_empty(&priv->list);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index e9de9d846b73..17b677b5d3b2 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1992,6 +1992,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
int ret = -EINVAL;
+ if (issue_flags & IO_URING_F_NONBLOCK)
+ return -EAGAIN;
+
ublk_ctrl_cmd_dump(cmd);
if (!(issue_flags & IO_URING_F_SQE128))
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 68bd2f7961b3..6a77fa917428 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -315,22 +315,35 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
virtqueue_notify(vq->vq);
}
+static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
+{
+ virtblk_cleanup_cmd(req);
+ switch (rc) {
+ case -ENOSPC:
+ return BLK_STS_DEV_RESOURCE;
+ case -ENOMEM:
+ return BLK_STS_RESOURCE;
+ default:
+ return BLK_STS_IOERR;
+ }
+}
+
static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
struct virtio_blk *vblk,
struct request *req,
struct virtblk_req *vbr)
{
blk_status_t status;
+ int num;
status = virtblk_setup_cmd(vblk->vdev, req, vbr);
if (unlikely(status))
return status;
- vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
- if (unlikely(vbr->sg_table.nents < 0)) {
- virtblk_cleanup_cmd(req);
- return BLK_STS_RESOURCE;
- }
+ num = virtblk_map_data(hctx, req, vbr);
+ if (unlikely(num < 0))
+ return virtblk_fail_to_queue(req, -ENOMEM);
+ vbr->sg_table.nents = num;
blk_mq_start_request(req);
@@ -364,15 +377,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
virtblk_unmap_data(req, vbr);
- virtblk_cleanup_cmd(req);
- switch (err) {
- case -ENOSPC:
- return BLK_STS_DEV_RESOURCE;
- case -ENOMEM:
- return BLK_STS_RESOURCE;
- default:
- return BLK_STS_IOERR;
- }
+ return virtblk_fail_to_queue(req, err);
}
if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
@@ -991,7 +996,7 @@ static int virtblk_probe(struct virtio_device *vdev)
blk_queue_max_segments(q, sg_elems);
/* No real sector limit. */
- blk_queue_max_hw_sectors(q, -1U);
+ blk_queue_max_hw_sectors(q, UINT_MAX);
max_size = virtio_max_dma_size(vdev);
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c0227dfa4688..4807af1d5805 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -524,7 +524,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
return 0;
}
-static int xen_blkbk_remove(struct xenbus_device *dev)
+static void xen_blkbk_remove(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
@@ -547,8 +547,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
/* Put the reference we set in xen_blkif_alloc(). */
xen_blkif_put(be->blkif);
}
-
- return 0;
}
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b28489290323..23ed258b57f0 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2467,7 +2467,7 @@ static void blkback_changed(struct xenbus_device *dev,
}
}
-static int blkfront_remove(struct xenbus_device *xbdev)
+static void blkfront_remove(struct xenbus_device *xbdev)
{
struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
@@ -2488,7 +2488,6 @@ static int blkfront_remove(struct xenbus_device *xbdev)
}
kfree(info);
- return 0;
}
static int blkfront_is_ready(struct xenbus_device *dev)
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index d69905233aff..7e513b771832 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -412,7 +412,9 @@ int tpm_pm_suspend(struct device *dev)
}
suspended:
- return rc;
+ if (rc)
+ dev_err(dev, "Ignoring error %d while suspending\n", rc);
+ return 0;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 379291826261..80cca3b83b22 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -360,14 +360,13 @@ static int tpmfront_probe(struct xenbus_device *dev,
return tpm_chip_register(priv->chip);
}
-static int tpmfront_remove(struct xenbus_device *dev)
+static void tpmfront_remove(struct xenbus_device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
tpm_chip_unregister(chip);
ring_free(priv);
dev_set_drvdata(&chip->dev, NULL);
- return 0;
}
static int tpmfront_resume(struct xenbus_device *dev)
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 53100fb9b07b..12205e2b53b4 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -3,7 +3,7 @@
* Microchip / Atmel ECC (I2C) driver.
*
* Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ * Author: Tudor Ambarus
*/
#include <linux/delay.h>
@@ -411,6 +411,6 @@ static void __exit atmel_ecc_exit(void)
module_init(atmel_ecc_init);
module_exit(atmel_ecc_exit);
-MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_AUTHOR("Tudor Ambarus");
MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index 81ce09bedda8..55bff1e13142 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -3,7 +3,7 @@
* Microchip / Atmel ECC (I2C) driver.
*
* Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ * Author: Tudor Ambarus
*/
#include <linux/bitrev.h>
@@ -390,6 +390,6 @@ static void __exit atmel_i2c_exit(void)
module_init(atmel_i2c_init);
module_exit(atmel_i2c_exit);
-MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_AUTHOR("Tudor Ambarus");
MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h
index 48929efe2a5b..35f7857a7f7c 100644
--- a/drivers/crypto/atmel-i2c.h
+++ b/drivers/crypto/atmel-i2c.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ * Author: Tudor Ambarus
*/
#ifndef __ATMEL_I2C_H__
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index 1f65df489847..f46b161d2cda 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -104,7 +104,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
}
ctrlpriv = dev_get_drvdata(jrdev->parent);
- moo = FIELD_GET(CSTA_MOO, ioread32(&ctrlpriv->ctrl->perfmon.status));
+ moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
dev_warn(jrdev,
"using insecure test key, enable HAB to use unique device key!\n");
diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
index e553ccadbcbc..e5876286828b 100644
--- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
@@ -239,7 +239,8 @@ static int virtio_crypto_alg_skcipher_close_session(
pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
ctrl_status->status, destroy_session->session_id);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
err = 0;
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c
index f69d68122b9b..fbf725fae7c1 100644
--- a/drivers/dma-buf/dma-buf-sysfs-stats.c
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.c
@@ -168,14 +168,11 @@ void dma_buf_uninit_sysfs_statistics(void)
kset_unregister(dma_buf_stats_kset);
}
-int dma_buf_stats_setup(struct dma_buf *dmabuf)
+int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
{
struct dma_buf_sysfs_entry *sysfs_entry;
int ret;
- if (!dmabuf || !dmabuf->file)
- return -EINVAL;
-
if (!dmabuf->exp_name) {
pr_err("exporter name must not be empty if stats needed\n");
return -EINVAL;
@@ -192,7 +189,7 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf)
/* create the directory for buffer stats */
ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL,
- "%lu", file_inode(dmabuf->file)->i_ino);
+ "%lu", file_inode(file)->i_ino);
if (ret)
goto err_sysfs_dmabuf;
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.h b/drivers/dma-buf/dma-buf-sysfs-stats.h
index a49c6e2650cc..7a8a995b75ba 100644
--- a/drivers/dma-buf/dma-buf-sysfs-stats.h
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.h
@@ -13,7 +13,7 @@
int dma_buf_init_sysfs_statistics(void);
void dma_buf_uninit_sysfs_statistics(void);
-int dma_buf_stats_setup(struct dma_buf *dmabuf);
+int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file);
void dma_buf_stats_teardown(struct dma_buf *dmabuf);
#else
@@ -25,7 +25,7 @@ static inline int dma_buf_init_sysfs_statistics(void)
static inline void dma_buf_uninit_sysfs_statistics(void) {}
-static inline int dma_buf_stats_setup(struct dma_buf *dmabuf)
+static inline int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
{
return 0;
}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index b6c36914e7c6..e6528767efc7 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -95,10 +95,11 @@ static int dma_buf_file_release(struct inode *inode, struct file *file)
return -EINVAL;
dmabuf = file->private_data;
-
- mutex_lock(&db_list.lock);
- list_del(&dmabuf->list_node);
- mutex_unlock(&db_list.lock);
+ if (dmabuf) {
+ mutex_lock(&db_list.lock);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&db_list.lock);
+ }
return 0;
}
@@ -528,17 +529,17 @@ static inline int is_dma_buf_file(struct file *file)
return file->f_op == &dma_buf_fops;
}
-static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
+static struct file *dma_buf_getfile(size_t size, int flags)
{
static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
- struct file *file;
struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
+ struct file *file;
if (IS_ERR(inode))
return ERR_CAST(inode);
- inode->i_size = dmabuf->size;
- inode_set_bytes(inode, dmabuf->size);
+ inode->i_size = size;
+ inode_set_bytes(inode, size);
/*
* The ->i_ino acquired from get_next_ino() is not unique thus
@@ -552,8 +553,6 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
flags, &dma_buf_fops);
if (IS_ERR(file))
goto err_alloc_file;
- file->private_data = dmabuf;
- file->f_path.dentry->d_fsdata = dmabuf;
return file;
@@ -619,19 +618,11 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
size_t alloc_size = sizeof(struct dma_buf);
int ret;
- if (!exp_info->resv)
- alloc_size += sizeof(struct dma_resv);
- else
- /* prevent &dma_buf[1] == dma_buf->resv */
- alloc_size += 1;
-
- if (WARN_ON(!exp_info->priv
- || !exp_info->ops
- || !exp_info->ops->map_dma_buf
- || !exp_info->ops->unmap_dma_buf
- || !exp_info->ops->release)) {
+ if (WARN_ON(!exp_info->priv || !exp_info->ops
+ || !exp_info->ops->map_dma_buf
+ || !exp_info->ops->unmap_dma_buf
+ || !exp_info->ops->release))
return ERR_PTR(-EINVAL);
- }
if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
(exp_info->ops->pin || exp_info->ops->unpin)))
@@ -643,10 +634,21 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
if (!try_module_get(exp_info->owner))
return ERR_PTR(-ENOENT);
+ file = dma_buf_getfile(exp_info->size, exp_info->flags);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err_module;
+ }
+
+ if (!exp_info->resv)
+ alloc_size += sizeof(struct dma_resv);
+ else
+ /* prevent &dma_buf[1] == dma_buf->resv */
+ alloc_size += 1;
dmabuf = kzalloc(alloc_size, GFP_KERNEL);
if (!dmabuf) {
ret = -ENOMEM;
- goto err_module;
+ goto err_file;
}
dmabuf->priv = exp_info->priv;
@@ -658,43 +660,35 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
init_waitqueue_head(&dmabuf->poll);
dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
+ INIT_LIST_HEAD(&dmabuf->attachments);
if (!resv) {
- resv = (struct dma_resv *)&dmabuf[1];
- dma_resv_init(resv);
+ dmabuf->resv = (struct dma_resv *)&dmabuf[1];
+ dma_resv_init(dmabuf->resv);
+ } else {
+ dmabuf->resv = resv;
}
- dmabuf->resv = resv;
- file = dma_buf_getfile(dmabuf, exp_info->flags);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
+ ret = dma_buf_stats_setup(dmabuf, file);
+ if (ret)
goto err_dmabuf;
- }
+ file->private_data = dmabuf;
+ file->f_path.dentry->d_fsdata = dmabuf;
dmabuf->file = file;
- INIT_LIST_HEAD(&dmabuf->attachments);
-
mutex_lock(&db_list.lock);
list_add(&dmabuf->list_node, &db_list.head);
mutex_unlock(&db_list.lock);
- ret = dma_buf_stats_setup(dmabuf);
- if (ret)
- goto err_sysfs;
-
return dmabuf;
-err_sysfs:
- /*
- * Set file->f_path.dentry->d_fsdata to NULL so that when
- * dma_buf_release() gets invoked by dentry_ops, it exits
- * early before calling the release() dma_buf op.
- */
- file->f_path.dentry->d_fsdata = NULL;
- fput(file);
err_dmabuf:
+ if (!resv)
+ dma_resv_fini(dmabuf->resv);
kfree(dmabuf);
+err_file:
+ fput(file);
err_module:
module_put(exp_info->owner);
return ERR_PTR(ret);
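Why the reorder above matters: allocating the anonymous file before the dma_buf means every later failure unwinds in strict reverse order (free the buffer, fput() the file, drop the module reference), which is what lets the old err_sysfs path and its d_fsdata NULLing disappear. A minimal userspace sketch of that acquire-in-order/unwind-in-reverse shape, with illustrative names rather than the dma-buf API:

#include <stdio.h>
#include <stdlib.h>

struct file_like { int id; };
struct buf_like { struct file_like *file; };

static struct buf_like *export_buf(void)
{
	struct file_like *file;
	struct buf_like *buf;

	file = malloc(sizeof(*file));	/* step 1: take the "file" first */
	if (!file)
		goto err;

	buf = malloc(sizeof(*buf));	/* step 2: then the "dmabuf" */
	if (!buf)
		goto err_file;

	buf->file = file;
	return buf;

err_file:
	free(file);			/* unwind in reverse order */
err:
	return NULL;
}

int main(void)
{
	struct buf_like *buf = export_buf();

	if (buf) {
		puts("export ok");
		free(buf->file);
		free(buf);
	} else {
		puts("export failed");
	}
	return 0;
}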
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index 8d722e026e9c..84352a6f4973 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -91,7 +91,6 @@ enum sprd_eic_type {
struct sprd_eic {
struct gpio_chip chip;
- struct irq_chip intc;
void __iomem *base[SPRD_EIC_MAX_BANK];
enum sprd_eic_type type;
spinlock_t lock;
@@ -255,6 +254,8 @@ static void sprd_eic_irq_mask(struct irq_data *data)
default:
dev_err(chip->parent, "Unsupported EIC type.\n");
}
+
+ gpiochip_disable_irq(chip, offset);
}
static void sprd_eic_irq_unmask(struct irq_data *data)
@@ -263,6 +264,8 @@ static void sprd_eic_irq_unmask(struct irq_data *data)
struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
u32 offset = irqd_to_hwirq(data);
+ gpiochip_enable_irq(chip, offset);
+
switch (sprd_eic->type) {
case SPRD_EIC_DEBOUNCE:
sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IE, 1);
@@ -564,6 +567,15 @@ static void sprd_eic_irq_handler(struct irq_desc *desc)
chained_irq_exit(ic, desc);
}
+static const struct irq_chip sprd_eic_irq = {
+ .name = "sprd-eic",
+ .irq_ack = sprd_eic_irq_ack,
+ .irq_mask = sprd_eic_irq_mask,
+ .irq_unmask = sprd_eic_irq_unmask,
+ .irq_set_type = sprd_eic_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
static int sprd_eic_probe(struct platform_device *pdev)
{
const struct sprd_eic_variant_data *pdata;
@@ -626,15 +638,8 @@ static int sprd_eic_probe(struct platform_device *pdev)
break;
}
- sprd_eic->intc.name = dev_name(&pdev->dev);
- sprd_eic->intc.irq_ack = sprd_eic_irq_ack;
- sprd_eic->intc.irq_mask = sprd_eic_irq_mask;
- sprd_eic->intc.irq_unmask = sprd_eic_irq_unmask;
- sprd_eic->intc.irq_set_type = sprd_eic_irq_set_type;
- sprd_eic->intc.flags = IRQCHIP_SKIP_SET_WAKE;
-
irq = &sprd_eic->chip.irq;
- irq->chip = &sprd_eic->intc;
+ gpio_irq_chip_set_chip(irq, &sprd_eic_irq);
irq->handler = handle_bad_irq;
irq->default_type = IRQ_TYPE_NONE;
irq->parent_handler = sprd_eic_irq_handler;
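The pattern in this conversion (repeated for the other two Spreadtrum drivers below): the per-instance, writable struct irq_chip is replaced by a single shared const table, mask/unmask additionally call gpiochip_disable_irq()/gpiochip_enable_irq() to keep gpiolib's resource accounting correct, and IRQCHIP_IMMUTABLE plus GPIOCHIP_IRQ_RESOURCE_HELPERS mark the chip as never modified at runtime. A tiny userspace sketch of the shared-const-ops idea, with illustrative names rather than the gpiolib API:

#include <stdio.h>

struct chip;

struct chip_ops {
	const char *name;
	void (*mask)(struct chip *c);
	void (*unmask)(struct chip *c);
};

struct chip {
	const struct chip_ops *ops;	/* shared, never written */
	int masked;			/* per-instance state */
};

static void chip_mask(struct chip *c)   { c->masked = 1; }
static void chip_unmask(struct chip *c) { c->masked = 0; }

/* One const table shared by every instance, like the const irq_chip. */
static const struct chip_ops eic_ops = {
	.name	= "sprd-eic",
	.mask	= chip_mask,
	.unmask	= chip_unmask,
};

int main(void)
{
	struct chip a = { &eic_ops, 0 }, b = { &eic_ops, 0 };

	a.ops->mask(&a);	/* per-instance effect, shared ops table */
	printf("%s: a.masked=%d b.masked=%d\n", eic_ops.name,
	       a.masked, b.masked);
	return 0;
}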
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index a59d61cd44b2..5299e5bb76d6 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -474,6 +474,9 @@ static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off)
case PCAL6524_DEBOUNCE:
pinctrl = ((reg & PCAL_PINCTRL_MASK) >> 1) + 0x1c;
break;
+ default:
+ pinctrl = 0;
+ break;
}
return pinctrl + addr + (off / BANK_SZ);
diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
index e518490c4b68..c3e4d90f6b18 100644
--- a/drivers/gpio/gpio-pmic-eic-sprd.c
+++ b/drivers/gpio/gpio-pmic-eic-sprd.c
@@ -47,7 +47,6 @@ enum {
/**
* struct sprd_pmic_eic - PMIC EIC controller
* @chip: the gpio_chip structure.
- * @intc: the irq_chip structure.
* @map: the regmap from the parent device.
* @offset: the EIC controller's offset address of the PMIC.
* @reg: the array to cache the EIC registers.
@@ -56,7 +55,6 @@ enum {
*/
struct sprd_pmic_eic {
struct gpio_chip chip;
- struct irq_chip intc;
struct regmap *map;
u32 offset;
u8 reg[CACHE_NR_REGS];
@@ -151,15 +149,21 @@ static void sprd_pmic_eic_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct sprd_pmic_eic *pmic_eic = gpiochip_get_data(chip);
+ u32 offset = irqd_to_hwirq(data);
pmic_eic->reg[REG_IE] = 0;
pmic_eic->reg[REG_TRIG] = 0;
+
+ gpiochip_disable_irq(chip, offset);
}
static void sprd_pmic_eic_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct sprd_pmic_eic *pmic_eic = gpiochip_get_data(chip);
+ u32 offset = irqd_to_hwirq(data);
+
+ gpiochip_enable_irq(chip, offset);
pmic_eic->reg[REG_IE] = 1;
pmic_eic->reg[REG_TRIG] = 1;
@@ -292,6 +296,17 @@ static irqreturn_t sprd_pmic_eic_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
+static const struct irq_chip pmic_eic_irq_chip = {
+ .name = "sprd-pmic-eic",
+ .irq_mask = sprd_pmic_eic_irq_mask,
+ .irq_unmask = sprd_pmic_eic_irq_unmask,
+ .irq_set_type = sprd_pmic_eic_irq_set_type,
+ .irq_bus_lock = sprd_pmic_eic_bus_lock,
+ .irq_bus_sync_unlock = sprd_pmic_eic_bus_sync_unlock,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int sprd_pmic_eic_probe(struct platform_device *pdev)
{
struct gpio_irq_chip *irq;
@@ -338,16 +353,8 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
pmic_eic->chip.set = sprd_pmic_eic_set;
pmic_eic->chip.get = sprd_pmic_eic_get;
- pmic_eic->intc.name = dev_name(&pdev->dev);
- pmic_eic->intc.irq_mask = sprd_pmic_eic_irq_mask;
- pmic_eic->intc.irq_unmask = sprd_pmic_eic_irq_unmask;
- pmic_eic->intc.irq_set_type = sprd_pmic_eic_irq_set_type;
- pmic_eic->intc.irq_bus_lock = sprd_pmic_eic_bus_lock;
- pmic_eic->intc.irq_bus_sync_unlock = sprd_pmic_eic_bus_sync_unlock;
- pmic_eic->intc.flags = IRQCHIP_SKIP_SET_WAKE;
-
irq = &pmic_eic->chip.irq;
- irq->chip = &pmic_eic->intc;
+ gpio_irq_chip_set_chip(irq, &pmic_eic_irq_chip);
irq->threaded = true;
ret = devm_gpiochip_add_data(&pdev->dev, &pmic_eic->chip, pmic_eic);
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 238f3210970c..bc5660f61c57 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -215,6 +215,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
parent = irq_find_host(irq_parent);
+ of_node_put(irq_parent);
if (!parent) {
dev_err(dev, "no IRQ parent domain\n");
return -ENODEV;
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index 9bff63990eee..072b4e653216 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -120,6 +120,7 @@ static void sprd_gpio_irq_mask(struct irq_data *data)
u32 offset = irqd_to_hwirq(data);
sprd_gpio_update(chip, offset, SPRD_GPIO_IE, 0);
+ gpiochip_disable_irq(chip, offset);
}
static void sprd_gpio_irq_ack(struct irq_data *data)
@@ -136,6 +137,7 @@ static void sprd_gpio_irq_unmask(struct irq_data *data)
u32 offset = irqd_to_hwirq(data);
sprd_gpio_update(chip, offset, SPRD_GPIO_IE, 1);
+ gpiochip_enable_irq(chip, offset);
}
static int sprd_gpio_irq_set_type(struct irq_data *data,
@@ -205,13 +207,14 @@ static void sprd_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(ic, desc);
}
-static struct irq_chip sprd_gpio_irqchip = {
+static const struct irq_chip sprd_gpio_irqchip = {
.name = "sprd-gpio",
.irq_ack = sprd_gpio_irq_ack,
.irq_mask = sprd_gpio_irq_mask,
.irq_unmask = sprd_gpio_irq_unmask,
.irq_set_type = sprd_gpio_irq_set_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int sprd_gpio_probe(struct platform_device *pdev)
@@ -245,7 +248,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
sprd_gpio->chip.direction_output = sprd_gpio_direction_output;
irq = &sprd_gpio->chip.irq;
- irq->chip = &sprd_gpio_irqchip;
+ gpio_irq_chip_set_chip(irq, &sprd_gpio_irqchip);
irq->handler = handle_bad_irq;
irq->default_type = IRQ_TYPE_NONE;
irq->parent_handler = sprd_gpio_irq_handler;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 5a66d9616d7c..939c776b9488 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -3905,8 +3905,8 @@ static struct gpio_desc *gpiod_find_and_request(struct device *consumer,
const char *label,
bool platform_lookup_allowed)
{
+ unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc = ERR_PTR(-ENOENT);
- unsigned long lookupflags;
int ret;
if (!IS_ERR_OR_NULL(fwnode))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6b74df446694..e3e2e6e3b485 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -195,6 +195,7 @@ extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern uint amdgpu_dm_abm_level;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b4f2d61ea0d5..1353ffd08988 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -181,6 +181,7 @@ int amdgpu_mes_kiq;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
int amdgpu_tmz = -1; /* auto */
+uint amdgpu_freesync_vid_mode;
int amdgpu_reset_method = -1; /* auto */
int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
@@ -880,6 +881,32 @@ MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)
module_param_named(tmz, amdgpu_tmz, int, 0444);
/**
+ * DOC: freesync_video (uint)
+ * Enable the optimization that adjusts front porch timing to achieve a
+ * seamless mode change when setting a FreeSync-supported mode for which a
+ * full modeset is not needed.
+ *
+ * When users enable this feature, the Display Core adds a set of modes
+ * derived from the base FreeSync video mode into the corresponding
+ * connector's mode list, based on commonly used refresh rates and the VRR
+ * range of the connected display. From the userspace perspective, changing
+ * between different refresh rates at the same resolution appears seamless.
+ * Additionally, userspace applications such as video players can read this
+ * mode list and change the refresh rate based on the video frame rate.
+ * Finally, userspace can also derive an appropriate mode for a particular
+ * refresh rate based on the FreeSync mode and add it to the connector's
+ * mode list.
+ *
+ * Note: This is an experimental feature.
+ *
+ * The default value: 0 (off).
+ */
+MODULE_PARM_DESC(
+ freesync_video,
+ "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
+
+/**
* DOC: reset_method (int)
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
*/
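As a usage sketch (values illustrative): since the parameter is registered with 0444 permissions it is set at load time only, e.g. by booting with amdgpu.freesync_video=1 on the kernel command line, or with modprobe amdgpu freesync_video=1 when amdgpu is built as a module; it should then be visible read-only under /sys/module/amdgpu/parameters/freesync_video.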
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index bceb1a5b2518..3fdaba56be6f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -801,7 +801,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
p2plink->attr.name = "properties";
p2plink->attr.mode = KFD_SYSFS_FILE_MODE;
- sysfs_attr_init(&iolink->attr);
+ sysfs_attr_init(&p2plink->attr);
ret = sysfs_create_file(p2plink->kobj, &p2plink->attr);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 50c783e19f5a..1b7f20a9d4ae 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4361,6 +4361,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_set_panel_orientation(&aconnector->base);
}
+ /* If we didn't find a panel, notify the acpi video detection */
+ if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0)
+ acpi_video_report_nolcd();
+
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -5831,7 +5835,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
- recalculate_timing = is_freesync_video_mode(&mode, aconnector);
+ recalculate_timing = amdgpu_freesync_vid_mode &&
+ is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
drm_mode_copy(&saved_mode, &mode);
@@ -6982,7 +6987,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- if (!edid)
+ if (!(amdgpu_freesync_vid_mode && edid))
return;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -8846,7 +8851,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
* TODO: Refactor this function to allow this check to work
* in all conditions.
*/
- if (dm_new_crtc_state->stream &&
+ if (amdgpu_freesync_vid_mode &&
+ dm_new_crtc_state->stream &&
is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
goto skip_modeset;
@@ -8881,7 +8887,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!dm_old_crtc_state->stream)
goto skip_modeset;
- if (dm_new_crtc_state->stream &&
+ if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
is_timing_unchanged_for_freesync(new_crtc_state,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
@@ -8893,7 +8899,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
set_freesync_fixed_config(dm_new_crtc_state);
goto skip_modeset;
- } else if (aconnector &&
+ } else if (amdgpu_freesync_vid_mode && aconnector &&
is_freesync_video_mode(&new_crtc_state->mode,
aconnector)) {
struct drm_display_mode *high_mode;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 5af601cff1a0..b53feeaf5cf1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -6257,12 +6257,12 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
double SwathSizePerSurfaceC[DC__NUM_DPP__MAX];
bool NotEnoughDETSwathFillLatencyHiding = false;
- /* calculate sum of single swath size for all pipes in bytes*/
+ /* calculate sum of single swath size for all pipes in bytes */
for (k = 0; k < NumberOfActiveSurfaces; k++) {
- SwathSizePerSurfaceY[k] += SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
+ SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
if (SwathHeightC[k] != 0)
- SwathSizePerSurfaceC[k] += SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
+ SwathSizePerSurfaceC[k] = SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
else
SwathSizePerSurfaceC[k] = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index fce69fa446d5..2cbc1292ab38 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -41,9 +41,11 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
+#include "intel_gmbus_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"
@@ -377,6 +379,85 @@ static void icl_exec_gpio(struct intel_connector *connector,
drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
}
+enum {
+ MIPI_RESET_1 = 0,
+ MIPI_AVDD_EN_1,
+ MIPI_BKLT_EN_1,
+ MIPI_AVEE_EN_1,
+ MIPI_VIO_EN_1,
+ MIPI_RESET_2,
+ MIPI_AVDD_EN_2,
+ MIPI_BKLT_EN_2,
+ MIPI_AVEE_EN_2,
+ MIPI_VIO_EN_2,
+};
+
+static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
+ int gpio, bool value)
+{
+ int index;
+
+ if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
+ return;
+
+ switch (gpio) {
+ case MIPI_RESET_1:
+ case MIPI_RESET_2:
+ index = gpio == MIPI_RESET_1 ? HPD_PORT_A : HPD_PORT_B;
+
+ /*
+ * Disable HPD to set the pin to output, and set output
+ * value. The HPD pin should not be enabled for DSI anyway,
+ * assuming the board design and VBT are sane, and the pin isn't
+ * used by a non-DSI encoder.
+ *
+ * The locking protects against concurrent SHOTPLUG_CTL_DDI
+ * modifications in irq setup and handling.
+ */
+ spin_lock_irq(&dev_priv->irq_lock);
+ intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI,
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(index) |
+ SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index),
+ value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ break;
+ case MIPI_AVDD_EN_1:
+ case MIPI_AVDD_EN_2:
+ index = gpio == MIPI_AVDD_EN_1 ? 0 : 1;
+
+ intel_de_rmw(dev_priv, PP_CONTROL(index), PANEL_POWER_ON,
+ value ? PANEL_POWER_ON : 0);
+ break;
+ case MIPI_BKLT_EN_1:
+ case MIPI_BKLT_EN_2:
+ index = gpio == MIPI_BKLT_EN_1 ? 0 : 1;
+
+ intel_de_rmw(dev_priv, PP_CONTROL(index), EDP_BLC_ENABLE,
+ value ? EDP_BLC_ENABLE : 0);
+ break;
+ case MIPI_AVEE_EN_1:
+ case MIPI_AVEE_EN_2:
+ index = gpio == MIPI_AVEE_EN_1 ? 1 : 2;
+
+ intel_de_rmw(dev_priv, GPIO(dev_priv, index),
+ GPIO_CLOCK_VAL_OUT,
+ GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT |
+ GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0));
+ break;
+ case MIPI_VIO_EN_1:
+ case MIPI_VIO_EN_2:
+ index = gpio == MIPI_VIO_EN_1 ? 1 : 2;
+
+ intel_de_rmw(dev_priv, GPIO(dev_priv, index),
+ GPIO_DATA_VAL_OUT,
+ GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT |
+ GPIO_DATA_VAL_MASK | (value ? GPIO_DATA_VAL_OUT : 0));
+ break;
+ default:
+ MISSING_CASE(gpio);
+ }
+}
+
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
@@ -384,8 +465,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
struct intel_connector *connector = intel_dsi->attached_connector;
u8 gpio_source, gpio_index = 0, gpio_number;
bool value;
-
- drm_dbg_kms(&dev_priv->drm, "\n");
+ bool native = DISPLAY_VER(dev_priv) >= 11;
if (connector->panel.vbt.dsi.seq_version >= 3)
gpio_index = *data++;
@@ -398,10 +478,18 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
else
gpio_source = 0;
+ if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1))
+ native = false;
+
/* pull up/down */
value = *data++ & 1;
- if (DISPLAY_VER(dev_priv) >= 11)
+ drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+ gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
+
+ if (native)
+ icl_native_gpio_set_value(dev_priv, gpio_number, value);
+ else if (DISPLAY_VER(dev_priv) >= 11)
icl_exec_gpio(connector, gpio_source, gpio_index, value);
else if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(connector, gpio_source, gpio_number, value);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index da09767fda07..f266b68cf012 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -730,32 +730,69 @@ static int eb_reserve(struct i915_execbuffer *eb)
bool unpinned;
/*
- * Attempt to pin all of the buffers into the GTT.
- * This is done in 2 phases:
+ * We have one or more buffers that we couldn't bind, which could be due to
+ * various reasons. To resolve this we have 4 passes, with every next
+ * level turning the screws tighter:
*
- * 1. Unbind all objects that do not match the GTT constraints for
- * the execbuffer (fenceable, mappable, alignment etc).
- * 2. Bind new objects.
+ * 0. Unbind all objects that do not match the GTT constraints for the
+ * execbuffer (fenceable, mappable, alignment etc). Bind all new
+ * objects. This avoids unnecessary unbinding of later objects in order
+ * to make room for the earlier objects *unless* we need to defragment.
*
- * This avoid unnecessary unbinding of later objects in order to make
- * room for the earlier objects *unless* we need to defragment.
+ * 1. Reorder the buffers, where objects with the most restrictive
+ * placement requirements go first (ignoring fixed location buffers for
+ * now). For example, objects needing the mappable aperture (the first
+ * 256M of GTT) should go first vs. objects that can be placed just
+ * about anywhere. Repeat the previous pass.
*
- * Defragmenting is skipped if all objects are pinned at a fixed location.
+ * 2. Consider buffers that are pinned at a fixed location. Also try to
+ * evict the entire VM this time, leaving only objects that we were
+ * unable to lock. Try again to bind the buffers (still using the new
+ * buffer order).
+ *
+ * 3. We likely have object lock contention for one or more stubborn
+ * objects in the VM, for which we need to evict to make forward
+ * progress (perhaps we are fighting the shrinker?). When evicting the
+ * VM this time around, anything that we can't lock we now track using
+ * the busy_bo, using the full lock (after dropping the vm->mutex to
+ * prevent deadlocks), instead of trylock. We then continue to evict the
+ * VM, this time with the stubborn object locked, which we can now
+ * hopefully unbind (if still bound in the VM). Repeat until the VM is
+ * evicted. Finally, we should be able to bind everything.
*/
- for (pass = 0; pass <= 2; pass++) {
+ for (pass = 0; pass <= 3; pass++) {
int pin_flags = PIN_USER | PIN_VALIDATE;
if (pass == 0)
pin_flags |= PIN_NONBLOCK;
if (pass >= 1)
- unpinned = eb_unbind(eb, pass == 2);
+ unpinned = eb_unbind(eb, pass >= 2);
if (pass == 2) {
err = mutex_lock_interruptible(&eb->context->vm->mutex);
if (!err) {
- err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
+ err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL);
+ mutex_unlock(&eb->context->vm->mutex);
+ }
+ if (err)
+ return err;
+ }
+
+ if (pass == 3) {
+retry:
+ err = mutex_lock_interruptible(&eb->context->vm->mutex);
+ if (!err) {
+ struct drm_i915_gem_object *busy_bo = NULL;
+
+ err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo);
mutex_unlock(&eb->context->vm->mutex);
+ if (err && busy_bo) {
+ err = i915_gem_object_lock(busy_bo, &eb->ww);
+ i915_gem_object_put(busy_bo);
+ if (!err)
+ goto retry;
+ }
}
if (err)
return err;
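A userspace sketch of the pass-3 strategy described above — evict with trylock, and on contention drop the vm lock, take the reported lock the slow (blocking) way, then retry — using pthread mutexes as illustrative stand-ins for the i915 locking (the real code keeps the contended object locked through the ww context across the retry rather than releasing it first):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

static void *contender(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&obj_lock);
	usleep(100 * 1000);		/* hold the object lock briefly */
	pthread_mutex_unlock(&obj_lock);
	return NULL;
}

/* Returns false and reports the contended lock, like -EBUSY + busy_bo. */
static bool evict_all(pthread_mutex_t **busy)
{
	if (pthread_mutex_trylock(&obj_lock)) {
		*busy = &obj_lock;
		return false;
	}
	/* ... unbind the object here ... */
	pthread_mutex_unlock(&obj_lock);
	return true;
}

int main(void)
{
	pthread_mutex_t *busy = NULL;
	pthread_t t;
	int passes = 0;

	pthread_create(&t, NULL, contender, NULL);
	usleep(10 * 1000);		/* let the contender grab obj_lock */

retry:
	passes++;
	pthread_mutex_lock(&vm_lock);
	if (!evict_all(&busy)) {
		pthread_mutex_unlock(&vm_lock);	/* drop vm lock first */
		pthread_mutex_lock(busy);	/* blocking acquire */
		pthread_mutex_unlock(busy);
		goto retry;			/* trylock now succeeds */
	}
	pthread_mutex_unlock(&vm_lock);

	pthread_join(t, NULL);
	printf("evicted after %d pass(es)\n", passes);
	return 0;
}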
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c29efdef8313..0ad44f3868de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -369,7 +369,7 @@ retry:
if (vma == ERR_PTR(-ENOSPC)) {
ret = mutex_lock_interruptible(&ggtt->vm.mutex);
if (!ret) {
- ret = i915_gem_evict_vm(&ggtt->vm, &ww);
+ ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
mutex_unlock(&ggtt->vm.mutex);
}
if (ret)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 767e329e1cc5..9c18b5f2e789 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -1109,9 +1109,15 @@ static void mmio_invalidate_full(struct intel_gt *gt)
continue;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ u32 val = BIT(engine->instance);
+
+ if (engine->class == VIDEO_DECODE_CLASS ||
+ engine->class == VIDEO_ENHANCEMENT_CLASS ||
+ engine->class == COMPUTE_CLASS)
+ val = _MASKED_BIT_ENABLE(val);
intel_gt_mcr_multicast_write_fw(gt,
xehp_regs[engine->class],
- BIT(engine->instance));
+ val);
} else {
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
if (!i915_mmio_reg_offset(rb.reg))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 0c80ba51a4bd..2bcdd192f814 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -545,6 +545,32 @@ static int check_ccs_header(struct intel_gt *gt,
return 0;
}
+static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **fw)
+{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+ struct device *dev = gt->i915->drm.dev;
+ int err;
+
+ err = firmware_request_nowarn(fw, uc_fw->file_selected.path, dev);
+
+ if (err)
+ return err;
+
+ if ((*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) {
+ drm_err(&gt->i915->drm,
+ "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
+
+ /* try to find another blob to load */
+ release_firmware(*fw);
+ *fw = NULL;
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
/**
* intel_uc_fw_fetch - fetch uC firmware
* @uc_fw: uC firmware
@@ -558,7 +584,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct drm_i915_private *i915 = gt->i915;
struct intel_uc_fw_file file_ideal;
- struct device *dev = i915->drm.dev;
struct drm_i915_gem_object *obj;
const struct firmware *fw = NULL;
bool old_ver = false;
@@ -574,20 +599,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);
- err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ err = try_firmware_load(uc_fw, &fw);
memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
- if (!err && fw->size > INTEL_UC_RSVD_GGTT_PER_FW) {
- drm_err(&i915->drm,
- "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
-
- /* try to find another blob to load */
- release_firmware(fw);
- err = -ENOENT;
- }
-
/* Any error is terminal if overriding. Don't bother searching for older versions */
if (err && intel_uc_fw_is_overridden(uc_fw))
goto fail;
@@ -608,7 +622,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
break;
}
- err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ err = try_firmware_load(uc_fw, &fw);
}
if (err)
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 9f1c209d9251..0616b73175f3 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -151,6 +151,22 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
"0x%llx\n");
+static int vgpu_status_get(void *data, u64 *val)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+
+ *val = 0;
+
+ if (test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+ *val |= (1 << INTEL_VGPU_STATUS_ATTACHED);
+ if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
+ *val |= (1 << INTEL_VGPU_STATUS_ACTIVE);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n");
+
/**
* intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
* @vgpu: a vGPU
@@ -162,11 +178,12 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
snprintf(name, 16, "vgpu%d", vgpu->id);
vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
- debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active);
debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
&vgpu_mmio_diff_fops);
debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
&vgpu_scan_nonprivbb_fops);
+ debugfs_create_file("status", 0644, vgpu->debugfs, vgpu,
+ &vgpu_status_fops);
}
/**
@@ -175,8 +192,13 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
{
- debugfs_remove_recursive(vgpu->debugfs);
- vgpu->debugfs = NULL;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+ if (minor->debugfs_root && gvt->debugfs_root) {
+ debugfs_remove_recursive(vgpu->debugfs);
+ vgpu->debugfs = NULL;
+ }
}
/**
@@ -199,6 +221,10 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt)
*/
void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
{
- debugfs_remove_recursive(gvt->debugfs_root);
- gvt->debugfs_root = NULL;
+ struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+ if (minor->debugfs_root) {
+ debugfs_remove_recursive(gvt->debugfs_root);
+ gvt->debugfs_root = NULL;
+ }
}
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 355f1c0e8664..ffe41e9be04f 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -134,7 +134,8 @@ static void dmabuf_gem_object_free(struct kref *kref)
struct list_head *pos;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
- if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
+ if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) &&
+ !list_empty(&vgpu->dmabuf_obj_list_head)) {
list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
if (dmabuf_obj == obj) {
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 51e5e8fb505b..4ec85308379a 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -55,7 +55,7 @@ static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
int idx;
bool ret;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return false;
idx = srcu_read_lock(&kvm->srcu);
@@ -1178,7 +1178,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -EINVAL;
pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
if (is_error_noslot_pfn(pfn))
@@ -1209,10 +1209,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
PAGE_SIZE, &dma_addr);
- if (ret) {
- ppgtt_invalidate_spt(spt);
- return ret;
- }
+ if (ret)
+ goto err;
sub_se.val64 = se->val64;
/* Copy the PAT field from PDE. */
@@ -1231,6 +1229,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
ops->set_pfn(se, sub_spt->shadow_page.mfn);
ppgtt_set_shadow_entry(spt, se, index);
return 0;
+err:
+ /* Cancel the existing address mappings of the DMA addr. */
+ for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+ gvt_vdbg_mm("invalidate 4K entry\n");
+ ppgtt_invalidate_pte(sub_spt, &sub_se);
+ }
+ /* Release the newly allocated spt. */
+ trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+ sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+ ppgtt_free_spt(sub_spt);
+ return ret;
}
static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 62823c0e13ab..2d65800d8e93 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -172,13 +172,18 @@ struct intel_vgpu_submission {
#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
+enum {
+ INTEL_VGPU_STATUS_ATTACHED = 0,
+ INTEL_VGPU_STATUS_ACTIVE,
+ INTEL_VGPU_STATUS_NR_BITS,
+};
+
struct intel_vgpu {
struct vfio_device vfio_device;
struct intel_gvt *gvt;
struct mutex vgpu_lock;
int id;
- bool active;
- bool attached;
+ DECLARE_BITMAP(status, INTEL_VGPU_STATUS_NR_BITS);
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
@@ -467,7 +472,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
- for_each_if(vgpu->active)
+ for_each_if(test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
u32 offset, u32 val, bool low)
@@ -725,7 +730,7 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
void *buf, unsigned long len)
{
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -ESRCH;
return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
}
@@ -743,7 +748,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -ESRCH;
return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
}
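The gvt.h change above converts two independent bools into one status bitmap so attach/active transitions become atomic bit operations instead of racy plain stores. A small userspace sketch of the idea, using C11 atomics as stand-ins for the kernel's DECLARE_BITMAP()/test_bit()/set_bit() helpers (illustrative names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { STATUS_ATTACHED, STATUS_ACTIVE, STATUS_NR_BITS };

struct vgpu_like {
	atomic_ulong status;	/* stands in for DECLARE_BITMAP(status, ...) */
};

static void set_status(struct vgpu_like *v, int bit)
{
	atomic_fetch_or(&v->status, 1UL << bit);	/* like set_bit() */
}

static bool test_status(struct vgpu_like *v, int bit)
{
	return atomic_load(&v->status) & (1UL << bit);	/* like test_bit() */
}

int main(void)
{
	struct vgpu_like v = { 0 };

	set_status(&v, STATUS_ATTACHED);
	printf("attached=%d active=%d\n",
	       (int)test_status(&v, STATUS_ATTACHED),
	       (int)test_status(&v, STATUS_ACTIVE));
	return 0;
}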
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index a6b2021b665f..68eca023bbc6 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -433,7 +433,7 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
* enabled by guest. so if msi_trigger is null, success is still
* returned and don't inject interrupt into guest.
*/
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -ESRCH;
if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
return -EFAULT;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index f5451adcd489..8ae7039b3683 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -638,7 +638,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
mutex_lock(&vgpu->gvt->lock);
for_each_active_vgpu(vgpu->gvt, itr, id) {
- if (!itr->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, itr->status))
continue;
if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
@@ -655,9 +655,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- if (vgpu->attached)
- return -EEXIST;
-
if (!vgpu->vfio_device.kvm ||
vgpu->vfio_device.kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
@@ -667,14 +664,14 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
if (__kvmgt_vgpu_exist(vgpu))
return -EEXIST;
- vgpu->attached = true;
-
vgpu->track_node.track_write = kvmgt_page_track_write;
vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_get_kvm(vgpu->vfio_device.kvm);
kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
&vgpu->track_node);
+ set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
&vgpu->nr_cache_entries);
@@ -698,11 +695,10 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- if (!vgpu->attached)
- return;
-
intel_gvt_release_vgpu(vgpu);
+ clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
@@ -718,8 +714,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
vgpu->dma_addr_cache = RB_ROOT;
intel_vgpu_release_msi_eventfd_ctx(vgpu);
-
- vgpu->attached = false;
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -1512,9 +1506,6 @@ static void intel_vgpu_remove(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
- if (WARN_ON_ONCE(vgpu->attached))
- return;
-
vfio_unregister_group_dev(&vgpu->vfio_device);
vfio_put_device(&vgpu->vfio_device);
}
@@ -1559,7 +1550,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
struct kvm_memory_slot *slot;
int idx;
- if (!info->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
return -ESRCH;
idx = srcu_read_lock(&kvm->srcu);
@@ -1589,8 +1580,8 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
struct kvm_memory_slot *slot;
int idx;
- if (!info->attached)
- return 0;
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
+ return -ESRCH;
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
@@ -1668,7 +1659,7 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
struct gvt_dma *entry;
int ret;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return -EINVAL;
mutex_lock(&vgpu->cache_lock);
@@ -1714,8 +1705,8 @@ int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
struct gvt_dma *entry;
int ret = 0;
- if (!vgpu->attached)
- return -ENODEV;
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+ return -EINVAL;
mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
@@ -1742,7 +1733,7 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
{
struct gvt_dma *entry;
- if (!vgpu->attached)
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
return;
mutex_lock(&vgpu->cache_lock);
@@ -1778,7 +1769,7 @@ static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
(void *)&gvt->service_request)) {
- if (vgpu->active)
+ if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
intel_vgpu_emulate_vblank(vgpu);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9cd8fcbf7cad..f4055804aad1 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -695,6 +695,7 @@ intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
!workload->shadow_mm->ppgtt_mm.shadowed) {
+ intel_vgpu_unpin_mm(workload->shadow_mm);
gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
return -EINVAL;
}
@@ -865,7 +866,8 @@ pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
goto out;
}
- if (!scheduler->current_vgpu->active ||
+ if (!test_bit(INTEL_VGPU_STATUS_ACTIVE,
+ scheduler->current_vgpu->status) ||
list_empty(workload_q_head(scheduler->current_vgpu, engine)))
goto out;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 3c529c2705dd..a5497440484f 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -166,9 +166,7 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
*/
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
- mutex_lock(&vgpu->vgpu_lock);
- vgpu->active = true;
- mutex_unlock(&vgpu->vgpu_lock);
+ set_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
}
/**
@@ -183,7 +181,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
mutex_lock(&vgpu->vgpu_lock);
- vgpu->active = false;
+ clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
if (atomic_read(&vgpu->submission.running_workload_num)) {
mutex_unlock(&vgpu->vgpu_lock);
@@ -228,7 +226,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *i915 = gvt->gt->i915;
- drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
+ drm_WARN(&i915->drm, test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status),
+ "vGPU is still active!\n");
/*
* remove idr first so later clean can judge if need to stop
@@ -285,8 +284,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
if (ret)
goto out_free_vgpu;
- vgpu->active = false;
-
+ clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
return vgpu;
out_free_vgpu:
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index f025ee4fa526..a4b4d9b7d26c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -416,6 +416,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* @vm: Address space to cleanse
* @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm
* will be able to evict vma's locked by the ww as well.
+ * @busy_bo: Optional pointer to struct drm_i915_gem_object. If not NULL and
+ * i915_gem_evict_vm() is unable to trylock an object for eviction, @busy_bo
+ * will point to it and -EBUSY is returned. The caller must drop the
+ * vm->mutex before trying again to acquire the contended lock; the caller
+ * also owns a reference to the object.
*
* This function evicts all vmas from a vm.
*
@@ -425,7 +430,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
-int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
+int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object **busy_bo)
{
int ret = 0;
@@ -457,15 +463,22 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
* the resv is shared among multiple objects, we still
* need the object ref.
*/
- if (dying_vma(vma) ||
+ if (!i915_gem_object_get_rcu(vma->obj) ||
(ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
__i915_vma_pin(vma);
list_add(&vma->evict_link, &locked_eviction_list);
continue;
}
- if (!i915_gem_object_trylock(vma->obj, ww))
+ if (!i915_gem_object_trylock(vma->obj, ww)) {
+ if (busy_bo) {
+ *busy_bo = vma->obj; /* holds ref */
+ ret = -EBUSY;
+ break;
+ }
+ i915_gem_object_put(vma->obj);
continue;
+ }
__i915_vma_pin(vma);
list_add(&vma->evict_link, &eviction_list);
@@ -473,25 +486,29 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
if (list_empty(&eviction_list) && list_empty(&locked_eviction_list))
break;
- ret = 0;
/* Unbind locked objects first, before unlocking the eviction_list */
list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) {
__i915_vma_unpin(vma);
- if (ret == 0)
+ if (ret == 0) {
ret = __i915_vma_unbind(vma);
- if (ret != -EINTR) /* "Get me out of here!" */
- ret = 0;
+ if (ret != -EINTR) /* "Get me out of here!" */
+ ret = 0;
+ }
+ if (!dying_vma(vma))
+ i915_gem_object_put(vma->obj);
}
list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
- if (ret == 0)
+ if (ret == 0) {
ret = __i915_vma_unbind(vma);
- if (ret != -EINTR) /* "Get me out of here!" */
- ret = 0;
+ if (ret != -EINTR) /* "Get me out of here!" */
+ ret = 0;
+ }
i915_gem_object_unlock(vma->obj);
+ i915_gem_object_put(vma->obj);
}
} while (ret == 0);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.h b/drivers/gpu/drm/i915/i915_gem_evict.h
index e593c530f9bd..bf0ee0e4fe60 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.h
+++ b/drivers/gpu/drm/i915/i915_gem_evict.h
@@ -11,6 +11,7 @@
struct drm_mm_node;
struct i915_address_space;
struct i915_gem_ww_ctx;
+struct drm_i915_gem_object;
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww,
@@ -23,6 +24,7 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
struct drm_mm_node *node,
unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm,
- struct i915_gem_ww_ctx *ww);
+ struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object **busy_bo);
#endif /* __I915_GEM_EVICT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index edfe363af838..91c533986041 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1974,7 +1974,10 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (ddi_hotplug_trigger) {
u32 dig_hotplug_reg;
+ /* Locking due to DSI native GPIO sequences */
+ spin_lock(&dev_priv->irq_lock);
dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
+ spin_unlock(&dev_priv->irq_lock);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6da9784fe4a2..ccd1f864aa19 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -1129,7 +1129,6 @@ static const struct intel_gt_definition xelpmp_extra_gt[] = {
{}
};
-__maybe_unused
static const struct intel_device_info mtl_info = {
XE_HP_FEATURES,
XE_LPDP_FEATURES,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 8e1892d14774..916176872544 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -5988,6 +5988,7 @@
#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
#define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin) (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin) (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4))
#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin) (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4))
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 703fee6b5f75..3a33be5401ed 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1566,7 +1566,7 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
* locked objects when called from execbuf when pinning
* is removed. This would probably regress badly.
*/
- i915_gem_evict_vm(vm, NULL);
+ i915_gem_evict_vm(vm, NULL, NULL);
mutex_unlock(&vm->mutex);
}
} while (1);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 8c6517d29b8e..37068542aafe 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -344,7 +344,7 @@ static int igt_evict_vm(void *arg)
/* Everything is pinned, nothing should happen */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_vm(&ggtt->vm, NULL);
+ err = i915_gem_evict_vm(&ggtt->vm, NULL, NULL);
mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
@@ -356,7 +356,7 @@ static int igt_evict_vm(void *arg)
for_i915_gem_ww(&ww, err, false) {
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_vm(&ggtt->vm, &ww);
+ err = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
mutex_unlock(&ggtt->vm.mutex);
}
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index dba4f7d81d69..80142d9a4a55 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -614,6 +614,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
break;
}
+ if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_BG)
+ width = ipu_src_rect_width(new_state);
+ else
+ width = drm_rect_width(&new_state->src) >> 16;
+
eba = drm_plane_state_to_eba(new_state, 0);
/*
@@ -622,8 +627,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
*/
if (ipu_state->use_pre) {
axi_id = ipu_chan_assign_axi_id(ipu_plane->dma);
- ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id,
- ipu_src_rect_width(new_state),
+ ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width,
drm_rect_height(&new_state->src) >> 16,
fb->pitches[0], fb->format->format,
fb->modifier, &eba);
@@ -678,9 +682,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
break;
}
- ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8));
+ ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width);
- width = ipu_src_rect_width(new_state);
height = drm_rect_height(&new_state->src) >> 16;
info = drm_format_info(fb->format->format);
ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0],
@@ -744,8 +747,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16);
ipu_cpmem_zero(ipu_plane->alpha_ch);
- ipu_cpmem_set_resolution(ipu_plane->alpha_ch,
- ipu_src_rect_width(new_state),
+ ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width,
drm_rect_height(&new_state->src) >> 16);
ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index d4b907889a21..cd399b0b7181 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -436,15 +436,14 @@ void meson_viu_init(struct meson_drm *priv)
/* Initialize OSD1 fifo control register */
reg = VIU_OSD_DDR_PRIORITY_URGENT |
- VIU_OSD_HOLD_FIFO_LINES(31) |
VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */
VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */
VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
- reg |= VIU_OSD_BURST_LENGTH_32;
+ reg |= (VIU_OSD_BURST_LENGTH_32 | VIU_OSD_HOLD_FIFO_LINES(31));
else
- reg |= VIU_OSD_BURST_LENGTH_64;
+ reg |= (VIU_OSD_BURST_LENGTH_64 | VIU_OSD_HOLD_FIFO_LINES(4));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 2fa5afe21288..919e6cc04982 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -82,6 +82,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
struct panfrost_gem_object *bo;
struct drm_panfrost_create_bo *args = data;
struct panfrost_gem_mapping *mapping;
+ int ret;
if (!args->size || args->pad ||
(args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -92,21 +93,29 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
!(args->flags & PANFROST_BO_NOEXEC))
return -EINVAL;
- bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
- &args->handle);
+ bo = panfrost_gem_create(dev, args->size, args->flags);
if (IS_ERR(bo))
return PTR_ERR(bo);
+ ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+ if (ret)
+ goto out;
+
mapping = panfrost_gem_mapping_get(bo, priv);
- if (!mapping) {
- drm_gem_object_put(&bo->base.base);
- return -EINVAL;
+ if (mapping) {
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
+ } else {
+ /* This can only happen if the handle from
+ * drm_gem_handle_create() has already been guessed and freed
+ * by user space
+ */
+ ret = -EINVAL;
}
- args->offset = mapping->mmnode.start << PAGE_SHIFT;
- panfrost_gem_mapping_put(mapping);
-
- return 0;
+out:
+ drm_gem_object_put(&bo->base.base);
+ return ret;
}
/**
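The lifetime rule behind this split: drm_gem_handle_create() publishes the object, so userspace can immediately close (or guess) the handle and drop that reference; the ioctl therefore keeps its own reference until it has read mapping->mmnode.start. A minimal refcount sketch of that rule, with illustrative stand-ins rather than the GEM API:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
};

static struct obj *obj_create(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (o)
		o->refs = 1;	/* creator's reference */
	return o;
}

static void obj_get(struct obj *o) { o->refs++; }

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		free(o);
}

int main(void)
{
	struct obj *o = obj_create();

	obj_get(o);	/* handle table's reference: object is published */
	obj_put(o);	/* userspace closes the guessed handle... */
	/* ...but the creator still holds its own reference, so this
	 * access is safe; dropping it before this point was the bug. */
	printf("refs=%d\n", o->refs);
	obj_put(o);	/* creator is done: object freed */
	return 0;
}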
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 293e799e2fe8..3c812fbd126f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -235,12 +235,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
}
struct panfrost_gem_object *
-panfrost_gem_create_with_handle(struct drm_file *file_priv,
- struct drm_device *dev, size_t size,
- u32 flags,
- uint32_t *handle)
+panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
{
- int ret;
struct drm_gem_shmem_object *shmem;
struct panfrost_gem_object *bo;
@@ -256,16 +252,6 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
bo->is_heap = !!(flags & PANFROST_BO_HEAP);
- /*
- * Allocate an id of idr table where the obj is registered
- * and handle has the id what user can see.
- */
- ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
- /* drop reference from allocate - handle holds it now. */
- drm_gem_object_put(&shmem->base);
- if (ret)
- return ERR_PTR(ret);
-
return bo;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 8088d5fd8480..ad2877eeeccd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -69,10 +69,7 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt);
struct panfrost_gem_object *
-panfrost_gem_create_with_handle(struct drm_file *file_priv,
- struct drm_device *dev, size_t size,
- u32 flags,
- uint32_t *handle);
+panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags);
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
void panfrost_gem_close(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index fe09e5be79bd..15d04a0ec623 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -81,7 +81,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
init_completion(&entity->entity_idle);
/* We start in an idle state. */
- complete(&entity->entity_idle);
+ complete_all(&entity->entity_idle);
spin_lock_init(&entity->rq_lock);
spsc_queue_init(&entity->job_queue);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 31f3a1267be4..fd22d753b4ed 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -987,7 +987,7 @@ static int drm_sched_main(void *param)
sched_job = drm_sched_entity_pop_job(entity);
if (!sched_job) {
- complete(&entity->entity_idle);
+ complete_all(&entity->entity_idle);
continue;
}
@@ -998,7 +998,7 @@ static int drm_sched_main(void *param)
trace_drm_run_job(sched_job, entity);
fence = sched->ops->run_job(sched_job);
- complete(&entity->entity_idle);
+ complete_all(&entity->entity_idle);
drm_sched_fence_scheduled(s_fence);
if (!IS_ERR_OR_NULL(fence)) {
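Why complete_all() and not complete(): a struct completion signalled with complete() hands the event to exactly one waiter, while complete_all() latches it for every current and future waiter — which is what entity_idle needs, since more than one path may wait on an idle entity. A userspace sketch of the latched behaviour, with pthread primitives standing in for the kernel completion API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct completion_like {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;	/* latched: stays true once set, like complete_all() */
};

static void complete_all_like(struct completion_like *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion_like(struct completion_like *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct completion_like c = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
	};

	complete_all_like(&c);
	wait_for_completion_like(&c);	/* returns immediately: latched */
	wait_for_completion_like(&c);	/* a second waiter also returns */
	puts("both waiters observed idle");
	return 0;
}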
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index b29ef1085cad..f896ef85c2f2 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -12,3 +12,5 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_mm_test.o \
drm_plane_helper_test.o \
drm_rect_test.o
+
+CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 89f12d3b4a21..186b28dc7038 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -298,9 +298,9 @@ static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct dr
return false;
}
-static bool check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
- unsigned int count,
- u64 size)
+static bool noinline_for_stack check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
+ unsigned int count,
+ u64 size)
{
const struct boundary {
u64 start, size;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 8d7728181de0..c7e74cf13022 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -184,7 +184,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs = NULL;
struct drm_gem_shmem_object *shmem_obj;
struct virtio_gpu_object *bo;
- struct virtio_gpu_mem_entry *ents;
+ struct virtio_gpu_mem_entry *ents = NULL;
unsigned int nents;
int ret;
@@ -210,7 +210,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
ret = -ENOMEM;
objs = virtio_gpu_array_alloc(1);
if (!objs)
- goto err_put_id;
+ goto err_free_entry;
virtio_gpu_array_add_obj(objs, &bo->base.base);
ret = virtio_gpu_array_lock_resv(objs);
@@ -239,6 +239,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
err_put_objs:
virtio_gpu_array_put_free(objs);
+err_free_entry:
+ kvfree(ents);
err_put_id:
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 0d8e6bd1ccbf..90996c108146 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -717,7 +717,7 @@ static int xen_drv_probe(struct xenbus_device *xb_dev,
return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}
-static int xen_drv_remove(struct xenbus_device *dev)
+static void xen_drv_remove(struct xenbus_device *dev)
{
struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
int to = 100;
@@ -751,7 +751,6 @@ static int xen_drv_remove(struct xenbus_device *dev)
xen_drm_drv_fini(front_info);
xenbus_frontend_closed(dev);
- return 0;
}
static const struct xenbus_device_id xen_driver_ids[] = {
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 945758f39523..3e1272695d99 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -278,7 +278,6 @@ static int do_get_hw_stats(struct ib_device *ibdev,
const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
struct mlx5_core_dev *mdev;
int ret, num_counters;
- u32 mdev_port_num;
if (!stats)
return -EINVAL;
@@ -299,8 +298,9 @@ static int do_get_hw_stats(struct ib_device *ibdev,
}
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
- mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
- &mdev_port_num);
+ if (!port_num)
+ port_num = 1;
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL);
if (!mdev) {
/* If port is not affiliated yet, its in down state
* which doesn't have any counters yet, so it would be
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 40d9410ec303..cf953d23d18d 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4502,6 +4502,40 @@ static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
return false;
}
+static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_type qp_type)
+{
+ int log_max_ra_res;
+ int log_max_ra_req;
+
+ if (qp_type == MLX5_IB_QPT_DCI) {
+ log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_res_dc);
+ log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_req_dc);
+ } else {
+ log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_res_qp);
+ log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_req_qp);
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+ attr->max_rd_atomic > log_max_ra_res) {
+ mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
+ attr->max_rd_atomic);
+ return false;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+ attr->max_dest_rd_atomic > log_max_ra_req) {
+ mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
+ attr->max_dest_rd_atomic);
+ return false;
+ }
+ return true;
+}
+
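/* Editor's sketch (assumption): the cap arithmetic used by the helper
 * added above, in one line - the device advertises a log2 limit, so
 * the largest legal request is 1 << log_cap. */
static bool example_within_log2_cap(u32 requested, u8 log_cap)
{
	return requested <= (1U << log_cap);
}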
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
@@ -4589,21 +4623,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
- attr->max_rd_atomic >
- (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
- mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
- attr->max_rd_atomic);
- goto out;
- }
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
- attr->max_dest_rd_atomic >
- (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
- mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
- attr->max_dest_rd_atomic);
+ if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
goto out;
- }
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
err = 0;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 00b0068fda20..5d94db453df3 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -62,9 +62,6 @@ enum {
SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
SRP_TSK_MGMT_SQ_SIZE,
- SRP_TAG_NO_REQ = ~0U,
- SRP_TAG_TSK_MGMT = 1U << 31,
-
SRP_MAX_PAGES_PER_MR = 512,
SRP_MAX_ADD_CDB_LEN = 16,
@@ -79,6 +76,11 @@ enum {
sizeof(struct srp_imm_buf),
};
+enum {
+ SRP_TAG_NO_REQ = ~0U,
+ SRP_TAG_TSK_MGMT = BIT(31),
+};
+
enum srp_target_state {
SRP_TARGET_SCANNING,
SRP_TARGET_LIVE,
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 8d8ebdc2039b..67f1c7364c95 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -51,7 +51,7 @@ module_param_array(ptr_size, int, NULL, 0444);
MODULE_PARM_DESC(ptr_size,
"Pointing device width, height in pixels (default 800,600)");
-static int xenkbd_remove(struct xenbus_device *);
+static void xenkbd_remove(struct xenbus_device *);
static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
static void xenkbd_disconnect_backend(struct xenkbd_info *);
@@ -404,7 +404,7 @@ static int xenkbd_resume(struct xenbus_device *dev)
return xenkbd_connect_backend(dev, info);
}
-static int xenkbd_remove(struct xenbus_device *dev)
+static void xenkbd_remove(struct xenbus_device *dev)
{
struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
@@ -417,7 +417,6 @@ static int xenkbd_remove(struct xenbus_device *dev)
input_unregister_device(info->mtouch);
free_page((unsigned long)info->page);
kfree(info);
- return 0;
}
static int xenkbd_connect_backend(struct xenbus_device *dev,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e1ea3a7bd9d9..b424a6ee27ba 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1742,6 +1742,8 @@ static void dm_split_and_process_bio(struct mapped_device *md,
* otherwise associated queue_limits won't be imposed.
*/
bio = bio_split_to_limits(bio);
+ if (!bio)
+ return;
}
init_clone_info(&ci, md, map, bio, is_abnormal);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 775f1dde190a..8af639296b3c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -455,6 +455,8 @@ static void md_submit_bio(struct bio *bio)
}
bio = bio_split_to_limits(bio);
+ if (!bio)
+ return;
if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) {
if (bio_sectors(bio) != 0)
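/* Editor's sketch (assumption): the contract behind the two
 * bio_split_to_limits() checks added above (dm and md). The function
 * may complete the bio itself and return NULL, in which case the
 * caller owns nothing and must simply return. */
static void example_submit(struct bio *bio)
{
	bio = bio_split_to_limits(bio);
	if (!bio)		/* already ended by the split helper */
		return;

	example_process(bio);	/* hypothetical: continue with the split bio */
}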
diff --git a/drivers/mtd/parsers/scpart.c b/drivers/mtd/parsers/scpart.c
index 02601bb33de4..6e5e11c37078 100644
--- a/drivers/mtd/parsers/scpart.c
+++ b/drivers/mtd/parsers/scpart.c
@@ -50,7 +50,7 @@ static int scpart_scan_partmap(struct mtd_info *master, loff_t partmap_offs,
int cnt = 0;
int res = 0;
int res2;
- loff_t offs;
+ uint32_t offs;
size_t retlen;
struct sc_part_desc *pdesc = NULL;
struct sc_part_desc *tmpdesc;
diff --git a/drivers/mtd/parsers/tplink_safeloader.c b/drivers/mtd/parsers/tplink_safeloader.c
index f601e7bd8627..1c689dafca2a 100644
--- a/drivers/mtd/parsers/tplink_safeloader.c
+++ b/drivers/mtd/parsers/tplink_safeloader.c
@@ -91,7 +91,7 @@ static int mtd_parser_tplink_safeloader_parse(struct mtd_info *mtd,
buf = mtd_parser_tplink_safeloader_read_table(mtd);
if (!buf) {
err = -ENOENT;
- goto err_out;
+ goto err_free_parts;
}
for (idx = 0, offset = TPLINK_SAFELOADER_DATA_OFFSET;
@@ -118,6 +118,8 @@ static int mtd_parser_tplink_safeloader_parse(struct mtd_info *mtd,
err_free:
for (idx -= 1; idx >= 0; idx--)
kfree(parts[idx].name);
+err_free_parts:
+ kfree(parts);
err_out:
return err;
}
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index d8703d7dfd0a..d67c926bca8b 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -10,6 +10,7 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/math64.h>
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 455b555275f1..c99ffe6c683a 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1549,6 +1549,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
slave_err(bond->dev, port->slave->dev,
"Port %d did not find a suitable aggregator\n",
port->actor_port_number);
+ return;
}
}
/* if all aggregator's ports are READY_N == TRUE, set ready=TRUE
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b4c65783960a..0363ce597661 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2654,10 +2654,12 @@ static void bond_miimon_link_change(struct bonding *bond,
static void bond_miimon_commit(struct bonding *bond)
{
- struct slave *slave, *primary;
+ struct slave *slave, *primary, *active;
bool do_failover = false;
struct list_head *iter;
+ ASSERT_RTNL();
+
bond_for_each_slave(bond, slave, iter) {
switch (slave->link_new_state) {
case BOND_LINK_NOCHANGE:
@@ -2700,8 +2702,8 @@ static void bond_miimon_commit(struct bonding *bond)
bond_miimon_link_change(bond, slave, BOND_LINK_UP);
- if (!rcu_access_pointer(bond->curr_active_slave) || slave == primary ||
- slave->prio > rcu_dereference(bond->curr_active_slave)->prio)
+ active = rtnl_dereference(bond->curr_active_slave);
+ if (!active || slave == primary || slave->prio > active->prio)
do_failover = true;
continue;
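/* Editor's sketch (assumption): the locking rule behind the change
 * above. curr_active_slave is RCU-published but only updated under
 * RTNL, so a path holding RTNL (asserted at the top of
 * bond_miimon_commit()) can take one rtnl_dereference() snapshot and
 * reuse it, instead of calling rcu_dereference() outside a read-side
 * section. */
static bool example_should_failover(struct bonding *bond,
				    struct slave *slave,
				    struct slave *primary)
{
	struct slave *active;

	ASSERT_RTNL();
	active = rtnl_dereference(bond->curr_active_slave);
	return !active || slave == primary || slave->prio > active->prio;
}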
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
index 7a2445a34eb7..e3181d5471df 100644
--- a/drivers/net/dsa/mv88e6xxx/Kconfig
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -2,7 +2,6 @@
config NET_DSA_MV88E6XXX
tristate "Marvell 88E6xxx Ethernet switch fabric support"
depends on NET_DSA
- depends on PTP_1588_CLOCK_OPTIONAL
select IRQ_DOMAIN
select NET_DSA_TAG_EDSA
select NET_DSA_TAG_DSA
@@ -13,7 +12,8 @@ config NET_DSA_MV88E6XXX
config NET_DSA_MV88E6XXX_PTP
bool "PTP support for Marvell 88E6xxx"
default n
- depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK
+ depends on (NET_DSA_MV88E6XXX = y && PTP_1588_CLOCK = y) || \
+ (NET_DSA_MV88E6XXX = m && PTP_1588_CLOCK)
help
Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
chips that support it.
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index c5c3b4e92f28..2f224b166bbb 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -37,77 +37,104 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
}
static int
-qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
+qca8k_mii_write_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
{
- u16 *cached_lo = &priv->mdio_cache.lo;
- struct mii_bus *bus = priv->bus;
int ret;
+ u16 lo;
- if (lo == *cached_lo)
- return 0;
-
+ lo = val & 0xffff;
ret = bus->write(bus, phy_id, regnum, lo);
if (ret < 0)
dev_err_ratelimited(&bus->dev,
"failed to write qca8k 32bit lo register\n");
- *cached_lo = lo;
- return 0;
+ return ret;
}
static int
-qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
+qca8k_mii_write_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
{
- u16 *cached_hi = &priv->mdio_cache.hi;
- struct mii_bus *bus = priv->bus;
int ret;
+ u16 hi;
- if (hi == *cached_hi)
- return 0;
-
+ hi = (u16)(val >> 16);
ret = bus->write(bus, phy_id, regnum, hi);
if (ret < 0)
dev_err_ratelimited(&bus->dev,
"failed to write qca8k 32bit hi register\n");
- *cached_hi = hi;
- return 0;
+ return ret;
}
static int
-qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+qca8k_mii_read_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
int ret;
ret = bus->read(bus, phy_id, regnum);
- if (ret >= 0) {
- *val = ret;
- ret = bus->read(bus, phy_id, regnum + 1);
- *val |= ret << 16;
- }
+ if (ret < 0)
+ goto err;
- if (ret < 0) {
- dev_err_ratelimited(&bus->dev,
- "failed to read qca8k 32bit register\n");
- *val = 0;
- return ret;
- }
+ *val = ret & 0xffff;
+ return 0;
+
+err:
+ dev_err_ratelimited(&bus->dev,
+ "failed to read qca8k 32bit lo register\n");
+ *val = 0;
+
+ return ret;
+}
+static int
+qca8k_mii_read_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+{
+ int ret;
+
+ ret = bus->read(bus, phy_id, regnum);
+ if (ret < 0)
+ goto err;
+
+ *val = ret << 16;
return 0;
+
+err:
+ dev_err_ratelimited(&bus->dev,
+ "failed to read qca8k 32bit hi register\n");
+ *val = 0;
+
+ return ret;
}
-static void
-qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
+static int
+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
- u16 lo, hi;
+ u32 hi, lo;
int ret;
- lo = val & 0xffff;
- hi = (u16)(val >> 16);
+ *val = 0;
- ret = qca8k_set_lo(priv, phy_id, regnum, lo);
- if (ret >= 0)
- ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
+ ret = qca8k_mii_read_lo(bus, phy_id, regnum, &lo);
+ if (ret < 0)
+ goto err;
+
+ ret = qca8k_mii_read_hi(bus, phy_id, regnum + 1, &hi);
+ if (ret < 0)
+ goto err;
+
+ *val = lo | hi;
+
+err:
+ return ret;
+}
+
+static void
+qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+{
+ if (qca8k_mii_write_lo(bus, phy_id, regnum, val) < 0)
+ return;
+
+ qca8k_mii_write_hi(bus, phy_id, regnum + 1, val);
}
static int
@@ -146,7 +173,16 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
command = get_unaligned_le32(&mgmt_ethhdr->command);
cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
+
len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
+ /* Special case for len of 15 as this is the max value for len and needs to
+ * be incremented before converting it from words to bytes.
+ */
+ if (len == 15)
+ len++;
+
+ /* We can ignore odd values; we always round them up in the alloc function. */
+ len *= sizeof(u16);
/* Make sure the seq matches the requested packet */
if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
@@ -193,17 +229,33 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *
if (!skb)
return NULL;
- /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte
- * Actually for some reason the steps are:
- * 0: nothing
- * 1-4: first 4 byte
- * 5-6: first 12 byte
- * 7-15: all 16 byte
+ /* Hdr mgmt length value is in steps of word size.
+ * As an example, to process 4 bytes of data the correct length to set is 2.
+ * To process 8 bytes set 4, 12 bytes 6, 16 bytes 8...
+ *
+ * Odd values will always return the next size on the ack packet.
+ * (a length of 3 (6 bytes) will always return 8 bytes of data)
+ *
+ * This means that a value of 15 (0xf) actually means reading/writing 32 bytes
+ * of data.
+ *
+ * To correctly calculate the length we divide the requested len by the word
+ * size and round up.
+ * In the ack function we can skip the odd check as we already handle the
+ * case here.
*/
- if (len == 16)
- real_len = 15;
- else
- real_len = len;
+ real_len = DIV_ROUND_UP(len, sizeof(u16));
+
+ /* We check if the resulting len is odd and round up another time to
+ * the next size. (a length of 3 will be increased to 4, as the switch will
+ * always return 8 bytes)
+ */
+ if (real_len % sizeof(u16) != 0)
+ real_len++;
+
+ /* Max reg value is 0xf (15) but the switch will always return the next size (32 bytes) */
+ if (real_len == 16)
+ real_len--;
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb->len);
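/* Editor's sketch (assumption): the length-field encoding described in
 * the comment above, and its inverse used by the ack handler earlier.
 * The field counts 16-bit words, odd counts round up to the next even
 * size, and the maximum value 15 stands for a full 32 bytes. */
static u32 example_encode_mgmt_len(u32 bytes)
{
	u32 words = DIV_ROUND_UP(bytes, sizeof(u16));

	if (words % 2)		/* odd word counts return the next size */
		words++;
	if (words == 16)	/* 32 bytes can only be encoded as 0xf */
		words--;
	return words;		/* 4 bytes -> 2, 6 -> 4, 32 -> 15 */
}

static u32 example_decode_mgmt_len(u32 field)
{
	if (field == 15)	/* max field value really means 16 words */
		field++;
	return field * sizeof(u16);	/* words back to bytes */
}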
@@ -417,7 +469,7 @@ qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
if (ret < 0)
goto exit;
- qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
@@ -450,7 +502,7 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_
val &= ~mask;
val |= write_val;
- qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
@@ -688,9 +740,9 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
qca8k_split_addr(reg, &r1, &r2, &page);
- ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
+ ret = read_poll_timeout(qca8k_mii_read_hi, ret1, !(val & mask), 0,
QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
- bus, 0x10 | r2, r1, &val);
+ bus, 0x10 | r2, r1 + 1, &val);
/* Check if qca8k_read has failed for a different reason
* before returning -ETIMEDOUT
@@ -725,14 +777,14 @@ qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
if (ret)
goto exit;
- qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
exit:
/* even if the busy_wait times out, try to clear the MASTER_EN */
- qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
+ qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
mutex_unlock(&bus->mdio_lock);
@@ -762,18 +814,18 @@ qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
if (ret)
goto exit;
- qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+ qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
if (ret)
goto exit;
- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
+ ret = qca8k_mii_read_lo(bus, 0x10 | r2, r1, &val);
exit:
/* even if the busy_wait times out, try to clear the MASTER_EN */
- qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
+ qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
mutex_unlock(&bus->mdio_lock);
@@ -1943,8 +1995,6 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
}
priv->mdio_cache.page = 0xffff;
- priv->mdio_cache.lo = 0xffff;
- priv->mdio_cache.hi = 0xffff;
/* Check the detected switch id */
ret = qca8k_read_switch_id(priv);
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 0b7a5cb12321..03514f7a20be 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -375,11 +375,6 @@ struct qca8k_mdio_cache {
* mdio writes
*/
u16 page;
-/* lo and hi can also be cached and from Documentation we can skip one
- * extra mdio write if lo or hi is didn't change.
- */
- u16 lo;
- u16 hi;
};
struct qca8k_pcs {
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 8c8b4c88c7de..451c3a1b6255 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -2400,29 +2400,18 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return -EOPNOTSUPP;
}
- switch (func) {
- case ENA_ADMIN_TOEPLITZ:
- if (key) {
- if (key_len != sizeof(hash_key->key)) {
- netdev_err(ena_dev->net_device,
- "key len (%u) doesn't equal the supported size (%zu)\n",
- key_len, sizeof(hash_key->key));
- return -EINVAL;
- }
- memcpy(hash_key->key, key, key_len);
- rss->hash_init_val = init_val;
- hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
+ if ((func == ENA_ADMIN_TOEPLITZ) && key) {
+ if (key_len != sizeof(hash_key->key)) {
+ netdev_err(ena_dev->net_device,
+ "key len (%u) doesn't equal the supported size (%zu)\n",
+ key_len, sizeof(hash_key->key));
+ return -EINVAL;
}
- break;
- case ENA_ADMIN_CRC32:
- rss->hash_init_val = init_val;
- break;
- default:
- netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n",
- func);
- return -EINVAL;
+ memcpy(hash_key->key, key, key_len);
+ hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
}
+ rss->hash_init_val = init_val;
old_func = rss->hash_func;
rss->hash_func = func;
rc = ena_com_set_hash_function(ena_dev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 48ae6d810f8f..8da79eedc057 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -887,11 +887,7 @@ static int ena_set_tunable(struct net_device *netdev,
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
len = *(u32 *)data;
- if (len > adapter->netdev->mtu) {
- ret = -EINVAL;
- break;
- }
- adapter->rx_copybreak = len;
+ ret = ena_set_rx_copybreak(adapter, len);
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a95529a69cbb..e8ad5ea31aff 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -374,9 +374,9 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
+ u32 verdict = ENA_XDP_PASS;
struct bpf_prog *xdp_prog;
struct ena_ring *xdp_ring;
- u32 verdict = XDP_PASS;
struct xdp_frame *xdpf;
u64 *xdp_stat;
@@ -393,7 +393,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
if (unlikely(!xdpf)) {
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = XDP_ABORTED;
+ verdict = ENA_XDP_DROP;
break;
}
@@ -409,29 +409,35 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
spin_unlock(&xdp_ring->xdp_tx_lock);
xdp_stat = &rx_ring->rx_stats.xdp_tx;
+ verdict = ENA_XDP_TX;
break;
case XDP_REDIRECT:
if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
xdp_stat = &rx_ring->rx_stats.xdp_redirect;
+ verdict = ENA_XDP_REDIRECT;
break;
}
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = XDP_ABORTED;
+ verdict = ENA_XDP_DROP;
break;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
break;
case XDP_DROP:
xdp_stat = &rx_ring->rx_stats.xdp_drop;
+ verdict = ENA_XDP_DROP;
break;
case XDP_PASS:
xdp_stat = &rx_ring->rx_stats.xdp_pass;
+ verdict = ENA_XDP_PASS;
break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_invalid;
+ verdict = ENA_XDP_DROP;
}
ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
@@ -512,16 +518,18 @@ static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
struct bpf_prog *prog,
int first, int count)
{
+ struct bpf_prog *old_bpf_prog;
struct ena_ring *rx_ring;
int i = 0;
for (i = first; i < count; i++) {
rx_ring = &adapter->rx_ring[i];
- xchg(&rx_ring->xdp_bpf_prog, prog);
- if (prog) {
+ old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
+
+ if (!old_bpf_prog && prog) {
ena_xdp_register_rxq_info(rx_ring);
rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
- } else {
+ } else if (old_bpf_prog && !prog) {
ena_xdp_unregister_rxq_info(rx_ring);
rx_ring->rx_headroom = NET_SKB_PAD;
}
@@ -672,6 +680,7 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
ring->ena_dev = adapter->ena_dev;
ring->per_napi_packets = 0;
ring->cpu = 0;
+ ring->numa_node = 0;
ring->no_interrupt_event_cnt = 0;
u64_stats_init(&ring->syncp);
}
@@ -775,6 +784,7 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
tx_ring->cpu = ena_irq->cpu;
+ tx_ring->numa_node = node;
return 0;
err_push_buf_intermediate_buf:
@@ -907,6 +917,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
rx_ring->cpu = ena_irq->cpu;
+ rx_ring->numa_node = node;
return 0;
}
@@ -1619,12 +1630,12 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
* we expect, then we simply drop it
*/
if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
- return XDP_DROP;
+ return ENA_XDP_DROP;
ret = ena_xdp_execute(rx_ring, xdp);
/* The xdp program might expand the headers */
- if (ret == XDP_PASS) {
+ if (ret == ENA_XDP_PASS) {
rx_info->page_offset = xdp->data - xdp->data_hard_start;
rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
}
@@ -1663,7 +1674,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
do {
- xdp_verdict = XDP_PASS;
+ xdp_verdict = ENA_XDP_PASS;
skb = NULL;
ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
ena_rx_ctx.max_bufs = rx_ring->sgl_size;
@@ -1691,7 +1702,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
/* allocate skb and fill it */
- if (xdp_verdict == XDP_PASS)
+ if (xdp_verdict == ENA_XDP_PASS)
skb = ena_rx_skb(rx_ring,
rx_ring->ena_bufs,
ena_rx_ctx.descs,
@@ -1709,14 +1720,15 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
/* Packet was passed for transmission, unmap it
* from RX side.
*/
- if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) {
+ if (xdp_verdict & ENA_XDP_FORWARDED) {
ena_unmap_rx_buff(rx_ring,
&rx_ring->rx_buffer_info[req_id]);
rx_ring->rx_buffer_info[req_id].page = NULL;
}
}
- if (xdp_verdict != XDP_PASS) {
+ if (xdp_verdict != ENA_XDP_PASS) {
xdp_flags |= xdp_verdict;
+ total_len += ena_rx_ctx.ena_bufs[0].len;
res_budget--;
continue;
}
@@ -1760,7 +1772,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ena_refill_rx_bufs(rx_ring, refill_required);
}
- if (xdp_flags & XDP_REDIRECT)
+ if (xdp_flags & ENA_XDP_REDIRECT)
xdp_do_flush_map();
return work_done;
@@ -1814,8 +1826,9 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
struct ena_ring *rx_ring)
{
+ u32 rx_interval = tx_ring->smoothed_interval;
struct ena_eth_io_intr_reg intr_reg;
- u32 rx_interval = 0;
+
/* Rx ring can be NULL for XDP tx queues which don't have an
* accompanying rx_ring pair.
*/
@@ -1853,20 +1866,27 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
if (likely(tx_ring->cpu == cpu))
goto out;
+ tx_ring->cpu = cpu;
+ if (rx_ring)
+ rx_ring->cpu = cpu;
+
numa_node = cpu_to_node(cpu);
+
+ if (likely(tx_ring->numa_node == numa_node))
+ goto out;
+
put_cpu();
if (numa_node != NUMA_NO_NODE) {
ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
- if (rx_ring)
+ tx_ring->numa_node = numa_node;
+ if (rx_ring) {
+ rx_ring->numa_node = numa_node;
ena_com_update_numa_node(rx_ring->ena_com_io_cq,
numa_node);
+ }
}
- tx_ring->cpu = cpu;
- if (rx_ring)
- rx_ring->cpu = cpu;
-
return;
out:
put_cpu();
@@ -1987,11 +2007,10 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
ena_adjust_adaptive_rx_intr_moderation(ena_napi);
+ ena_update_ring_numa_node(tx_ring, rx_ring);
ena_unmask_interrupt(tx_ring, rx_ring);
}
- ena_update_ring_numa_node(tx_ring, rx_ring);
-
ret = rx_work_done;
} else {
ret = budget;
@@ -2376,7 +2395,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
ctx.msix_vector = msix_vector;
ctx.queue_size = tx_ring->ring_size;
- ctx.numa_node = cpu_to_node(tx_ring->cpu);
+ ctx.numa_node = tx_ring->numa_node;
rc = ena_com_create_io_queue(ena_dev, &ctx);
if (rc) {
@@ -2444,7 +2463,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
ctx.msix_vector = msix_vector;
ctx.queue_size = rx_ring->ring_size;
- ctx.numa_node = cpu_to_node(rx_ring->cpu);
+ ctx.numa_node = rx_ring->numa_node;
rc = ena_com_create_io_queue(ena_dev, &ctx);
if (rc) {
@@ -2805,6 +2824,24 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
return dev_was_up ? ena_up(adapter) : 0;
}
+int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)
+{
+ struct ena_ring *rx_ring;
+ int i;
+
+ if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE))
+ return -EINVAL;
+
+ adapter->rx_copybreak = rx_copybreak;
+
+ for (i = 0; i < adapter->num_io_queues; i++) {
+ rx_ring = &adapter->rx_ring[i];
+ rx_ring->rx_copybreak = rx_copybreak;
+ }
+
+ return 0;
+}
+
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 1bdce99bf688..2cb141079474 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -262,9 +262,11 @@ struct ena_ring {
bool disable_meta_caching;
u16 no_interrupt_event_cnt;
- /* cpu for TPH */
+ /* cpu and NUMA for TPH */
int cpu;
- /* number of tx/rx_buffer_info's entries */
+ int numa_node;
+
+ /* number of tx/rx_buffer_info's entries */
int ring_size;
enum ena_admin_placement_policy_type tx_mem_queue_type;
@@ -392,6 +394,8 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
+int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak);
+
int ena_get_sset_count(struct net_device *netdev, int sset);
static inline void ena_reset_device(struct ena_adapter *adapter,
@@ -409,6 +413,15 @@ enum ena_xdp_errors_t {
ENA_XDP_NO_ENOUGH_QUEUES,
};
+enum ENA_XDP_ACTIONS {
+ ENA_XDP_PASS = 0,
+ ENA_XDP_TX = BIT(0),
+ ENA_XDP_REDIRECT = BIT(1),
+ ENA_XDP_DROP = BIT(2)
+};
+
+#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
+
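/* Editor's sketch (assumption): why the driver-private verdicts above
 * are single bits rather than the 0..4 XDP_* enum values - per-frame
 * verdicts can be OR-ed into one flags word and whole groups tested
 * with a mask, as the rx path does with ENA_XDP_FORWARDED and
 * ENA_XDP_REDIRECT. */
static inline bool example_needs_rx_unmap(u32 verdict)
{
	return !!(verdict & ENA_XDP_FORWARDED);	/* TX or REDIRECT */
}

static inline bool example_needs_map_flush(u32 xdp_flags)
{
	return !!(xdp_flags & ENA_XDP_REDIRECT);
}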
static inline bool ena_xdp_present(struct ena_adapter *adapter)
{
return !!adapter->xdp_bpf_prog;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 7b666106feee..614c0278419b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1064,6 +1064,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+ tasklet_kill(&pdata->tasklet_dev);
+ tasklet_kill(&pdata->tasklet_ecc);
+
if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 22d4fc547a0a..a9ccc4258ee5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -447,8 +447,10 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
xgbe_i2c_disable(pdata);
xgbe_i2c_clear_all_interrupts(pdata);
- if (pdata->dev_irq != pdata->i2c_irq)
+ if (pdata->dev_irq != pdata->i2c_irq) {
devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
+ tasklet_kill(&pdata->tasklet_i2c);
+ }
}
static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4e97b4869522..0c5c1b155683 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1390,8 +1390,10 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
/* Disable auto-negotiation */
xgbe_an_disable_all(pdata);
- if (pdata->dev_irq != pdata->an_irq)
+ if (pdata->dev_irq != pdata->an_irq) {
devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+ tasklet_kill(&pdata->tasklet_an);
+ }
pdata->phy_if.phy_impl.stop(pdata);
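/* Editor's sketch (assumption): the teardown ordering applied by the
 * three xgbe changes above - release the interrupt first so its
 * handler can no longer schedule the tasklet, then tasklet_kill() to
 * wait out any instance already pending or running. */
static void example_irq_tasklet_teardown(struct xgbe_prv_data *pdata)
{
	devm_free_irq(pdata->dev, pdata->an_irq, pdata);	/* no new schedules */
	tasklet_kill(&pdata->tasklet_an);			/* drain pending work */
}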
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index d91fdb0c2649..2cf96892e565 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -2784,17 +2784,11 @@ static int bcm_enet_shared_probe(struct platform_device *pdev)
return 0;
}
-static int bcm_enet_shared_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
/* this "shared" driver is needed because both macs share a single
* address space
*/
struct platform_driver bcm63xx_enet_shared_driver = {
.probe = bcm_enet_shared_probe,
- .remove = bcm_enet_shared_remove,
.driver = {
.name = "bcm63xx_enet_shared",
.owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4c7d07c684c4..240a7e8a7652 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -991,10 +991,9 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
dma_addr -= bp->rx_dma_offset;
dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
- bp->rx_dma_offset);
+ skb = build_skb(page_address(page), PAGE_SIZE);
if (!skb) {
- __free_page(page);
+ page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
}
skb_mark_for_recycle(skb);
@@ -1032,7 +1031,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
if (!skb) {
- __free_page(page);
+ page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
}
@@ -1925,7 +1924,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
dma_addr = rx_buf->mapping;
if (bnxt_xdp_attached(bp, rxr)) {
- bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
+ bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
if (agg_bufs) {
u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
cp_cons, agg_bufs,
@@ -1940,7 +1939,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (xdp_active) {
- if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
+ if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
rc = 1;
goto next_rx;
}
@@ -3969,8 +3968,10 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
if (BNXT_RX_PAGE_MODE(bp)) {
- rx_space = BNXT_PAGE_MODE_BUF_SIZE;
- rx_size = BNXT_MAX_PAGE_MODE_MTU;
+ rx_space = PAGE_SIZE;
+ rx_size = PAGE_SIZE -
+ ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
} else {
rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
rx_space = rx_size + NET_SKB_PAD +
@@ -5398,15 +5399,16 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
- if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) {
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
+ } else {
req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
req->enables |=
cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+ req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
}
- /* thresholds not implemented in firmware yet */
- req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
- req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
return hwrm_req_send(bp, req);
}
@@ -13591,7 +13593,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
bp = netdev_priv(dev);
- SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
bp->board_idx = ent->driver_data;
bp->msg_enable = BNXT_DEF_MSG_ENABLE;
bnxt_set_max_func_irqs(bp, max_irqs);
@@ -13599,6 +13600,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (bnxt_vf_pciid(bp->board_idx))
bp->flags |= BNXT_FLAG_VF;
+ /* No devlink port registration in case of a VF */
+ if (BNXT_PF(bp))
+ SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
+
if (pdev->msix_cap)
bp->flags |= BNXT_FLAG_MSIX_CAP;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 41c6dd0ae447..5163ef4a49ea 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -591,12 +591,20 @@ struct nqe_cn {
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
#define BNXT_MAX_MTU 9500
-#define BNXT_PAGE_MODE_BUF_SIZE \
+
+/* First RX buffer page in XDP multi-buf mode
+ *
+ * +-------------------------------------------------------------------------+
+ * | XDP_PACKET_HEADROOM | bp->rx_buf_use_size | skb_shared_info|
+ * | (bp->rx_dma_offset) | | |
+ * +-------------------------------------------------------------------------+
+ */
+#define BNXT_MAX_PAGE_MODE_MTU_SBUF \
((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \
XDP_PACKET_HEADROOM)
#define BNXT_MAX_PAGE_MODE_MTU \
- BNXT_PAGE_MODE_BUF_SIZE - \
- SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info))
+ (BNXT_MAX_PAGE_MODE_MTU_SBUF - \
+ SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
#define BNXT_MIN_PKT_SIZE 52
@@ -2134,7 +2142,6 @@ struct bnxt {
#define BNXT_DUMP_CRASH 1
struct bpf_prog *xdp_prog;
- u8 xdp_has_frags;
struct bnxt_ptp_cfg *ptp_cfg;
u8 ptp_all_rx_tstamp;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c3065ec0a479..36d5202c0aee 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -177,7 +177,7 @@ bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
}
void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
- u16 cons, u8 **data_ptr, unsigned int *len,
+ u16 cons, u8 *data_ptr, unsigned int len,
struct xdp_buff *xdp)
{
struct bnxt_sw_rx_bd *rx_buf;
@@ -191,13 +191,10 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
offset = bp->rx_offset;
mapping = rx_buf->mapping - bp->rx_dma_offset;
- dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
-
- if (bp->xdp_has_frags)
- buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
+ dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
- xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
+ xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
}
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
@@ -222,7 +219,8 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event)
+ struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
struct bnxt_tx_ring_info *txr;
@@ -255,8 +253,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event &= ~BNXT_RX_EVENT;
*len = xdp.data_end - xdp.data;
- if (orig_data != xdp.data)
+ if (orig_data != xdp.data) {
offset = xdp.data - xdp.data_hard_start;
+ *data_ptr = xdp.data_hard_start + offset;
+ }
switch (act) {
case XDP_PASS:
@@ -401,10 +401,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
}
- if (prog) {
+ if (prog)
tx_xdp = bp->rx_nr_rings;
- bp->xdp_has_frags = prog->aux->xdp_has_frags;
- }
tc = netdev_get_num_tc(dev);
if (!tc)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 505911ae095d..ea430d6961df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -18,8 +18,8 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct xdp_buff xdp, struct page *page, unsigned int *len,
- u8 *event);
+ struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ unsigned int *len, u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame **frames, u32 flags);
@@ -27,7 +27,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
- u16 cons, u8 **data_ptr, unsigned int *len,
+ u16 cons, u8 *data_ptr, unsigned int len,
struct xdp_buff *xdp);
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
index 91f02c505028..b307bef4dc29 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
@@ -127,11 +127,6 @@ static int enetc_ierb_probe(struct platform_device *pdev)
return 0;
}
-static int enetc_ierb_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static const struct of_device_id enetc_ierb_match[] = {
{ .compatible = "fsl,ls1028a-enetc-ierb", },
{},
@@ -144,7 +139,6 @@ static struct platform_driver enetc_ierb_driver = {
.of_match_table = enetc_ierb_match,
},
.probe = enetc_ierb_probe,
- .remove = enetc_ierb_remove,
};
module_platform_driver(enetc_ierb_driver);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index d00bae15a901..d528ca681b6f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1430,7 +1430,7 @@ int dtsec_initialization(struct mac_device *mac_dev,
dtsec->dtsec_drv_param->tx_pad_crc = true;
phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
- if (!phy_node || of_device_is_available(phy_node)) {
+ if (!phy_node || !of_device_is_available(phy_node)) {
of_node_put(phy_node);
err = -EINVAL;
dev_err_probe(mac_dev->dev, err,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 0ec5730b1788..b4c4fb873568 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3855,18 +3855,16 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
return 0;
}
-static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
+static void hns3_checksum_complete(struct hns3_enet_ring *ring,
struct sk_buff *skb, u32 ptype, u16 csum)
{
if (ptype == HNS3_INVALID_PTYPE ||
hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
- return false;
+ return;
hns3_ring_stats_update(ring, csum_complete);
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)csum);
-
- return true;
}
static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info,
@@ -3926,8 +3924,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
HNS3_RXD_PTYPE_S);
- if (hns3_checksum_complete(ring, skb, ptype, csum))
- return;
+ hns3_checksum_complete(ring, skb, ptype, csum);
/* check if hardware has done checksum */
if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
@@ -3936,6 +3933,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
BIT(HNS3_RXD_OL3E_B) |
BIT(HNS3_RXD_OL4E_B)))) {
+ skb->ip_summed = CHECKSUM_NONE;
hns3_ring_stats_update(ring, l3l4_csum_err);
return;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 4e54f91f7a6c..07ad5f35219e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3910,9 +3910,17 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
return ret;
}
- if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ if (!reset ||
+ !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state))
continue;
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
+ hdev->reset_type == HNAE3_FUNC_RESET) {
+ set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
+ &vport->need_notify);
+ continue;
+ }
+
/* Inform VF to process the reset.
* hclge_inform_reset_assert_to_vf may fail if VF
* driver is not loaded.
@@ -4609,18 +4617,25 @@ static void hclge_reset_service_task(struct hclge_dev *hdev)
static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
+#define HCLGE_ALIVE_SECONDS_NORMAL 8
+
+ unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
int i;
/* start from vport 1; the PF is always alive */
for (i = 1; i < hdev->num_alloc_vport; i++) {
struct hclge_vport *vport = &hdev->vport[i];
- if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
+ if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
+ !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ continue;
+ if (time_after(jiffies, vport->last_active_jiffies +
+ alive_time)) {
clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
-
- /* If vf is not alive, set to default value */
- if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
- vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ dev_warn(&hdev->pdev->dev,
+ "VF %u heartbeat timeout\n",
+ i - HCLGE_VF_VPORT_START_NUM);
+ }
}
}
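/* Editor's sketch (assumption): the liveness test above, factored out.
 * time_after() copes with jiffies wraparound, which a plain ">"
 * comparison would not; HCLGE_ALIVE_SECONDS_NORMAL is the 8 s window
 * defined inside the function above. */
static bool example_vf_heartbeat_expired(unsigned long last_active_jiffies)
{
	return time_after(jiffies,
			  last_active_jiffies + HCLGE_ALIVE_SECONDS_NORMAL * HZ);
}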
@@ -8064,9 +8079,11 @@ int hclge_vport_start(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
+ set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
vport->last_active_jiffies = jiffies;
+ vport->need_notify = 0;
if (test_bit(vport->vport_id, hdev->vport_config_block)) {
if (vport->vport_id) {
@@ -8084,7 +8101,9 @@ int hclge_vport_start(struct hclge_vport *vport)
void hclge_vport_stop(struct hclge_vport *vport)
{
+ clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->need_notify = 0;
}
static int hclge_client_start(struct hnae3_handle *handle)
@@ -9208,7 +9227,8 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
return 0;
}
- dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %s, will be active after VF reset\n",
vf, format_mac_addr);
return 0;
}
@@ -10465,12 +10485,16 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
* VLAN state.
*/
- if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
- test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
- (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
- vport->vport_id,
- state, &vlan_info);
-
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+ vport->vport_id,
+ state,
+ &vlan_info);
+ else
+ set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
+ &vport->need_notify);
+ }
return 0;
}
@@ -11941,7 +11965,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev)
int i;
for (i = 0; i < hdev->num_alloc_vport; i++) {
- hclge_vport_stop(vport);
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
vport++;
}
}
@@ -12754,60 +12778,71 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
return ret;
}
-static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
{
- struct hclge_vport *vport = &hdev->vport[0];
struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
+ bool uc_en = false;
+ bool mc_en = false;
u8 tmp_flags;
+ bool bc_en;
int ret;
- u16 i;
if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
vport->last_promisc_flags = vport->overflow_promisc_flags;
}
- if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
+ if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state))
+ return 0;
+
+ /* for PF */
+ if (!vport->vport_id) {
tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
tmp_flags & HNAE3_MPE);
- if (!ret) {
- clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
- &vport->state);
+ if (!ret)
set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
&vport->state);
- }
+ else
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state);
+ return ret;
}
- for (i = 1; i < hdev->num_alloc_vport; i++) {
- bool uc_en = false;
- bool mc_en = false;
- bool bc_en;
+ /* for VF */
+ if (vport->vf_info.trusted) {
+ uc_en = vport->vf_info.request_uc_en > 0 ||
+ vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
+ mc_en = vport->vf_info.request_mc_en > 0 ||
+ vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
+ }
+ bc_en = vport->vf_info.request_bc_en > 0;
- vport = &hdev->vport[i];
+ ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
+ mc_en, bc_en);
+ if (ret) {
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ return ret;
+ }
+ hclge_set_vport_vlan_fltr_change(vport);
- if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
- &vport->state))
- continue;
+ return 0;
+}
- if (vport->vf_info.trusted) {
- uc_en = vport->vf_info.request_uc_en > 0 ||
- vport->overflow_promisc_flags &
- HNAE3_OVERFLOW_UPE;
- mc_en = vport->vf_info.request_mc_en > 0 ||
- vport->overflow_promisc_flags &
- HNAE3_OVERFLOW_MPE;
- }
- bc_en = vport->vf_info.request_bc_en > 0;
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int ret;
+ u16 i;
- ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
- mc_en, bc_en);
- if (ret) {
- set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
- &vport->state);
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+
+ ret = hclge_sync_vport_promisc_mode(vport);
+ if (ret)
return;
- }
- hclge_set_vport_vlan_fltr_change(vport);
}
}
@@ -12944,6 +12979,11 @@ static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
struct hclge_vlan_info vlan_info;
int ret;
+ clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->need_notify = 0;
+ vport->mps = 0;
+
/* after disabling SR-IOV, clean the VF rate configured by the PF */
ret = hclge_tm_qs_shaper_cfg(vport, 0);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 495b639b0dc2..13f23d606e77 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -995,9 +995,15 @@ enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
HCLGE_VPORT_STATE_PROMISC_CHANGE,
HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ HCLGE_VPORT_STATE_INITED,
HCLGE_VPORT_STATE_MAX
};
+enum HCLGE_VPORT_NEED_NOTIFY {
+ HCLGE_VPORT_NEED_NOTIFY_RESET,
+ HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
+};
+
struct hclge_vlan_info {
u16 vlan_proto; /* so far support 802.1Q only */
u16 qos;
@@ -1044,6 +1050,7 @@ struct hclge_vport {
struct hnae3_handle roce;
unsigned long state;
+ unsigned long need_notify;
unsigned long last_active_jiffies;
u32 mps; /* Max packet size */
struct hclge_vf_info vf_info;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index a7b06c63143c..04ff9bf12185 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -124,17 +124,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
return status;
}
+static int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type)
+{
+ __le16 msg_data;
+ u8 dest_vfid;
+
+ dest_vfid = (u8)vport->vport_id;
+ msg_data = cpu_to_le16(reset_type);
+
+ /* send this requested info to VF */
+ return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data),
+ HCLGE_MBX_ASSERTING_RESET, dest_vfid);
+}
+
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
- __le16 msg_data;
u16 reset_type;
- u8 dest_vfid;
BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);
- dest_vfid = (u8)vport->vport_id;
-
if (hdev->reset_type == HNAE3_FUNC_RESET)
reset_type = HNAE3_VF_PF_FUNC_RESET;
else if (hdev->reset_type == HNAE3_FLR_RESET)
@@ -142,11 +151,7 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
else
reset_type = HNAE3_VF_FUNC_RESET;
- msg_data = cpu_to_le16(reset_type);
-
- /* send this requested info to VF */
- return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data),
- HCLGE_MBX_ASSERTING_RESET, dest_vfid);
+ return hclge_inform_vf_reset(vport, reset_type);
}
static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
@@ -652,9 +657,56 @@ static int hclge_reset_vf(struct hclge_vport *vport)
return hclge_func_reset_cmd(hdev, vport->vport_id);
}
+static void hclge_notify_vf_config(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_port_base_vlan_config *vlan_cfg;
+ int ret;
+
+ hclge_push_vf_link_status(vport);
+ if (test_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, &vport->need_notify)) {
+ ret = hclge_inform_vf_reset(vport, HNAE3_VF_PF_FUNC_RESET);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to inform VF %u reset!",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+ return;
+ }
+ vport->need_notify = 0;
+ return;
+ }
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
+ test_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify)) {
+ vlan_cfg = &vport->port_base_vlan_cfg;
+ ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+ vport->vport_id,
+ vlan_cfg->state,
+ &vlan_cfg->vlan_info);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to inform VF %u port base vlan!",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+ return;
+ }
+ clear_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify);
+ }
+}
+
static void hclge_vf_keep_alive(struct hclge_vport *vport)
{
+ struct hclge_dev *hdev = vport->back;
+
vport->last_active_jiffies = jiffies;
+
+ if (test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) &&
+ !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+ set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ dev_info(&hdev->pdev->dev, "VF %u is alive!",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+ hclge_notify_vf_config(vport);
+ }
}
static int hclge_set_vf_mtu(struct hclge_vport *vport,
@@ -954,6 +1006,7 @@ static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param)
hclge_rm_vport_all_mac_table(param->vport, true,
HCLGE_MAC_ADDR_MC);
hclge_rm_vport_all_vlan_table(param->vport, true);
+ param->vport->mps = 0;
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index db6f7cdba958..e84e5be8e59e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2767,7 +2767,8 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
struct pci_dev *pdev = hdev->pdev;
int ret = 0;
- if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
+ if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
+ hdev->reset_type == HNAE3_FLR_RESET) &&
test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
hclgevf_misc_irq_uninit(hdev);
hclgevf_uninit_msi(hdev);
@@ -3129,7 +3130,7 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
hclgevf_update_rss_size(handle, new_tqps_num);
- hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map,
+ hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map,
tc_offset, tc_valid, tc_size);
ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
tc_valid, tc_size);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index c4e451ef7942..adc02adef83a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3850,7 +3850,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
- be32_to_cpu(match.mask->dst));
+ be32_to_cpu(match.mask->src));
return -EINVAL;
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index b5a7f246d230..43e199b5b513 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -363,6 +363,7 @@ ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
/* Send the data out to a hardware port */
write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
if (!write_buf) {
+ kfree(cmd_buf);
err = -ENOMEM;
goto exit;
}
@@ -460,6 +461,9 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]),
GFP_KERNEL);
+ if (!pf->gnss_tty_port[i])
+ goto err_out;
+
pf->gnss_serial[i] = NULL;
tty_port_init(pf->gnss_tty_port[i]);
@@ -469,21 +473,23 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
err = tty_register_driver(tty_driver);
if (err) {
dev_err(dev, "Failed to register TTY driver err=%d\n", err);
-
- for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
- tty_port_destroy(pf->gnss_tty_port[i]);
- kfree(pf->gnss_tty_port[i]);
- }
- kfree(ttydrv_name);
- tty_driver_kref_put(pf->ice_gnss_tty_driver);
-
- return NULL;
+ goto err_out;
}
for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++)
dev_info(dev, "%s%d registered\n", ttydrv_name, i);
return tty_driver;
+
+err_out:
+ while (i--) {
+ tty_port_destroy(pf->gnss_tty_port[i]);
+ kfree(pf->gnss_tty_port[i]);
+ }
+ kfree(ttydrv_name);
+ tty_driver_kref_put(pf->ice_gnss_tty_driver);
+
+ return NULL;
}
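/* Editor's sketch (assumption): the partial-unwind idiom the fix above
 * introduces. "while (i--)" walks back over exactly the entries set up
 * before the failure, so nothing uninitialised is destroyed or freed. */
static int example_init_ports(struct tty_port **ports, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		ports[i] = kzalloc(sizeof(*ports[i]), GFP_KERNEL);
		if (!ports[i])
			goto err_out;
		tty_port_init(ports[i]);
	}
	return 0;

err_out:
	while (i--) {		/* only entries [0, i) were initialised */
		tty_port_destroy(ports[i]);
		kfree(ports[i]);
	}
	return -ENOMEM;
}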
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 907055b77af0..7105de6fb344 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -783,7 +783,7 @@ construct_skb:
static void
ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
- xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
+ page_frag_free(tx_buf->raw_buf);
xdp_ring->xdp_tx_active--;
dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index a7b22639cfcd..e9747ec5ac0b 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -475,7 +475,9 @@
#define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */
#define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */
#define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */
+#define IGC_TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */
#define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */
+#define IGC_TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */
#define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */
#define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */
#define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 1. */
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 8dbb9f903ca7..c34734d432e0 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -322,7 +322,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
ts = ns_to_timespec64(ns);
if (rq->perout.index == 1) {
if (use_freq) {
- tsauxc_mask = IGC_TSAUXC_EN_CLK1;
+ tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1;
tsim_mask = 0;
} else {
tsauxc_mask = IGC_TSAUXC_EN_TT1;
@@ -333,7 +333,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
freqout = IGC_FREQOUT1;
} else {
if (use_freq) {
- tsauxc_mask = IGC_TSAUXC_EN_CLK0;
+ tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0;
tsim_mask = 0;
} else {
tsauxc_mask = IGC_TSAUXC_EN_TT0;
@@ -347,10 +347,12 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
tsauxc = rd32(IGC_TSAUXC);
tsim = rd32(IGC_TSIM);
if (rq->perout.index == 1) {
- tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1);
+ tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 |
+ IGC_TSAUXC_ST1);
tsim &= ~IGC_TSICR_TT1;
} else {
- tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0);
+ tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 |
+ IGC_TSAUXC_ST0);
tsim &= ~IGC_TSICR_TT0;
}
if (on) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 24aa97f993ca..123dca9ce468 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -855,9 +855,11 @@ static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
if (rp_pdev && rp_pdev->subordinate) {
bus = rp_pdev->subordinate->number;
+ pci_dev_put(rp_pdev);
return pci_get_domain_bus_and_slot(0, bus, 0);
}
+ pci_dev_put(rp_pdev);
return NULL;
}
@@ -874,6 +876,7 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
struct ixgbe_adapter *adapter = hw->back;
struct pci_dev *pdev = adapter->pdev;
struct pci_dev *func0_pdev;
+ bool has_mii = false;
/* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices
* are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0
@@ -884,15 +887,16 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
if (func0_pdev) {
if (func0_pdev == pdev)
- return true;
- else
- return false;
+ has_mii = true;
+ goto out;
}
func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
if (func0_pdev == pdev)
- return true;
+ has_mii = true;
- return false;
+out:
+ pci_dev_put(func0_pdev);
+ return has_mii;
}
/**
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index b2b71fe80d61..724df6398bbe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -774,9 +774,9 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
if (enable)
- cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+ cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
else
- cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+ cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index fb2d37676d84..5a20d93004c7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -26,7 +26,6 @@
#define CMR_P2X_SEL_SHIFT 59ULL
#define CMR_P2X_SEL_NIX0 1ULL
#define CMR_P2X_SEL_NIX1 2ULL
-#define CMR_EN BIT_ULL(55)
#define DATA_PKT_TX_EN BIT_ULL(53)
#define DATA_PKT_RX_EN BIT_ULL(54)
#define CGX_LMAC_TYPE_SHIFT 40
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index fa8029a94068..eb25e458266c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -589,7 +589,7 @@ int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
u16 pcifunc = req->hdr.pcifunc;
struct mcs_rsrc_map *map;
struct mcs *mcs;
- int rc;
+ int rc = 0;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 9e10e7471b88..88f8772a61cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1012,6 +1012,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
rbpool = cq->rbpool;
free_ptrs = cq->pool_ptrs;
+ get_cpu();
while (cq->pool_ptrs) {
if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
/* Schedule a WQ if we fails to free atleast half of the
@@ -1031,6 +1032,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
+ put_cpu();
cq->refill_task_sched = false;
}
@@ -1368,6 +1370,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
if (err)
goto fail;
+ get_cpu();
/* Allocate pointers and free them to aura/pool */
for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
@@ -1376,18 +1379,24 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
sq = &qset->sq[qidx];
sq->sqb_count = 0;
sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
- if (!sq->sqb_ptrs)
- return -ENOMEM;
+ if (!sq->sqb_ptrs) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
for (ptr = 0; ptr < num_sqbs; ptr++) {
- if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
- return -ENOMEM;
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ if (err)
+ goto err_mem;
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
}
- return 0;
+err_mem:
+ put_cpu();
+ return err ? -ENOMEM : 0;
+
fail:
otx2_mbox_reset(&pfvf->mbox.mbox, 0);
otx2_aura_pool_free(pfvf);
@@ -1426,18 +1435,21 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
if (err)
goto fail;
+ get_cpu();
/* Allocate pointers and free them to aura/pool */
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
for (ptr = 0; ptr < num_ptrs; ptr++) {
- if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
- return -ENOMEM;
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ if (err)
+ goto err_mem;
pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
bufptr + OTX2_HEAD_ROOM);
}
}
-
- return 0;
+err_mem:
+ put_cpu();
+ return err ? -ENOMEM : 0;
fail:
otx2_mbox_reset(&pfvf->mbox.mbox, 0);
otx2_aura_pool_free(pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 86653bb8e403..7f8ffbf79cf7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -758,6 +758,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
otx2_ptp_destroy(vf);
+ otx2_mcam_flow_del(vf);
+ otx2_shutdown_tc(vf);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index d3ca745d107d..c837103a9ee3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -2176,15 +2176,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
return -EINVAL;
}
- cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
- if (!cmd->stats)
- return -ENOMEM;
-
cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
- if (!cmd->pool) {
- err = -ENOMEM;
- goto dma_pool_err;
- }
+ if (!cmd->pool)
+ return -ENOMEM;
err = alloc_cmd_page(dev, cmd);
if (err)
@@ -2268,8 +2262,6 @@ err_free_page:
err_free_pool:
dma_pool_destroy(cmd->pool);
-dma_pool_err:
- kvfree(cmd->stats);
return err;
}
@@ -2282,7 +2274,6 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
destroy_msg_cache(dev);
free_cmd_page(dev, cmd);
dma_pool_destroy(cmd->pool);
- kvfree(cmd->stats);
}
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index ddb197970c22..5bd83c0275f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -468,7 +468,7 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
bool new_state = val.vbool;
if (new_state && !MLX5_CAP_GEN(dev, roce) &&
- !MLX5_CAP_GEN(dev, roce_rw_supported)) {
+ !(MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
return -EOPNOTSUPP;
}
@@ -563,7 +563,7 @@ static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
{
- return (val.vu16 >= 64 && val.vu16 <= 4096) ? 0 : -EINVAL;
+ return (val.vu32 >= 64 && val.vu32 <= 4096) ? 0 : -EINVAL;
}
static const struct devlink_param mlx5_devlink_params[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 5f6f95ad6888..1ae15b8536a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -459,7 +459,11 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
goto unlock;
for (i = 0; i < priv->channels.num; i++) {
- struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
+ struct mlx5e_channel *c = priv->channels.c[i];
+ struct mlx5e_rq *rq;
+
+ rq = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state) ?
+ &c->xskrq : &c->rq;
err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
index 512d43148922..c4378afdec09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -34,12 +34,6 @@ static int police_act_validate(const struct flow_action_entry *act,
return -EOPNOTSUPP;
}
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
index 8d7d761482d2..50b60fd00946 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
@@ -127,6 +127,7 @@ mlx5e_post_meter_add_rule(struct mlx5e_priv *priv,
attr->counter = act_counter;
attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
+ attr->inner_match_level = MLX5_MATCH_NONE;
attr->outer_match_level = MLX5_MATCH_NONE;
attr->chain = 0;
attr->prio = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index a69849e0deed..313df8232db7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -2103,14 +2103,9 @@ out_err:
static void
mlx5_ct_tc_create_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
{
- bool is_fdb = ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB;
struct mlx5_tc_ct_debugfs *ct_dbgfs = &ct_priv->debugfs;
- char dirname[16] = {};
- if (sscanf(dirname, "ct_%s", is_fdb ? "fdb" : "nic") < 0)
- return;
-
- ct_dbgfs->root = debugfs_create_dir(dirname, mlx5_debugfs_get_dev_root(ct_priv->dev));
+ ct_dbgfs->root = debugfs_create_dir("ct", mlx5_debugfs_get_dev_root(ct_priv->dev));
debugfs_create_atomic_t("offloaded", 0400, ct_dbgfs->root,
&ct_dbgfs->stats.offloaded);
debugfs_create_atomic_t("rx_dropped", 0400, ct_dbgfs->root,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index ff73d25bc6eb..2aaf8ab857b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -222,7 +222,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
int err;
list_for_each_entry(flow, flow_list, tmp_list) {
- if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
+ if (!mlx5e_is_offloaded_flow(flow))
continue;
attr = mlx5e_tc_get_encap_attr(flow);
@@ -231,6 +231,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL;
+ /* Clear pkt_reformat before checking the slow path flag: a later
+ * iteration may see the same flow with the slow path flag already
+ * set, and its pkt_reformat still needs to be cleared.
+ */
+ if (flow_flag_test(flow, SLOW))
+ continue;
+
/* update from encap rule to slow path rule */
spec = &flow->attr->parse_attr->spec;
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
index f5b26f5a7de4..054d80c4e65c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
@@ -273,6 +273,11 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
geneve_tlv_option_0_data, be32_to_cpu(opt_data_key));
MLX5_SET(fte_match_set_misc3, misc_3_c,
geneve_tlv_option_0_data, be32_to_cpu(opt_data_mask));
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+ ft_field_support.geneve_tlv_option_0_exist)) {
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, geneve_tlv_option_0_exist);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist);
+ }
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index fd07c4cbfd1d..1f62c702b625 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -88,6 +88,8 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
struct udphdr *udp = (struct udphdr *)(buf);
struct vxlanhdr *vxh;
+ if (tun_key->tun_flags & TUNNEL_VXLAN_OPT)
+ return -EOPNOTSUPP;
vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
*ip_proto = IPPROTO_UDP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 9369a580743e..7f6b940830b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -62,6 +62,7 @@ struct mlx5e_macsec_sa {
u32 enc_key_id;
u32 next_pn;
sci_t sci;
+ ssci_t ssci;
salt_t salt;
struct rhash_head hash;
@@ -358,7 +359,6 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_macsec_obj_attrs obj_attrs;
union mlx5e_macsec_rule *macsec_rule;
- struct macsec_key *key;
int err;
obj_attrs.next_pn = sa->next_pn;
@@ -368,13 +368,9 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
obj_attrs.aso_pdn = macsec->aso.pdn;
obj_attrs.epn_state = sa->epn_state;
- key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key;
-
if (sa->epn_state.epn_enabled) {
- obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) :
- cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
-
- memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
+ obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci);
+ memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt));
}
obj_attrs.replay_window = ctx->secy->replay_window;
@@ -499,10 +495,11 @@ mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
}
static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
- const pn_t *next_pn_halves)
+ const pn_t *next_pn_halves, ssci_t ssci)
{
struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
+ sa->ssci = ssci;
sa->salt = key->salt;
epn_state->epn_enabled = 1;
epn_state->epn_msb = next_pn_halves->upper;
@@ -550,7 +547,8 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
tx_sa->assoc_num = assoc_num;
if (secy->xpn)
- update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves);
+ update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
+ ctx_tx_sa->ssci);
err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
MLX5_ACCEL_OBJ_MACSEC_KEY,
@@ -945,7 +943,8 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
if (ctx->secy->xpn)
- update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves);
+ update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
+ ctx_rx_sa->ssci);
err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
MLX5_ACCEL_OBJ_MACSEC_KEY,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8d36e2de53a9..abcc614b6191 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1305,7 +1305,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
- sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN;
sq->xsk_pool = xsk_pool;
sq->stats = sq->xsk_pool ?
@@ -4084,6 +4084,9 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
struct mlx5e_vlan_table *vlan;
struct mlx5e_params *params;
+ if (!netif_device_present(netdev))
+ return features;
+
vlan = mlx5e_fs_get_vlan(priv->fs);
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 75b9e1528fd2..7d90e5b72854 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -191,7 +191,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
if (err) {
netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
rep->vport, err);
- return;
+ goto out;
}
#define MLX5_GET_CTR(p, x) \
@@ -241,6 +241,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
rep_stats->tx_vport_rdma_multicast_bytes =
MLX5_GET_CTR(out, received_ib_multicast.octets);
+out:
kvfree(out);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index c8820ab22169..3df455f6b168 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -2419,7 +2419,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
priv = mlx5i_epriv(netdev);
tstamp = &priv->tstamp;
- stats = rq->stats;
+ stats = &priv->channel_stats[rq->ix]->rq;
flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
g = (flags_rqpn >> 28) & 3;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9af2aa2922f5..dbadaf166487 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1301,7 +1301,6 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
@@ -1359,8 +1358,10 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
}
mutex_unlock(&tc->t_lock);
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
mlx5e_detach_mod_hdr(priv, flow);
+ }
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(priv->mdev, attr->counter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 60a73990017c..6b4c9ffad95b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -67,6 +67,7 @@ static void esw_acl_egress_lgcy_groups_destroy(struct mlx5_vport *vport)
int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ bool vst_mode_steering = esw_vst_mode_is_steering(esw);
struct mlx5_flow_destination drop_ctr_dst = {};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_fc *drop_counter = NULL;
@@ -77,6 +78,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
*/
int table_size = 2;
int dest_num = 0;
+ int actions_flag;
int err = 0;
if (vport->egress.legacy.drop_counter) {
@@ -119,8 +121,11 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
vport->vport, vport->info.vlan, vport->info.qos);
/* Allowed vlan rule */
+ actions_flag = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ if (vst_mode_steering)
+ actions_flag |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = esw_egress_acl_vlan_create(esw, vport, NULL, vport->info.vlan,
- MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+ actions_flag);
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index b1a5199260f6..093ed86a0acd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -139,11 +139,14 @@ static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport)
int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ bool vst_mode_steering = esw_vst_mode_is_steering(esw);
struct mlx5_flow_destination drop_ctr_dst = {};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_spec *spec = NULL;
struct mlx5_fc *counter = NULL;
+ bool vst_check_cvlan = false;
+ bool vst_push_cvlan = false;
/* The ingress acl table contains 4 groups
* (2 active rules at the same time -
* 1 allow rule from one of the first 3 groups.
@@ -203,7 +206,26 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
goto out;
}
- if (vport->info.vlan || vport->info.qos)
+ if (vport->info.vlan || vport->info.qos) {
+ if (vst_mode_steering)
+ vst_push_cvlan = true;
+ else if (!MLX5_CAP_ESW(esw->dev, vport_cvlan_insert_always))
+ vst_check_cvlan = true;
+ }
+
+ if (vst_check_cvlan || vport->info.spoofchk)
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ /* Create ingress allow rule */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ if (vst_push_cvlan) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ flow_act.vlan[0].prio = vport->info.qos;
+ flow_act.vlan[0].vid = vport->info.vlan;
+ flow_act.vlan[0].ethtype = ETH_P_8021Q;
+ }
+
+ if (vst_check_cvlan)
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
@@ -218,9 +240,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
ether_addr_copy(smac_v, vport->info.mac);
}
- /* Create ingress allow rule */
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
&flow_act, NULL, 0);
if (IS_ERR(vport->ingress.allow_rule)) {
@@ -232,6 +251,9 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
goto out;
}
+ if (!vst_check_cvlan && !vport->info.spoofchk)
+ goto out;
+
memset(&flow_act, 0, sizeof(flow_act));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach drop flow counter */
@@ -257,7 +279,8 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
return 0;
out:
- esw_acl_ingress_lgcy_cleanup(esw, vport);
+ if (err)
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
kvfree(spec);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 527e4bffda8d..0dfd5742c6fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -161,10 +161,17 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
esw_vport_context.vport_cvlan_strip, 1);
if (set_flags & SET_VLAN_INSERT) {
- /* insert only if no vlan in packet */
- MLX5_SET(modify_esw_vport_context_in, in,
- esw_vport_context.vport_cvlan_insert, 1);
-
+ if (MLX5_CAP_ESW(dev, vport_cvlan_insert_always)) {
+ /* insert whether or not a vlan is already present in the packet */
+ MLX5_SET(modify_esw_vport_context_in, in,
+ esw_vport_context.vport_cvlan_insert,
+ MLX5_VPORT_CVLAN_INSERT_ALWAYS);
+ } else {
+ /* insert only if no vlan in packet */
+ MLX5_SET(modify_esw_vport_context_in, in,
+ esw_vport_context.vport_cvlan_insert,
+ MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN);
+ }
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.cvlan_pcp, qos);
MLX5_SET(modify_esw_vport_context_in, in,
@@ -809,6 +816,7 @@ out_free:
static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
+ bool vst_mode_steering = esw_vst_mode_is_steering(esw);
u16 vport_num = vport->vport;
int flags;
int err;
@@ -839,8 +847,9 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
flags = (vport->info.vlan || vport->info.qos) ?
SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
- modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
- vport->info.qos, flags);
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering)
+ modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
+ vport->info.qos, flags);
return 0;
@@ -1848,6 +1857,7 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags)
{
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ bool vst_mode_steering = esw_vst_mode_is_steering(esw);
int err = 0;
if (IS_ERR(evport))
@@ -1855,9 +1865,11 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
if (vlan > 4095 || qos > 7)
return -EINVAL;
- err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
- if (err)
- return err;
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) {
+ err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
+ if (err)
+ return err;
+ }
evport->info.vlan = vlan;
evport->info.qos = qos;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 5a85a5d32be7..92644fbb5081 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -527,6 +527,12 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
+static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
+{
+ return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
+ MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
+}
+
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
u8 vlan_depth)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index e455b215c708..c981fa77f439 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -143,7 +143,7 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
if (mlx5_esw_indir_table_decap_vport(attr))
vport = mlx5_esw_indir_table_decap_vport(attr);
- if (attr && !attr->chain && esw_attr->int_port)
+ if (!attr->chain && esw_attr && esw_attr->int_port)
metadata =
mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
else
@@ -4143,8 +4143,6 @@ int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
}
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
- memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability),
- MLX5_UN_SZ_BYTES(hca_cap_union));
MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1);
err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
@@ -4236,8 +4234,6 @@ int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
}
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
- memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability),
- MLX5_UN_SZ_BYTES(hca_cap_union));
MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);
err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 86ed87d704f7..96417c5feed7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -674,6 +674,13 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
dev = container_of(priv, struct mlx5_core_dev, priv);
devlink = priv_to_devlink(dev);
+ mutex_lock(&dev->intf_state_mutex);
+ if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
+ mlx5_core_err(dev, "health works are not permitted at this stage\n");
+ mutex_unlock(&dev->intf_state_mutex);
+ return;
+ }
+ mutex_unlock(&dev->intf_state_mutex);
enter_error_state(dev, false);
if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
devl_lock(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index c247cca154e9..eff92dc0927c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -90,9 +90,21 @@ static void mlx5i_get_ringparam(struct net_device *dev,
static int mlx5i_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
- struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ struct mlx5i_priv *ipriv = netdev_priv(dev);
+ struct mlx5e_priv *epriv = mlx5i_epriv(dev);
+
+ /* The rtnl lock protects against races between this ethtool op
+ * and sub interface ndo_init/uninit.
+ */
+ ASSERT_RTNL();
+ if (ipriv->num_sub_interfaces > 0) {
+ mlx5_core_warn(epriv->mdev,
+ "can't change number of channels for interfaces with sub interfaces (%u)\n",
+ ipriv->num_sub_interfaces);
+ return -EINVAL;
+ }
- return mlx5e_ethtool_set_channels(priv, ch);
+ return mlx5e_ethtool_set_channels(epriv, ch);
}
static void mlx5i_get_channels(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 7c5c500fd215..911cf4d23964 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -71,6 +71,10 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
params->tunneled_offload_en = false;
+
+ /* CQE compression is not supported for IPoIB */
+ params->rx_cqe_compress_def = false;
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
@@ -156,6 +160,44 @@ void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_dropped = sstats->tx_queue_dropped;
}
+struct net_device *mlx5i_parent_get(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv, *parent_ipriv;
+ struct net_device *parent_dev;
+ int parent_ifindex;
+
+ ipriv = priv->ppriv;
+
+ parent_ifindex = netdev->netdev_ops->ndo_get_iflink(netdev);
+ parent_dev = dev_get_by_index(dev_net(netdev), parent_ifindex);
+ if (!parent_dev)
+ return NULL;
+
+ parent_ipriv = netdev_priv(parent_dev);
+
+ ASSERT_RTNL();
+ parent_ipriv->num_sub_interfaces++;
+
+ ipriv->parent_dev = parent_dev;
+
+ return parent_dev;
+}
+
+void mlx5i_parent_put(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv, *parent_ipriv;
+
+ ipriv = priv->ppriv;
+ parent_ipriv = netdev_priv(ipriv->parent_dev);
+
+ ASSERT_RTNL();
+ parent_ipriv->num_sub_interfaces--;
+
+ dev_put(ipriv->parent_dev);
+}
+
int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 99d46fda9f82..f3f2af972020 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -54,9 +54,11 @@ struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
u32 qpn;
bool sub_interface;
+ u32 num_sub_interfaces;
u32 qkey;
u16 pkey_index;
struct mlx5i_pkey_qpn_ht *qpn_htbl;
+ struct net_device *parent_dev;
char *mlx5e_priv[];
};
@@ -117,5 +119,9 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more);
void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
+/* Reference management for child to parent interfaces. */
+struct net_device *mlx5i_parent_get(struct net_device *netdev);
+void mlx5i_parent_put(struct net_device *netdev);
+
#endif /* CONFIG_MLX5_CORE_IPOIB */
#endif /* __MLX5E_IPOB_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 4d9c9e49645c..03e681297937 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -158,21 +158,28 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
struct mlx5e_priv *priv = mlx5i_epriv(dev);
struct mlx5i_priv *ipriv, *parent_ipriv;
struct net_device *parent_dev;
- int parent_ifindex;
ipriv = priv->ppriv;
- /* Get QPN to netdevice hash table from parent */
- parent_ifindex = dev->netdev_ops->ndo_get_iflink(dev);
- parent_dev = dev_get_by_index(dev_net(dev), parent_ifindex);
+ /* Link to parent */
+ parent_dev = mlx5i_parent_get(dev);
if (!parent_dev) {
mlx5_core_warn(priv->mdev, "failed to get parent device\n");
return -EINVAL;
}
+ if (dev->num_rx_queues < parent_dev->real_num_rx_queues) {
+ mlx5_core_warn(priv->mdev,
+ "failed to create child device with rx queues [%d] less than parent's [%d]\n",
+ dev->num_rx_queues,
+ parent_dev->real_num_rx_queues);
+ mlx5i_parent_put(dev);
+ return -EINVAL;
+ }
+
+ /* Get QPN to netdevice hash table from parent */
parent_ipriv = netdev_priv(parent_dev);
ipriv->qpn_htbl = parent_ipriv->qpn_htbl;
- dev_put(parent_dev);
return mlx5i_dev_init(dev);
}
@@ -184,6 +191,7 @@ static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
{
+ mlx5i_parent_put(netdev);
return mlx5i_dev_cleanup(netdev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 32c3e0a649a7..ad32b80e8501 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -228,6 +228,7 @@ static void mlx5_ldev_free(struct kref *ref)
if (ldev->nb.notifier_call)
unregister_netdevice_notifier_net(&init_net, &ldev->nb);
mlx5_lag_mp_cleanup(ldev);
+ cancel_delayed_work_sync(&ldev->bond_work);
destroy_workqueue(ldev->wq);
mlx5_lag_mpesw_cleanup(ldev);
mutex_destroy(&ldev->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 69cfe60c558a..69318b143268 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -681,7 +681,7 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
static const struct ptp_clock_info mlx5_ptp_clock_info = {
.owner = THIS_MODULE,
.name = "mlx5_ptp",
- .max_adj = 100000000,
+ .max_adj = 50000000,
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 7f5db13e3550..df134f6d32dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -613,7 +613,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
- if (MLX5_CAP_GEN(dev, roce_rw_supported))
+ if (MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))
MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
mlx5_is_roce_on(dev));
@@ -1050,6 +1050,8 @@ err_rl_cleanup:
err_tables_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
+ mlx5_cleanup_clock(dev);
+ mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
err_events_cleanup:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 74cbe53ee9db..b851141e03de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -3,7 +3,12 @@
#include "dr_types.h"
+#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN < 2048)
+/* don't try to optimize STE allocation if the stack is too constraining */
+#define DR_RULE_MAX_STES_OPTIMIZED 0
+#else
#define DR_RULE_MAX_STES_OPTIMIZED 5
+#endif
#define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
@@ -1218,10 +1223,7 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
mlx5dr_domain_nic_unlock(nic_dmn);
- if (unlikely(!hw_ste_arr_is_opt))
- kfree(hw_ste_arr);
-
- return 0;
+ goto out;
free_rule:
dr_rule_clean_rule_members(rule, nic_rule);
@@ -1238,6 +1240,7 @@ remove_from_nic_tbl:
free_hw_ste:
mlx5dr_domain_nic_unlock(nic_dmn);
+out:
if (unlikely(!hw_ste_arr_is_opt))
kfree(hw_ste_arr);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index c22c3ac4e2a1..09e32778b012 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2951,7 +2951,7 @@ struct mlxsw_sp_nexthop_group_info {
gateway:1, /* routes using the group use a gateway */
is_resilient:1;
struct list_head list; /* member in nh_res_grp_list */
- struct mlxsw_sp_nexthop nexthops[0];
+ struct mlxsw_sp_nexthop nexthops[];
#define nh_rif nexthops[0].rif
};
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index 1a61c6cdb077..0050fcb988b7 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -381,7 +381,7 @@ int lan966x_port_pcs_set(struct lan966x_port *port,
}
/* Take PCS out of reset */
- lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(2) |
+ lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000) |
DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
DEV_CLOCK_CFG_LINK_SPEED |
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index f9ebfaafbebc..a8348437dd87 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -1073,6 +1073,9 @@ void lan966x_ptp_deinit(struct lan966x *lan966x)
struct lan966x_port *port;
int i;
+ if (!lan966x->ptp)
+ return;
+
for (i = 0; i < lan966x->num_phys_ports; i++) {
port = lan966x->ports[i];
if (!port)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
index d8dc9fbb81e1..a54c0426a35f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -95,10 +95,7 @@ lan966x_vcap_is2_get_port_keysets(struct net_device *dev, int lookup,
bool found = false;
u32 val;
- /* Check if the port keyset selection is enabled */
val = lan_rd(lan966x, ANA_VCAP_S2_CFG(port->chip_port));
- if (!ANA_VCAP_S2_CFG_ENA_GET(val))
- return -ENOENT;
/* Collect all keysets for the port in a list */
if (l3_proto == ETH_P_ALL)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index d25f4f09faa0..3c5d4fe99373 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -834,7 +834,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
if (err)
goto cleanup_config;
- if (!of_get_mac_address(np, sparx5->base_mac)) {
+ if (of_get_mac_address(np, sparx5->base_mac)) {
dev_info(sparx5->dev, "MAC addr was not set, use random MAC\n");
eth_random_addr(sparx5->base_mac);
sparx5->base_mac[5] = 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index da33f09facb9..432d79d691c2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -617,6 +617,9 @@ struct nfp_net_dp {
* @vnic_no_name: For non-port PF vNIC make ndo_get_phys_port_name return
* -EOPNOTSUPP to keep backwards compatibility (set by app)
* @port: Pointer to nfp_port structure if vNIC is a port
+ * @mc_lock: Protect mc_addrs list
+ * @mc_addrs: List of mc addrs to add to/delete from HW
+ * @mc_work: Work to update mc addrs
* @app_priv: APP private data for this vNIC
*/
struct nfp_net {
@@ -718,6 +721,10 @@ struct nfp_net {
struct nfp_port *port;
+ spinlock_t mc_lock;
+ struct list_head mc_addrs;
+ struct work_struct mc_work;
+
void *app_priv;
};
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 09053373288f..18fc9971f1c8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1334,9 +1334,14 @@ err_unlock:
return err;
}
-static int nfp_net_mc_cfg(struct net_device *netdev, const unsigned char *addr, const u32 cmd)
+struct nfp_mc_addr_entry {
+ u8 addr[ETH_ALEN];
+ u32 cmd;
+ struct list_head list;
+};
+
+static int nfp_net_mc_cfg(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
{
- struct nfp_net *nn = netdev_priv(netdev);
int ret;
ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
@@ -1351,6 +1356,25 @@ static int nfp_net_mc_cfg(struct net_device *netdev, const unsigned char *addr,
return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
+static int nfp_net_mc_prep(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
+{
+ struct nfp_mc_addr_entry *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ ether_addr_copy(entry->addr, addr);
+ entry->cmd = cmd;
+ spin_lock_bh(&nn->mc_lock);
+ list_add_tail(&entry->list, &nn->mc_addrs);
+ spin_unlock_bh(&nn->mc_lock);
+
+ schedule_work(&nn->mc_work);
+
+ return 0;
+}
+
static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
{
struct nfp_net *nn = netdev_priv(netdev);
@@ -1361,12 +1385,35 @@ static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
return -EINVAL;
}
- return nfp_net_mc_cfg(netdev, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD);
+ return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD);
}
static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
{
- return nfp_net_mc_cfg(netdev, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL);
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL);
+}
+
+static void nfp_net_mc_addr_config(struct work_struct *work)
+{
+ struct nfp_net *nn = container_of(work, struct nfp_net, mc_work);
+ struct nfp_mc_addr_entry *entry, *tmp;
+ struct list_head tmp_list;
+
+ INIT_LIST_HEAD(&tmp_list);
+
+ spin_lock_bh(&nn->mc_lock);
+ list_splice_init(&nn->mc_addrs, &tmp_list);
+ spin_unlock_bh(&nn->mc_lock);
+
+ list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
+ if (nfp_net_mc_cfg(nn, entry->addr, entry->cmd))
+ nn_err(nn, "Config mc address to HW failed.\n");
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
}
static void nfp_net_set_rx_mode(struct net_device *netdev)
@@ -2633,6 +2680,11 @@ int nfp_net_init(struct nfp_net *nn)
if (!nn->dp.netdev)
return 0;
+
+ spin_lock_init(&nn->mc_lock);
+ INIT_LIST_HEAD(&nn->mc_addrs);
+ INIT_WORK(&nn->mc_work, nfp_net_mc_addr_config);
+
return register_netdev(nn->dp.netdev);
err_clean_mbox:
@@ -2652,5 +2704,6 @@ void nfp_net_clean(struct nfp_net *nn)
unregister_netdev(nn->dp.netdev);
nfp_net_ipsec_clean(nn);
nfp_ccm_mbox_clean(nn);
+ flush_work(&nn->mc_work);
nfp_net_reconfig_wait_posted(nn);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 86ecb080b153..cdcead614e9f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -1832,7 +1832,8 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 image_type,
u32 *nvram_offset_bytes,
- u32 *nvram_size_bytes)
+ u32 *nvram_size_bytes,
+ bool b_can_sleep)
{
u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
struct mcp_file_att file_att;
@@ -1846,7 +1847,8 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
&ret_mcp_resp,
&ret_mcp_param,
&ret_txn_size,
- (u32 *)&file_att, false);
+ (u32 *)&file_att,
+ b_can_sleep);
/* Check response */
if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
@@ -1873,7 +1875,9 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 nvram_offset_bytes,
- u32 nvram_size_bytes, u32 *ret_buf)
+ u32 nvram_size_bytes,
+ u32 *ret_buf,
+ bool b_can_sleep)
{
u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
s32 bytes_left = nvram_size_bytes;
@@ -1899,7 +1903,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
&ret_mcp_resp,
&ret_mcp_param, &ret_read_size,
(u32 *)((u8 *)ret_buf + read_offset),
- false))
+ b_can_sleep))
return DBG_STATUS_NVRAM_READ_FAILED;
/* Check response */
@@ -3380,7 +3384,8 @@ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
p_ptt,
NVM_TYPE_HW_DUMP_OUT,
&hw_dump_offset_bytes,
- &hw_dump_size_bytes);
+ &hw_dump_size_bytes,
+ false);
if (status != DBG_STATUS_OK)
return 0;
@@ -3397,7 +3402,9 @@ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
status = qed_nvram_read(p_hwfn,
p_ptt,
hw_dump_offset_bytes,
- hw_dump_size_bytes, dump_buf + offset);
+ hw_dump_size_bytes,
+ dump_buf + offset,
+ false);
if (status != DBG_STATUS_OK) {
DP_NOTICE(p_hwfn,
"Failed to read MCP HW Dump image from NVRAM\n");
@@ -4123,7 +4130,9 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
return qed_find_nvram_image(p_hwfn,
p_ptt,
nvram_image_type,
- trace_meta_offset, trace_meta_size);
+ trace_meta_offset,
+ trace_meta_size,
+ true);
}
/* Reads the MCP Trace meta data from NVRAM into the specified buffer */
@@ -4139,7 +4148,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
/* Read meta data from NVRAM */
status = qed_nvram_read(p_hwfn,
p_ptt,
- nvram_offset_in_bytes, size_in_bytes, buf);
+ nvram_offset_in_bytes,
+ size_in_bytes,
+ buf,
+ true);
if (status != DBG_STATUS_OK)
return status;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index dbb800769cb6..c95d56e56c59 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2505,7 +2505,13 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
goto disable_mbx_intr;
qlcnic_83xx_clear_function_resources(adapter);
- qlcnic_dcb_enable(adapter->dcb);
+
+ err = qlcnic_dcb_enable(adapter->dcb);
+ if (err) {
+ qlcnic_dcb_free(adapter->dcb);
+ goto disable_mbx_intr;
+ }
+
qlcnic_83xx_initialize_nic(adapter, 1);
qlcnic_dcb_get_info(adapter->dcb);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index 7519773eaca6..22afa2be85fd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -41,11 +41,6 @@ struct qlcnic_dcb {
unsigned long state;
};
-static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
-{
- kfree(dcb);
-}
-
static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
{
if (dcb && dcb->ops->get_hw_capability)
@@ -112,9 +107,8 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
dcb->ops->init_dcbnl_ops(dcb);
}
-static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+static inline int qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
{
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
+ return dcb ? qlcnic_dcb_attach(dcb) : 0;
}
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 28476b982bab..44dac3c0908e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2599,7 +2599,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Device does not support MSI interrupts\n");
if (qlcnic_82xx_check(adapter)) {
- qlcnic_dcb_enable(adapter->dcb);
+ err = qlcnic_dcb_enable(adapter->dcb);
+ if (err) {
+ qlcnic_dcb_free(adapter->dcb);
+ dev_err(&pdev->dev, "Failed to enable DCB\n");
+ goto err_out_free_hw;
+ }
+
qlcnic_dcb_get_info(adapter->dcb);
err = qlcnic_setup_intr(adapter);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index a9dcc98b6af1..dadd61bccfe7 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1996,10 +1996,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168F family. */
{ 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
- */
+ { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
{ 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
/* 8168E family. */
@@ -2210,28 +2207,6 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static void rtl_wol_enable_rx(struct rtl8169_private *tp)
-{
- if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
- RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
- AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
-}
-
-static void rtl_prepare_power_down(struct rtl8169_private *tp)
-{
- if (tp->dash_type != RTL_DASH_NONE)
- return;
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
- tp->mac_version == RTL_GIGA_MAC_VER_33)
- rtl_ephy_write(tp, 0x19, 0xff64);
-
- if (device_may_wakeup(tp_to_dev(tp))) {
- phy_speed_down(tp->phydev, false);
- rtl_wol_enable_rx(tp);
- }
-}
-
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -2455,6 +2430,31 @@ static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
rtl_wait_txrx_fifo_empty(tp);
}
+static void rtl_wol_enable_rx(struct rtl8169_private *tp)
+{
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
+ RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
+
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_40)
+ rtl_disable_rxdvgate(tp);
+}
+
+static void rtl_prepare_power_down(struct rtl8169_private *tp)
+{
+ if (tp->dash_type != RTL_DASH_NONE)
+ return;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_33)
+ rtl_ephy_write(tp, 0x19, 0xff64);
+
+ if (device_may_wakeup(tp_to_dev(tp))) {
+ phy_speed_down(tp->phydev, false);
+ rtl_wol_enable_rx(tp);
+ }
+}
+
static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
{
u32 val = TX_DMA_BURST << TxDMAShift |
@@ -3872,7 +3872,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
netdev_reset_queue(tp->dev);
}
-static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
+static void rtl8169_cleanup(struct rtl8169_private *tp)
{
napi_disable(&tp->napi);
@@ -3884,9 +3884,6 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
rtl_rx_close(tp);
- if (going_down && tp->dev->wol_enabled)
- goto no_reset;
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
@@ -3907,7 +3904,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
}
rtl_hw_reset(tp);
-no_reset:
+
rtl8169_tx_clear(tp);
rtl8169_init_ring_indexes(tp);
}
@@ -3918,7 +3915,7 @@ static void rtl_reset_work(struct rtl8169_private *tp)
netif_stop_queue(tp->dev);
- rtl8169_cleanup(tp, false);
+ rtl8169_cleanup(tp);
for (i = 0; i < NUM_RX_DESC; i++)
rtl8169_mark_to_asic(tp->RxDescArray + i);
@@ -4605,7 +4602,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
pci_clear_master(tp->pci_dev);
rtl_pci_commit(tp);
- rtl8169_cleanup(tp, true);
+ rtl8169_cleanup(tp);
rtl_disable_exit_l1(tp);
rtl_prepare_power_down(tp);
}
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index e42ceaa0099f..6441892636db 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1578,6 +1578,7 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
struct platform_device *pdev = priv->pdev;
struct rswitch_device *rdev;
+ struct device_node *port;
struct net_device *ndev;
int err;
@@ -1606,7 +1607,9 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
netif_napi_add(ndev, &rdev->napi, rswitch_poll);
- err = of_get_ethdev_address(pdev->dev.of_node, ndev);
+ port = rswitch_get_port_node(rdev);
+ err = of_get_ethdev_address(port, ndev);
+ of_node_put(port);
if (err) {
if (is_valid_ether_addr(rdev->etha->mac_addr))
eth_hw_addr_set(ndev, rdev->etha->mac_addr);
@@ -1786,6 +1789,11 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
ret = rswitch_init(priv);
+ if (ret < 0) {
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
device_set_wakeup_capable(&pdev->dev, 1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index d42e1afb6521..2f7d8e4561d9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -90,7 +90,6 @@ struct mediatek_dwmac_plat_data {
struct mediatek_dwmac_variant {
int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
- void (*dwmac_fix_mac_speed)(void *priv, unsigned int speed);
/* clock ids to be requested */
const char * const *clk_list;
@@ -443,32 +442,9 @@ static int mt8195_set_delay(struct mediatek_dwmac_plat_data *plat)
return 0;
}
-static void mt8195_fix_mac_speed(void *priv, unsigned int speed)
-{
- struct mediatek_dwmac_plat_data *priv_plat = priv;
-
- if ((phy_interface_mode_is_rgmii(priv_plat->phy_mode))) {
- /* prefer 2ns fixed delay which is controlled by TXC_PHASE_CTRL,
- * when link speed is 1Gbps with RGMII interface,
- * Fall back to delay macro circuit for 10/100Mbps link speed.
- */
- if (speed == SPEED_1000)
- regmap_update_bits(priv_plat->peri_regmap,
- MT8195_PERI_ETH_CTRL0,
- MT8195_RGMII_TXC_PHASE_CTRL |
- MT8195_DLY_GTXC_ENABLE |
- MT8195_DLY_GTXC_INV |
- MT8195_DLY_GTXC_STAGES,
- MT8195_RGMII_TXC_PHASE_CTRL);
- else
- mt8195_set_delay(priv_plat);
- }
-}
-
static const struct mediatek_dwmac_variant mt8195_gmac_variant = {
.dwmac_set_phy_interface = mt8195_set_interface,
.dwmac_set_delay = mt8195_set_delay,
- .dwmac_fix_mac_speed = mt8195_fix_mac_speed,
.clk_list = mt8195_dwmac_clk_l,
.num_clks = ARRAY_SIZE(mt8195_dwmac_clk_l),
.dma_bit_mask = 35,
@@ -619,8 +595,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
plat->bsp_priv = priv_plat;
plat->init = mediatek_dwmac_init;
plat->clks_config = mediatek_dwmac_clks_config;
- if (priv_plat->variant->dwmac_fix_mac_speed)
- plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed;
plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
sizeof(*plat->safety_feat_cfg),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index fc06ddeac0d5..b4388ca8d211 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -210,7 +210,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
}
writel(acr_value, ptpaddr + PTP_ACR);
mutex_unlock(&priv->aux_ts_lock);
- ret = 0;
+ /* wait for auxts fifo clear to finish */
+ ret = readl_poll_timeout(ptpaddr + PTP_ACR, acr_value,
+ !(acr_value & PTP_ACR_ATSFC),
+ 10, 10000);
break;
default:
diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c
index 7552c400961e..b83390c48615 100644
--- a/drivers/net/ipa/data/ipa_data-v4.7.c
+++ b/drivers/net/ipa/data/ipa_data-v4.7.c
@@ -357,7 +357,7 @@ static const struct ipa_mem ipa_mem_local_data[] = {
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
- .imem_addr = 0x146a9000,
+ .imem_addr = 0x146a8000,
.imem_size = 0x00002000,
.smem_id = 497,
.smem_size = 0x00009000,
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index a49f66efacb8..d458a35839cc 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -132,10 +132,10 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
u32 endpoint_id, bool enable)
{
struct ipa *ipa = interrupt->ipa;
+ u32 mask = BIT(endpoint_id % 32);
u32 unit = endpoint_id / 32;
const struct ipa_reg *reg;
u32 offset;
- u32 mask;
u32 val;
WARN_ON(!test_bit(endpoint_id, ipa->available));
@@ -148,7 +148,6 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
offset = ipa_reg_n_offset(reg, unit);
val = ioread32(ipa->reg_virt + offset);
- mask = BIT(endpoint_id);
if (enable)
val |= mask;
else
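
The fix above computes the suspend-interrupt mask from the endpoint's position within its 32-bit register: BIT(endpoint_id) alone is undefined behavior once endpoint IDs reach 32. A small user-space sketch of the unit/mask split (loop bounds are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n) (1u << (n))

	int main(void)
	{
		for (unsigned int endpoint_id = 30; endpoint_id < 35; endpoint_id++) {
			unsigned int unit = endpoint_id / 32;	/* which 32-bit register */
			uint32_t mask = BIT(endpoint_id % 32);	/* bit within that register */

			printf("endpoint %2u -> unit %u, mask 0x%08x\n",
			       endpoint_id, unit, mask);
		}
		return 0;
	}
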
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 8dcb49ed1f3d..7fd9fe6a602b 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -105,6 +105,7 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev)
if (!priv->phy_dev->drv) {
dev_info(dev, "Attached phy not ready\n");
+ put_device(&priv->phy_dev->mdio.dev);
return -EPROBE_DEFER;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 8911cd2ed534..c140edb4b648 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -1008,6 +1008,12 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
+ /* Cinterion PLS62-W modem by GEMALTO/THALES */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x005b, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
/* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index a481a1d831e2..23da1d9dafd1 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -9836,6 +9836,7 @@ static const struct usb_device_id rtl8152_table[] = {
REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab),
REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6),
REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
+ REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e),
REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index f79333fe1783..7b3739b29c8f 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -255,7 +255,8 @@ static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
off = le32_to_cpu(u.get_c->offset);
len = le32_to_cpu(u.get_c->len);
- if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE))
+ if (unlikely((off > CONTROL_BUFFER_SIZE - 8) ||
+ (len > CONTROL_BUFFER_SIZE - 8 - off)))
goto response_error;
if (*reply_len != -1 && len != *reply_len)
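
The rewritten length check avoids integer overflow: with attacker-controlled off and len, the old sum 8 + off + len can wrap and slip past the comparison. A user-space sketch of the overflow-safe form (the buffer size constant here is illustrative):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define CONTROL_BUFFER_SIZE 1025u	/* illustrative size */

	/* Reject (off, len) pairs that would index past the buffer,
	 * without ever computing a sum that can wrap around.
	 */
	static bool range_ok(uint32_t off, uint32_t len)
	{
		if (off > CONTROL_BUFFER_SIZE - 8)
			return false;
		if (len > CONTROL_BUFFER_SIZE - 8 - off)
			return false;
		return true;
	}

	int main(void)
	{
		/* 8 + off + len wraps to 0x18 in 32-bit arithmetic */
		uint32_t off = 0xfffffff0u, len = 0x20u;

		printf("old check passes: %d\n",
		       (uint32_t)(8 + off + len) <= CONTROL_BUFFER_SIZE);
		printf("new check passes: %d\n", range_ok(off, len));
		return 0;
	}
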
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ac7c0653695f..dfc7d87fad59 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -974,6 +974,9 @@ static int veth_poll(struct napi_struct *napi, int budget)
xdp_set_return_frame_no_direct();
done = veth_xdp_rcv(rq, budget, &bq, &stats);
+ if (stats.xdp_redirect > 0)
+ xdp_do_flush();
+
if (done < budget && napi_complete_done(napi, done)) {
/* Write rx_notify_masked before reading ptr_ring */
smp_store_mb(rq->rx_notify_masked, false);
@@ -987,8 +990,6 @@ static int veth_poll(struct napi_struct *napi, int budget)
if (stats.xdp_tx > 0)
veth_xdp_flush(rq, &bq);
- if (stats.xdp_redirect > 0)
- xdp_do_flush();
xdp_clear_return_frame_no_direct();
return done;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 6f1e560fb15c..56267c327f0b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1288,6 +1288,10 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
(le32_to_cpu(gdesc->dword[3]) &
VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if ((le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
+ skb->csum_level = 1;
+ }
WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
!(le32_to_cpu(gdesc->dword[0]) &
(1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
@@ -1297,6 +1301,10 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
(1 << VMXNET3_RCD_TUC_SHIFT))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if ((le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
+ skb->csum_level = 1;
+ }
WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
!(le32_to_cpu(gdesc->dword[0]) &
(1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 6b5a4d036d15..bdb3a76a352e 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1385,8 +1385,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
/* loopback, multicast & non-ND link-local traffic; do not push through
* packet taps again. Reset pkt_type for upper layers to process skb.
- * For strict packets with a source LLA, determine the dst using the
- * original ifindex.
+ * For non-loopback strict packets, determine the dst using the original
+ * ifindex.
*/
if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
skb->dev = vrf_dev;
@@ -1395,7 +1395,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
if (skb->pkt_type == PACKET_LOOPBACK)
skb->pkt_type = PACKET_HOST;
- else if (ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)
+ else
vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
goto out;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 92224b36787a..b1b179effe2a 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2917,16 +2917,23 @@ static int vxlan_init(struct net_device *dev)
vxlan_vnigroup_init(vxlan);
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
+ if (!dev->tstats) {
+ err = -ENOMEM;
+ goto err_vnigroup_uninit;
+ }
err = gro_cells_init(&vxlan->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
- return err;
- }
+ if (err)
+ goto err_free_percpu;
return 0;
+
+err_free_percpu:
+ free_percpu(dev->tstats);
+err_vnigroup_uninit:
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ vxlan_vnigroup_uninit(vxlan);
+ return err;
}
static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
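
The reworked vxlan_init() adopts the kernel's usual goto-unwind shape: each failure path jumps to a label that releases only what was already set up, in reverse order. A self-contained sketch of that shape with illustrative stub resources:

	#include <stdio.h>

	struct ctx { int a, b; };

	static int setup_a(struct ctx *c) { c->a = 1; return 0; }
	static int setup_b(struct ctx *c) { c->b = 1; return 0; }
	static int setup_c(struct ctx *c) { (void)c; return -1; /* force failure */ }
	static void teardown_b(struct ctx *c) { c->b = 0; }
	static void teardown_a(struct ctx *c) { c->a = 0; }

	static int init_example(struct ctx *c)
	{
		int err;

		err = setup_a(c);
		if (err)
			return err;		/* nothing to undo yet */
		err = setup_b(c);
		if (err)
			goto err_undo_a;
		err = setup_c(c);
		if (err)
			goto err_undo_b;
		return 0;

	err_undo_b:
		teardown_b(c);
	err_undo_a:
		teardown_a(c);
		return err;
	}

	int main(void)
	{
		struct ctx c = { 0, 0 };

		printf("init_example: %d (a=%d b=%d)\n", init_example(&c), c.a, c.b);
		return 0;
	}
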
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 30f0765fb9fd..237f4ec2cffd 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -327,9 +327,9 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
}
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
-#define __STAT_SAFE(hif_dev, expr) ((hif_dev)->htc_handle->drv_priv ? (expr) : 0)
-#define CAB_STAT_INC(priv) ((priv)->debug.tx_stats.cab_queued++)
-#define TX_QSTAT_INC(priv, q) ((priv)->debug.tx_stats.queue_stats[q]++)
+#define __STAT_SAFE(hif_dev, expr) do { ((hif_dev)->htc_handle->drv_priv ? (expr) : 0); } while (0)
+#define CAB_STAT_INC(priv) do { ((priv)->debug.tx_stats.cab_queued++); } while (0)
+#define TX_QSTAT_INC(priv, q) do { ((priv)->debug.tx_stats.queue_stats[q]++); } while (0)
#define TX_STAT_INC(hif_dev, c) \
__STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c++)
@@ -378,10 +378,10 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
struct ethtool_stats *stats, u64 *data);
#else
-#define TX_STAT_INC(hif_dev, c)
-#define TX_STAT_ADD(hif_dev, c, a)
-#define RX_STAT_INC(hif_dev, c)
-#define RX_STAT_ADD(hif_dev, c, a)
+#define TX_STAT_INC(hif_dev, c) do { } while (0)
+#define TX_STAT_ADD(hif_dev, c, a) do { } while (0)
+#define RX_STAT_INC(hif_dev, c) do { } while (0)
+#define RX_STAT_ADD(hif_dev, c, a) do { } while (0)
#define CAB_STAT_INC(priv)
#define TX_QSTAT_INC(priv, c)
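
The do { } while (0) wrappers added above are the standard idiom for statement-like macros, including deliberately empty ones: the body always parses as exactly one statement that demands a trailing semicolon. A user-space illustration of the hazard the idiom prevents (macro names are illustrative):

	#include <stdio.h>

	/* Unsafe: expands to two statements, so only the first is guarded
	 * by an if, and the stray second statement breaks an if/else chain.
	 */
	#define COUNT_BOTH_BAD(a, b)	(a)++; (b)++

	/* Safe: a single statement that composes cleanly with if/else. */
	#define COUNT_BOTH_GOOD(a, b)	do { (a)++; (b)++; } while (0)

	int main(void)
	{
		int x = 0, y = 0;

		if (x == 0)
			COUNT_BOTH_GOOD(x, y);	/* the BAD variant would not
						 * compile next to this else */
		else
			printf("never\n");

		printf("x=%d y=%d\n", x, y);
		return 0;
	}
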
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index a83699de01ec..fdd0c9abc1a1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -79,7 +79,8 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
/* Apple ARM64 platforms have their own idea of board type, passed in
* via the device tree. They also have an antenna SKU parameter
*/
- if (!of_property_read_string(np, "brcm,board-type", &prop))
+ err = of_property_read_string(np, "brcm,board-type", &prop);
+ if (!err)
settings->board_type = prop;
if (!of_property_read_string(np, "apple,antenna-sku", &prop))
@@ -87,7 +88,7 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
/* Set board-type to the first string of the machine compatible prop */
root = of_find_node_by_path("/");
- if (root && !settings->board_type) {
+ if (root && err) {
char *board_type;
const char *tmp;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index e6d64152c81a..a02e5a67b706 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -1106,6 +1106,11 @@ int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *c
int i, j, num_sub_bands;
s8 *gain;
+ /* many firmware images for JF lie about this */
+ if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
+ CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
+ return -EOPNOTSUPP;
+
if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
IWL_DEBUG_RADIO(fwrt,
"PPAG capability not supported by FW, command not sent.\n");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig
index 5c5fc569e6d5..79fb47a73c91 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig
@@ -2,6 +2,7 @@
config MT7996E
tristate "MediaTek MT7996 (PCIe) support"
select MT76_CONNAC_LIB
+ select RELAY
depends on MAC80211
depends on PCI
help
diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile
index 0530dd744275..05ee016594f8 100644
--- a/drivers/net/wireless/ti/Makefile
+++ b/drivers/net/wireless/ti/Makefile
@@ -3,6 +3,3 @@ obj-$(CONFIG_WLCORE) += wlcore/
obj-$(CONFIG_WL12XX) += wl12xx/
obj-$(CONFIG_WL1251) += wl1251/
obj-$(CONFIG_WL18XX) += wl18xx/
-
-# small builtin driver bit
-obj-$(CONFIG_WILINK_PLATFORM_DATA) += wilink_platform_data.o
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index c1ba4294f364..001636901dda 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -977,7 +977,7 @@ static int read_xenbus_vif_flags(struct backend_info *be)
return 0;
}
-static int netback_remove(struct xenbus_device *dev)
+static void netback_remove(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
@@ -992,7 +992,6 @@ static int netback_remove(struct xenbus_device *dev)
kfree(be->hotplug_script);
kfree(be);
dev_set_drvdata(&dev->dev, NULL);
- return 0;
}
/*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 14aec417fa06..12b074286df9 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -2646,7 +2646,7 @@ static void xennet_bus_close(struct xenbus_device *dev)
} while (!ret);
}
-static int xennet_remove(struct xenbus_device *dev)
+static void xennet_remove(struct xenbus_device *dev)
{
struct netfront_info *info = dev_get_drvdata(&dev->dev);
@@ -2662,8 +2662,6 @@ static int xennet_remove(struct xenbus_device *dev)
rtnl_unlock();
}
xennet_free_netdev(info->netdev);
-
- return 0;
}
static const struct xenbus_device_id netfront_ids[] = {
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index 6f71ac72012e..ed9c5e2cf3ad 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -153,10 +153,17 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
return usb_submit_urb(phy->ack_urb, flags);
}
+struct pn533_out_arg {
+ struct pn533_usb_phy *phy;
+ struct completion done;
+};
+
static int pn533_usb_send_frame(struct pn533 *dev,
struct sk_buff *out)
{
struct pn533_usb_phy *phy = dev->phy;
+ struct pn533_out_arg arg;
+ void *cntx;
int rc;
if (phy->priv == NULL)
@@ -168,10 +175,17 @@ static int pn533_usb_send_frame(struct pn533 *dev,
print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
out->data, out->len, false);
+ init_completion(&arg.done);
+ cntx = phy->out_urb->context;
+ phy->out_urb->context = &arg;
+
rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
if (rc)
return rc;
+ wait_for_completion(&arg.done);
+ phy->out_urb->context = cntx;
+
if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
/* request for response for sent packet directly */
rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
@@ -408,7 +422,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
return arg.rc;
}
-static void pn533_send_complete(struct urb *urb)
+static void pn533_out_complete(struct urb *urb)
+{
+ struct pn533_out_arg *arg = urb->context;
+ struct pn533_usb_phy *phy = arg->phy;
+
+ switch (urb->status) {
+ case 0:
+ break; /* success */
+ case -ECONNRESET:
+ case -ENOENT:
+ dev_dbg(&phy->udev->dev,
+ "The urb has been stopped (status %d)\n",
+ urb->status);
+ break;
+ case -ESHUTDOWN:
+ default:
+ nfc_err(&phy->udev->dev,
+ "Urb failure (status %d)\n",
+ urb->status);
+ }
+
+ complete(&arg->done);
+}
+
+static void pn533_ack_complete(struct urb *urb)
{
struct pn533_usb_phy *phy = urb->context;
@@ -496,10 +534,10 @@ static int pn533_usb_probe(struct usb_interface *interface,
usb_fill_bulk_urb(phy->out_urb, phy->udev,
usb_sndbulkpipe(phy->udev, out_endpoint),
- NULL, 0, pn533_send_complete, phy);
+ NULL, 0, pn533_out_complete, phy);
usb_fill_bulk_urb(phy->ack_urb, phy->udev,
usb_sndbulkpipe(phy->udev, out_endpoint),
- NULL, 0, pn533_send_complete, phy);
+ NULL, 0, pn533_ack_complete, phy);
switch (id->driver_info) {
case PN533_DEVICE_STD:
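
The new send path turns an asynchronous URB into a synchronous call with the standard completion pattern: the submitter stacks a struct completion, points the URB context at it, and the completion handler signals it. A heavily elided kernel-style sketch of the shape (function names are illustrative, not the driver's):

	#include <linux/completion.h>

	struct out_arg {
		struct completion done;
	};

	/* Async callback: signal the waiting submitter. */
	static void example_out_complete(struct out_arg *arg)
	{
		/* ...inspect status as pn533_out_complete() does... */
		complete(&arg->done);
	}

	static void example_send_sync(struct out_arg *arg)
	{
		init_completion(&arg->done);
		/* ...submit the async operation carrying arg as its context... */
		wait_for_completion(&arg->done);	/* sleeps until complete() */
	}
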
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index bb0abbe4491c..4424f53a8a0a 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -953,7 +953,7 @@ int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
goto err_free_dhchap_secret;
if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
- return ret;
+ return 0;
ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
sizeof(*chap), GFP_KERNEL);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 95c488ea91c3..7be562a4e1aa 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1074,6 +1074,18 @@ static u32 nvme_known_admin_effects(u8 opcode)
return 0;
}
+static u32 nvme_known_nvm_effects(u8 opcode)
+{
+ switch (opcode) {
+ case nvme_cmd_write:
+ case nvme_cmd_write_zeroes:
+ case nvme_cmd_write_uncor:
+ return NVME_CMD_EFFECTS_LBCC;
+ default:
+ return 0;
+ }
+}
+
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
u32 effects = 0;
@@ -1081,16 +1093,24 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
if (ns) {
if (ns->head->effects)
effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
+ if (ns->head->ids.csi == NVME_CAP_CSS_NVM)
+ effects |= nvme_known_nvm_effects(opcode);
if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
dev_warn_once(ctrl->device,
- "IO command:%02x has unhandled effects:%08x\n",
+ "IO command:%02x has unusual effects:%08x\n",
opcode, effects);
- return 0;
- }
- if (ctrl->effects)
- effects = le32_to_cpu(ctrl->effects->acs[opcode]);
- effects |= nvme_known_admin_effects(opcode);
+ /*
+ * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
+ * which would deadlock when done on an I/O command. Note that
+ * we already warn about an unusual effect above.
+ */
+ effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
+ } else {
+ if (ctrl->effects)
+ effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+ effects |= nvme_known_admin_effects(opcode);
+ }
return effects;
}
@@ -4926,7 +4946,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
memset(set, 0, sizeof(*set));
set->ops = ops;
- set->queue_depth = ctrl->sqsize + 1;
+ set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
/*
* Some Apple controllers requires tags to be unique across admin and
* the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 9ddda571f046..a8639919237e 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -11,6 +11,8 @@
static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
fmode_t mode)
{
+ u32 effects;
+
if (capable(CAP_SYS_ADMIN))
return true;
@@ -43,11 +45,29 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
}
/*
- * Only allow I/O commands that transfer data to the controller if the
- * special file is open for writing, but always allow I/O commands that
- * transfer data from the controller.
+ * Check if the controller provides a Commands Supported and Effects log
+ * and marks this command as supported. If not, reject unprivileged
+ * passthrough.
+ */
+ effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
+ if (!(effects & NVME_CMD_EFFECTS_CSUPP))
+ return false;
+
+ /*
+ * Don't allow passthrough for commands that have intrusive (or unknown)
+ * effects.
+ */
+ if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
+ NVME_CMD_EFFECTS_UUID_SEL |
+ NVME_CMD_EFFECTS_SCOPE_MASK))
+ return false;
+
+ /*
+ * Only allow I/O commands that transfer data to the controller or that
+ * change the logical block contents if the file descriptor is open for
+ * writing.
*/
- if (nvme_is_write(c))
+ if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
return mode & FMODE_WRITE;
return true;
}
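
The new policy above is an allow-list over the command's effects bitmask: the command must be marked supported, must carry no effects outside a known-benign set, and data-changing commands additionally require a writable file descriptor. A user-space sketch mirroring the shape of that check (bit values are illustrative, not the NVMe spec's):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define EFF_CSUPP	(1u << 0)	/* command supported */
	#define EFF_LBCC	(1u << 1)	/* logical block content change */
	#define EFF_OTHER	(1u << 4)	/* anything off the allow-list */

	static bool cmd_allowed(uint32_t effects, bool open_for_write)
	{
		if (!(effects & EFF_CSUPP))
			return false;			/* not marked supported */
		if (effects & ~(EFF_CSUPP | EFF_LBCC))
			return false;			/* unknown/intrusive effects */
		if (effects & EFF_LBCC)
			return open_for_write;		/* data-changing needs write fd */
		return true;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       cmd_allowed(EFF_CSUPP, false),			/* 1 */
		       cmd_allowed(EFF_CSUPP | EFF_LBCC, false),	/* 0 */
		       cmd_allowed(EFF_CSUPP | EFF_OTHER, true));	/* 0 */
		return 0;
	}
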
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index c03093b6813c..fc39d01e7b63 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -376,6 +376,8 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
* pool from the original queue to allocate the bvecs from.
*/
bio = bio_split_to_limits(bio);
+ if (!bio)
+ return;
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 6bbb73ef8b25..424c8a467a0c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -893,7 +893,7 @@ static inline void nvme_trace_bio_complete(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
- if (req->cmd_flags & REQ_NVME_MPATH)
+ if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
trace_block_bio_complete(ns->head->disk->queue, req->bio);
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f0f8027644bb..b13baccedb4a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -36,7 +36,7 @@
#define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
#define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
-#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
+#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
/*
* These can be higher, but we need to ensure that any command doesn't
@@ -144,9 +144,9 @@ struct nvme_dev {
mempool_t *iod_mempool;
/* shadow doorbell buffer support: */
- u32 *dbbuf_dbs;
+ __le32 *dbbuf_dbs;
dma_addr_t dbbuf_dbs_dma_addr;
- u32 *dbbuf_eis;
+ __le32 *dbbuf_eis;
dma_addr_t dbbuf_eis_dma_addr;
/* host memory buffer support: */
@@ -208,10 +208,10 @@ struct nvme_queue {
#define NVMEQ_SQ_CMB 1
#define NVMEQ_DELETE_ERROR 2
#define NVMEQ_POLLED 3
- u32 *dbbuf_sq_db;
- u32 *dbbuf_cq_db;
- u32 *dbbuf_sq_ei;
- u32 *dbbuf_cq_ei;
+ __le32 *dbbuf_sq_db;
+ __le32 *dbbuf_cq_db;
+ __le32 *dbbuf_sq_ei;
+ __le32 *dbbuf_cq_ei;
struct completion delete_done;
};
@@ -343,11 +343,11 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
}
/* Update dbbuf and return true if an MMIO is required */
-static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
- volatile u32 *dbbuf_ei)
+static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
+ volatile __le32 *dbbuf_ei)
{
if (dbbuf_db) {
- u16 old_value;
+ u16 old_value, event_idx;
/*
* Ensure that the queue is written before updating
@@ -355,8 +355,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
*/
wmb();
- old_value = *dbbuf_db;
- *dbbuf_db = value;
+ old_value = le32_to_cpu(*dbbuf_db);
+ *dbbuf_db = cpu_to_le32(value);
/*
* Ensure that the doorbell is updated before reading the event
@@ -366,7 +366,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
*/
mb();
- if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
+ event_idx = le32_to_cpu(*dbbuf_ei);
+ if (!nvme_dbbuf_need_event(event_idx, value, old_value))
return false;
}
@@ -380,9 +381,9 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
*/
static int nvme_pci_npages_prp(void)
{
- unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
- NVME_CTRL_PAGE_SIZE);
- return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+ unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
+ unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
+ return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
}
/*
@@ -392,7 +393,7 @@ static int nvme_pci_npages_prp(void)
static int nvme_pci_npages_sgl(void)
{
return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
- PAGE_SIZE);
+ NVME_CTRL_PAGE_SIZE);
}
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -708,7 +709,7 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
sge->length = cpu_to_le32(entries * sizeof(*sge));
sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
} else {
- sge->length = cpu_to_le32(PAGE_SIZE);
+ sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
sge->type = NVME_SGL_FMT_SEG_DESC << 4;
}
}
@@ -2332,10 +2333,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (dev->cmb_use_sqes) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
sizeof(struct nvme_command));
- if (result > 0)
+ if (result > 0) {
dev->q_depth = result;
- else
+ dev->ctrl.sqsize = result - 1;
+ } else {
dev->cmb_use_sqes = false;
+ }
}
do {
@@ -2536,7 +2539,6 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
io_queue_depth);
- dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
dev->dbs = dev->bar + 4096;
@@ -2577,7 +2579,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
dev->q_depth);
}
-
+ dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
nvme_map_cmb(dev);
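
The __le32 annotations make the byte order of the shadow doorbell buffer explicit: stores go through cpu_to_le32() and loads through le32_to_cpu(), so sparse can flag any raw access. A small user-space analogue using glibc's <endian.h> helpers in place of the kernel's:

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t dbbuf_db = htole32(0x0042);	/* previously stored slot */
		uint16_t value = 0x1234;
		uint16_t old_value;

		old_value = (uint16_t)le32toh(dbbuf_db);	/* le32_to_cpu(*dbbuf_db) */
		dbbuf_db = htole32(value);			/* cpu_to_le32(value) */

		printf("old=0x%04x new=0x%04x\n",
		       old_value, (uint16_t)le32toh(dbbuf_db));
		return 0;
	}
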
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 53a004ea320c..6a54ed6fb121 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -164,26 +164,31 @@ out:
static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
- log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
- log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
- log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
- log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
- log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
- log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
- log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);
-
- log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
- log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
- log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
- log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
- log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_get_log_page] =
+ log->acs[nvme_admin_identify] =
+ log->acs[nvme_admin_abort_cmd] =
+ log->acs[nvme_admin_set_features] =
+ log->acs[nvme_admin_get_features] =
+ log->acs[nvme_admin_async_event] =
+ log->acs[nvme_admin_keep_alive] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+
+ log->iocs[nvme_cmd_read] =
+ log->iocs[nvme_cmd_flush] =
+ log->iocs[nvme_cmd_dsm] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+ log->iocs[nvme_cmd_write] =
+ log->iocs[nvme_cmd_write_zeroes] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
}
static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
- log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0);
- log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0);
- log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_zone_append] =
+ log->iocs[nvme_cmd_zone_mgmt_send] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
+ log->iocs[nvme_cmd_zone_mgmt_recv] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 79af5140af8b..adc0958755d6 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -334,14 +334,13 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
}
/*
- * If there are effects for the command we are about to execute, or
- * an end_req function we need to use nvme_execute_passthru_rq()
- * synchronously in a work item seeing the end_req function and
- * nvme_passthru_end() can't be called in the request done callback
- * which is typically in interrupt context.
+ * If a command needs post-execution fixups, or there are any
+ * non-trivial effects, make sure to execute the command synchronously
+ * in a workqueue so that nvme_passthru_end gets called.
*/
effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
- if (req->p.use_workqueue || effects) {
+ if (req->p.use_workqueue ||
+ (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) {
INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
req->p.rq = rq;
queue_work(nvmet_wq, &req->p.work);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index b2272bccf85c..f08b25195ae7 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -1099,7 +1099,7 @@ u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
*/
int __init early_init_dt_scan_memory(void)
{
- int node;
+ int node, found_memory = 0;
const void *fdt = initial_boot_params;
fdt_for_each_subnode(node, fdt, 0) {
@@ -1139,6 +1139,8 @@ int __init early_init_dt_scan_memory(void)
early_init_dt_add_memory_arch(base, size);
+ found_memory = 1;
+
if (!hotpluggable)
continue;
@@ -1147,7 +1149,7 @@ int __init early_init_dt_scan_memory(void)
base, base + size);
}
}
- return 0;
+ return found_memory;
}
int __init early_init_dt_scan_chosen(char *cmdline)
@@ -1161,18 +1163,14 @@ int __init early_init_dt_scan_chosen(char *cmdline)
if (node < 0)
node = fdt_path_offset(fdt, "/chosen@0");
if (node < 0)
- return -ENOENT;
+ /* Handle the cmdline config options even if no /chosen node */
+ goto handle_cmdline;
chosen_node_offset = node;
early_init_dt_check_for_initrd(node);
early_init_dt_check_for_elfcorehdr(node);
- /* Retrieve command line */
- p = of_get_flat_dt_prop(node, "bootargs", &l);
- if (p != NULL && l > 0)
- strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
-
rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
if (rng_seed && l > 0) {
add_bootloader_randomness(rng_seed, l);
@@ -1185,6 +1183,32 @@ int __init early_init_dt_scan_chosen(char *cmdline)
fdt_totalsize(initial_boot_params));
}
+ /* Retrieve command line */
+ p = of_get_flat_dt_prop(node, "bootargs", &l);
+ if (p != NULL && l > 0)
+ strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
+
+handle_cmdline:
+ /*
+ * CONFIG_CMDLINE is meant to be a default in case nothing else
+ * managed to set the command line, unless CONFIG_CMDLINE_FORCE
+ * is set in which case we override whatever was found earlier.
+ */
+#ifdef CONFIG_CMDLINE
+#if defined(CONFIG_CMDLINE_EXTEND)
+ strlcat(cmdline, " ", COMMAND_LINE_SIZE);
+ strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#elif defined(CONFIG_CMDLINE_FORCE)
+ strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#else
+ /* No arguments from boot loader, use kernel's cmdline */
+ if (!((char *)cmdline)[0])
+ strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#endif
+#endif /* CONFIG_CMDLINE */
+
+ pr_debug("Command line is: %s\n", (char *)cmdline);
+
return 0;
}
@@ -1277,26 +1301,6 @@ void __init early_init_dt_scan_nodes(void)
if (rc)
pr_warn("No chosen node found, continuing without\n");
- /*
- * CONFIG_CMDLINE is meant to be a default in case nothing else
- * managed to set the command line, unless CONFIG_CMDLINE_FORCE
- * is set in which case we override whatever was found earlier.
- */
-#ifdef CONFIG_CMDLINE
-#if defined(CONFIG_CMDLINE_EXTEND)
- strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
- strlcat(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#elif defined(CONFIG_CMDLINE_FORCE)
- strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#else
- /* No arguments from boot loader, use kernel's cmdl */
- if (!boot_command_line[0])
- strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif
-#endif /* CONFIG_CMDLINE */
-
- pr_debug("Command line is: %s\n", boot_command_line);
-
/* Setup memory, calling early_init_dt_add_memory_arch */
early_init_dt_scan_memory();
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 7378e2f3e525..fcd029ca2eb1 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1055,14 +1055,12 @@ out:
return err;
}
-static int pcifront_xenbus_remove(struct xenbus_device *xdev)
+static void pcifront_xenbus_remove(struct xenbus_device *xdev)
{
struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);
if (pdev)
free_pdev(pdev);
-
- return 0;
}
static const struct xenbus_device_id xenpci_ids[] = {
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index e01b32d1fa17..00828f5baa97 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -498,6 +498,12 @@ static int da9211_i2c_probe(struct i2c_client *i2c)
chip->chip_irq = i2c->irq;
+ ret = da9211_regulator_init(chip);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
+ return ret;
+ }
+
if (chip->chip_irq != 0) {
ret = devm_request_threaded_irq(chip->dev, chip->chip_irq, NULL,
da9211_irq_handler,
@@ -512,11 +518,6 @@ static int da9211_i2c_probe(struct i2c_client *i2c)
dev_warn(chip->dev, "No IRQ configured\n");
}
- ret = da9211_regulator_init(chip);
-
- if (ret < 0)
- dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
-
return ret;
}
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 43b5b9377714..ae6021390143 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -1016,7 +1016,7 @@ static const struct rpmh_vreg_init_data pm8550_vreg_data[] = {
RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l8-l9"),
RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l8-l9"),
RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo, "vdd-l1-l4-l10"),
- RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l11"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo, "vdd-l11"),
RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo, "vdd-l12"),
RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l2-l13-l14"),
RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo, "vdd-l2-l13-l14"),
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index b392b9f5482e..c0f85ffb2b62 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -865,6 +865,8 @@ dcssblk_submit_bio(struct bio *bio)
unsigned long bytes_done;
bio = bio_split_to_limits(bio);
+ if (!bio)
+ return;
bytes_done = 0;
dev_info = bio->bi_bdev->bd_disk->private_data;
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 406be169173c..d1adc4b83193 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -410,13 +410,13 @@ static ssize_t qeth_dev_isolation_show(struct device *dev,
switch (card->options.isolation) {
case ISOLATION_MODE_NONE:
- return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
+ return sysfs_emit(buf, "%s\n", ATTR_QETH_ISOLATION_NONE);
case ISOLATION_MODE_FWD:
- return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD);
+ return sysfs_emit(buf, "%s\n", ATTR_QETH_ISOLATION_FWD);
case ISOLATION_MODE_DROP:
- return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP);
+ return sysfs_emit(buf, "%s\n", ATTR_QETH_ISOLATION_DROP);
default:
- return snprintf(buf, 5, "%s\n", "N/A");
+ return sysfs_emit(buf, "%s\n", "N/A");
}
}
@@ -500,9 +500,9 @@ static ssize_t qeth_hw_trap_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
if (card->info.hwtrap)
- return snprintf(buf, 5, "arm\n");
+ return sysfs_emit(buf, "arm\n");
else
- return snprintf(buf, 8, "disarm\n");
+ return sysfs_emit(buf, "disarm\n");
}
static ssize_t qeth_hw_trap_store(struct device *dev,
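
sysfs_emit() is the preferred helper for sysfs show() callbacks: it knows the output buffer is a full page and drops the hand-counted sizes the old snprintf() calls carried (each of which had to count the trailing newline by hand). A kernel-style sketch of a show() using it (attribute and state names are illustrative):

	#include <linux/device.h>
	#include <linux/sysfs.h>

	/* Sketch: show() via sysfs_emit(), which bounds output to PAGE_SIZE
	 * and warns if buf is not the page-aligned sysfs buffer.
	 */
	static ssize_t example_trap_show(struct device *dev,
					 struct device_attribute *attr, char *buf)
	{
		bool armed = true;	/* illustrative driver state */

		return sysfs_emit(buf, "%s\n", armed ? "arm" : "disarm");
	}
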
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 41ba22f6c7f0..e9c2d306ed87 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -162,7 +162,7 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
if (hisi_hba->hw->slot_index_alloc ||
- slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
+ slot_idx < HISI_SAS_RESERVED_IPTT) {
spin_lock(&hisi_hba->lock);
hisi_sas_slot_index_clear(hisi_hba, slot_idx);
spin_unlock(&hisi_hba->lock);
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 1ccce706167a..5e80225b5308 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -889,7 +889,9 @@ void sas_ata_device_link_abort(struct domain_device *device, bool force_reset)
{
struct ata_port *ap = device->sata_dev.ap;
struct ata_link *link = &ap->link;
+ unsigned long flags;
+ spin_lock_irqsave(ap->lock, flags);
device->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
device->sata_dev.fis[3] = ATA_ABORTED; /* tf error */
@@ -897,6 +899,7 @@ void sas_ata_device_link_abort(struct domain_device *device, bool force_reset)
if (force_reset)
link->eh_info.action |= ATA_EH_RESET;
ata_link_abort(link);
+ spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(sas_ata_device_link_abort);
diff --git a/drivers/scsi/mpi3mr/Makefile b/drivers/scsi/mpi3mr/Makefile
index ef86ca46646b..3bf8cf34e1c3 100644
--- a/drivers/scsi/mpi3mr/Makefile
+++ b/drivers/scsi/mpi3mr/Makefile
@@ -1,5 +1,5 @@
# mpi3mr makefile
-obj-m += mpi3mr.o
+obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr.o
mpi3mr-y += mpi3mr_os.o \
mpi3mr_fw.o \
mpi3mr_app.o \
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 0c4aabaefdcc..286a44506578 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -3633,8 +3633,7 @@ int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
int i, retval = 0, capb = 0;
u16 message_control;
u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
- (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
- (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
+ ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
if (pci_enable_device_mem(pdev)) {
ioc_err(mrioc, "pci_enable_device_mem: failed\n");
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 4e981ccaac41..69061545d9d2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2992,8 +2992,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
struct sysinfo s;
u64 coherent_dma_mask, dma_mask;
- if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 ||
- dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32)) {
+ if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) {
ioc->dma_mask = 32;
coherent_dma_mask = dma_mask = DMA_BIT_MASK(32);
/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index cc6953809a24..8553277effb3 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1511,8 +1511,6 @@ static int inquiry_vpd_b0(unsigned char *arr)
put_unaligned_be64(sdebug_write_same_length, &arr[32]);
return 0x3c; /* Mandatory page length for Logical Block Provisioning */
-
- return sizeof(vpdb0_data);
}
/* Block device characteristics VPD page (SBC-3) */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index a7960ad2d386..2aa2c2aee6e7 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -231,6 +231,11 @@ scsi_abort_command(struct scsi_cmnd *scmd)
struct Scsi_Host *shost = sdev->host;
unsigned long flags;
+ if (!shost->hostt->eh_abort_handler) {
+ /* No abort handler, fail command directly */
+ return FAILED;
+ }
+
if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
/*
* Retry after abort failed, escalate to next level.
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 13cfd3e317cc..b9b97300e3b3 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1677,6 +1677,13 @@ static const char *iscsi_session_state_name(int state)
return name;
}
+static char *iscsi_session_target_state_name[] = {
+ [ISCSI_SESSION_TARGET_UNBOUND] = "UNBOUND",
+ [ISCSI_SESSION_TARGET_ALLOCATED] = "ALLOCATED",
+ [ISCSI_SESSION_TARGET_SCANNED] = "SCANNED",
+ [ISCSI_SESSION_TARGET_UNBINDING] = "UNBINDING",
+};
+
int iscsi_session_chkready(struct iscsi_cls_session *session)
{
int err;
@@ -1786,9 +1793,13 @@ static int iscsi_user_scan_session(struct device *dev, void *data)
if ((scan_data->channel == SCAN_WILD_CARD ||
scan_data->channel == 0) &&
(scan_data->id == SCAN_WILD_CARD ||
- scan_data->id == id))
+ scan_data->id == id)) {
scsi_scan_target(&session->dev, 0, id,
scan_data->lun, scan_data->rescan);
+ spin_lock_irqsave(&session->lock, flags);
+ session->target_state = ISCSI_SESSION_TARGET_SCANNED;
+ spin_unlock_irqrestore(&session->lock, flags);
+ }
}
user_scan_exit:
@@ -1961,31 +1972,41 @@ static void __iscsi_unbind_session(struct work_struct *work)
struct iscsi_cls_host *ihost = shost->shost_data;
unsigned long flags;
unsigned int target_id;
+ bool remove_target = true;
ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n");
/* Prevent new scans and make sure scanning is not in progress */
mutex_lock(&ihost->mutex);
spin_lock_irqsave(&session->lock, flags);
- if (session->target_id == ISCSI_MAX_TARGET) {
+ if (session->target_state == ISCSI_SESSION_TARGET_ALLOCATED) {
+ remove_target = false;
+ } else if (session->target_state != ISCSI_SESSION_TARGET_SCANNED) {
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
- goto unbind_session_exit;
+ ISCSI_DBG_TRANS_SESSION(session,
+ "Skipping target unbinding: Session is unbound/unbinding.\n");
+ return;
}
+ session->target_state = ISCSI_SESSION_TARGET_UNBINDING;
target_id = session->target_id;
session->target_id = ISCSI_MAX_TARGET;
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
- scsi_remove_target(&session->dev);
+ if (remove_target)
+ scsi_remove_target(&session->dev);
if (session->ida_used)
ida_free(&iscsi_sess_ida, target_id);
-unbind_session_exit:
iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
+
+ spin_lock_irqsave(&session->lock, flags);
+ session->target_state = ISCSI_SESSION_TARGET_UNBOUND;
+ spin_unlock_irqrestore(&session->lock, flags);
}
static void __iscsi_destroy_session(struct work_struct *work)
@@ -2062,6 +2083,9 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
session->ida_used = true;
} else
session->target_id = target_id;
+ spin_lock_irqsave(&session->lock, flags);
+ session->target_state = ISCSI_SESSION_TARGET_ALLOCATED;
+ spin_unlock_irqrestore(&session->lock, flags);
dev_set_name(&session->dev, "session%u", session->sid);
err = device_add(&session->dev);
@@ -4370,6 +4394,19 @@ iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0);
iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0);
static ssize_t
+show_priv_session_target_state(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+
+ return sysfs_emit(buf, "%s\n",
+ iscsi_session_target_state_name[session->target_state]);
+}
+
+static ISCSI_CLASS_ATTR(priv_sess, target_state, S_IRUGO,
+ show_priv_session_target_state, NULL);
+
+static ssize_t
show_priv_session_state(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -4471,6 +4508,7 @@ static struct attribute *iscsi_session_attrs[] = {
&dev_attr_sess_boot_target.attr,
&dev_attr_priv_sess_recovery_tmo.attr,
&dev_attr_priv_sess_state.attr,
+ &dev_attr_priv_sess_target_state.attr,
&dev_attr_priv_sess_creator.attr,
&dev_attr_sess_chap_out_idx.attr,
&dev_attr_sess_chap_in_idx.attr,
@@ -4584,6 +4622,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
return S_IRUGO | S_IWUSR;
else if (attr == &dev_attr_priv_sess_state.attr)
return S_IRUGO;
+ else if (attr == &dev_attr_priv_sess_target_state.attr)
+ return S_IRUGO;
else if (attr == &dev_attr_priv_sess_creator.attr)
return S_IRUGO;
else if (attr == &dev_attr_priv_sess_target_id.attr)
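
The iscsi_session_target_state_name[] table added earlier in this hunk uses the enum-to-string idiom: designated initializers pin each string to its enum value, so the table stays correct even if the enum is later reordered. A runnable user-space sketch with illustrative names:

	#include <stdio.h>

	enum target_state {
		TARGET_UNBOUND,
		TARGET_ALLOCATED,
		TARGET_SCANNED,
		TARGET_UNBINDING,
	};

	/* Designated initializers index by enum value, not position. */
	static const char *const target_state_name[] = {
		[TARGET_UNBOUND]	= "UNBOUND",
		[TARGET_ALLOCATED]	= "ALLOCATED",
		[TARGET_SCANNED]	= "SCANNED",
		[TARGET_UNBINDING]	= "UNBINDING",
	};

	int main(void)
	{
		printf("%s\n", target_state_name[TARGET_SCANNED]);
		return 0;
	}
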
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index d7a84c0bfaeb..22705eb781b0 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1823,6 +1823,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
ret = storvsc_do_io(dev, cmd_request, get_cpu());
put_cpu();
+ if (ret)
+ scsi_dma_unmap(scmnd);
+
if (ret == -EAGAIN) {
/* no more space */
ret = SCSI_MLQUEUE_DEVICE_BUSY;
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 66b316d173b0..71a3bb83984c 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -995,7 +995,7 @@ static int scsifront_suspend(struct xenbus_device *dev)
return err;
}
-static int scsifront_remove(struct xenbus_device *dev)
+static void scsifront_remove(struct xenbus_device *dev)
{
struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
@@ -1011,8 +1011,6 @@ static int scsifront_remove(struct xenbus_device *dev)
scsifront_free_ring(info);
scsi_host_put(info->host);
-
- return 0;
}
static void scsifront_disconnect(struct vscsifrnt_info *info)
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index 520b4cc69cdc..91db3c973167 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -177,7 +177,10 @@
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
((op)->data.nbytes >> 16) & 0xffff) | \
- FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, (op)->dummy.nbytes * 8))
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
+ (op)->dummy.buswidth != 0 ? \
+ (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
+ 0))
#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \
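
The corrected macro converts the dummy phase from bytes to bus clock cycles, cycles = nbytes * 8 / buswidth, and guards the division against ops with no dummy phase. A user-space sketch of the arithmetic:

	#include <stdio.h>

	/* Dummy phase length in clock cycles: nbytes * 8 bits, divided by
	 * how many bits move per cycle (the bus width). Guard the divide
	 * against ops that carry no dummy phase (buswidth == 0).
	 */
	static unsigned int dummy_cycles(unsigned int nbytes, unsigned int buswidth)
	{
		return buswidth ? (nbytes * 8) / buswidth : 0;
	}

	int main(void)
	{
		printf("%u\n", dummy_cycles(3, 1));	/* 24 cycles, 1-bit bus */
		printf("%u\n", dummy_cycles(3, 4));	/* 6 cycles, quad bus */
		printf("%u\n", dummy_cycles(0, 0));	/* no dummy phase */
		return 0;
	}
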
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 6de8360e5c2a..9eab6c20dbc5 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -1253,6 +1253,11 @@ static int mtk_spi_probe(struct platform_device *pdev)
dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
addr_bits, ret);
+ ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
+ IRQF_TRIGGER_NONE, dev_name(dev), master);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register irq\n");
+
pm_runtime_enable(dev);
ret = devm_spi_register_master(dev, master);
@@ -1261,13 +1266,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret, "failed to register master\n");
}
- ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
- IRQF_TRIGGER_NONE, dev_name(dev), master);
- if (ret) {
- pm_runtime_disable(dev);
- return dev_err_probe(dev, ret, "failed to register irq\n");
- }
-
return 0;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3cc7bb4d03de..15f174f4e056 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2310,7 +2310,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
spi->max_speed_hz = value;
- if (!of_property_read_u16(nc, "spi-cs-setup-ns", &cs_setup)) {
+ if (!of_property_read_u16(nc, "spi-cs-setup-delay-ns", &cs_setup)) {
spi->cs_setup.value = cs_setup;
spi->cs_setup.unit = SPI_DELAY_UNIT_NSECS;
}
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 6313e7d0cdf8..1935ca613447 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -68,7 +68,7 @@ static_assert(N_SPI_MINORS > 0 && N_SPI_MINORS <= 256);
struct spidev_data {
dev_t devt;
- spinlock_t spi_lock;
+ struct mutex spi_lock;
struct spi_device *spi;
struct list_head device_entry;
@@ -95,9 +95,8 @@ spidev_sync(struct spidev_data *spidev, struct spi_message *message)
int status;
struct spi_device *spi;
- spin_lock_irq(&spidev->spi_lock);
+ mutex_lock(&spidev->spi_lock);
spi = spidev->spi;
- spin_unlock_irq(&spidev->spi_lock);
if (spi == NULL)
status = -ESHUTDOWN;
@@ -107,6 +106,7 @@ spidev_sync(struct spidev_data *spidev, struct spi_message *message)
if (status == 0)
status = message->actual_length;
+ mutex_unlock(&spidev->spi_lock);
return status;
}
@@ -359,12 +359,12 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* we issue this ioctl.
*/
spidev = filp->private_data;
- spin_lock_irq(&spidev->spi_lock);
+ mutex_lock(&spidev->spi_lock);
spi = spi_dev_get(spidev->spi);
- spin_unlock_irq(&spidev->spi_lock);
-
- if (spi == NULL)
+ if (spi == NULL) {
+ mutex_unlock(&spidev->spi_lock);
return -ESHUTDOWN;
+ }
/* use the buffer lock here for triple duty:
* - prevent I/O (from us) so calling spi_setup() is safe;
@@ -508,6 +508,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
mutex_unlock(&spidev->buf_lock);
spi_dev_put(spi);
+ mutex_unlock(&spidev->spi_lock);
return retval;
}
@@ -529,12 +530,12 @@ spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
* we issue this ioctl.
*/
spidev = filp->private_data;
- spin_lock_irq(&spidev->spi_lock);
+ mutex_lock(&spidev->spi_lock);
spi = spi_dev_get(spidev->spi);
- spin_unlock_irq(&spidev->spi_lock);
-
- if (spi == NULL)
+ if (spi == NULL) {
+ mutex_unlock(&spidev->spi_lock);
return -ESHUTDOWN;
+ }
/* SPI_IOC_MESSAGE needs the buffer locked "normally" */
mutex_lock(&spidev->buf_lock);
@@ -561,6 +562,7 @@ spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
done:
mutex_unlock(&spidev->buf_lock);
spi_dev_put(spi);
+ mutex_unlock(&spidev->spi_lock);
return retval;
}
@@ -601,7 +603,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
if (!spidev->tx_buffer) {
spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
if (!spidev->tx_buffer) {
- dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
status = -ENOMEM;
goto err_find_dev;
}
@@ -610,7 +611,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
if (!spidev->rx_buffer) {
spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
if (!spidev->rx_buffer) {
- dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
status = -ENOMEM;
goto err_alloc_rx_buf;
}
@@ -640,10 +640,10 @@ static int spidev_release(struct inode *inode, struct file *filp)
spidev = filp->private_data;
filp->private_data = NULL;
- spin_lock_irq(&spidev->spi_lock);
+ mutex_lock(&spidev->spi_lock);
/* ... after we unbound from the underlying device? */
dofree = (spidev->spi == NULL);
- spin_unlock_irq(&spidev->spi_lock);
+ mutex_unlock(&spidev->spi_lock);
/* last close? */
spidev->users--;
@@ -776,7 +776,7 @@ static int spidev_probe(struct spi_device *spi)
/* Initialize the driver data */
spidev->spi = spi;
- spin_lock_init(&spidev->spi_lock);
+ mutex_init(&spidev->spi_lock);
mutex_init(&spidev->buf_lock);
INIT_LIST_HEAD(&spidev->device_entry);
@@ -821,9 +821,9 @@ static void spidev_remove(struct spi_device *spi)
/* prevent new opens */
mutex_lock(&device_list_lock);
/* make sure ops on existing fds can abort cleanly */
- spin_lock_irq(&spidev->spi_lock);
+ mutex_lock(&spidev->spi_lock);
spidev->spi = NULL;
- spin_unlock_irq(&spidev->spi_lock);
+ mutex_unlock(&spidev->spi_lock);
list_del(&spidev->device_entry);
device_destroy(spidev_class, spidev->devt);
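
The spidev switch from a spinlock to a mutex is forced by scope: the lock is now held across spi_sync(), which sleeps, and sleeping under a spinlock (taken with IRQs off, no less) is illegal. A kernel-style sketch of the resulting pattern, assuming a driver-private struct of the same shape:

	#include <linux/errno.h>
	#include <linux/mutex.h>
	#include <linux/spi/spi.h>

	struct example_data {
		struct mutex spi_lock;
		struct spi_device *spi;	/* cleared by remove() under spi_lock */
	};

	/* Sketch: keep ->spi stable across the whole sleeping transfer.
	 * remove() takes the same mutex before clearing ->spi, so in-flight
	 * calls either finish or observe NULL and bail out.
	 */
	static int example_sync(struct example_data *d, struct spi_message *msg)
	{
		int status;

		mutex_lock(&d->spi_lock);
		if (d->spi == NULL)
			status = -ESHUTDOWN;		/* device was unbound */
		else
			status = spi_sync(d->spi, msg);	/* may sleep */
		mutex_unlock(&d->spi_lock);

		return status;
	}
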
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
index 8c42e7662033..92ed1213fe37 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
@@ -172,6 +172,7 @@ static const struct attribute_group fivr_attribute_group = {
RFIM_SHOW(rfi_restriction_run_busy, 1)
RFIM_SHOW(rfi_restriction_err_code, 1)
RFIM_SHOW(rfi_restriction_data_rate, 1)
+RFIM_SHOW(rfi_restriction_data_rate_base, 1)
RFIM_SHOW(ddr_data_rate_point_0, 1)
RFIM_SHOW(ddr_data_rate_point_1, 1)
RFIM_SHOW(ddr_data_rate_point_2, 1)
@@ -181,11 +182,13 @@ RFIM_SHOW(rfi_disable, 1)
RFIM_STORE(rfi_restriction_run_busy, 1)
RFIM_STORE(rfi_restriction_err_code, 1)
RFIM_STORE(rfi_restriction_data_rate, 1)
+RFIM_STORE(rfi_restriction_data_rate_base, 1)
RFIM_STORE(rfi_disable, 1)
static DEVICE_ATTR_RW(rfi_restriction_run_busy);
static DEVICE_ATTR_RW(rfi_restriction_err_code);
static DEVICE_ATTR_RW(rfi_restriction_data_rate);
+static DEVICE_ATTR_RW(rfi_restriction_data_rate_base);
static DEVICE_ATTR_RO(ddr_data_rate_point_0);
static DEVICE_ATTR_RO(ddr_data_rate_point_1);
static DEVICE_ATTR_RO(ddr_data_rate_point_2);
@@ -248,6 +251,7 @@ static struct attribute *dvfs_attrs[] = {
&dev_attr_rfi_restriction_run_busy.attr,
&dev_attr_rfi_restriction_err_code.attr,
&dev_attr_rfi_restriction_data_rate.attr,
+ &dev_attr_rfi_restriction_data_rate_base.attr,
&dev_attr_ddr_data_rate_point_0.attr,
&dev_attr_ddr_data_rate_point_1.attr,
&dev_attr_ddr_data_rate_point_2.attr,
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 7c23112dc923..5bddb2f5e931 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -52,17 +52,22 @@ static DEFINE_SPINLOCK(xencons_lock);
static struct xencons_info *vtermno_to_xencons(int vtermno)
{
- struct xencons_info *entry, *n, *ret = NULL;
+ struct xencons_info *entry, *ret = NULL;
+ unsigned long flags;
- if (list_empty(&xenconsoles))
- return NULL;
+ spin_lock_irqsave(&xencons_lock, flags);
+ if (list_empty(&xenconsoles)) {
+ spin_unlock_irqrestore(&xencons_lock, flags);
+ return NULL;
+ }
- list_for_each_entry_safe(entry, n, &xenconsoles, list) {
+ list_for_each_entry(entry, &xenconsoles, list) {
if (entry->vtermno == vtermno) {
ret = entry;
break;
}
}
+ spin_unlock_irqrestore(&xencons_lock, flags);
return ret;
}
@@ -223,7 +228,7 @@ static int xen_hvm_console_init(void)
{
int r;
uint64_t v = 0;
- unsigned long gfn;
+ unsigned long gfn, flags;
struct xencons_info *info;
if (!xen_hvm_domain())
@@ -258,9 +263,9 @@ static int xen_hvm_console_init(void)
goto err;
info->vtermno = HVC_COOKIE;
- spin_lock(&xencons_lock);
+ spin_lock_irqsave(&xencons_lock, flags);
list_add_tail(&info->list, &xenconsoles);
- spin_unlock(&xencons_lock);
+ spin_unlock_irqrestore(&xencons_lock, flags);
return 0;
err:
@@ -283,6 +288,7 @@ static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
static int xen_pv_console_init(void)
{
struct xencons_info *info;
+ unsigned long flags;
if (!xen_pv_domain())
return -ENODEV;
@@ -299,9 +305,9 @@ static int xen_pv_console_init(void)
/* already configured */
return 0;
}
- spin_lock(&xencons_lock);
+ spin_lock_irqsave(&xencons_lock, flags);
xencons_info_pv_init(info, HVC_COOKIE);
- spin_unlock(&xencons_lock);
+ spin_unlock_irqrestore(&xencons_lock, flags);
return 0;
}
@@ -309,6 +315,7 @@ static int xen_pv_console_init(void)
static int xen_initial_domain_console_init(void)
{
struct xencons_info *info;
+ unsigned long flags;
if (!xen_initial_domain())
return -ENODEV;
@@ -323,9 +330,9 @@ static int xen_initial_domain_console_init(void)
info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
info->vtermno = HVC_COOKIE;
- spin_lock(&xencons_lock);
+ spin_lock_irqsave(&xencons_lock, flags);
list_add_tail(&info->list, &xenconsoles);
- spin_unlock(&xencons_lock);
+ spin_unlock_irqrestore(&xencons_lock, flags);
return 0;
}
@@ -380,10 +387,12 @@ static void xencons_free(struct xencons_info *info)
static int xen_console_remove(struct xencons_info *info)
{
+ unsigned long flags;
+
xencons_disconnect_backend(info);
- spin_lock(&xencons_lock);
+ spin_lock_irqsave(&xencons_lock, flags);
list_del(&info->list);
- spin_unlock(&xencons_lock);
+ spin_unlock_irqrestore(&xencons_lock, flags);
if (info->xbdev != NULL)
xencons_free(info);
else {
@@ -394,9 +403,9 @@ static int xen_console_remove(struct xencons_info *info)
return 0;
}
-static int xencons_remove(struct xenbus_device *dev)
+static void xencons_remove(struct xenbus_device *dev)
{
- return xen_console_remove(dev_get_drvdata(&dev->dev));
+ xen_console_remove(dev_get_drvdata(&dev->dev));
}
static int xencons_connect_backend(struct xenbus_device *dev,
@@ -464,6 +473,7 @@ static int xencons_probe(struct xenbus_device *dev,
{
int ret, devid;
struct xencons_info *info;
+ unsigned long flags;
devid = dev->nodename[strlen(dev->nodename) - 1] - '0';
if (devid == 0)
@@ -482,9 +492,9 @@ static int xencons_probe(struct xenbus_device *dev,
ret = xencons_connect_backend(dev, info);
if (ret < 0)
goto error;
- spin_lock(&xencons_lock);
+ spin_lock_irqsave(&xencons_lock, flags);
list_add_tail(&info->list, &xenconsoles);
- spin_unlock(&xencons_lock);
+ spin_unlock_irqrestore(&xencons_lock, flags);
return 0;
@@ -584,10 +594,12 @@ static int __init xen_hvc_init(void)
info->hvc = hvc_alloc(HVC_COOKIE, info->irq, ops, 256);
if (IS_ERR(info->hvc)) {
+ unsigned long flags;
+
r = PTR_ERR(info->hvc);
- spin_lock(&xencons_lock);
+ spin_lock_irqsave(&xencons_lock, flags);
list_del(&info->list);
- spin_unlock(&xencons_lock);
+ spin_unlock_irqrestore(&xencons_lock, flags);
if (info->irq)
unbind_from_irqhandler(info->irq, NULL);
kfree(info);
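
Every xencons_lock site now uses spin_lock_irqsave()/spin_unlock_irqrestore(), the safe variant when callers may arrive with interrupts in an unknown state. A kernel-style sketch of the guarded list lookup, with illustrative names:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct example_entry {
		struct list_head list;
		int id;
	};

	static DEFINE_SPINLOCK(example_lock);
	static LIST_HEAD(example_list);

	/* Sketch: walk the list under the lock, saving and restoring the
	 * caller's interrupt state so the helper is safe from any context.
	 */
	static struct example_entry *example_find(int id)
	{
		struct example_entry *entry, *ret = NULL;
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		list_for_each_entry(entry, &example_list, list) {
			if (entry->id == id) {
				ret = entry;
				break;
			}
		}
		spin_unlock_irqrestore(&example_lock, flags);

		return ret;
	}
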
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index e18c9f4463ec..bda61be5f035 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -6056,6 +6056,14 @@ void ufshcd_schedule_eh_work(struct ufs_hba *hba)
}
}
+static void ufshcd_force_error_recovery(struct ufs_hba *hba)
+{
+ spin_lock_irq(hba->host->host_lock);
+ hba->force_reset = true;
+ ufshcd_schedule_eh_work(hba);
+ spin_unlock_irq(hba->host->host_lock);
+}
+
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
down_write(&hba->clk_scaling_lock);
@@ -9083,6 +9091,15 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (!hba->dev_info.b_rpm_dev_flush_capable) {
ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+ if (ret && pm_op != UFS_SHUTDOWN_PM) {
+ /*
+ * If an error is returned in the suspend flow, I/O will hang.
+ * Trigger the error handler and abort suspend for
+ * error recovery.
+ */
+ ufshcd_force_error_recovery(hba);
+ ret = -EBUSY;
+ }
if (ret)
goto enable_scaling;
}
@@ -9094,6 +9111,15 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/
check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
+ if (ret && pm_op != UFS_SHUTDOWN_PM) {
+ /*
+ * If an error is returned in the suspend flow, I/O will hang.
+ * Trigger the error handler and abort suspend for
+ * error recovery.
+ */
+ ufshcd_force_error_recovery(hba);
+ ret = -EBUSY;
+ }
if (ret)
goto set_dev_active;
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 60e8174686a1..d7c8461976ce 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -207,7 +207,7 @@ static int ulpi_read_id(struct ulpi *ulpi)
/* Test the interface */
ret = ulpi_write(ulpi, ULPI_SCRATCH, 0xaa);
if (ret < 0)
- return ret;
+ goto err;
ret = ulpi_read(ulpi, ULPI_SCRATCH);
if (ret < 0)
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index 8607d4c23283..0745e9f11b2e 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 789976567f9f..89dcfac01235 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1727,6 +1727,7 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
else if (!ret)
dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+ dep->flags &= ~DWC3_EP_DELAY_STOP;
return ret;
}
@@ -3732,8 +3733,10 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
if (dep->number <= 1 && dwc->ep0state != EP0_DATA_PHASE)
return;
+ if (interrupt && (dep->flags & DWC3_EP_DELAY_STOP))
+ return;
+
if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
- (dep->flags & DWC3_EP_DELAY_STOP) ||
(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
return;
diff --git a/drivers/usb/fotg210/fotg210-core.c b/drivers/usb/fotg210/fotg210-core.c
index 8a54edf921ac..ee740a6da463 100644
--- a/drivers/usb/fotg210/fotg210-core.c
+++ b/drivers/usb/fotg210/fotg210-core.c
@@ -144,10 +144,7 @@ static struct platform_driver fotg210_driver = {
static int __init fotg210_init(void)
{
- if (usb_disabled())
- return -ENODEV;
-
- if (IS_ENABLED(CONFIG_USB_FOTG210_HCD))
+ if (IS_ENABLED(CONFIG_USB_FOTG210_HCD) && !usb_disabled())
fotg210_hcd_init();
return platform_driver_register(&fotg210_driver);
}
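The fotg210 change folds the usb_disabled() test into the HCD branch, so the platform driver still registers when USB is disabled and only the HCD init is skipped. IS_ENABLED() resolves to a compile-time 0 or 1 for =y/=m versus unset, so the disabled branch is discarded entirely. A small sketch with placeholder names:

#include <linux/kconfig.h>

/* Sketch: IS_ENABLED(CONFIG_FOO) is 1 for =y or =m and 0 otherwise,
 * evaluated at compile time, so the compiler drops the dead branch. */
static int __init demo_init(void)
{
	if (IS_ENABLED(CONFIG_DEMO_HCD) && !demo_disabled())
		demo_hcd_init();
	return demo_register_driver();
}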
diff --git a/drivers/usb/fotg210/fotg210-udc.c b/drivers/usb/fotg210/fotg210-udc.c
index 66e1b7ee3346..87cca81bf4ac 100644
--- a/drivers/usb/fotg210/fotg210-udc.c
+++ b/drivers/usb/fotg210/fotg210-udc.c
@@ -1201,6 +1201,8 @@ int fotg210_udc_probe(struct platform_device *pdev)
dev_info(dev, "found and initialized PHY\n");
}
+ ret = -ENOMEM;
+
for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
fotg210->ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
if (!fotg210->ep[i])
diff --git a/drivers/usb/host/xen-hcd.c b/drivers/usb/host/xen-hcd.c
index de1b09158318..46fdab940092 100644
--- a/drivers/usb/host/xen-hcd.c
+++ b/drivers/usb/host/xen-hcd.c
@@ -1530,15 +1530,13 @@ static void xenhcd_backend_changed(struct xenbus_device *dev,
}
}
-static int xenhcd_remove(struct xenbus_device *dev)
+static void xenhcd_remove(struct xenbus_device *dev)
{
struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
xenhcd_destroy_rings(info);
usb_put_hcd(hcd);
-
- return 0;
}
static int xenhcd_probe(struct xenbus_device *dev,
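xenhcd_remove() is one of several conversions in this patch (see also hvc_xen, xen-fbfront, pvcalls-front, pvcalls-back, xen-pciback and xen-scsiback below) that change xenbus .remove callbacks from int to void: the bus core has no way to act on a failure at remove time, so the return value carried no information. The resulting callback shape, sketched with placeholder names:

static void demo_remove(struct xenbus_device *dev)
{
	struct demo_info *info = dev_get_drvdata(&dev->dev);

	demo_teardown(info);	/* errors here cannot stop removal anyway */
}

static struct xenbus_driver demo_driver = {
	.ids = demo_ids,
	.probe = demo_probe,
	.remove = demo_remove,
};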
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 6af9fdbb86b7..058fbe28107e 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -116,8 +116,9 @@ int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
int inlen);
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
- bool *change_map);
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb);
+ bool *change_map, unsigned int asid);
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+ unsigned int asid);
void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
#define mlx5_vdpa_warn(__dev, format, ...) \
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index a639b9208d41..0a1e0b0dc37e 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -311,7 +311,6 @@ static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8
u64 st;
u64 sz;
int err;
- int i = 0;
st = start;
while (size) {
@@ -336,7 +335,6 @@ static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8
mr->num_directs++;
mr->num_klms++;
st += sz;
- i++;
}
list_splice_tail(&tmp, &mr->head);
return 0;
@@ -511,7 +509,8 @@ out:
mutex_unlock(&mr->mkey_mtx);
}
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+ struct vhost_iotlb *iotlb, unsigned int asid)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
int err;
@@ -519,42 +518,49 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
if (mr->initialized)
return 0;
- if (iotlb)
- err = create_user_mr(mvdev, iotlb);
- else
- err = create_dma_mr(mvdev, mr);
+ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+ if (iotlb)
+ err = create_user_mr(mvdev, iotlb);
+ else
+ err = create_dma_mr(mvdev, mr);
- if (err)
- return err;
+ if (err)
+ return err;
+ }
- err = dup_iotlb(mvdev, iotlb);
- if (err)
- goto out_err;
+ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) {
+ err = dup_iotlb(mvdev, iotlb);
+ if (err)
+ goto out_err;
+ }
mr->initialized = true;
return 0;
out_err:
- if (iotlb)
- destroy_user_mr(mvdev, mr);
- else
- destroy_dma_mr(mvdev, mr);
+ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+ if (iotlb)
+ destroy_user_mr(mvdev, mr);
+ else
+ destroy_dma_mr(mvdev, mr);
+ }
return err;
}
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+ unsigned int asid)
{
int err;
mutex_lock(&mvdev->mr.mkey_mtx);
- err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+ err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
mutex_unlock(&mvdev->mr.mkey_mtx);
return err;
}
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
- bool *change_map)
+ bool *change_map, unsigned int asid)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
int err = 0;
@@ -566,7 +572,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
*change_map = true;
}
if (!*change_map)
- err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+ err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
mutex_unlock(&mr->mkey_mtx);
return err;
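Taken together, the mr.c hunks make memory registration ASID-aware: the memory key is (re)created only when the caller's address space backs the data-VQ group, and the control-VQ iotlb duplicate is refreshed only when it backs the CVQ group. A condensed sketch of the dispatch, reusing the patch's names (the helper itself is hypothetical):

static bool asid_backs_group(struct mlx5_vdpa_dev *mvdev,
			     unsigned int group, unsigned int asid)
{
	/* true when 'asid' is the address space bound to 'group':
	 * MLX5_VDPA_DATAVQ_GROUP gates MR creation above,
	 * MLX5_VDPA_CVQ_GROUP gates the control-VQ iotlb duplication. */
	return mvdev->group2asid[group] == asid;
}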
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 90913365def4..3a6dbbc6440d 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1468,11 +1468,13 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
eth_broadcast_addr(dmac_c);
ether_addr_copy(dmac_v, mac);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+ if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
+ }
if (tagged) {
MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, vid);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, vid);
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
@@ -1684,7 +1686,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
/* Need recreate the flow table entry, so that the packet could forward back
*/
- mac_vlan_del(ndev, ndev->config.mac, 0, false);
+ mac_vlan_del(ndev, mac_back, 0, false);
if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) {
mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
@@ -1821,6 +1823,9 @@ static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
size_t read;
u16 id;
+ if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)))
+ return status;
+
switch (cmd) {
case VIRTIO_NET_CTRL_VLAN_ADD:
read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
@@ -2389,7 +2394,8 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
}
}
-static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ struct vhost_iotlb *iotlb, unsigned int asid)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
@@ -2401,7 +2407,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
teardown_driver(ndev);
mlx5_vdpa_destroy_mr(mvdev);
- err = mlx5_vdpa_create_mr(mvdev, iotlb);
+ err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
if (err)
goto err_mr;
@@ -2582,7 +2588,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
++mvdev->generation;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
- if (mlx5_vdpa_create_mr(mvdev, NULL))
+ if (mlx5_vdpa_create_mr(mvdev, NULL, 0))
mlx5_vdpa_warn(mvdev, "create MR failed\n");
}
up_write(&ndev->reslock);
@@ -2618,41 +2624,20 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
return mvdev->generation;
}
-static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
-{
- u64 start = 0ULL, last = 0ULL - 1;
- struct vhost_iotlb_map *map;
- int err = 0;
-
- spin_lock(&mvdev->cvq.iommu_lock);
- vhost_iotlb_reset(mvdev->cvq.iotlb);
-
- for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
- map = vhost_iotlb_itree_next(map, start, last)) {
- err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
- map->last, map->addr, map->perm);
- if (err)
- goto out;
- }
-
-out:
- spin_unlock(&mvdev->cvq.iommu_lock);
- return err;
-}
-
-static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+ unsigned int asid)
{
bool change_map;
int err;
- err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
+ err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
if (err) {
mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
return err;
}
if (change_map)
- err = mlx5_vdpa_change_map(mvdev, iotlb);
+ err = mlx5_vdpa_change_map(mvdev, iotlb, asid);
return err;
}
@@ -2665,16 +2650,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
int err = -EINVAL;
down_write(&ndev->reslock);
- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
- err = set_map_data(mvdev, iotlb);
- if (err)
- goto out;
- }
-
- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
- err = set_map_control(mvdev, iotlb);
-
-out:
+ err = set_map_data(mvdev, iotlb, asid);
up_write(&ndev->reslock);
return err;
}
@@ -2840,8 +2816,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
int i;
down_write(&ndev->reslock);
- mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
ndev->nb_registered = false;
+ mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
flush_workqueue(ndev->mvdev.wq);
for (i = 0; i < ndev->cur_num_vqs; i++) {
mvq = &ndev->vqs[i];
@@ -3019,7 +2995,7 @@ static void update_carrier(struct work_struct *work)
else
ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
- if (ndev->config_cb.callback)
+ if (ndev->nb_registered && ndev->config_cb.callback)
ndev->config_cb.callback(ndev->config_cb.private);
kfree(wqent);
@@ -3036,21 +3012,13 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
switch (eqe->sub_type) {
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
- down_read(&ndev->reslock);
- if (!ndev->nb_registered) {
- up_read(&ndev->reslock);
- return NOTIFY_DONE;
- }
wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
- if (!wqent) {
- up_read(&ndev->reslock);
+ if (!wqent)
return NOTIFY_DONE;
- }
wqent->mvdev = &ndev->mvdev;
INIT_WORK(&wqent->work, update_carrier);
queue_work(ndev->mvdev.wq, &wqent->work);
- up_read(&ndev->reslock);
ret = NOTIFY_OK;
break;
default:
@@ -3185,7 +3153,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
goto err_mpfs;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
- err = mlx5_vdpa_create_mr(mvdev, NULL);
+ err = mlx5_vdpa_create_mr(mvdev, NULL, 0);
if (err)
goto err_res;
}
@@ -3237,8 +3205,8 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
struct workqueue_struct *wq;
if (ndev->nb_registered) {
- mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
ndev->nb_registered = false;
+ mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
}
wq = mvdev->wq;
mvdev->wq = NULL;
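Three of the mlx5_vnet.c hunks adjust teardown ordering around the events notifier: nb_registered is cleared before mlx5_notifier_unregister() in both the suspend and device-del paths, and update_carrier() now re-checks the flag before invoking the config callback, so a carrier work item already queued cannot call back into a device that is going away. The ordering, sketched as one hypothetical helper using names from the patch:

static void demo_stop_events(struct mlx5_vdpa_net *ndev)
{
	ndev->nb_registered = false;		/* 1: queued work re-checks this */
	mlx5_notifier_unregister(ndev->mvdev.mdev, &ndev->nb);	/* 2: no new events */
	flush_workqueue(ndev->mvdev.wq);	/* 3: drain work already queued */
}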
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index febdc99b51a7..8ef7aa1365cc 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -855,7 +855,7 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
features_device = vdev->config->get_device_features(vdev);
- if (nla_put_u64_64bit(msg, VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES, features_device,
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device,
VDPA_ATTR_PAD))
return -EMSGSIZE;
@@ -935,7 +935,6 @@ static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
{
struct virtio_net_config config = {};
u64 features;
- u16 max_vqp;
u8 status;
int err;
@@ -946,15 +945,15 @@ static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
}
vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
- max_vqp = __virtio16_to_cpu(true, config.max_virtqueue_pairs);
- if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
- return -EMSGSIZE;
-
features = vdev->config->get_driver_features(vdev);
if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
features, VDPA_ATTR_PAD))
return -EMSGSIZE;
+ err = vdpa_dev_net_mq_config_fill(msg, features, &config);
+ if (err)
+ return err;
+
if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
return -EMSGSIZE;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index b071f0d842fb..cb88891b44a8 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -67,8 +67,7 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
- vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
- VDPASIM_QUEUE_MAX, false,
+ vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, false,
(struct vring_desc *)(uintptr_t)vq->desc_addr,
(struct vring_avail *)
(uintptr_t)vq->driver_addr,
@@ -690,7 +689,9 @@ static void vdpasim_free(struct vdpa_device *vdpa)
}
kvfree(vdpasim->buffer);
- vhost_iotlb_free(vdpasim->iommu);
+ for (i = 0; i < vdpasim->dev_attr.nas; i++)
+ vhost_iotlb_reset(&vdpasim->iommu[i]);
+ kfree(vdpasim->iommu);
kfree(vdpasim->vqs);
kfree(vdpasim->config);
}
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index c6db1a1baf76..f745926237a8 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -427,8 +427,10 @@ static int __init vdpasim_blk_init(void)
int ret;
ret = device_register(&vdpasim_blk_mgmtdev);
- if (ret)
+ if (ret) {
+ put_device(&vdpasim_blk_mgmtdev);
return ret;
+ }
ret = vdpa_mgmtdev_register(&mgmt_dev);
if (ret)
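The vdpasim_blk hunk (and the matching vdpasim_net one below) applies a driver-core reference rule: device_register() takes a reference on the device even when it fails, so the error path must drop it with put_device() rather than leaking it or freeing the structure directly. Sketch, with demo_mgmtdev as a placeholder struct device:

static int __init demo_init(void)
{
	int ret = device_register(&demo_mgmtdev);

	if (ret) {
		/* a failed device_register() still holds a reference;
		 * put_device() releases it via the release() callback */
		put_device(&demo_mgmtdev);
		return ret;
	}
	return 0;
}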
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index c3cb225ea469..584b975a98a7 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -62,6 +62,9 @@ static bool receive_filter(struct vdpasim *vdpasim, size_t len)
if (len < ETH_ALEN + hdr_len)
return false;
+ if (is_broadcast_ether_addr(vdpasim->buffer + hdr_len) ||
+ is_multicast_ether_addr(vdpasim->buffer + hdr_len))
+ return true;
if (!strncmp(vdpasim->buffer + hdr_len, vio_config->mac, ETH_ALEN))
return true;
@@ -305,8 +308,10 @@ static int __init vdpasim_net_init(void)
int ret;
ret = device_register(&vdpasim_net_mgmtdev);
- if (ret)
+ if (ret) {
+ put_device(&vdpasim_net_mgmtdev);
return ret;
+ }
ret = vdpa_mgmtdev_register(&mgmt_dev);
if (ret)
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 0dd3c1f291da..0c3b48616a9f 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -1440,6 +1440,9 @@ static bool vduse_validate_config(struct vduse_dev_config *config)
if (config->config_size > PAGE_SIZE)
return false;
+ if (config->vq_num > 0xffff)
+ return false;
+
if (!device_is_allowed(config->device_id))
return false;
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index d448db0c4de3..8fe267ca3e76 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -647,7 +647,7 @@ static void vp_vdpa_remove(struct pci_dev *pdev)
mdev = vp_vdpa_mgtdev->mdev;
vp_modern_remove(mdev);
vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
- kfree(&vp_vdpa_mgtdev->mgtdev.id_table);
+ kfree(vp_vdpa_mgtdev->mgtdev.id_table);
kfree(mdev);
kfree(vp_vdpa_mgtdev);
}
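The vp_vdpa fix is a classic pointer-versus-address slip: id_table is a pointer member, so kfree(&...->id_table) handed kfree() the address of the pointer inside the still-allocated wrapper rather than the table allocation it points to. Illustrated with placeholder types:

struct demo_mgtdev {
	const struct demo_id *id_table;	/* separately allocated */
};

static void demo_del(struct demo_mgtdev *m)
{
	kfree(m->id_table);	/* frees the table allocation */
	/* kfree(&m->id_table) would pass a pointer into *m itself,
	 * corrupting the allocator's bookkeeping */
	kfree(m);
}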
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 166044642fd5..ec32f785dfde 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -65,6 +65,10 @@ static DEFINE_IDA(vhost_vdpa_ida);
static dev_t vhost_vdpa_major;
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb *iotlb, u64 start,
+ u64 last, u32 asid);
+
static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
{
struct vhost_vdpa_as *as = container_of(iotlb, struct
@@ -135,7 +139,7 @@ static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
return -EINVAL;
hlist_del(&as->hash_link);
- vhost_iotlb_reset(&as->iotlb);
+ vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
kfree(as);
return 0;
@@ -683,10 +687,20 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
mutex_unlock(&d->mutex);
return r;
}
+static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
+ struct vhost_iotlb_map *map, u32 asid)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ if (ops->dma_map) {
+ ops->dma_unmap(vdpa, asid, map->start, map->size);
+ } else if (ops->set_map == NULL) {
+ iommu_unmap(v->domain, map->start, map->size);
+ }
+}
-static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
- struct vhost_iotlb *iotlb,
- u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+ u64 start, u64 last, u32 asid)
{
struct vhost_dev *dev = &v->vdev;
struct vhost_iotlb_map *map;
@@ -703,13 +717,13 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
unpin_user_page(page);
}
atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
+ vhost_vdpa_general_unmap(v, map, asid);
vhost_iotlb_map_free(iotlb, map);
}
}
-static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
- struct vhost_iotlb *iotlb,
- u64 start, u64 last)
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+ u64 start, u64 last, u32 asid)
{
struct vhost_iotlb_map *map;
struct vdpa_map_file *map_file;
@@ -718,20 +732,21 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
map_file = (struct vdpa_map_file *)map->opaque;
fput(map_file->file);
kfree(map_file);
+ vhost_vdpa_general_unmap(v, map, asid);
vhost_iotlb_map_free(iotlb, map);
}
}
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
- struct vhost_iotlb *iotlb,
- u64 start, u64 last)
+ struct vhost_iotlb *iotlb, u64 start,
+ u64 last, u32 asid)
{
struct vdpa_device *vdpa = v->vdpa;
if (vdpa->use_va)
- return vhost_vdpa_va_unmap(v, iotlb, start, last);
+ return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);
- return vhost_vdpa_pa_unmap(v, iotlb, start, last);
+ return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
}
static int perm_to_iommu_flags(u32 perm)
@@ -798,17 +813,12 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
const struct vdpa_config_ops *ops = vdpa->config;
u32 asid = iotlb_to_asid(iotlb);
- vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
+ vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);
- if (ops->dma_map) {
- ops->dma_unmap(vdpa, asid, iova, size);
- } else if (ops->set_map) {
+ if (ops->set_map) {
if (!v->in_batch)
ops->set_map(vdpa, asid, iotlb);
- } else {
- iommu_unmap(v->domain, iova, size);
}
-
/* If we are in the middle of batch processing, delay the free
* of AS until BATCH_END.
*/
@@ -1162,14 +1172,14 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
struct vhost_vdpa_as *as;
u32 asid;
- vhost_dev_cleanup(&v->vdev);
- kfree(v->vdev.vqs);
-
for (asid = 0; asid < v->vdpa->nas; asid++) {
as = asid_to_as(v, asid);
if (as)
vhost_vdpa_remove_as(v, asid);
}
+
+ vhost_dev_cleanup(&v->vdev);
+ kfree(v->vdev.vqs);
}
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5c9fe3c9c364..cbe72bfd2f1f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2053,7 +2053,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct vhost_dev *dev = vq->dev;
struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
struct iovec *_iov;
- u64 s = 0;
+ u64 s = 0, last = addr + len - 1;
int ret = 0;
while ((u64)len > s) {
@@ -2063,7 +2063,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
break;
}
- map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
+ map = vhost_iotlb_itree_first(umem, addr, last);
if (map == NULL || map->start > addr) {
if (umem != dev->iotlb) {
ret = -EFAULT;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index c9f5c8ea3afb..33eb941fcf15 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -1102,7 +1102,7 @@ static int iotlb_translate(const struct vringh *vrh,
struct vhost_iotlb_map *map;
struct vhost_iotlb *iotlb = vrh->iotlb;
int ret = 0;
- u64 s = 0;
+ u64 s = 0, last = addr + len - 1;
spin_lock(vrh->iotlb_lock);
@@ -1114,8 +1114,7 @@ static int iotlb_translate(const struct vringh *vrh,
break;
}
- map = vhost_iotlb_itree_first(iotlb, addr,
- addr + len - 1);
+ map = vhost_iotlb_itree_first(iotlb, addr, last);
if (!map || map->start > addr) {
ret = -EINVAL;
break;
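The vhost.c and vringh.c hunks fix the same drifting bound: inside each translation loop 'addr' advances while 'len' stays fixed, so recomputing addr + len - 1 per iteration pushed the interval end past the caller's original range. Both now compute 'last' once up front. The corrected loop shape, sketched with placeholder demo_* types:

static int demo_translate(struct demo_tlb *tlb, u64 addr, u32 len)
{
	u64 s = 0, last = addr + len - 1;	/* end of the original range */

	while ((u64)len > s) {
		struct demo_map *map = demo_tlb_first(tlb, addr, last);
		u64 size;

		if (!map || map->start > addr)
			return -EFAULT;

		size = map->size - (addr - map->start);
		s += size;
		addr += size;	/* addr moves: addr + len - 1 would now overshoot */
	}
	return 0;
}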
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index cd6f7776013a..a2b374372363 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -959,7 +959,14 @@ static int __init vhost_vsock_init(void)
VSOCK_TRANSPORT_F_H2G);
if (ret < 0)
return ret;
- return misc_register(&vhost_vsock_misc);
+
+ ret = misc_register(&vhost_vsock_misc);
+ if (ret) {
+ vsock_core_unregister(&vhost_transport.transport);
+ return ret;
+ }
+
+ return 0;
};
static void __exit vhost_vsock_exit(void)
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index df6e09f7d242..b2bed599e6c6 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -456,8 +456,8 @@ config FB_ATARI
chipset found in Ataris.
config FB_OF
- bool "Open Firmware frame buffer device support"
- depends on (FB = y) && PPC && (!PPC_PSERIES || PCI)
+ tristate "Open Firmware frame buffer device support"
+ depends on FB && PPC && (!PPC_PSERIES || PCI)
depends on !DRM_OFDRM
select APERTURE_HELPERS
select FB_CFB_FILLRECT
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 0ccf5d401ecb..d59215a4992e 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3192,8 +3192,7 @@ static void aty_init_lcd(struct atyfb_par *par, u32 bios_base)
* which we print to the screen.
*/
id = *(u8 *)par->lcd_table;
- strncpy(model, (char *)par->lcd_table+1, 24);
- model[23] = 0;
+ strscpy(model, (char *)par->lcd_table+1, sizeof(model));
width = par->lcd_width = *(u16 *)(par->lcd_table+25);
height = par->lcd_height = *(u16 *)(par->lcd_table+27);
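The atyfb and omapfb hunks are part of the tree-wide strncpy() retirement: strscpy() copies at most size - 1 bytes, always NUL-terminates, and returns -E2BIG on truncation, replacing the strncpy()-plus-manual-terminator pair. Usage sketch with placeholder names:

#include <linux/string.h>

static void demo_copy_name(char *model, size_t sz, const char *src)
{
	ssize_t n = strscpy(model, src, sz);	/* always NUL-terminated */

	if (n == -E2BIG)
		pr_debug("name truncated\n");
}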
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 0d3cee7ae726..a043a737ea9f 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -1378,8 +1378,8 @@ static struct video_board vbG200 = {
.lowlevel = &matrox_G100
};
static struct video_board vbG200eW = {
- .maxvram = 0x100000,
- .maxdisplayable = 0x800000,
+ .maxvram = 0x1000000,
+ .maxdisplayable = 0x0800000,
.accelID = FB_ACCEL_MATROX_MGAG200,
.lowlevel = &matrox_G100
};
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 17cda5765683..1f3df2055ff0 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1447,7 +1447,7 @@ static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info)
info->fbops = &omapfb_ops;
info->flags = FBINFO_FLAG_DEFAULT;
- strncpy(fix->id, MODULE_NAME, sizeof(fix->id));
+ strscpy(fix->id, MODULE_NAME, sizeof(fix->id));
info->pseudo_palette = fbdev->pseudo_palette;
@@ -1573,8 +1573,7 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev)
fbdev->ctrl = NULL;
- strncpy(name, conf->lcd.ctrl_name, sizeof(name) - 1);
- name[sizeof(name) - 1] = '\0';
+ strscpy(name, conf->lcd.ctrl_name, sizeof(name));
if (strcmp(name, "internal") == 0) {
fbdev->ctrl = fbdev->int_ctrl;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index 54b0f034c2ed..7cddb7b8ae34 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -1536,22 +1536,28 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
- struct dsi_irq_stats stats;
+ struct dsi_irq_stats *stats;
+
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats) {
+ seq_printf(s, "out of memory\n");
+ return;
+ }
spin_lock_irqsave(&dsi->irq_stats_lock, flags);
- stats = dsi->irq_stats;
+ *stats = dsi->irq_stats;
memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
dsi->irq_stats.last_reset = jiffies;
spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
seq_printf(s, "period %u ms\n",
- jiffies_to_msecs(jiffies - stats.last_reset));
+ jiffies_to_msecs(jiffies - stats->last_reset));
- seq_printf(s, "irqs %d\n", stats.irq_count);
+ seq_printf(s, "irqs %d\n", stats->irq_count);
#define PIS(x) \
- seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1])
+ seq_printf(s, "%-20s %10d\n", #x, stats->dsi_irqs[ffs(DSI_IRQ_##x)-1])
seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
PIS(VC0);
@@ -1575,10 +1581,10 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
#define PIS(x) \
seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
- stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
- stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
- stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
- stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
+ stats->vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
+ stats->vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
+ stats->vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
+ stats->vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
seq_printf(s, "-- VC interrupts --\n");
PIS(CS);
@@ -1594,7 +1600,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
#define PIS(x) \
seq_printf(s, "%-20s %10d\n", #x, \
- stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
+ stats->cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
seq_printf(s, "-- CIO interrupts --\n");
PIS(ERRSYNCESC1);
@@ -1618,6 +1624,8 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
PIS(ULPSACTIVENOT_ALL0);
PIS(ULPSACTIVENOT_ALL1);
#undef PIS
+
+ kfree(stats);
}
static void dsi1_dump_irqs(struct seq_file *s)
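The dsi.c hunks move the interrupt-statistics snapshot off the kernel stack: struct dsi_irq_stats holds per-VC and CIO counter arrays, so an on-stack copy in this debugfs path risked overrunning the limited kernel stack, and the sleepable context permits kzalloc(GFP_KERNEL) instead. The pattern, sketched with placeholder demo_* names:

static void demo_dump(struct seq_file *s)
{
	struct demo_stats *stats;
	unsigned long flags;

	stats = kzalloc(sizeof(*stats), GFP_KERNEL);	/* heap, not stack */
	if (!stats)
		return;

	spin_lock_irqsave(&demo_lock, flags);
	*stats = demo_live_stats;	/* snapshot under the lock */
	memset(&demo_live_stats, 0, sizeof(demo_live_stats));
	spin_unlock_irqrestore(&demo_lock, flags);

	seq_printf(s, "irqs %d\n", stats->irq_count);
	kfree(stats);
}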
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 8752d389e382..d7f3e6281ce4 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -67,7 +67,7 @@ MODULE_PARM_DESC(video,
"Video memory size in MB, width, height in pixels (default 2,800,600)");
static void xenfb_make_preferred_console(void);
-static int xenfb_remove(struct xenbus_device *);
+static void xenfb_remove(struct xenbus_device *);
static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
static void xenfb_disconnect_backend(struct xenfb_info *);
@@ -523,7 +523,7 @@ static int xenfb_resume(struct xenbus_device *dev)
return xenfb_connect_backend(dev, info);
}
-static int xenfb_remove(struct xenbus_device *dev)
+static void xenfb_remove(struct xenbus_device *dev)
{
struct xenfb_info *info = dev_get_drvdata(&dev->dev);
@@ -538,8 +538,6 @@ static int xenfb_remove(struct xenbus_device *dev)
vfree(info->gfns);
vfree(info->fb);
kfree(info);
-
- return 0;
}
static unsigned long vmalloc_to_gfn(void *address)
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 828ced060742..b9a80aedee1b 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -15,7 +15,7 @@ static ssize_t device_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
struct virtio_device *dev = dev_to_virtio(_d);
- return sprintf(buf, "0x%04x\n", dev->id.device);
+ return sysfs_emit(buf, "0x%04x\n", dev->id.device);
}
static DEVICE_ATTR_RO(device);
@@ -23,7 +23,7 @@ static ssize_t vendor_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
struct virtio_device *dev = dev_to_virtio(_d);
- return sprintf(buf, "0x%04x\n", dev->id.vendor);
+ return sysfs_emit(buf, "0x%04x\n", dev->id.vendor);
}
static DEVICE_ATTR_RO(vendor);
@@ -31,7 +31,7 @@ static ssize_t status_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
struct virtio_device *dev = dev_to_virtio(_d);
- return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
+ return sysfs_emit(buf, "0x%08x\n", dev->config->get_status(dev));
}
static DEVICE_ATTR_RO(status);
@@ -39,7 +39,7 @@ static ssize_t modalias_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
struct virtio_device *dev = dev_to_virtio(_d);
- return sprintf(buf, "virtio:d%08Xv%08X\n",
+ return sysfs_emit(buf, "virtio:d%08Xv%08X\n",
dev->id.device, dev->id.vendor);
}
static DEVICE_ATTR_RO(modalias);
@@ -54,9 +54,9 @@ static ssize_t features_show(struct device *_d,
/* We actually represent this as a bitstring, as it could be
* arbitrary length in future. */
for (i = 0; i < sizeof(dev->features)*8; i++)
- len += sprintf(buf+len, "%c",
+ len += sysfs_emit_at(buf, len, "%c",
__virtio_test_bit(dev, i) ? '1' : '0');
- len += sprintf(buf+len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
return len;
}
static DEVICE_ATTR_RO(features);
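The virtio sysfs conversions swap sprintf() for sysfs_emit() and sysfs_emit_at(), which know the show() buffer is a single page and refuse to write past PAGE_SIZE (or from an offset outside it). Minimal show() sketch with a hypothetical attribute:

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	/* buf is a PAGE_SIZE buffer; sysfs_emit() enforces that bound */
	return sysfs_emit(buf, "0x%04x\n", demo_read_id(dev));
}
static DEVICE_ATTR_RO(demo);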
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index c3b9f2761849..9e496e288cfa 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -303,14 +303,14 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
int err;
if (index >= vp_modern_get_num_queues(mdev))
- return ERR_PTR(-ENOENT);
+ return ERR_PTR(-EINVAL);
/* Check if queue is either not available or already active. */
num = vp_modern_get_queue_size(mdev, index);
if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
- if (num & (num - 1)) {
+ if (!is_power_of_2(num)) {
dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 2e7689bb933b..723c4e29e1d3 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1052,7 +1052,7 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
dma_addr_t dma_addr;
/* We assume num is a power of 2. */
- if (num & (num - 1)) {
+ if (!is_power_of_2(num)) {
dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
return -EINVAL;
}
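Both power-of-two hunks replace the open-coded num & (num - 1) test with is_power_of_2() from <linux/log2.h>, which states the intent directly and, unlike the raw expression, also rejects zero. Sketch:

#include <linux/log2.h>

/* is_power_of_2(n) is essentially: n != 0 && (n & (n - 1)) == 0 */
static bool demo_ring_size_ok(unsigned int num)
{
	return is_power_of_2(num);
}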
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 28b2a1fa25ab..0d4f8f4f4948 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -1181,9 +1181,8 @@ static void pvcalls_back_changed(struct xenbus_device *dev,
}
}
-static int pvcalls_back_remove(struct xenbus_device *dev)
+static void pvcalls_back_remove(struct xenbus_device *dev)
{
- return 0;
}
static int pvcalls_back_uevent(struct xenbus_device *xdev,
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 1826e8e67125..d5d589bda243 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -225,6 +225,8 @@ again:
return IRQ_HANDLED;
}
+static void free_active_ring(struct sock_mapping *map);
+
static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
struct sock_mapping *map)
{
@@ -240,7 +242,7 @@ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
gnttab_end_foreign_access(map->active.ring->ref[i], NULL);
gnttab_end_foreign_access(map->active.ref, NULL);
- free_page((unsigned long)map->active.ring);
+ free_active_ring(map);
kfree(map);
}
@@ -1085,7 +1087,7 @@ static const struct xenbus_device_id pvcalls_front_ids[] = {
{ "" }
};
-static int pvcalls_front_remove(struct xenbus_device *dev)
+static void pvcalls_front_remove(struct xenbus_device *dev)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map = NULL, *n;
@@ -1121,7 +1123,6 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
kfree(bedata->ring.sring);
kfree(bedata);
xenbus_switch_state(dev, XenbusStateClosed);
- return 0;
}
static int pvcalls_front_probe(struct xenbus_device *dev,
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index d171091eec12..b11e401f1b1e 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -716,14 +716,12 @@ out:
return err;
}
-static int xen_pcibk_xenbus_remove(struct xenbus_device *dev)
+static void xen_pcibk_xenbus_remove(struct xenbus_device *dev)
{
struct xen_pcibk_device *pdev = dev_get_drvdata(&dev->dev);
if (pdev != NULL)
free_pdev(pdev);
-
- return 0;
}
static const struct xenbus_device_id xen_pcibk_ids[] = {
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 6106ed93817d..954188b0b858 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1249,7 +1249,7 @@ static void scsiback_release_translation_entry(struct vscsibk_info *info)
spin_unlock_irqrestore(&info->v2p_lock, flags);
}
-static int scsiback_remove(struct xenbus_device *dev)
+static void scsiback_remove(struct xenbus_device *dev)
{
struct vscsibk_info *info = dev_get_drvdata(&dev->dev);
@@ -1261,8 +1261,6 @@ static int scsiback_remove(struct xenbus_device *dev)
gnttab_page_cache_shrink(&info->free_pages, 0);
dev_set_drvdata(&dev->dev, NULL);
-
- return 0;
}
static int scsiback_probe(struct xenbus_device *dev,