author    Piotr Raczynski <piotr.raczynski@intel.com>    2023-05-15 21:03:12 +0200
committer Tony Nguyen <anthony.l.nguyen@intel.com>    2023-05-16 09:16:53 -0700
commit    38e97a98e371c615f67d144ee9e4f7087c0a41e8 (patch)
tree      16723cb6b272e875f75e4b6146745ed6c9ef33b5 /drivers/net/ethernet/intel/ice/ice_irq.c
parent    e641577eb6e82cbb89dde7cfc44ef2541c42278c (diff)
ice: move interrupt related code to separate file
Keep interrupt handling code in a dedicated file. This helps keep the driver better structured and prepares for more functionality to be added to this file.

Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
Signed-off-by: Piotr Raczynski <piotr.raczynski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_irq.c')
-rw-r--r--    drivers/net/ethernet/intel/ice/ice_irq.c    226
1 file changed, 226 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
new file mode 100644
index 000000000000..1fc7daec9732
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_irq.h"
+
+/**
+ * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
+ * @pf: board private structure
+ * @v_remain: number of remaining MSI-X vectors to be distributed
+ *
+ * Reduce the usage of MSI-X vectors when the entire request cannot be
+ * fulfilled. pf->num_lan_msix and pf->num_rdma_msix values are set based on
+ * the number of remaining vectors.
+ */
+static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
+{
+ int v_rdma;
+
+ if (!ice_is_rdma_ena(pf)) {
+ pf->num_lan_msix = v_remain;
+ return;
+ }
+
+ /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
+ v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
+
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
+ dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
+ clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+
+ pf->num_rdma_msix = 0;
+ pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
+ } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
+ (v_remain - v_rdma < v_rdma)) {
+ /* Support minimum RDMA and give remaining vectors to LAN MSIX
+ */
+ pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
+ pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
+ } else {
+ /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
+ */
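+ /* Illustrative example (not part of this patch): if v_remain were 24 and
+ * ICE_RDMA_NUM_AEQ_MSIX were 4, RDMA would get (24 - 4) / 2 + 4 = 14
+ * vectors and LAN would get the remaining 10.
+ */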
+ pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
+ ICE_RDMA_NUM_AEQ_MSIX;
+ pf->num_lan_msix = v_remain - pf->num_rdma_msix;
+ }
+}
+
+/**
+ * ice_ena_msix_range - Request a range of MSIX vectors from the OS
+ * @pf: board private structure
+ *
+ * Compute the number of MSI-X vectors wanted and request them from the OS.
+ * Adjust device usage if not enough vectors are available. Return the number
+ * of vectors reserved, or a negative value on failure.
+ */
+static int ice_ena_msix_range(struct ice_pf *pf)
+{
+ int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
+ struct device *dev = ice_pf_to_dev(pf);
+ int err, i;
+
+ hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
+ num_cpus = num_online_cpus();
+
+ /* LAN miscellaneous handler */
+ v_other = ICE_MIN_LAN_OICR_MSIX;
+
+ /* Flow Director */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ v_other += ICE_FDIR_MSIX;
+
+ /* switchdev */
+ v_other += ICE_ESWITCH_MSIX;
+
+ v_wanted = v_other;
+
+ /* LAN traffic */
+ pf->num_lan_msix = num_cpus;
+ v_wanted += pf->num_lan_msix;
+
+ /* RDMA auxiliary driver */
+ if (ice_is_rdma_ena(pf)) {
+ pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
+ v_wanted += pf->num_rdma_msix;
+ }
+
+ if (v_wanted > hw_num_msix) {
+ int v_remain;
+
+ dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
+ v_wanted, hw_num_msix);
+
+ if (hw_num_msix < ICE_MIN_MSIX) {
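+ /* the device cannot provide even the driver's absolute minimum */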
+ err = -ERANGE;
+ goto exit_err;
+ }
+
+ v_remain = hw_num_msix - v_other;
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
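+ /* too little is left for LAN traffic; trim the "other" vectors so
+ * LAN still gets its minimum Tx/Rx interrupts
+ */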
+ v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
+ }
+
+ ice_reduce_msix_usage(pf, v_remain);
+ v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
+
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
+ pf->num_lan_msix);
+ if (ice_is_rdma_ena(pf))
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
+ pf->num_rdma_msix);
+ }
+
+ pf->msix_entries = devm_kcalloc(dev, v_wanted,
+ sizeof(*pf->msix_entries), GFP_KERNEL);
+ if (!pf->msix_entries) {
+ err = -ENOMEM;
+ goto exit_err;
+ }
+
+ for (i = 0; i < v_wanted; i++)
+ pf->msix_entries[i].entry = i;
+
+ /* actually reserve the vectors */
+ v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
+ ICE_MIN_MSIX, v_wanted);
+ if (v_actual < 0) {
+ dev_err(dev, "unable to reserve MSI-X vectors\n");
+ err = v_actual;
+ goto msix_err;
+ }
+
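+ /* pci_enable_msix_range() may grant fewer vectors than requested,
+ * but never fewer than ICE_MIN_MSIX on success; redistribute based
+ * on what was actually obtained
+ */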
+ if (v_actual < v_wanted) {
+ dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
+ v_wanted, v_actual);
+
+ if (v_actual < ICE_MIN_MSIX) {
+ /* error if we can't get minimum vectors */
+ pci_disable_msix(pf->pdev);
+ err = -ERANGE;
+ goto msix_err;
+ } else {
+ int v_remain = v_actual - v_other;
+
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
+
+ ice_reduce_msix_usage(pf, v_remain);
+
+ dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
+ pf->num_lan_msix);
+
+ if (ice_is_rdma_ena(pf))
+ dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
+ pf->num_rdma_msix);
+ }
+ }
+
+ return v_actual;
+
+msix_err:
+ devm_kfree(dev, pf->msix_entries);
+
+exit_err:
+ pf->num_rdma_msix = 0;
+ pf->num_lan_msix = 0;
+ return err;
+}
+
+/**
+ * ice_dis_msix - Disable MSI-X interrupt setup in OS
+ * @pf: board private structure
+ */
+static void ice_dis_msix(struct ice_pf *pf)
+{
+ pci_disable_msix(pf->pdev);
+ devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
+ pf->msix_entries = NULL;
+}
+
+/**
+ * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
+ * @pf: board private structure
+ */
+void ice_clear_interrupt_scheme(struct ice_pf *pf)
+{
+ ice_dis_msix(pf);
+
+ if (pf->irq_tracker) {
+ devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
+ pf->irq_tracker = NULL;
+ }
+}
+
+/**
+ * ice_init_interrupt_scheme - Determine proper interrupt scheme
+ * @pf: board private structure to initialize
+ */
+int ice_init_interrupt_scheme(struct ice_pf *pf)
+{
+ int vectors;
+
+ vectors = ice_ena_msix_range(pf);
+
+ if (vectors < 0)
+ return vectors;
+
+ /* set up vector assignment tracking */
+ pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
+ struct_size(pf->irq_tracker, list,
+ vectors),
+ GFP_KERNEL);
+ if (!pf->irq_tracker) {
+ ice_dis_msix(pf);
+ return -ENOMEM;
+ }
+
+ /* populate SW interrupts pool with number of OS granted IRQs. */
+ pf->num_avail_sw_msix = (u16)vectors;
+ pf->irq_tracker->num_entries = (u16)vectors;
+ pf->irq_tracker->end = pf->irq_tracker->num_entries;
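+ /* all tracker entries start out free; vectors are handed out from
+ * this pool elsewhere in the driver
+ */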
+
+ return 0;
+}