author		Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>	2019-09-03 01:31:06 -0700
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2019-09-05 08:13:41 -0700
commit		8c243700ab10a56171858cc9dd17c5d6494a0ed6 (patch)
tree		ff89776a1c50cd55abccf4b882e0cbf3db7546b3 /drivers/net/ethernet/intel/ice/ice_lib.c
parent		ea300f41bb49605ccbc64f354e6708bb385ac1c6 (diff)
ice: Minor refactor in queue management
Remove q_left_tx and q_left_rx from the PF struct, as these counts can be
obtained by calling ice_get_avail_txq_count and ice_get_avail_rxq_count
respectively.

The function ice_determine_q_usage only sets num_lan_tx and num_lan_rx in
the PF structure, and these are later assigned to vsi->alloc_txq and
vsi->alloc_rxq respectively. This is an unnecessary indirection, so remove
ice_determine_q_usage, assign values to vsi->alloc_txq and vsi->alloc_rxq
directly in ice_vsi_set_num_qs, and use these to set num_lan_tx and
num_lan_rx respectively.

Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
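The bodies of ice_get_avail_txq_count and ice_get_avail_rxq_count are not part
of this diff. As a rough, self-contained illustration of the idea only (not the
driver's actual implementation), counting available queues amounts to counting
the clear bits in a per-PF "queue in use" bitmap, which is why a separately
maintained q_left_tx/q_left_rx counter becomes redundant. The bitmap layout,
the MAX_PF_TXQS bound, and the get_avail_q_count helper below are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define MAX_PF_TXQS 256	/* hypothetical upper bound on PF Tx queues */

/* Hypothetical model: one bit per Tx queue, set when the queue is allocated
 * to a VSI. Counting the clear bits yields the number of available queues.
 */
static uint16_t get_avail_q_count(const unsigned long *qmap, uint16_t size)
{
	const unsigned int bits = 8 * sizeof(unsigned long);
	uint16_t i, count = 0;

	for (i = 0; i < size; i++)
		if (!(qmap[i / bits] & (1UL << (i % bits))))
			count++;

	return count;
}

int main(void)
{
	unsigned long txq_map[MAX_PF_TXQS / (8 * sizeof(unsigned long))] = { 0 };

	txq_map[0] = 0xFFUL;	/* pretend queues 0-7 are already in use */
	printf("available Tx queues: %u\n",
	       (unsigned int)get_avail_q_count(txq_map, MAX_PF_TXQS));
	return 0;
}

Under this model, releasing a VSI simply clears the corresponding bits, so the
available count never needs to be incremented or decremented by hand, which is
exactly the bookkeeping the hunks below delete.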
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_lib.c')
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_lib.c	25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 5f7c75c3b24b..7cd8c5d13bcc 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -343,8 +343,20 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
switch (vsi->type) {
case ICE_VSI_PF:
- vsi->alloc_txq = pf->num_lan_tx;
- vsi->alloc_rxq = pf->num_lan_rx;
+ vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
+ num_online_cpus());
+
+ pf->num_lan_tx = vsi->alloc_txq;
+
+ /* only 1 Rx queue unless RSS is enabled */
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ vsi->alloc_rxq = 1;
+ else
+ vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
+ num_online_cpus());
+
+ pf->num_lan_rx = vsi->alloc_rxq;
+
vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
break;
case ICE_VSI_VF:
@@ -2577,9 +2589,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
- pf->q_left_tx -= vsi->alloc_txq;
- pf->q_left_rx -= vsi->alloc_rxq;
-
/* Do not exit if configuring RSS had an issue, at least
* receive traffic on first queue. Hence no need to capture
* return value
@@ -2643,8 +2652,6 @@ unroll_vsi_init:
ice_vsi_delete(vsi);
unroll_get_qs:
ice_vsi_put_qs(vsi);
- pf->q_left_tx += vsi->alloc_txq;
- pf->q_left_rx += vsi->alloc_rxq;
ice_vsi_clear(vsi);
return NULL;
@@ -2992,8 +2999,6 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_vsi_clear_rings(vsi);
ice_vsi_put_qs(vsi);
- pf->q_left_tx += vsi->alloc_txq;
- pf->q_left_rx += vsi->alloc_rxq;
/* retain SW VSI data structure since it is needed to unregister and
* free VSI netdev when PF is not in reset recovery pending state,\
@@ -3102,8 +3107,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_vectors;
- pf->q_left_tx -= vsi->alloc_txq;
- pf->q_left_rx -= vsi->alloc_rxq;
break;
default:
break;