Diffstat (limited to 'drivers/net/ethernet/intel/igc/igc_main.c')

-rw-r--r--	drivers/net/ethernet/intel/igc/igc_main.c	315
1 file changed, 262 insertions(+), 53 deletions(-)
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 1c4676882082..6f557e843e49 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -254,6 +254,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
 	/* reset BQL for queue */
 	netdev_tx_reset_queue(txring_txq(tx_ring));
 
+	/* Zero out the buffer ring */
+	memset(tx_ring->tx_buffer_info, 0,
+	       sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);
+
+	/* Zero out the descriptor ring */
+	memset(tx_ring->desc, 0, tx_ring->size);
+
 	/* reset next_to_use and next_to_clean */
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
@@ -267,7 +274,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
  */
 void igc_free_tx_resources(struct igc_ring *tx_ring)
 {
-	igc_clean_tx_ring(tx_ring);
+	igc_disable_tx_ring(tx_ring);
 
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
@@ -309,6 +316,33 @@ static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
 			igc_clean_tx_ring(adapter->tx_ring[i]);
 }
 
+static void igc_disable_tx_ring_hw(struct igc_ring *ring)
+{
+	struct igc_hw *hw = &ring->q_vector->adapter->hw;
+	u8 idx = ring->reg_idx;
+	u32 txdctl;
+
+	txdctl = rd32(IGC_TXDCTL(idx));
+	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
+	txdctl |= IGC_TXDCTL_SWFLUSH;
+	wr32(IGC_TXDCTL(idx), txdctl);
+}
+
+/**
+ * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
+ * @adapter: board private structure
+ */
+static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+		igc_disable_tx_ring_hw(tx_ring);
+	}
+}
+
 /**
  * igc_setup_tx_resources - allocate Tx resources (Descriptors)
  * @tx_ring: tx descriptor ring (for a specific queue) to setup
@@ -704,7 +738,6 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
 	/* disable the queue */
 	wr32(IGC_TXDCTL(reg_idx), 0);
 	wrfl();
-	mdelay(10);
 
 	wr32(IGC_TDLEN(reg_idx),
 	     ring->count * sizeof(union igc_adv_tx_desc));
@@ -1010,7 +1043,7 @@ static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
 	ktime_t base_time = adapter->base_time;
 	ktime_t now = ktime_get_clocktai();
 	ktime_t baset_est, end_of_cycle;
-	u32 launchtime;
+	s32 launchtime;
 	s64 n;
 
 	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
@@ -1023,7 +1056,7 @@ static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
 			*first_flag = true;
 			ring->last_ff_cycle = baset_est;
 
-			if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
+			if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0)
 				*insert_empty = true;
 		}
 	}
@@ -1566,26 +1599,24 @@ done:
 	first->bytecount = skb->len;
 	first->gso_segs = 1;
 
-	if (tx_ring->max_sdu > 0) {
-		u32 max_sdu = 0;
-
-		max_sdu = tx_ring->max_sdu +
-			  (skb_vlan_tagged(first->skb) ? VLAN_HLEN : 0);
+	if (adapter->qbv_transition || tx_ring->oper_gate_closed)
+		goto out_drop;
 
-		if (first->bytecount > max_sdu) {
-			adapter->stats.txdrop++;
-			goto out_drop;
-		}
+	if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) {
+		adapter->stats.txdrop++;
+		goto out_drop;
 	}
 
-	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+	if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
+		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
 		/* FIXME: add support for retrieving timestamps from
 		 * the other timer registers before skipping the
 		 * timestamping request.
 		 */
-		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
-		    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
-					   &adapter->state)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
+		if (!adapter->ptp_tx_skb) {
 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 			tx_flags |= IGC_TX_FLAGS_TSTAMP;
@@ -1594,6 +1625,8 @@ done:
 		} else {
 			adapter->tx_hwtstamp_skipped++;
 		}
+
+		spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
 	}
 
 	if (skb_vlan_tag_present(skb)) {
@@ -1690,14 +1723,36 @@ static void igc_rx_checksum(struct igc_ring *ring,
 		   le32_to_cpu(rx_desc->wb.upper.status_error));
 }
 
+/* Mapping HW RSS Type to enum pkt_hash_types */
+static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
+	[IGC_RSS_TYPE_NO_HASH]		= PKT_HASH_TYPE_L2,
+	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= PKT_HASH_TYPE_L4,
+	[IGC_RSS_TYPE_HASH_IPV4]	= PKT_HASH_TYPE_L3,
+	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= PKT_HASH_TYPE_L4,
+	[IGC_RSS_TYPE_HASH_IPV6_EX]	= PKT_HASH_TYPE_L3,
+	[IGC_RSS_TYPE_HASH_IPV6]	= PKT_HASH_TYPE_L3,
+	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4,
+	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= PKT_HASH_TYPE_L4,
+	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= PKT_HASH_TYPE_L4,
+	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4,
+	[10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW  */
+	[11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask   */
+	[12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisons       */
+	[13] = PKT_HASH_TYPE_NONE,
+	[14] = PKT_HASH_TYPE_NONE,
+	[15] = PKT_HASH_TYPE_NONE,
+};
+
 static inline void igc_rx_hash(struct igc_ring *ring,
 			       union igc_adv_rx_desc *rx_desc,
 			       struct sk_buff *skb)
 {
-	if (ring->netdev->features & NETIF_F_RXHASH)
-		skb_set_hash(skb,
-			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
-			     PKT_HASH_TYPE_L3);
+	if (ring->netdev->features & NETIF_F_RXHASH) {
+		u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+		u32 rss_type = igc_rss_type(rx_desc);
+
+		skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
+	}
 }
 
 static void igc_rx_vlan(struct igc_ring *rx_ring,
@@ -2214,6 +2269,8 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
 	if (!count)
 		return ok;
 
+	XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff);
+
 	desc = IGC_RX_DESC(ring, i);
 	bi = &ring->rx_buffer_info[i];
 	i -= ring->count;
@@ -2387,6 +2444,8 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
 	nq = txring_txq(ring);
 
 	__netif_tx_lock(nq, cpu);
+	/* Avoid transmit queue timeout since we share it with the slow path */
+	txq_trans_cond_update(nq);
 	res = igc_xdp_init_tx_descriptor(ring, xdpf);
 	__netif_tx_unlock(nq);
 	return res;
@@ -2498,8 +2557,8 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 		union igc_adv_rx_desc *rx_desc;
 		struct igc_rx_buffer *rx_buffer;
 		unsigned int size, truesize;
+		struct igc_xdp_buff ctx;
 		ktime_t timestamp = 0;
-		struct xdp_buff xdp;
 		int pkt_offset = 0;
 		void *pktbuf;
@@ -2528,18 +2587,20 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 		if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
 			timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
 							pktbuf);
+			ctx.rx_ts = timestamp;
 			pkt_offset = IGC_TS_HDR_LEN;
 			size -= IGC_TS_HDR_LEN;
 		}
 
 		if (!skb) {
-			xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
-			xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
+			xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
+			xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
 					 igc_rx_offset(rx_ring) + pkt_offset,
 					 size, true);
-			xdp_buff_clear_frags_flag(&xdp);
+			xdp_buff_clear_frags_flag(&ctx.xdp);
+			ctx.rx_desc = rx_desc;
 
-			skb = igc_xdp_run_prog(adapter, &xdp);
+			skb = igc_xdp_run_prog(adapter, &ctx.xdp);
 		}
 
 		if (IS_ERR(skb)) {
@@ -2561,9 +2622,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 		} else if (skb)
 			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
 		else if (ring_uses_build_skb(rx_ring))
-			skb = igc_build_skb(rx_ring, rx_buffer, &xdp);
+			skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp);
 		else
-			skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
+			skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp,
 						timestamp);
 
 		/* exit if we failed to retrieve a buffer */
@@ -2664,6 +2725,15 @@ static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
 	napi_gro_receive(&q_vector->napi, skb);
 }
 
+static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp)
+{
+	/* xdp_buff pointer used by ZC code path is alloc as xdp_buff_xsk. The
+	 * igc_xdp_buff shares its layout with xdp_buff_xsk and private
+	 * igc_xdp_buff fields fall into xdp_buff_xsk->cb
+	 */
+       return (struct igc_xdp_buff *)xdp;
+}
+
 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
 {
 	struct igc_adapter *adapter = q_vector->adapter;
@@ -2682,6 +2752,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
 	while (likely(total_packets < budget)) {
 		union igc_adv_rx_desc *desc;
 		struct igc_rx_buffer *bi;
+		struct igc_xdp_buff *ctx;
 		ktime_t timestamp = 0;
 		unsigned int size;
 		int res;
@@ -2699,9 +2770,13 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
 
 		bi = &ring->rx_buffer_info[ntc];
 
+		ctx = xsk_buff_to_igc_ctx(bi->xdp);
+		ctx->rx_desc = desc;
+
 		if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
 			timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
 							bi->xdp->data);
+			ctx->rx_ts = timestamp;
 
 			bi->xdp->data += IGC_TS_HDR_LEN;
@@ -2780,15 +2855,18 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
 	struct netdev_queue *nq = txring_txq(ring);
 	union igc_adv_tx_desc *tx_desc = NULL;
 	int cpu = smp_processor_id();
-	u16 ntu = ring->next_to_use;
 	struct xdp_desc xdp_desc;
-	u16 budget;
+	u16 budget, ntu;
 
 	if (!netif_carrier_ok(ring->netdev))
 		return;
 
 	__netif_tx_lock(nq, cpu);
 
+	/* Avoid transmit queue timeout since we share it with the slow path */
+	txq_trans_cond_update(nq);
+
+	ntu = ring->next_to_use;
 	budget = igc_desc_unused(ring);
 
 	while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
@@ -2956,8 +3034,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
 		    time_after(jiffies, tx_buffer->time_stamp +
 		    (adapter->tx_timeout_factor * HZ)) &&
 		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
-		    (rd32(IGC_TDH(tx_ring->reg_idx)) !=
-		     readl(tx_ring->tail))) {
+		    (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) &&
+		    !tx_ring->oper_gate_closed) {
 			/* detected Tx unit hang */
 			netdev_err(tx_ring->netdev,
 				   "Detected Tx Unit Hang\n"
@@ -4723,6 +4801,7 @@ static int igc_sw_init(struct igc_adapter *adapter)
 	adapter->nfc_rule_count = 0;
 
 	spin_lock_init(&adapter->stats64_lock);
+	spin_lock_init(&adapter->qbv_tx_lock);
 	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
 	adapter->flags |= IGC_FLAG_HAS_MSIX;
@@ -5007,6 +5086,7 @@ void igc_down(struct igc_adapter *adapter)
 	/* clear VLAN promisc flag so VFTA will be updated if necessary */
 	adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
 
+	igc_disable_all_tx_rings_hw(adapter);
 	igc_clean_all_tx_rings(adapter);
 	igc_clean_all_rx_rings(adapter);
 }
@@ -5212,7 +5292,7 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
 
 	if (tsicr & IGC_TSICR_TXTS) {
 		/* retrieve hardware timestamp */
-		schedule_work(&adapter->ptp_tx_work);
+		igc_ptp_tx_tstamp_event(adapter);
 		ack |= IGC_TSICR_TXTS;
 	}
 
@@ -6040,13 +6120,16 @@ static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
 	return igc_tsn_offload_apply(adapter);
 }
 
-static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
+static int igc_qbv_clear_schedule(struct igc_adapter *adapter)
 {
+	unsigned long flags;
 	int i;
 
 	adapter->base_time = 0;
 	adapter->cycle_time = NSEC_PER_SEC;
+	adapter->taprio_offload_enable = false;
 	adapter->qbv_config_change_errors = 0;
+	adapter->qbv_count = 0;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igc_ring *ring = adapter->tx_ring[i];
@@ -6056,6 +6139,26 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
 		ring->max_sdu = 0;
 	}
 
+	spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
+
+	adapter->qbv_transition = false;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igc_ring *ring = adapter->tx_ring[i];
+
+		ring->oper_gate_closed = false;
+		ring->admin_gate_closed = false;
+	}
+
+	spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
+
+	return 0;
+}
+
+static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
+{
+	igc_qbv_clear_schedule(adapter);
+
 	return 0;
 }
 
@@ -6065,18 +6168,21 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 	bool queue_configured[IGC_MAX_TX_QUEUES] = { };
 	struct igc_hw *hw = &adapter->hw;
 	u32 start_time = 0, end_time = 0;
+	struct timespec64 now;
+	unsigned long flags;
 	size_t n;
 	int i;
 
-	adapter->qbv_enable = qopt->enable;
-
-	if (!qopt->enable)
+	if (qopt->cmd == TAPRIO_CMD_DESTROY)
 		return igc_tsn_clear_schedule(adapter);
 
+	if (qopt->cmd != TAPRIO_CMD_REPLACE)
+		return -EOPNOTSUPP;
+
 	if (qopt->base_time < 0)
 		return -ERANGE;
 
-	if (igc_is_device_id_i225(hw) && adapter->base_time)
+	if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable)
 		return -EALREADY;
 
 	if (!validate_schedule(adapter, qopt))
@@ -6084,6 +6190,9 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 
 	adapter->cycle_time = qopt->cycle_time;
 	adapter->base_time = qopt->base_time;
+	adapter->taprio_offload_enable = true;
+
+	igc_ptp_read(adapter, &now);
 
 	for (n = 0; n < qopt->num_entries; n++) {
 		struct tc_taprio_sched_entry *e = &qopt->entries[n];
@@ -6119,30 +6228,49 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 				ring->start_time = start_time;
 			ring->end_time = end_time;
 
-			queue_configured[i] = true;
+			if (ring->start_time >= adapter->cycle_time)
+				queue_configured[i] = false;
+			else
+				queue_configured[i] = true;
 		}
 
 		start_time += e->interval;
 	}
 
+	spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
+
 	/* Check whether a queue gets configured.
 	 * If not, set the start and end time to be end time.
 	 */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igc_ring *ring = adapter->tx_ring[i];
+
+		if (!is_base_time_past(qopt->base_time, &now)) {
+			ring->admin_gate_closed = false;
+		} else {
+			ring->oper_gate_closed = false;
+			ring->admin_gate_closed = false;
+		}
+
 		if (!queue_configured[i]) {
-			struct igc_ring *ring = adapter->tx_ring[i];
+			if (!is_base_time_past(qopt->base_time, &now))
+				ring->admin_gate_closed = true;
+			else
+				ring->oper_gate_closed = true;
 
 			ring->start_time = end_time;
 			ring->end_time = end_time;
 		}
 	}
 
+	spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
+
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igc_ring *ring = adapter->tx_ring[i];
		struct net_device *dev = adapter->netdev;
 
 		if (qopt->max_sdu[i])
-			ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len;
+			ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN;
 		else
 			ring->max_sdu = 0;
 	}
@@ -6262,6 +6390,8 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
 {
 	struct igc_adapter *adapter = netdev_priv(dev);
 
+	adapter->tc_setup_type = type;
+
 	switch (type) {
 	case TC_QUERY_CAPS:
 		return igc_tc_query_caps(adapter, type_data);
@@ -6314,6 +6444,9 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
 
 	__netif_tx_lock(nq, cpu);
 
+	/* Avoid transmit queue timeout since we share it with the slow path */
+	txq_trans_cond_update(nq);
+
 	drops = 0;
 	for (i = 0; i < num_frames; i++) {
 		int err;
@@ -6454,6 +6587,85 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
 	return value;
 }
 
+/* Mapping HW RSS Type to enum xdp_rss_hash_type */
+static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = {
+	[IGC_RSS_TYPE_NO_HASH]		= XDP_RSS_TYPE_L2,
+	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= XDP_RSS_TYPE_L4_IPV4_TCP,
+	[IGC_RSS_TYPE_HASH_IPV4]	= XDP_RSS_TYPE_L3_IPV4,
+	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= XDP_RSS_TYPE_L4_IPV6_TCP,
+	[IGC_RSS_TYPE_HASH_IPV6_EX]	= XDP_RSS_TYPE_L3_IPV6_EX,
+	[IGC_RSS_TYPE_HASH_IPV6]	= XDP_RSS_TYPE_L3_IPV6,
+	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
+	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= XDP_RSS_TYPE_L4_IPV4_UDP,
+	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= XDP_RSS_TYPE_L4_IPV6_UDP,
+	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX,
+	[10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW  */
+	[11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask   */
+	[12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisons       */
+	[13] = XDP_RSS_TYPE_NONE,
+	[14] = XDP_RSS_TYPE_NONE,
+	[15] = XDP_RSS_TYPE_NONE,
+};
+
+static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
+			   enum xdp_rss_hash_type *rss_type)
+{
+	const struct igc_xdp_buff *ctx = (void *)_ctx;
+
+	if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))
+		return -ENODATA;
+
+	*hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss);
+	*rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)];
+
+	return 0;
+}
+
+static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
+{
+	const struct igc_xdp_buff *ctx = (void *)_ctx;
+
+	if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
+		*timestamp = ctx->rx_ts;
+
+		return 0;
+	}
+
+	return -ENODATA;
+}
+
+static const struct xdp_metadata_ops igc_xdp_metadata_ops = {
+	.xmo_rx_hash			= igc_xdp_rx_hash,
+	.xmo_rx_timestamp		= igc_xdp_rx_timestamp,
+};
+
+static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
+{
+	struct igc_adapter *adapter = container_of(timer, struct igc_adapter,
+						   hrtimer);
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
+
+	adapter->qbv_transition = true;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+		if (tx_ring->admin_gate_closed) {
+			tx_ring->admin_gate_closed = false;
+			tx_ring->oper_gate_closed = true;
+		} else {
+			tx_ring->oper_gate_closed = false;
+		}
+	}
+	adapter->qbv_transition = false;
+
+	spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
+
+	return HRTIMER_NORESTART;
+}
+
 /**
  * igc_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -6527,6 +6739,7 @@ static int igc_probe(struct pci_dev *pdev,
 	hw->hw_addr = adapter->io_addr;
 
 	netdev->netdev_ops = &igc_netdev_ops;
+	netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
 	igc_ethtool_set_ops(netdev);
 	netdev->watchdog_timeo = 5 * HZ;
 
@@ -6554,6 +6767,7 @@ static int igc_probe(struct pci_dev *pdev,
 	netdev->features |= NETIF_F_TSO;
 	netdev->features |= NETIF_F_TSO6;
 	netdev->features |= NETIF_F_TSO_ECN;
+	netdev->features |= NETIF_F_RXHASH;
 	netdev->features |= NETIF_F_RXCSUM;
 	netdev->features |= NETIF_F_HW_CSUM;
 	netdev->features |= NETIF_F_SCTP_CRC;
@@ -6630,6 +6844,9 @@ static int igc_probe(struct pci_dev *pdev,
 	INIT_WORK(&adapter->reset_task, igc_reset_task);
 	INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
 
+	hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	adapter->hrtimer.function = &igc_qbv_scheduling_timer;
+
 	/* Initialize link properties that are user-changeable */
 	adapter->fc_autoneg = true;
 	hw->mac.autoneg = true;
@@ -6723,6 +6940,9 @@ static void igc_remove(struct pci_dev *pdev)
 
 	igc_ptp_stop(adapter);
 
+	pci_disable_ptm(pdev);
+	pci_clear_master(pdev);
+
 	set_bit(__IGC_DOWN, &adapter->state);
 
 	del_timer_sync(&adapter->watchdog_timer);
@@ -6730,6 +6950,7 @@ static void igc_remove(struct pci_dev *pdev)
 	cancel_work_sync(&adapter->reset_task);
 	cancel_work_sync(&adapter->watchdog_task);
+	hrtimer_cancel(&adapter->hrtimer);
 
 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
@@ -7127,18 +7348,6 @@ void igc_enable_rx_ring(struct igc_ring *ring)
 		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
 }
 
-static void igc_disable_tx_ring_hw(struct igc_ring *ring)
-{
-	struct igc_hw *hw = &ring->q_vector->adapter->hw;
-	u8 idx = ring->reg_idx;
-	u32 txdctl;
-
-	txdctl = rd32(IGC_TXDCTL(idx));
-	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
-	txdctl |= IGC_TXDCTL_SWFLUSH;
-	wr32(IGC_TXDCTL(idx), txdctl);
-}
-
 void igc_disable_tx_ring(struct igc_ring *ring)
 {
 	igc_disable_tx_ring_hw(ring);
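
Note on the Rx hunks: they thread a driver-private `struct igc_xdp_buff` through both the copy and zero-copy receive loops, but its definition lives in igc.h, which this diff does not touch. A minimal sketch of the layout the hunks imply (field names are taken from the diff; the comments and exact definition are our reading, not authoritative):

/* Sketch only; the real definition is in drivers/net/ethernet/intel/igc/igc.h.
 * xdp must stay the first member so the cast in xsk_buff_to_igc_ctx() is
 * valid: in the zero-copy path the buffer is really an xdp_buff_xsk, and the
 * two private fields below land in its cb[] scratch area, which
 * XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff) verifies is large enough.
 */
struct igc_xdp_buff {
	struct xdp_buff xdp;            /* must be first */
	union igc_adv_rx_desc *rx_desc; /* set per packet in both Rx loops */
	ktime_t rx_ts;                  /* only valid when TSIP is set */
};

This container is what lets igc_xdp_rx_hash() and igc_xdp_rx_timestamp() recover the descriptor and timestamp from the xdp_md pointer the BPF side hands back.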

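Taken together, `igc_xdp_metadata_ops` plus the `netdev->xdp_metadata_ops` hookup in igc_probe() expose the RSS hash and Rx timestamp to XDP programs through the kernel's metadata kfuncs. A hedged consumer sketch, modeled on the upstream xdp_metadata selftest; it assumes a vmlinux.h generated from a kernel that has these kfuncs and is not part of this patch:

/* SPDX-License-Identifier: GPL-2.0 */
/* Illustrative XDP program reading the hints igc now services. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;

SEC("xdp")
int rx_hints(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type rss_type;
	__u64 rx_ts;
	__u32 hash;

	/* igc_xdp_rx_hash() returns -ENODATA unless NETIF_F_RXHASH is set */
	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
		bpf_printk("rss hash 0x%x type %u", hash, rss_type);

	/* igc_xdp_rx_timestamp() only succeeds when the descriptor has TSIP */
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &rx_ts))
		bpf_printk("hw rx timestamp %llu ns", rx_ts);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";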