Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac_main.c	297
1 file changed, 171 insertions(+), 126 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b56b13d64ab4..220626a8d499 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -63,8 +63,8 @@ static int phyaddr = -1;
 module_param(phyaddr, int, 0444);
 MODULE_PARM_DESC(phyaddr, "Physical device address");
 
-#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
-#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
+#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
 
 static int flow_ctrl = FLOW_AUTO;
 module_param(flow_ctrl, int, 0644);
@@ -176,32 +176,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
 	}
 }
 
-/**
- * stmmac_stop_all_queues - Stop all queues
- * @priv: driver private structure
- */
-static void stmmac_stop_all_queues(struct stmmac_priv *priv)
-{
-	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
-	u32 queue;
-
-	for (queue = 0; queue < tx_queues_cnt; queue++)
-		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
-}
-
-/**
- * stmmac_start_all_queues - Start all queues
- * @priv: driver private structure
- */
-static void stmmac_start_all_queues(struct stmmac_priv *priv)
-{
-	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
-	u32 queue;
-
-	for (queue = 0; queue < tx_queues_cnt; queue++)
-		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
-}
-
 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
 {
 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
@@ -297,7 +271,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
 	if (tx_q->dirty_tx > tx_q->cur_tx)
 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
 	else
-		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
+		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
 
 	return avail;
 }
@@ -315,7 +289,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
 	if (rx_q->dirty_rx <= rx_q->cur_rx)
 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
 	else
-		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
+		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
 
 	return dirty;
 }
@@ -360,7 +334,7 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
 
 /**
  * stmmac_eee_ctrl_timer - EEE TX SW timer.
- * @arg : data hook
+ * @t:  timer_list struct containing private info
  * Description:
  *  if there is no data transfer and if we are not in LPI state,
  *  then MAC Transmitter can be moved to LPI state.
@@ -736,7 +710,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtain the current hardware timestamping settings
-    as requested.
+ *  as requested.
 */
 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 {
@@ -789,14 +763,14 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
 
 static void stmmac_release_ptp(struct stmmac_priv *priv)
 {
-	if (priv->plat->clk_ptp_ref)
-		clk_disable_unprepare(priv->plat->clk_ptp_ref);
+	clk_disable_unprepare(priv->plat->clk_ptp_ref);
 	stmmac_ptp_unregister(priv);
 }
 
 /**
  * stmmac_mac_flow_ctrl - Configure flow control in all queues
  * @priv: driver private structure
+ * @duplex: duplex passed to the next function
  * Description: It is used for configuring the flow control in all queues
  */
 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
@@ -1150,7 +1124,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
 			head_rx = (void *)rx_q->dma_rx;
 
 		/* Display RX ring */
-		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
+		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true);
 	}
 }
 
@@ -1173,7 +1147,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
 		else
 			head_tx = (void *)tx_q->dma_tx;
 
-		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
+		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false);
 	}
 }
 
@@ -1217,16 +1191,16 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
 	int i;
 
 	/* Clear the RX descriptors */
-	for (i = 0; i < DMA_RX_SIZE; i++)
+	for (i = 0; i < priv->dma_rx_size; i++)
 		if (priv->extend_desc)
 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
 					    priv->use_riwt, priv->mode,
-					    (i == DMA_RX_SIZE - 1),
+					    (i == priv->dma_rx_size - 1),
 					    priv->dma_buf_sz);
 		else
 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
 					    priv->use_riwt, priv->mode,
-					    (i == DMA_RX_SIZE - 1),
+					    (i == priv->dma_rx_size - 1),
 					    priv->dma_buf_sz);
 }
 
@@ -1243,8 +1217,8 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
 	int i;
 
 	/* Clear the TX descriptors */
-	for (i = 0; i < DMA_TX_SIZE; i++) {
-		int last = (i == (DMA_TX_SIZE - 1));
+	for (i = 0; i < priv->dma_tx_size; i++) {
+		int last = (i == (priv->dma_tx_size - 1));
 		struct dma_desc *p;
 
 		if (priv->extend_desc)
@@ -1398,7 +1372,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 
 		stmmac_clear_rx_descriptors(priv, queue);
 
-		for (i = 0; i < DMA_RX_SIZE; i++) {
+		for (i = 0; i < priv->dma_rx_size; i++) {
 			struct dma_desc *p;
 
 			if (priv->extend_desc)
@@ -1413,16 +1387,18 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 		}
 
 		rx_q->cur_rx = 0;
-		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+		rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
 
 		/* Setup the chained descriptor addresses */
 		if (priv->mode == STMMAC_CHAIN_MODE) {
 			if (priv->extend_desc)
 				stmmac_mode_init(priv, rx_q->dma_erx,
-						 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
+						 rx_q->dma_rx_phy,
+						 priv->dma_rx_size, 1);
 			else
 				stmmac_mode_init(priv, rx_q->dma_rx,
-						 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
+						 rx_q->dma_rx_phy,
+						 priv->dma_rx_size, 0);
 		}
 	}
 
@@ -1436,7 +1412,7 @@ err_init_rx_buffers:
 		if (queue == 0)
 			break;
 
-		i = DMA_RX_SIZE;
+		i = priv->dma_rx_size;
 		queue--;
 	}
 
@@ -1468,13 +1444,15 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
 		if (priv->mode == STMMAC_CHAIN_MODE) {
 			if (priv->extend_desc)
 				stmmac_mode_init(priv, tx_q->dma_etx,
-						 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
+						 tx_q->dma_tx_phy,
+						 priv->dma_tx_size, 1);
 			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
 				stmmac_mode_init(priv, tx_q->dma_tx,
-						 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
+						 tx_q->dma_tx_phy,
+						 priv->dma_tx_size, 0);
 		}
 
-		for (i = 0; i < DMA_TX_SIZE; i++) {
+		for (i = 0; i < priv->dma_tx_size; i++) {
 			struct dma_desc *p;
 			if (priv->extend_desc)
 				p = &((tx_q->dma_etx + i)->basic);
@@ -1538,7 +1516,7 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
 {
 	int i;
 
-	for (i = 0; i < DMA_RX_SIZE; i++)
+	for (i = 0; i < priv->dma_rx_size; i++)
 		stmmac_free_rx_buffer(priv, queue, i);
 }
 
@@ -1551,7 +1529,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
 {
 	int i;
 
-	for (i = 0; i < DMA_TX_SIZE; i++)
+	for (i = 0; i < priv->dma_tx_size; i++)
 		stmmac_free_tx_buffer(priv, queue, i);
 }
 
@@ -1573,11 +1551,11 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
 
 		/* Free DMA regions of consistent memory previously allocated */
 		if (!priv->extend_desc)
-			dma_free_coherent(priv->device,
-					  DMA_RX_SIZE * sizeof(struct dma_desc),
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					  sizeof(struct dma_desc),
 					  rx_q->dma_rx, rx_q->dma_rx_phy);
 		else
-			dma_free_coherent(priv->device, DMA_RX_SIZE *
+			dma_free_coherent(priv->device, priv->dma_rx_size *
 					  sizeof(struct dma_extended_desc),
 					  rx_q->dma_erx, rx_q->dma_rx_phy);
 
@@ -1616,7 +1594,7 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
 			addr = tx_q->dma_tx;
 		}
 
-		size *= DMA_TX_SIZE;
+		size *= priv->dma_tx_size;
 
 		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
 
@@ -1649,7 +1627,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 		rx_q->priv_data = priv;
 
 		pp_params.flags = PP_FLAG_DMA_MAP;
-		pp_params.pool_size = DMA_RX_SIZE;
+		pp_params.pool_size = priv->dma_rx_size;
 		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
 		pp_params.order = ilog2(num_pages);
 		pp_params.nid = dev_to_node(priv->device);
@@ -1663,14 +1641,16 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 			goto err_dma;
 		}
 
-		rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
+		rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+					 sizeof(*rx_q->buf_pool),
 					 GFP_KERNEL);
 		if (!rx_q->buf_pool)
 			goto err_dma;
 
 		if (priv->extend_desc) {
 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
-							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
+							   priv->dma_rx_size *
+							   sizeof(struct dma_extended_desc),
 							   &rx_q->dma_rx_phy,
 							   GFP_KERNEL);
 			if (!rx_q->dma_erx)
@@ -1678,7 +1658,8 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 
 		} else {
 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
-							  DMA_RX_SIZE * sizeof(struct dma_desc),
+							  priv->dma_rx_size *
+							  sizeof(struct dma_desc),
 							  &rx_q->dma_rx_phy,
 							  GFP_KERNEL);
 			if (!rx_q->dma_rx)
@@ -1717,13 +1698,13 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
 		tx_q->queue_index = queue;
 		tx_q->priv_data = priv;
 
-		tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
+		tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
 					      sizeof(*tx_q->tx_skbuff_dma),
 					      GFP_KERNEL);
 		if (!tx_q->tx_skbuff_dma)
 			goto err_dma;
 
-		tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
+		tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
 					  sizeof(struct sk_buff *),
 					  GFP_KERNEL);
 		if (!tx_q->tx_skbuff)
@@ -1736,7 +1717,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
 		else
 			size = sizeof(struct dma_desc);
 
-		size *= DMA_TX_SIZE;
+		size *= priv->dma_tx_size;
 
 		addr = dma_alloc_coherent(priv->device, size,
 					  &tx_q->dma_tx_phy, GFP_KERNEL);
@@ -1965,6 +1946,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 /**
  * stmmac_tx_clean - to manage the transmission completion
  * @priv: driver private structure
+ * @budget: napi budget limiting this functions packet handling
  * @queue: TX queue index
  * Description: it reclaims the transmit resources after transmission completes.
  */
@@ -2046,7 +2028,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 
 		stmmac_release_tx_desc(priv, p, priv->mode);
 
-		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
 	}
 	tx_q->dirty_tx = entry;
 
@@ -2055,7 +2037,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 
 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
 								queue))) &&
-	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
+	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
 
 		netif_dbg(priv, tx_done, priv->dev,
 			  "%s: restart transmit\n", __func__);
@@ -2328,7 +2310,8 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 				    rx_q->dma_rx_phy, chan);
 
 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
-				     (DMA_RX_SIZE * sizeof(struct dma_desc));
+				     (priv->dma_rx_size *
+				      sizeof(struct dma_desc));
 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
 				       rx_q->rx_tail_addr, chan);
 	}
@@ -2357,7 +2340,7 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
 
 /**
  * stmmac_tx_timer - mitigation sw timer for tx.
- * @data: data pointer
+ * @t: data pointer
  * Description:
  * This is the timer handler to directly invoke the stmmac_tx_clean.
  */
@@ -2412,12 +2395,12 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv)
 	/* set TX ring length */
 	for (chan = 0; chan < tx_channels_count; chan++)
 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
-				       (DMA_TX_SIZE - 1), chan);
+				       (priv->dma_tx_size - 1), chan);
 
 	/* set RX ring length */
 	for (chan = 0; chan < rx_channels_count; chan++)
 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
-				       (DMA_RX_SIZE - 1), chan);
+				       (priv->dma_rx_size - 1), chan);
 }
 
 /**
@@ -2620,6 +2603,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
 /**
  * stmmac_hw_setup - setup mac in a usable state.
  * @dev : pointer to the device structure.
+ * @init_ptp: initialize PTP if set
  * Description:
  * this is the main function to setup the HW in a usable state because the
  * dma engine is reset, the core registers are configured (e.g. AXI,
@@ -2740,6 +2724,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
 	}
 
+	/* Configure real RX and TX queues */
+	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
+	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
+
 	/* Start the ball rolling... */
 	stmmac_start_all_dma(priv);
 
@@ -2797,6 +2785,11 @@ static int stmmac_open(struct net_device *dev)
 
 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
 
+	if (!priv->dma_tx_size)
+		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+	if (!priv->dma_rx_size)
+		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
 	/* Earlier check for TBS */
 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
@@ -2868,7 +2861,7 @@ static int stmmac_open(struct net_device *dev)
 	}
 
 	stmmac_enable_all_queues(priv);
-	stmmac_start_all_queues(priv);
+	netif_tx_start_all_queues(priv->dev);
 
 	return 0;
 
@@ -2911,8 +2904,6 @@ static int stmmac_release(struct net_device *dev)
 	phylink_stop(priv->phylink);
 	phylink_disconnect_phy(priv->phylink);
 
-	stmmac_stop_all_queues(priv);
-
 	stmmac_disable_all_queues(priv);
 
 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
@@ -2968,7 +2959,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
 		return false;
 
 	stmmac_set_tx_owner(priv, p);
-	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
 	return true;
 }
 
@@ -2977,7 +2968,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
  * @priv: driver private structure
  * @des: buffer start address
  * @total_len: total length to fill in descriptors
- * @last_segmant: condition for the last descriptor
+ * @last_segment: condition for the last descriptor
  * @queue: TX queue index
  * Description:
  * This function fills descriptor and request new descriptors according to
@@ -2996,7 +2987,8 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
 	while (tmp_len > 0) {
 		dma_addr_t curr_addr;
 
-		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+						priv->dma_tx_size);
 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 
 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3103,7 +3095,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		stmmac_set_mss(priv, mss_desc, mss);
 		tx_q->mss = mss;
-		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+						priv->dma_tx_size);
 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 	}
@@ -3210,7 +3203,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * ndo_start_xmit will fill this descriptor the next time it's
 	 * called and stmmac_tx_clean may clean up to this descriptor.
 	 */
-	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
 
 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
@@ -3373,7 +3366,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		int len = skb_frag_size(frag);
 		bool last_segment = (i == (nfrags - 1));
 
-		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
 		WARN_ON(tx_q->tx_skbuff[entry]);
 
 		if (likely(priv->extend_desc))
@@ -3441,7 +3434,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * ndo_start_xmit will fill this descriptor the next time it's
 	 * called and stmmac_tx_clean may clean up to this descriptor.
 	 */
-	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
 	tx_q->cur_tx = entry;
 
 	if (netif_msg_pktdata(priv)) {
@@ -3626,7 +3619,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 		dma_wmb();
 		stmmac_set_rx_owner(priv, p, use_rx_wd);
 
-		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
+		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
 	}
 	rx_q->dirty_rx = entry;
 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
@@ -3638,15 +3631,15 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
 				       struct dma_desc *p,
 				       int status, unsigned int len)
 {
-	int ret, coe = priv->hw->rx_csum;
 	unsigned int plen = 0, hlen = 0;
+	int coe = priv->hw->rx_csum;
 
 	/* Not first descriptor, buffer is always zero */
 	if (priv->sph && len)
 		return 0;
 
 	/* First descriptor, get split header length */
-	ret = stmmac_get_rx_header_len(priv, p, &hlen);
+	stmmac_get_rx_header_len(priv, p, &hlen);
 	if (priv->sph && hlen) {
 		priv->xstats.rx_split_hdr_pkt_n++;
 		return hlen;
@@ -3709,7 +3702,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		else
 			rx_head = (void *)rx_q->dma_rx;
 
-		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
+		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true);
 	}
 	while (count < limit) {
 		unsigned int buf1_len = 0, buf2_len = 0;
@@ -3751,7 +3744,8 @@ read_again:
 		if (unlikely(status & dma_own))
 			break;
 
-		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
+		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+						priv->dma_rx_size);
 		next_entry = rx_q->cur_rx;
 
 		if (priv->extend_desc)
@@ -3926,7 +3920,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
 
 	priv->xstats.napi_poll++;
 
-	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
+	work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
 	work_done = min(work_done, budget);
 
 	if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -3943,6 +3937,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
 /**
  *  stmmac_tx_timeout
  *  @dev : Pointer to net device structure
+ *  @txqueue: the index of the hanging transmit queue
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
@@ -4319,11 +4314,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
 		if (priv->extend_desc) {
 			seq_printf(seq, "Extended descriptor ring:\n");
 			sysfs_display_ring((void *)rx_q->dma_erx,
-					   DMA_RX_SIZE, 1, seq);
+					   priv->dma_rx_size, 1, seq);
 		} else {
 			seq_printf(seq, "Descriptor ring:\n");
 			sysfs_display_ring((void *)rx_q->dma_rx,
-					   DMA_RX_SIZE, 0, seq);
+					   priv->dma_rx_size, 0, seq);
 		}
 	}
 
@@ -4335,11 +4330,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
 		if (priv->extend_desc) {
 			seq_printf(seq, "Extended descriptor ring:\n");
 			sysfs_display_ring((void *)tx_q->dma_etx,
-					   DMA_TX_SIZE, 1, seq);
+					   priv->dma_tx_size, 1, seq);
 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
 			seq_printf(seq, "Descriptor ring:\n");
 			sysfs_display_ring((void *)tx_q->dma_tx,
-					   DMA_TX_SIZE, 0, seq);
+					   priv->dma_tx_size, 0, seq);
 		}
 	}
 
@@ -4725,6 +4720,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 	if (priv->dma_cap.tsoen)
 		dev_info(priv->device, "TSO supported\n");
 
+	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
+	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
+
 	/* Run HW quirks, if any */
 	if (priv->hwif_quirks) {
 		ret = priv->hwif_quirks(priv);
@@ -4747,6 +4745,86 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 	return 0;
 }
 
+static void stmmac_napi_add(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 queue, maxq;
+
+	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
+
+		ch->priv_data = priv;
+		ch->index = queue;
+
+		if (queue < priv->plat->rx_queues_to_use) {
+			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
+				       NAPI_POLL_WEIGHT);
+		}
+		if (queue < priv->plat->tx_queues_to_use) {
+			netif_tx_napi_add(dev, &ch->tx_napi,
+					  stmmac_napi_poll_tx,
+					  NAPI_POLL_WEIGHT);
+		}
+	}
+}
+
+static void stmmac_napi_del(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 queue, maxq;
+
+	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
+
+		if (queue < priv->plat->rx_queues_to_use)
+			netif_napi_del(&ch->rx_napi);
+		if (queue < priv->plat->tx_queues_to_use)
+			netif_napi_del(&ch->tx_napi);
+	}
+}
+
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret = 0;
+
+	if (netif_running(dev))
+		stmmac_release(dev);
+
+	stmmac_napi_del(dev);
+
+	priv->plat->rx_queues_to_use = rx_cnt;
+	priv->plat->tx_queues_to_use = tx_cnt;
+
+	stmmac_napi_add(dev);
+
+	if (netif_running(dev))
+		ret = stmmac_open(dev);
+
+	return ret;
+}
+
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret = 0;
+
+	if (netif_running(dev))
+		stmmac_release(dev);
+
+	priv->dma_rx_size = rx_size;
+	priv->dma_tx_size = tx_size;
+
+	if (netif_running(dev))
+		ret = stmmac_open(dev);
+
+	return ret;
+}
+
 /**
  * stmmac_dvr_probe
  * @device: device pointer
@@ -4763,7 +4841,7 @@ int stmmac_dvr_probe(struct device *device,
 {
 	struct net_device *ndev = NULL;
 	struct stmmac_priv *priv;
-	u32 queue, rxq, maxq;
+	u32 rxq;
 	int i, ret = 0;
 
 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
@@ -4827,10 +4905,6 @@ int stmmac_dvr_probe(struct device *device,
 
 	stmmac_check_ether_addr(priv);
 
-	/* Configure real RX and TX queues */
-	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
-	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
-
 	ndev->netdev_ops = &stmmac_netdev_ops;
 
 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -4928,25 +5002,7 @@ int stmmac_dvr_probe(struct device *device,
 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
 
 	/* Setup channels NAPI */
-	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
-
-	for (queue = 0; queue < maxq; queue++) {
-		struct stmmac_channel *ch = &priv->channel[queue];
-
-		spin_lock_init(&ch->lock);
-		ch->priv_data = priv;
-		ch->index = queue;
-
-		if (queue < priv->plat->rx_queues_to_use) {
-			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
-				       NAPI_POLL_WEIGHT);
-		}
-		if (queue < priv->plat->tx_queues_to_use) {
-			netif_tx_napi_add(ndev, &ch->tx_napi,
-					  stmmac_napi_poll_tx,
-					  NAPI_POLL_WEIGHT);
-		}
-	}
+	stmmac_napi_add(ndev);
 
 	mutex_init(&priv->lock);
 
@@ -5011,14 +5067,7 @@ error_phy_setup:
 	    priv->hw->pcs != STMMAC_PCS_RTBI)
 		stmmac_mdio_unregister(ndev);
 error_mdio_register:
-	for (queue = 0; queue < maxq; queue++) {
-		struct stmmac_channel *ch = &priv->channel[queue];
-
-		if (queue < priv->plat->rx_queues_to_use)
-			netif_napi_del(&ch->rx_napi);
-		if (queue < priv->plat->tx_queues_to_use)
-			netif_napi_del(&ch->tx_napi);
-	}
+	stmmac_napi_del(ndev);
 error_hw_init:
 	destroy_workqueue(priv->wq);
 
@@ -5086,7 +5135,6 @@ int stmmac_suspend(struct device *dev)
 	mutex_lock(&priv->lock);
 
 	netif_device_detach(ndev);
-	stmmac_stop_all_queues(priv);
 
 	stmmac_disable_all_queues(priv);
 
@@ -5115,8 +5163,7 @@ int stmmac_suspend(struct device *dev)
 		stmmac_mac_set(priv, priv->ioaddr, false);
 		pinctrl_pm_select_sleep_state(priv->device);
 		/* Disable clock in case of PWM is off */
-		if (priv->plat->clk_ptp_ref)
-			clk_disable_unprepare(priv->plat->clk_ptp_ref);
+		clk_disable_unprepare(priv->plat->clk_ptp_ref);
 		clk_disable_unprepare(priv->plat->pclk);
 		clk_disable_unprepare(priv->plat->stmmac_clk);
 	}
@@ -5129,7 +5176,7 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
 
 /**
  * stmmac_reset_queues_param - reset queue parameters
- * @dev: device pointer
+ * @priv: device pointer
  */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
@@ -5213,8 +5260,6 @@ int stmmac_resume(struct device *dev)
 
 	stmmac_enable_all_queues(priv);
 
-	stmmac_start_all_queues(priv);
-
 	mutex_unlock(&priv->lock);
 
 	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
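
Limited to stmmac_main.c, this view does not show who calls the two helpers exported at the end of the diff. The natural consumer of stmmac_reinit_ringparam() is an ethtool ->set_ringparam handler in stmmac_ethtool.c. The sketch below is an assumption about that wiring; the DMA_MIN_*/DMA_MAX_* bounds and the power-of-two check are taken on faith from the driver headers and are not part of this file's diff:

static int stmmac_set_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	/* Reject features the rings do not have, out-of-range sizes, and
	 * sizes that would break the mask-based index arithmetic used by
	 * STMMAC_GET_ENTRY().
	 */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
	    ring->rx_pending < DMA_MIN_RX_SIZE ||
	    ring->rx_pending > DMA_MAX_RX_SIZE ||
	    !is_power_of_2(ring->rx_pending) ||
	    ring->tx_pending < DMA_MIN_TX_SIZE ||
	    ring->tx_pending > DMA_MAX_TX_SIZE ||
	    !is_power_of_2(ring->tx_pending))
		return -EINVAL;

	/* Tears the interface down and brings it back up if running */
	return stmmac_reinit_ringparam(netdev, ring->rx_pending,
				       ring->tx_pending);
}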
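Likewise, stmmac_reinit_queues() maps onto ->set_channels. Another hedged sketch, with the dma_cap fields used for validation being an assumption:

static int stmmac_set_channels(struct net_device *dev,
			       struct ethtool_channels *chan)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Zero queues make no sense, and the counts cannot exceed what
	 * the hardware DMA capabilities report.
	 */
	if (!chan->rx_count || !chan->tx_count ||
	    chan->rx_count > priv->dma_cap.number_rx_queues ||
	    chan->tx_count > priv->dma_cap.number_tx_queues)
		return -EINVAL;

	return stmmac_reinit_queues(dev, chan->rx_count, chan->tx_count);
}

Because both helpers bracket the update with stmmac_release()/stmmac_open(), the netif_set_real_num_{rx,tx}_queues() calls moved into stmmac_hw_setup() and the dma_{rx,tx}_size defaulting added to stmmac_open() take effect on the reopen, which is why this diff relocates them out of the probe path.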
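A note on why the ring sizes must stay powers of two: every DMA_TX_SIZE/DMA_RX_SIZE use replaced above feeds STMMAC_GET_ENTRY(), which advances ring indices with a mask rather than a modulo. A minimal model of that arithmetic, with the macro body assumed from common.h:

/* Wraps to 0 only when size is a power of two */
#define STMMAC_GET_ENTRY(x, size)	(((x) + 1) & ((size) - 1))

/* Mirrors stmmac_tx_avail() above: one descriptor is always kept
 * unused so that cur_tx == dirty_tx unambiguously means "ring empty".
 */
static inline u32 tx_avail(u32 cur_tx, u32 dirty_tx, u32 dma_tx_size)
{
	if (dirty_tx > cur_tx)
		return dirty_tx - cur_tx - 1;
	return dma_tx_size - cur_tx + dirty_tx - 1;
}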