author    David S. Miller <davem@davemloft.net>    2020-02-13 14:10:11 -0800
committer David S. Miller <davem@davemloft.net>    2020-02-13 14:10:11 -0800
commit    89e960b5a99511f21d07da50eb34aeeb46cfba0f (patch)
tree      80508da8ce89c1231699151dae9aa130eb88993f /drivers/net/ethernet/intel/ice/ice_txrx.c
parent    b9287f2ac321ecac56eb51e6231f6579683dcdae (diff)
parent    4ee656bba8013929bcc050bcebc39a47fe763ee9 (diff)
Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2020-02-12

This series contains fixes to the ice driver only.

Dave fixes logic flaws in the DCB rebuild function, which is used after a reset. Also fixed a configuration issue when switching between firmware and software LLDP mode, where the number of TLVs configured was getting out of sync with what lldpad thinks is configured.

Paul fixes how the driver displayed all the supported and advertised link modes by basing it on the PHY capabilities, and in the process cleaned up a lot of code.

Brett fixes duplicate receive tail bumps by comparing the value we are writing to tail with the previously written tail value. Also cleaned up workarounds that are no longer needed with the latest NVM images.

Anirudh cleaned up unnecessary CONFIG_PCI_IOV wrappers, updated the driver to use ice_pf_to_dev() instead of &pf->pdev->dev or &vsi->back->pdev->dev, and cleaned up the string format in print function calls to remove newlines where applicable.

Akeem updates the link message logging to include "Full Duplex" and "Negotiated", to help distinguish from "Requested" for FEC.

Bruce fixes and consolidates the logging of firmware/NVM information during driver load, since the information is a duplicate of what is available via ethtool. Fixed the checking of the Unit Load Status bits after reset to ensure they are 0x7FF before continuing, by updating the mask. Cleaned up possible NULL dereferences that were created by a previous commit.

Ben fixes the driver to use the correct netif_msg_tx/rx_error() to determine whether to print the MDD event type.

Tony provides several trivial fixes, which include whitespace, typos, function header comments, and reverse Christmas tree issues.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
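Brett's tail-bump fix is worth unpacking: the driver remembers the last value it wrote to the queue's tail register and skips the MMIO write when the new value would be identical. A minimal, self-contained sketch of that guard is below; the struct and field names (my_ring, next_to_use, last_tail) are illustrative stand-ins, not the actual ice driver structures.

#include <stdint.h>
#include <stdio.h>

struct my_ring {
	uint16_t next_to_use;     /* next descriptor index software hands to HW */
	uint16_t last_tail;       /* last value actually written to the tail register */
	volatile uint32_t *tail;  /* MMIO tail/doorbell register (simulated here) */
};

/* Only bump tail when the value would actually change; a duplicate
 * write is wasted MMIO, which is what the fix described above avoids.
 */
static void ring_bump_tail(struct my_ring *ring)
{
	if (ring->next_to_use == ring->last_tail)
		return;                          /* nothing new since the last bump */

	*ring->tail = ring->next_to_use;         /* tell HW how far it may fetch */
	ring->last_tail = ring->next_to_use;
}

int main(void)
{
	uint32_t fake_reg = 0;
	struct my_ring ring = { .next_to_use = 8, .last_tail = 0, .tail = &fake_reg };

	ring_bump_tail(&ring);   /* writes 8 */
	ring_bump_tail(&ring);   /* skipped: tail is already 8 */
	printf("tail register = %u\n", (unsigned)fake_reg);
	return 0;
}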
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
-rw-r--r--    drivers/net/ethernet/intel/ice/ice_txrx.c    11
1 file changed, 4 insertions, 7 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index fd17ace6b226..4de61dbedd36 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -644,7 +644,7 @@ static bool ice_page_is_reserved(struct page *page)
* Update the offset within page so that Rx buf will be ready to be reused.
* For systems with PAGE_SIZE < 8192 this function will flip the page offset
* so the second half of page assigned to Rx buffer will be used, otherwise
- * the offset is moved by the @size bytes
+ * the offset is moved by "size" bytes
*/
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
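For context on the hunk above: with PAGE_SIZE < 8192 the driver splits a page into two halves and simply flips the Rx buffer's offset to the other half on reuse, while on larger pages it advances the offset by the buffer size. The following standalone sketch shows that offset arithmetic under the assumption of a clean half-page split; it is an illustration, not the driver's exact implementation.

#include <stdint.h>
#include <stdio.h>

#define MY_PAGE_SIZE 4096u            /* pretend PAGE_SIZE < 8192 */

struct rx_buf {
	unsigned int page_offset;     /* where in the backing page this buffer starts */
};

/* Flip between the two halves of the page (small pages), or just move the
 * offset forward by the buffer size (large pages), mirroring the comment
 * in the hunk above.
 */
static void adjust_pg_offset(struct rx_buf *buf, unsigned int size)
{
#if (MY_PAGE_SIZE < 8192)
	buf->page_offset ^= size;     /* size is half the page, so XOR toggles halves */
#else
	buf->page_offset += size;     /* move past the data just handed to the stack */
#endif
}

int main(void)
{
	struct rx_buf buf = { .page_offset = 0 };
	unsigned int half = MY_PAGE_SIZE / 2;

	adjust_pg_offset(&buf, half);
	printf("offset after first use:  %u\n", buf.page_offset);   /* 2048 */
	adjust_pg_offset(&buf, half);
	printf("offset after second use: %u\n", buf.page_offset);   /* back to 0 */
	return 0;
}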
@@ -1078,8 +1078,6 @@ construct_skb:
skb = ice_build_skb(rx_ring, rx_buf, &xdp);
else
skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
- } else {
- skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
}
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1621,11 +1619,11 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
{
u64 td_offset, td_tag, td_cmd;
u16 i = tx_ring->next_to_use;
- skb_frag_t *frag;
unsigned int data_len, size;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
struct sk_buff *skb;
+ skb_frag_t *frag;
dma_addr_t dma;
td_tag = off->td_l2tag1;
@@ -1738,9 +1736,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
writel(i, tx_ring->tail);
- }
return;
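The hunk above only drops redundant braces, but the condition it keeps is the interesting part: the tail (doorbell) write is issued only when the queue has been stopped or when the stack signals no more packets are queued behind this one, so a burst of packets costs a single MMIO write. Below is a hedged, self-contained sketch of that batching pattern; maybe_notify_hw, write_tail, queue_stopped, and xmit_more are stand-ins for the kernel's writel(), netif_xmit_stopped(), and netdev_xmit_more(), not real APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static unsigned int doorbell_writes;          /* count MMIO writes for the demo */

static void write_tail(uint32_t *tail_reg, uint16_t value)
{
	*tail_reg = value;                    /* stand-in for writel(i, tx_ring->tail) */
	doorbell_writes++;
}

/* Defer the tail bump while the stack says more packets are coming,
 * unless the queue was stopped, so a burst is flushed with one write.
 */
static void maybe_notify_hw(uint32_t *tail_reg, uint16_t next_to_use,
			    bool queue_stopped, bool xmit_more)
{
	if (queue_stopped || !xmit_more)
		write_tail(tail_reg, next_to_use);
}

int main(void)
{
	uint32_t tail_reg = 0;

	/* three packets in a burst: only the last one bumps the doorbell */
	maybe_notify_hw(&tail_reg, 4, false, true);
	maybe_notify_hw(&tail_reg, 8, false, true);
	maybe_notify_hw(&tail_reg, 12, false, false);

	printf("tail=%u after %u doorbell write(s)\n",
	       (unsigned)tail_reg, doorbell_writes);
	return 0;
}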
@@ -2078,7 +2075,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
frag = &skb_shinfo(skb)->frags[0];
/* Initialize size to the negative value of gso_size minus 1. We
- * use this as the worst case scenerio in which the frag ahead
+ * use this as the worst case scenario in which the frag ahead
* of us only provides one byte which is why we are limited to 6
* descriptors for a single transmit as the header and previous
* fragment are already consuming 2 descriptors.
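The comment in this last hunk encodes a descriptor budget: the header and the fragment ahead of the window already consume 2 descriptors, leaving 6 for the fragments a single gso_size stretch of payload may touch, and starting the running size at 1 - gso_size models the worst case where the fragment ahead contributes only one byte. The sketch below applies that accounting to a flat array of fragment sizes; the helper name, FRAGS_PER_SEGMENT, and the sliding-window loop are illustrative assumptions, not the driver's __ice_chk_linearize itself.

#include <stdbool.h>
#include <stdio.h>

#define FRAGS_PER_SEGMENT 6   /* descriptors left after header + previous frag */

/* Return true if some gso_size-sized stretch of payload would need more than
 * FRAGS_PER_SEGMENT fragments, i.e. the skb would have to be linearized.
 * Each window starts at 1 - gso_size, the worst case described in the
 * comment above, and must be brought back to >= 0 by the next six frags.
 */
static bool needs_linearize(const unsigned int *frag_sz, int nr_frags,
			    unsigned int gso_size)
{
	for (int start = 0; start + FRAGS_PER_SEGMENT <= nr_frags; start++) {
		int sum = 1 - (int)gso_size;   /* only 1 byte from the frag ahead */

		/* add the next FRAGS_PER_SEGMENT fragments to the window */
		for (int i = start; i < start + FRAGS_PER_SEGMENT; i++)
			sum += frag_sz[i];

		/* if they still do not cover the segment, a seventh fragment
		 * would be needed and the descriptor budget is blown */
		if (sum < 0)
			return true;
	}
	return false;
}

int main(void)
{
	unsigned int tiny[8] = { 64, 64, 64, 64, 64, 64, 64, 64 };

	printf("gso_size 1448, tiny frags -> linearize? %s\n",
	       needs_linearize(tiny, 8, 1448) ? "yes" : "no");
	return 0;
}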