Diffstat (limited to 'drivers/net/ethernet/wangxun/libwx')
 drivers/net/ethernet/wangxun/libwx/Makefile     |    2
 drivers/net/ethernet/wangxun/libwx/wx_ethtool.c |   18
 drivers/net/ethernet/wangxun/libwx/wx_ethtool.h |    8
 drivers/net/ethernet/wangxun/libwx/wx_hw.c      | 1197
 drivers/net/ethernet/wangxun/libwx/wx_hw.h      |   42
 drivers/net/ethernet/wangxun/libwx/wx_lib.c     | 2004
 drivers/net/ethernet/wangxun/libwx/wx_lib.h     |   32
 drivers/net/ethernet/wangxun/libwx/wx_type.h    |  409
8 files changed, 3466 insertions, 246 deletions
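
The bulk of this patch moves shared Rx/Tx and ethtool plumbing into libwx and exports it (wx_get_drvinfo, wx_set_mac, wx_set_rx_mode, wx_configure, wx_irq_disable, ...). As a rough illustration only -- not part of this commit, with hypothetical "example_*" names and include paths -- a Wangxun MAC driver built on libwx could plug these exported helpers straight into its ops tables, since their signatures already match the corresponding ethtool_ops and net_device_ops callbacks:

	/* Sketch of a libwx consumer; names and paths are hypothetical. */
	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	#include "../libwx/wx_type.h"
	#include "../libwx/wx_ethtool.h"	/* wx_get_drvinfo() */
	#include "../libwx/wx_hw.h"		/* wx_set_mac(), wx_set_rx_mode() */

	static const struct ethtool_ops example_ethtool_ops = {
		.get_drvinfo		= wx_get_drvinfo,
	};

	static const struct net_device_ops example_netdev_ops = {
		.ndo_set_rx_mode	= wx_set_rx_mode,
		.ndo_set_mac_address	= wx_set_mac,
	};

The remaining exports (wx_configure(), wx_irq_disable(), wx_disable_rx_queue(), ...) would presumably be called from such a consumer's open/stop paths; the diff below only adds the library side.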
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile index 1ed5e23af944..42ccd6e4052e 100644 --- a/drivers/net/ethernet/wangxun/libwx/Makefile +++ b/drivers/net/ethernet/wangxun/libwx/Makefile @@ -4,4 +4,4 @@ obj-$(CONFIG_LIBWX) += libwx.o -libwx-objs := wx_hw.o +libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c new file mode 100644 index 000000000000..93cb6f2294e7 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/pci.h> +#include <linux/phy.h> + +#include "wx_type.h" +#include "wx_ethtool.h" + +void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) +{ + struct wx *wx = netdev_priv(netdev); + + strscpy(info->driver, wx->driver_name, sizeof(info->driver)); + strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version)); + strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info)); +} +EXPORT_SYMBOL(wx_get_drvinfo); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h new file mode 100644 index 000000000000..e85538c69454 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _WX_ETHTOOL_H_ +#define _WX_ETHTOOL_H_ + +void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info); +#endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index c57dc3238b3f..7db57f934a91 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -2,59 +2,100 @@ /* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ #include <linux/etherdevice.h> +#include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/iopoll.h> #include <linux/pci.h> #include "wx_type.h" +#include "wx_lib.h" #include "wx_hw.h" -static void wx_intr_disable(struct wx_hw *wxhw, u64 qmask) +static void wx_intr_disable(struct wx *wx, u64 qmask) { u32 mask; - mask = (qmask & 0xFFFFFFFF); + mask = (qmask & U32_MAX); if (mask) - wr32(wxhw, WX_PX_IMS(0), mask); + wr32(wx, WX_PX_IMS(0), mask); - if (wxhw->mac.type == wx_mac_sp) { + if (wx->mac.type == wx_mac_sp) { mask = (qmask >> 32); if (mask) - wr32(wxhw, WX_PX_IMS(1), mask); + wr32(wx, WX_PX_IMS(1), mask); } } +void wx_intr_enable(struct wx *wx, u64 qmask) +{ + u32 mask; + + mask = (qmask & U32_MAX); + if (mask) + wr32(wx, WX_PX_IMC(0), mask); + if (wx->mac.type == wx_mac_sp) { + mask = (qmask >> 32); + if (mask) + wr32(wx, WX_PX_IMC(1), mask); + } +} +EXPORT_SYMBOL(wx_intr_enable); + +/** + * wx_irq_disable - Mask off interrupt generation on the NIC + * @wx: board private structure + **/ +void wx_irq_disable(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + + wr32(wx, WX_PX_MISC_IEN, 0); + wx_intr_disable(wx, WX_INTR_ALL); + + if (pdev->msix_enabled) { + int vector; + + for (vector = 0; vector < wx->num_q_vectors; vector++) + synchronize_irq(wx->msix_entries[vector].vector); + + synchronize_irq(wx->msix_entries[vector].vector); + } else { + synchronize_irq(pdev->irq); + } +} +EXPORT_SYMBOL(wx_irq_disable); + /* cmd_addr is used for some special command: * 1. 
to be sector address, when implemented erase sector command * 2. to be flash address when implemented read, write flash address */ -static int wx_fmgr_cmd_op(struct wx_hw *wxhw, u32 cmd, u32 cmd_addr) +static int wx_fmgr_cmd_op(struct wx *wx, u32 cmd, u32 cmd_addr) { u32 cmd_val = 0, val = 0; cmd_val = WX_SPI_CMD_CMD(cmd) | WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) | cmd_addr; - wr32(wxhw, WX_SPI_CMD, cmd_val); + wr32(wx, WX_SPI_CMD, cmd_val); return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000, - false, wxhw, WX_SPI_STATUS); + false, wx, WX_SPI_STATUS); } -static int wx_flash_read_dword(struct wx_hw *wxhw, u32 addr, u32 *data) +static int wx_flash_read_dword(struct wx *wx, u32 addr, u32 *data) { int ret = 0; - ret = wx_fmgr_cmd_op(wxhw, WX_SPI_CMD_READ_DWORD, addr); + ret = wx_fmgr_cmd_op(wx, WX_SPI_CMD_READ_DWORD, addr); if (ret < 0) return ret; - *data = rd32(wxhw, WX_SPI_DATA); + *data = rd32(wx, WX_SPI_DATA); return ret; } -int wx_check_flash_load(struct wx_hw *hw, u32 check_bit) +int wx_check_flash_load(struct wx *hw, u32 check_bit) { u32 reg = 0; int err = 0; @@ -73,29 +114,25 @@ int wx_check_flash_load(struct wx_hw *hw, u32 check_bit) } EXPORT_SYMBOL(wx_check_flash_load); -void wx_control_hw(struct wx_hw *wxhw, bool drv) +void wx_control_hw(struct wx *wx, bool drv) { - if (drv) { - /* Let firmware know the driver has taken over */ - wr32m(wxhw, WX_CFG_PORT_CTL, - WX_CFG_PORT_CTL_DRV_LOAD, WX_CFG_PORT_CTL_DRV_LOAD); - } else { - /* Let firmware take over control of hw */ - wr32m(wxhw, WX_CFG_PORT_CTL, - WX_CFG_PORT_CTL_DRV_LOAD, 0); - } + /* True : Let firmware know the driver has taken over + * False : Let firmware take over control of hw + */ + wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_DRV_LOAD, + drv ? WX_CFG_PORT_CTL_DRV_LOAD : 0); } EXPORT_SYMBOL(wx_control_hw); /** * wx_mng_present - returns 0 when management capability is present - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure */ -int wx_mng_present(struct wx_hw *wxhw) +int wx_mng_present(struct wx *wx) { u32 fwsm; - fwsm = rd32(wxhw, WX_MIS_ST); + fwsm = rd32(wx, WX_MIS_ST); if (fwsm & WX_MIS_ST_MNG_INIT_DN) return 0; else @@ -108,40 +145,40 @@ static DEFINE_MUTEX(wx_sw_sync_lock); /** * wx_release_sw_sync - Release SW semaphore - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @mask: Mask to specify which semaphore to release * * Releases the SW semaphore for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -static void wx_release_sw_sync(struct wx_hw *wxhw, u32 mask) +static void wx_release_sw_sync(struct wx *wx, u32 mask) { mutex_lock(&wx_sw_sync_lock); - wr32m(wxhw, WX_MNG_SWFW_SYNC, mask, 0); + wr32m(wx, WX_MNG_SWFW_SYNC, mask, 0); mutex_unlock(&wx_sw_sync_lock); } /** * wx_acquire_sw_sync - Acquire SW semaphore - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @mask: Mask to specify which semaphore to acquire * * Acquires the SW semaphore for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask) +static int wx_acquire_sw_sync(struct wx *wx, u32 mask) { u32 sem = 0; int ret = 0; mutex_lock(&wx_sw_sync_lock); ret = read_poll_timeout(rd32, sem, !(sem & mask), - 5000, 2000000, false, wxhw, WX_MNG_SWFW_SYNC); + 5000, 2000000, false, wx, WX_MNG_SWFW_SYNC); if (!ret) { sem |= mask; - wr32(wxhw, WX_MNG_SWFW_SYNC, sem); + wr32(wx, WX_MNG_SWFW_SYNC, sem); } else { - wx_err(wxhw, "SW Semaphore not granted: 0x%x.\n", sem); + wx_err(wx, "SW Semaphore not 
granted: 0x%x.\n", sem); } mutex_unlock(&wx_sw_sync_lock); @@ -150,7 +187,7 @@ static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask) /** * wx_host_interface_command - Issue command to manageability block - * @wxhw: pointer to the HW structure + * @wx: pointer to the HW structure * @buffer: contains the command to write and where the return status will * be placed * @length: length of buffer, must be multiple of 4 bytes @@ -162,7 +199,7 @@ static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask) * So we will leave this up to the caller to read back the data * in these cases. **/ -int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, +int wx_host_interface_command(struct wx *wx, u32 *buffer, u32 length, u32 timeout, bool return_data) { u32 hdr_size = sizeof(struct wx_hic_hdr); @@ -172,17 +209,17 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, u16 buf_len; if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) { - wx_err(wxhw, "Buffer length failure buffersize=%d.\n", length); + wx_err(wx, "Buffer length failure buffersize=%d.\n", length); return -EINVAL; } - status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_MB); + status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); if (status != 0) return status; /* Calculate length in DWORDs. We must be DWORD aligned */ if ((length % (sizeof(u32))) != 0) { - wx_err(wxhw, "Buffer length failure, not aligned to dword"); + wx_err(wx, "Buffer length failure, not aligned to dword"); status = -EINVAL; goto rel_out; } @@ -193,38 +230,38 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, * into the ram area. */ for (i = 0; i < dword_len; i++) { - wr32a(wxhw, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i])); + wr32a(wx, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i])); /* write flush */ - buf[i] = rd32a(wxhw, WX_MNG_MBOX, i); + buf[i] = rd32a(wx, WX_MNG_MBOX, i); } /* Setting this bit tells the ARC that a new command is pending. 
*/ - wr32m(wxhw, WX_MNG_MBOX_CTL, + wr32m(wx, WX_MNG_MBOX_CTL, WX_MNG_MBOX_CTL_SWRDY, WX_MNG_MBOX_CTL_SWRDY); status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000, - timeout * 1000, false, wxhw, WX_MNG_MBOX_CTL); + timeout * 1000, false, wx, WX_MNG_MBOX_CTL); /* Check command completion */ if (status) { - wx_dbg(wxhw, "Command has failed with no status valid.\n"); + wx_dbg(wx, "Command has failed with no status valid.\n"); - buf[0] = rd32(wxhw, WX_MNG_MBOX); + buf[0] = rd32(wx, WX_MNG_MBOX); if ((buffer[0] & 0xff) != (~buf[0] >> 24)) { status = -EINVAL; goto rel_out; } if ((buf[0] & 0xff0000) >> 16 == 0x80) { - wx_dbg(wxhw, "It's unknown cmd.\n"); + wx_dbg(wx, "It's unknown cmd.\n"); status = -EINVAL; goto rel_out; } - wx_dbg(wxhw, "write value:\n"); + wx_dbg(wx, "write value:\n"); for (i = 0; i < dword_len; i++) - wx_dbg(wxhw, "%x ", buffer[i]); - wx_dbg(wxhw, "read value:\n"); + wx_dbg(wx, "%x ", buffer[i]); + wx_dbg(wx, "read value:\n"); for (i = 0; i < dword_len; i++) - wx_dbg(wxhw, "%x ", buf[i]); + wx_dbg(wx, "%x ", buf[i]); } if (!return_data) @@ -235,7 +272,7 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, /* first pull in the header so we know the buffer length */ for (bi = 0; bi < dword_len; bi++) { - buffer[bi] = rd32a(wxhw, WX_MNG_MBOX, bi); + buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi); le32_to_cpus(&buffer[bi]); } @@ -245,7 +282,7 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, goto rel_out; if (length < buf_len + hdr_size) { - wx_err(wxhw, "Buffer not large enough for reply message.\n"); + wx_err(wx, "Buffer not large enough for reply message.\n"); status = -EFAULT; goto rel_out; } @@ -255,12 +292,12 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, /* Pull in the rest of the buffer (bi is where we left off) */ for (; bi <= dword_len; bi++) { - buffer[bi] = rd32a(wxhw, WX_MNG_MBOX, bi); + buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi); le32_to_cpus(&buffer[bi]); } rel_out: - wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_MB); + wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); return status; } EXPORT_SYMBOL(wx_host_interface_command); @@ -268,13 +305,13 @@ EXPORT_SYMBOL(wx_host_interface_command); /** * wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd * assuming that the semaphore is already obtained. - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the hostif. 
**/ -static int wx_read_ee_hostif_data(struct wx_hw *wxhw, u16 offset, u16 *data) +static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data) { struct wx_hic_read_shadow_ram buffer; int status; @@ -289,33 +326,33 @@ static int wx_read_ee_hostif_data(struct wx_hw *wxhw, u16 offset, u16 *data) /* one word */ buffer.length = (__force u16)cpu_to_be16(sizeof(u16)); - status = wx_host_interface_command(wxhw, (u32 *)&buffer, sizeof(buffer), + status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), WX_HI_COMMAND_TIMEOUT, false); if (status != 0) return status; - *data = (u16)rd32a(wxhw, WX_MNG_MBOX, FW_NVM_DATA_OFFSET); + *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET); return status; } /** * wx_read_ee_hostif - Read EEPROM word using a host interface cmd - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the hostif. **/ -int wx_read_ee_hostif(struct wx_hw *wxhw, u16 offset, u16 *data) +int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data) { int status = 0; - status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH); + status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); if (status == 0) { - status = wx_read_ee_hostif_data(wxhw, offset, data); - wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH); + status = wx_read_ee_hostif_data(wx, offset, data); + wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); } return status; @@ -324,14 +361,14 @@ EXPORT_SYMBOL(wx_read_ee_hostif); /** * wx_read_ee_hostif_buffer- Read EEPROM word(s) using hostif - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @words: number of words * @data: word(s) read from the EEPROM * * Reads a 16 bit word(s) from the EEPROM using the hostif. **/ -int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, +int wx_read_ee_hostif_buffer(struct wx *wx, u16 offset, u16 words, u16 *data) { struct wx_hic_read_shadow_ram buffer; @@ -342,7 +379,7 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, u32 i; /* Take semaphore for the entire operation. 
*/ - status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH); + status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); if (status != 0) return status; @@ -361,20 +398,20 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2); buffer.length = (__force u16)cpu_to_be16(words_to_read * 2); - status = wx_host_interface_command(wxhw, (u32 *)&buffer, + status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), WX_HI_COMMAND_TIMEOUT, false); if (status != 0) { - wx_err(wxhw, "Host interface command failed\n"); + wx_err(wx, "Host interface command failed\n"); goto out; } for (i = 0; i < words_to_read; i++) { u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i; - value = rd32(wxhw, reg); + value = rd32(wx, reg); data[current_word] = (u16)(value & 0xffff); current_word++; i++; @@ -388,7 +425,7 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, } out: - wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH); + wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); return status; } EXPORT_SYMBOL(wx_read_ee_hostif_buffer); @@ -416,12 +453,12 @@ static u8 wx_calculate_checksum(u8 *buffer, u32 length) /** * wx_reset_hostif - send reset cmd to fw - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Sends reset cmd to firmware through the manageability * block. **/ -int wx_reset_hostif(struct wx_hw *wxhw) +int wx_reset_hostif(struct wx *wx) { struct wx_hic_reset reset_cmd; int ret_val = 0; @@ -430,15 +467,15 @@ int wx_reset_hostif(struct wx_hw *wxhw) reset_cmd.hdr.cmd = FW_RESET_CMD; reset_cmd.hdr.buf_len = FW_RESET_LEN; reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - reset_cmd.lan_id = wxhw->bus.func; - reset_cmd.reset_type = (u16)wxhw->reset_type; + reset_cmd.lan_id = wx->bus.func; + reset_cmd.reset_type = (u16)wx->reset_type; reset_cmd.hdr.checksum = 0; reset_cmd.hdr.checksum = wx_calculate_checksum((u8 *)&reset_cmd, (FW_CEM_HDR_LEN + reset_cmd.hdr.buf_len)); for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - ret_val = wx_host_interface_command(wxhw, (u32 *)&reset_cmd, + ret_val = wx_host_interface_command(wx, (u32 *)&reset_cmd, sizeof(reset_cmd), WX_HI_COMMAND_TIMEOUT, true); @@ -460,14 +497,14 @@ EXPORT_SYMBOL(wx_reset_hostif); /** * wx_init_eeprom_params - Initialize EEPROM params - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Initializes the EEPROM parameters wx_eeprom_info within the * wx_hw struct in order to set up EEPROM access. 
**/ -void wx_init_eeprom_params(struct wx_hw *wxhw) +void wx_init_eeprom_params(struct wx *wx) { - struct wx_eeprom_info *eeprom = &wxhw->eeprom; + struct wx_eeprom_info *eeprom = &wx->eeprom; u16 eeprom_size; u16 data = 0x80; @@ -475,21 +512,21 @@ void wx_init_eeprom_params(struct wx_hw *wxhw) eeprom->semaphore_delay = 10; eeprom->type = wx_eeprom_none; - if (!(rd32(wxhw, WX_SPI_STATUS) & + if (!(rd32(wx, WX_SPI_STATUS) & WX_SPI_STATUS_FLASH_BYPASS)) { eeprom->type = wx_flash; eeprom_size = 4096; eeprom->word_size = eeprom_size >> 1; - wx_dbg(wxhw, "Eeprom params: type = %d, size = %d\n", + wx_dbg(wx, "Eeprom params: type = %d, size = %d\n", eeprom->type, eeprom->word_size); } } - if (wxhw->mac.type == wx_mac_sp) { - if (wx_read_ee_hostif(wxhw, WX_SW_REGION_PTR, &data)) { - wx_err(wxhw, "NVM Read Error\n"); + if (wx->mac.type == wx_mac_sp) { + if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) { + wx_err(wx, "NVM Read Error\n"); return; } data = data >> 1; @@ -501,22 +538,22 @@ EXPORT_SYMBOL(wx_init_eeprom_params); /** * wx_get_mac_addr - Generic get MAC address - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @mac_addr: Adapter MAC address * * Reads the adapter's MAC address from first Receive Address Register (RAR0) * A reset of the adapter must be performed prior to calling this function * in order for the MAC address to have been loaded from the EEPROM into RAR0 **/ -void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr) +void wx_get_mac_addr(struct wx *wx, u8 *mac_addr) { u32 rar_high; u32 rar_low; u16 i; - wr32(wxhw, WX_PSR_MAC_SWC_IDX, 0); - rar_high = rd32(wxhw, WX_PSR_MAC_SWC_AD_H); - rar_low = rd32(wxhw, WX_PSR_MAC_SWC_AD_L); + wr32(wx, WX_PSR_MAC_SWC_IDX, 0); + rar_high = rd32(wx, WX_PSR_MAC_SWC_AD_H); + rar_low = rd32(wx, WX_PSR_MAC_SWC_AD_L); for (i = 0; i < 2; i++) mac_addr[i] = (u8)(rar_high >> (1 - i) * 8); @@ -528,7 +565,7 @@ EXPORT_SYMBOL(wx_get_mac_addr); /** * wx_set_rar - Set Rx address register - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @index: Receive address register to write * @addr: Address to put into receive address register * @pools: VMDq "set" or "pool" index @@ -536,25 +573,25 @@ EXPORT_SYMBOL(wx_get_mac_addr); * * Puts an ethernet address into a receive address register. 
**/ -int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, - u32 enable_addr) +static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools, + u32 enable_addr) { - u32 rar_entries = wxhw->mac.num_rar_entries; + u32 rar_entries = wx->mac.num_rar_entries; u32 rar_low, rar_high; /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { - wx_err(wxhw, "RAR index %d is out of range.\n", index); + wx_err(wx, "RAR index %d is out of range.\n", index); return -EINVAL; } /* select the MAC address */ - wr32(wxhw, WX_PSR_MAC_SWC_IDX, index); + wr32(wx, WX_PSR_MAC_SWC_IDX, index); /* setup VMDq pool mapping */ - wr32(wxhw, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); - if (wxhw->mac.type == wx_mac_sp) - wr32(wxhw, WX_PSR_MAC_SWC_VM_H, pools >> 32); + wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); + if (wx->mac.type == wx_mac_sp) + wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32); /* HW expects these in little endian so we reverse the byte * order from network order (big endian) to little endian @@ -572,31 +609,30 @@ int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, if (enable_addr != 0) rar_high |= WX_PSR_MAC_SWC_AD_H_AV; - wr32(wxhw, WX_PSR_MAC_SWC_AD_L, rar_low); - wr32m(wxhw, WX_PSR_MAC_SWC_AD_H, - (WX_PSR_MAC_SWC_AD_H_AD(~0) | - WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + wr32(wx, WX_PSR_MAC_SWC_AD_L, rar_low); + wr32m(wx, WX_PSR_MAC_SWC_AD_H, + (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) | + WX_PSR_MAC_SWC_AD_H_ADTYPE(1) | WX_PSR_MAC_SWC_AD_H_AV), rar_high); return 0; } -EXPORT_SYMBOL(wx_set_rar); /** * wx_clear_rar - Remove Rx address register - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @index: Receive address register to write * * Clears an ethernet address from a receive address register. **/ -int wx_clear_rar(struct wx_hw *wxhw, u32 index) +static int wx_clear_rar(struct wx *wx, u32 index) { - u32 rar_entries = wxhw->mac.num_rar_entries; + u32 rar_entries = wx->mac.num_rar_entries; /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { - wx_err(wxhw, "RAR index %d is out of range.\n", index); + wx_err(wx, "RAR index %d is out of range.\n", index); return -EINVAL; } @@ -604,78 +640,77 @@ int wx_clear_rar(struct wx_hw *wxhw, u32 index) * so save everything except the lower 16 bits that hold part * of the address and the address valid bit. 
*/ - wr32(wxhw, WX_PSR_MAC_SWC_IDX, index); + wr32(wx, WX_PSR_MAC_SWC_IDX, index); - wr32(wxhw, WX_PSR_MAC_SWC_VM_L, 0); - wr32(wxhw, WX_PSR_MAC_SWC_VM_H, 0); + wr32(wx, WX_PSR_MAC_SWC_VM_L, 0); + wr32(wx, WX_PSR_MAC_SWC_VM_H, 0); - wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0); - wr32m(wxhw, WX_PSR_MAC_SWC_AD_H, - (WX_PSR_MAC_SWC_AD_H_AD(~0) | - WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + wr32(wx, WX_PSR_MAC_SWC_AD_L, 0); + wr32m(wx, WX_PSR_MAC_SWC_AD_H, + (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) | + WX_PSR_MAC_SWC_AD_H_ADTYPE(1) | WX_PSR_MAC_SWC_AD_H_AV), 0); return 0; } -EXPORT_SYMBOL(wx_clear_rar); /** * wx_clear_vmdq - Disassociate a VMDq pool index from a rx address - * @wxhw: pointer to hardware struct + * @wx: pointer to hardware struct * @rar: receive address register index to disassociate * @vmdq: VMDq pool index to remove from the rar **/ -static int wx_clear_vmdq(struct wx_hw *wxhw, u32 rar, u32 __maybe_unused vmdq) +static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq) { - u32 rar_entries = wxhw->mac.num_rar_entries; + u32 rar_entries = wx->mac.num_rar_entries; u32 mpsar_lo, mpsar_hi; /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { - wx_err(wxhw, "RAR index %d is out of range.\n", rar); + wx_err(wx, "RAR index %d is out of range.\n", rar); return -EINVAL; } - wr32(wxhw, WX_PSR_MAC_SWC_IDX, rar); - mpsar_lo = rd32(wxhw, WX_PSR_MAC_SWC_VM_L); - mpsar_hi = rd32(wxhw, WX_PSR_MAC_SWC_VM_H); + wr32(wx, WX_PSR_MAC_SWC_IDX, rar); + mpsar_lo = rd32(wx, WX_PSR_MAC_SWC_VM_L); + mpsar_hi = rd32(wx, WX_PSR_MAC_SWC_VM_H); if (!mpsar_lo && !mpsar_hi) return 0; /* was that the last pool using this rar? */ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) - wx_clear_rar(wxhw, rar); + wx_clear_rar(wx, rar); return 0; } /** * wx_init_uta_tables - Initialize the Unicast Table Array - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure **/ -static void wx_init_uta_tables(struct wx_hw *wxhw) +static void wx_init_uta_tables(struct wx *wx) { int i; - wx_dbg(wxhw, " Clearing UTA\n"); + wx_dbg(wx, " Clearing UTA\n"); for (i = 0; i < 128; i++) - wr32(wxhw, WX_PSR_UC_TBL(i), 0); + wr32(wx, WX_PSR_UC_TBL(i), 0); } /** * wx_init_rx_addrs - Initializes receive address filters. - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Places the MAC address in receive address register 0 and clears the rest * of the receive address registers. Clears the multicast table. Assumes * the receiver is in reset when the routine is called. **/ -void wx_init_rx_addrs(struct wx_hw *wxhw) +void wx_init_rx_addrs(struct wx *wx) { - u32 rar_entries = wxhw->mac.num_rar_entries; + u32 rar_entries = wx->mac.num_rar_entries; u32 psrctl; int i; @@ -683,97 +718,829 @@ void wx_init_rx_addrs(struct wx_hw *wxhw) * to the permanent address. * Otherwise, use the permanent address from the eeprom. */ - if (!is_valid_ether_addr(wxhw->mac.addr)) { + if (!is_valid_ether_addr(wx->mac.addr)) { /* Get the MAC address from the RAR0 for later reference */ - wx_get_mac_addr(wxhw, wxhw->mac.addr); - wx_dbg(wxhw, "Keeping Current RAR0 Addr = %pM\n", wxhw->mac.addr); + wx_get_mac_addr(wx, wx->mac.addr); + wx_dbg(wx, "Keeping Current RAR0 Addr = %pM\n", wx->mac.addr); } else { /* Setup the receive address. 
*/ - wx_dbg(wxhw, "Overriding MAC Address in RAR[0]\n"); - wx_dbg(wxhw, "New MAC Addr = %pM\n", wxhw->mac.addr); + wx_dbg(wx, "Overriding MAC Address in RAR[0]\n"); + wx_dbg(wx, "New MAC Addr = %pM\n", wx->mac.addr); - wx_set_rar(wxhw, 0, wxhw->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV); + wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV); - if (wxhw->mac.type == wx_mac_sp) { + if (wx->mac.type == wx_mac_sp) { /* clear VMDq pool/queue selection for RAR 0 */ - wx_clear_vmdq(wxhw, 0, WX_CLEAR_VMDQ_ALL); + wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL); } } /* Zero out the other receive addresses. */ - wx_dbg(wxhw, "Clearing RAR[1-%d]\n", rar_entries - 1); + wx_dbg(wx, "Clearing RAR[1-%d]\n", rar_entries - 1); for (i = 1; i < rar_entries; i++) { - wr32(wxhw, WX_PSR_MAC_SWC_IDX, i); - wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0); - wr32(wxhw, WX_PSR_MAC_SWC_AD_H, 0); + wr32(wx, WX_PSR_MAC_SWC_IDX, i); + wr32(wx, WX_PSR_MAC_SWC_AD_L, 0); + wr32(wx, WX_PSR_MAC_SWC_AD_H, 0); } /* Clear the MTA */ - wxhw->addr_ctrl.mta_in_use = 0; - psrctl = rd32(wxhw, WX_PSR_CTL); + wx->addr_ctrl.mta_in_use = 0; + psrctl = rd32(wx, WX_PSR_CTL); psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE); - psrctl |= wxhw->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT; - wr32(wxhw, WX_PSR_CTL, psrctl); - wx_dbg(wxhw, " Clearing MTA\n"); - for (i = 0; i < wxhw->mac.mcft_size; i++) - wr32(wxhw, WX_PSR_MC_TBL(i), 0); + psrctl |= wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT; + wr32(wx, WX_PSR_CTL, psrctl); + wx_dbg(wx, " Clearing MTA\n"); + for (i = 0; i < wx->mac.mcft_size; i++) + wr32(wx, WX_PSR_MC_TBL(i), 0); - wx_init_uta_tables(wxhw); + wx_init_uta_tables(wx); } EXPORT_SYMBOL(wx_init_rx_addrs); -void wx_disable_rx(struct wx_hw *wxhw) +static void wx_sync_mac_table(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (wx->mac_table[i].state & WX_MAC_STATE_MODIFIED) { + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) { + wx_set_rar(wx, i, + wx->mac_table[i].addr, + wx->mac_table[i].pools, + WX_PSR_MAC_SWC_AD_H_AV); + } else { + wx_clear_rar(wx, i); + } + wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED); + } + } +} + +/* this function destroys the first RAR entry */ +void wx_mac_set_default_filter(struct wx *wx, u8 *addr) +{ + memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN); + wx->mac_table[0].pools = 1ULL; + wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE); + wx_set_rar(wx, 0, wx->mac_table[0].addr, + wx->mac_table[0].pools, + WX_PSR_MAC_SWC_AD_H_AV); +} +EXPORT_SYMBOL(wx_mac_set_default_filter); + +void wx_flush_sw_mac_table(struct wx *wx) +{ + u32 i; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (!(wx->mac_table[i].state & WX_MAC_STATE_IN_USE)) + continue; + + wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED; + wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE; + memset(wx->mac_table[i].addr, 0, ETH_ALEN); + wx->mac_table[i].pools = 0; + } + wx_sync_mac_table(wx); +} +EXPORT_SYMBOL(wx_flush_sw_mac_table); + +static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool) +{ + u32 i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) { + if (ether_addr_equal(addr, wx->mac_table[i].addr)) { + if (wx->mac_table[i].pools != (1ULL << pool)) { + memcpy(wx->mac_table[i].addr, addr, ETH_ALEN); + wx->mac_table[i].pools |= (1ULL << pool); + wx_sync_mac_table(wx); + return i; + } + } + } + + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) + continue; + wx->mac_table[i].state |= 
(WX_MAC_STATE_MODIFIED | + WX_MAC_STATE_IN_USE); + memcpy(wx->mac_table[i].addr, addr, ETH_ALEN); + wx->mac_table[i].pools |= (1ULL << pool); + wx_sync_mac_table(wx); + return i; + } + return -ENOMEM; +} + +static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool) +{ + u32 i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* search table for addr, if found, set to 0 and sync */ + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (!ether_addr_equal(addr, wx->mac_table[i].addr)) + continue; + + wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED; + wx->mac_table[i].pools &= ~(1ULL << pool); + if (!wx->mac_table[i].pools) { + wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE; + memset(wx->mac_table[i].addr, 0, ETH_ALEN); + } + wx_sync_mac_table(wx); + return 0; + } + return -ENOMEM; +} + +static int wx_available_rars(struct wx *wx) +{ + u32 i, count = 0; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (wx->mac_table[i].state == 0) + count++; + } + + return count; +} + +/** + * wx_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * @pool: index for mac table + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int wx_write_uc_addr_list(struct net_device *netdev, int pool) +{ + struct wx *wx = netdev_priv(netdev); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > wx_available_rars(wx)) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, netdev) { + wx_del_mac_filter(wx, ha->addr, pool); + wx_add_mac_filter(wx, ha->addr, pool); + count++; + } + } + return count; +} + +/** + * wx_mta_vector - Determines bit-vector in multicast table to set + * @wx: pointer to private structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr) +{ + u32 vector = 0; + + switch (wx->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + wx_err(wx, "MC filter type param set incorrectly\n"); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * wx_set_mta - Set bit-vector in multicast table + * @wx: pointer to private structure + * @mc_addr: Multicast address + * + * Sets the bit-vector in the multicast table. 
+ **/ +static void wx_set_mta(struct wx *wx, u8 *mc_addr) +{ + u32 vector, vector_bit, vector_reg; + + wx->addr_ctrl.mta_in_use++; + + vector = wx_mta_vector(wx, mc_addr); + wx_dbg(wx, " bit-vector = 0x%03X\n", vector); + + /* The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + * wx_update_mc_addr_list - Updates MAC list of multicast addresses + * @wx: pointer to private structure + * @netdev: pointer to net device structure + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. + **/ +static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev) +{ + struct netdev_hw_addr *ha; + u32 i, psrctl; + + /* Set the new number of MC addresses that we are being requested to + * use. + */ + wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); + wx->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + wx_dbg(wx, " Clearing MTA\n"); + memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow)); + + /* Update mta_shadow */ + netdev_for_each_mc_addr(ha, netdev) { + wx_dbg(wx, " Adding the multicast addresses:\n"); + wx_set_mta(wx, ha->addr); + } + + /* Enable mta */ + for (i = 0; i < wx->mac.mcft_size; i++) + wr32a(wx, WX_PSR_MC_TBL(0), i, + wx->mac.mta_shadow[i]); + + if (wx->addr_ctrl.mta_in_use > 0) { + psrctl = rd32(wx, WX_PSR_CTL); + psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE); + psrctl |= WX_PSR_CTL_MFE | + (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT); + wr32(wx, WX_PSR_CTL, psrctl); + } + + wx_dbg(wx, "Update mc addr list Complete\n"); +} + +/** + * wx_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. 
+ * Returns: 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int wx_write_mc_addr_list(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + if (!netif_running(netdev)) + return 0; + + wx_update_mc_addr_list(wx, netdev); + + return netdev_mc_count(netdev); +} + +/** + * wx_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +int wx_set_mac(struct net_device *netdev, void *p) +{ + struct wx *wx = netdev_priv(netdev); + struct sockaddr *addr = p; + int retval; + + retval = eth_prepare_mac_addr_change(netdev, addr); + if (retval) + return retval; + + wx_del_mac_filter(wx, wx->mac.addr, 0); + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len); + + wx_mac_set_default_filter(wx, wx->mac.addr); + + return 0; +} +EXPORT_SYMBOL(wx_set_mac); + +void wx_disable_rx(struct wx *wx) { u32 pfdtxgswc; u32 rxctrl; - rxctrl = rd32(wxhw, WX_RDB_PB_CTL); + rxctrl = rd32(wx, WX_RDB_PB_CTL); if (rxctrl & WX_RDB_PB_CTL_RXEN) { - pfdtxgswc = rd32(wxhw, WX_PSR_CTL); + pfdtxgswc = rd32(wx, WX_PSR_CTL); if (pfdtxgswc & WX_PSR_CTL_SW_EN) { pfdtxgswc &= ~WX_PSR_CTL_SW_EN; - wr32(wxhw, WX_PSR_CTL, pfdtxgswc); - wxhw->mac.set_lben = true; + wr32(wx, WX_PSR_CTL, pfdtxgswc); + wx->mac.set_lben = true; } else { - wxhw->mac.set_lben = false; + wx->mac.set_lben = false; } rxctrl &= ~WX_RDB_PB_CTL_RXEN; - wr32(wxhw, WX_RDB_PB_CTL, rxctrl); + wr32(wx, WX_RDB_PB_CTL, rxctrl); - if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || - ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { + if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || + ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { /* disable mac receiver */ - wr32m(wxhw, WX_MAC_RX_CFG, + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); } } } EXPORT_SYMBOL(wx_disable_rx); +static void wx_enable_rx(struct wx *wx) +{ + u32 psrctl; + + /* enable mac receiver */ + wr32m(wx, WX_MAC_RX_CFG, + WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); + + wr32m(wx, WX_RDB_PB_CTL, + WX_RDB_PB_CTL_RXEN, WX_RDB_PB_CTL_RXEN); + + if (wx->mac.set_lben) { + psrctl = rd32(wx, WX_PSR_CTL); + psrctl |= WX_PSR_CTL_SW_EN; + wr32(wx, WX_PSR_CTL, psrctl); + wx->mac.set_lben = false; + } +} + +/** + * wx_set_rxpba - Initialize Rx packet buffer + * @wx: pointer to private structure + **/ +static void wx_set_rxpba(struct wx *wx) +{ + u32 rxpktsize, txpktsize, txpbthresh; + + rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT; + wr32(wx, WX_RDB_PB_SZ(0), rxpktsize); + + /* Only support an equally distributed Tx packet buffer strategy. 
*/ + txpktsize = wx->mac.tx_pb_size; + txpbthresh = (txpktsize / 1024) - WX_TXPKT_SIZE_MAX; + wr32(wx, WX_TDB_PB_SZ(0), txpktsize); + wr32(wx, WX_TDM_PB_THRE(0), txpbthresh); +} + +static void wx_configure_port(struct wx *wx) +{ + u32 value, i; + + value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ; + wr32m(wx, WX_CFG_PORT_CTL, + WX_CFG_PORT_CTL_D_VLAN | + WX_CFG_PORT_CTL_QINQ, + value); + + wr32(wx, WX_CFG_TAG_TPID(0), + ETH_P_8021Q | ETH_P_8021AD << 16); + wx->tpid[0] = ETH_P_8021Q; + wx->tpid[1] = ETH_P_8021AD; + for (i = 1; i < 4; i++) + wr32(wx, WX_CFG_TAG_TPID(i), + ETH_P_8021Q | ETH_P_8021Q << 16); + for (i = 2; i < 8; i++) + wx->tpid[i] = ETH_P_8021Q; +} + +/** + * wx_disable_sec_rx_path - Stops the receive data path + * @wx: pointer to private structure + * + * Stops the receive data path and waits for the HW to internally empty + * the Rx security block + **/ +static int wx_disable_sec_rx_path(struct wx *wx) +{ + u32 secrx; + + wr32m(wx, WX_RSC_CTL, + WX_RSC_CTL_RX_DIS, WX_RSC_CTL_RX_DIS); + + return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY, + 1000, 40000, false, wx, WX_RSC_ST); +} + +/** + * wx_enable_sec_rx_path - Enables the receive data path + * @wx: pointer to private structure + * + * Enables the receive data path. + **/ +static void wx_enable_sec_rx_path(struct wx *wx) +{ + wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0); + WX_WRITE_FLUSH(wx); +} + +void wx_set_rx_mode(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + u32 fctrl, vmolr, vlnctrl; + int count; + + /* Check for Promiscuous and All Multicast modes */ + fctrl = rd32(wx, WX_PSR_CTL); + fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE); + vmolr = rd32(wx, WX_PSR_VM_L2CTL(0)); + vmolr &= ~(WX_PSR_VM_L2CTL_UPE | + WX_PSR_VM_L2CTL_MPE | + WX_PSR_VM_L2CTL_ROPE | + WX_PSR_VM_L2CTL_ROMPE); + vlnctrl = rd32(wx, WX_PSR_VLAN_CTL); + vlnctrl &= ~(WX_PSR_VLAN_CTL_VFE | WX_PSR_VLAN_CTL_CFIEN); + + /* set all bits that we expect to always be set */ + fctrl |= WX_PSR_CTL_BAM | WX_PSR_CTL_MFE; + vmolr |= WX_PSR_VM_L2CTL_BAM | + WX_PSR_VM_L2CTL_AUPE | + WX_PSR_VM_L2CTL_VACC; + vlnctrl |= WX_PSR_VLAN_CTL_VFE; + + wx->addr_ctrl.user_set_promisc = false; + if (netdev->flags & IFF_PROMISC) { + wx->addr_ctrl.user_set_promisc = true; + fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE; + /* pf don't want packets routing to vf, so clear UPE */ + vmolr |= WX_PSR_VM_L2CTL_MPE; + vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; + } + + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= WX_PSR_CTL_MPE; + vmolr |= WX_PSR_VM_L2CTL_MPE; + } + + if (netdev->features & NETIF_F_RXALL) { + vmolr |= (WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE); + vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; + /* receive bad packets */ + wr32m(wx, WX_RSC_CTL, + WX_RSC_CTL_SAVE_MAC_ERR, + WX_RSC_CTL_SAVE_MAC_ERR); + } else { + vmolr |= WX_PSR_VM_L2CTL_ROPE | WX_PSR_VM_L2CTL_ROMPE; + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = wx_write_uc_addr_list(netdev, 0); + if (count < 0) { + vmolr &= ~WX_PSR_VM_L2CTL_ROPE; + vmolr |= WX_PSR_VM_L2CTL_UPE; + } + + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = wx_write_mc_addr_list(netdev); + if (count < 0) { + vmolr &= ~WX_PSR_VM_L2CTL_ROMPE; + vmolr |= WX_PSR_VM_L2CTL_MPE; + } + + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); + wr32(wx, WX_PSR_CTL, fctrl); + wr32(wx, WX_PSR_VM_L2CTL(0), vmolr); +} 
+EXPORT_SYMBOL(wx_set_rx_mode); + +static void wx_set_rx_buffer_len(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + u32 mhadd, max_frame; + + max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + + mhadd = rd32(wx, WX_PSR_MAX_SZ); + if (max_frame != mhadd) + wr32(wx, WX_PSR_MAX_SZ, max_frame); +} + +/* Disable the specified rx queue */ +void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring) +{ + u8 reg_idx = ring->reg_idx; + u32 rxdctl; + int ret; + + /* write value back with RRCFG.EN bit cleared */ + wr32m(wx, WX_PX_RR_CFG(reg_idx), + WX_PX_RR_CFG_RR_EN, 0); + + /* the hardware may take up to 100us to really disable the rx queue */ + ret = read_poll_timeout(rd32, rxdctl, !(rxdctl & WX_PX_RR_CFG_RR_EN), + 10, 100, true, wx, WX_PX_RR_CFG(reg_idx)); + + if (ret == -ETIMEDOUT) { + /* Just for information */ + wx_err(wx, + "RRCFG.EN on Rx queue %d not cleared within the polling period\n", + reg_idx); + } +} +EXPORT_SYMBOL(wx_disable_rx_queue); + +static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring) +{ + u8 reg_idx = ring->reg_idx; + u32 rxdctl; + int ret; + + ret = read_poll_timeout(rd32, rxdctl, rxdctl & WX_PX_RR_CFG_RR_EN, + 1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx)); + + if (ret == -ETIMEDOUT) { + /* Just for information */ + wx_err(wx, + "RRCFG.EN on Rx queue %d not set within the polling period\n", + reg_idx); + } +} + +static void wx_configure_srrctl(struct wx *wx, + struct wx_ring *rx_ring) +{ + u16 reg_idx = rx_ring->reg_idx; + u32 srrctl; + + srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + srrctl &= ~(WX_PX_RR_CFG_RR_HDR_SZ | + WX_PX_RR_CFG_RR_BUF_SZ | + WX_PX_RR_CFG_SPLIT_MODE); + /* configure header buffer length, needed for RSC */ + srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT; + + /* configure the packet buffer length */ + srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT; + + wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); +} + +static void wx_configure_tx_ring(struct wx *wx, + struct wx_ring *ring) +{ + u32 txdctl = WX_PX_TR_CFG_ENABLE; + u8 reg_idx = ring->reg_idx; + u64 tdba = ring->dma; + int ret; + + /* disable queue to avoid issues while updating state */ + wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); + WX_WRITE_FLUSH(wx); + + wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32)); + wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba)); + + /* reset head and tail pointers */ + wr32(wx, WX_PX_TR_RP(reg_idx), 0); + wr32(wx, WX_PX_TR_WP(reg_idx), 0); + ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx); + + if (ring->count < WX_MAX_TXD) + txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT; + txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT; + + /* reinitialize tx_buffer_info */ + memset(ring->tx_buffer_info, 0, + sizeof(struct wx_tx_buffer) * ring->count); + + /* enable queue */ + wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl); + + /* poll to verify queue is enabled */ + ret = read_poll_timeout(rd32, txdctl, txdctl & WX_PX_TR_CFG_ENABLE, + 1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx)); + if (ret == -ETIMEDOUT) + wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx); +} + +static void wx_configure_rx_ring(struct wx *wx, + struct wx_ring *ring) +{ + u16 reg_idx = ring->reg_idx; + union wx_rx_desc *rx_desc; + u64 rdba = ring->dma; + u32 rxdctl; + + /* disable queue to avoid issues while updating state */ + rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + wx_disable_rx_queue(wx, 
ring); + + wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32)); + wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba)); + + if (ring->count == WX_MAX_RXD) + rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT; + else + rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT; + + rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT; + wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl); + + /* reset head and tail pointers */ + wr32(wx, WX_PX_RR_RP(reg_idx), 0); + wr32(wx, WX_PX_RR_WP(reg_idx), 0); + ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx); + + wx_configure_srrctl(wx, ring); + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct wx_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = WX_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor ring */ + wr32m(wx, WX_PX_RR_CFG(reg_idx), + WX_PX_RR_CFG_RR_EN, WX_PX_RR_CFG_RR_EN); + + wx_enable_rx_queue(wx, ring); + wx_alloc_rx_buffers(ring, wx_desc_unused(ring)); +} + +/** + * wx_configure_tx - Configure Transmit Unit after Reset + * @wx: pointer to private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void wx_configure_tx(struct wx *wx) +{ + u32 i; + + /* TDM_CTL.TE must be before Tx queues are enabled */ + wr32m(wx, WX_TDM_CTL, + WX_TDM_CTL_TE, WX_TDM_CTL_TE); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < wx->num_tx_queues; i++) + wx_configure_tx_ring(wx, wx->tx_ring[i]); + + wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10); + + if (wx->mac.type == wx_mac_em) + wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1); + + /* enable mac transmitter */ + wr32m(wx, WX_MAC_TX_CFG, + WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE); +} + +/** + * wx_configure_rx - Configure Receive Unit after Reset + * @wx: pointer to private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +static void wx_configure_rx(struct wx *wx) +{ + u32 psrtype, i; + int ret; + + wx_disable_rx(wx); + + psrtype = WX_RDB_PL_CFG_L4HDR | + WX_RDB_PL_CFG_L3HDR | + WX_RDB_PL_CFG_L2HDR | + WX_RDB_PL_CFG_TUN_TUNHDR | + WX_RDB_PL_CFG_TUN_TUNHDR; + wr32(wx, WX_RDB_PL_CFG(0), psrtype); + + /* enable hw crc stripping */ + wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP); + + if (wx->mac.type == wx_mac_sp) { + u32 psrctl; + + /* RSC Setup */ + psrctl = rd32(wx, WX_PSR_CTL); + psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */ + psrctl |= WX_PSR_CTL_RSC_DIS; + wr32(wx, WX_PSR_CTL, psrctl); + } + + /* set_rx_buffer_len must be called before ring initialization */ + wx_set_rx_buffer_len(wx); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < wx->num_rx_queues; i++) + wx_configure_rx_ring(wx, wx->rx_ring[i]); + + /* Enable all receives, disable security engine prior to block traffic */ + ret = wx_disable_sec_rx_path(wx); + if (ret < 0) + wx_err(wx, "The register status is abnormal, please check device."); + + wx_enable_rx(wx); + wx_enable_sec_rx_path(wx); +} + +static void wx_configure_isb(struct wx *wx) +{ + /* set ISB Address */ + wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32)); + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma)); +} + +void wx_configure(struct wx *wx) +{ + wx_set_rxpba(wx); + wx_configure_port(wx); + + wx_set_rx_mode(wx->netdev); + + wx_enable_sec_rx_path(wx); + + wx_configure_tx(wx); + wx_configure_rx(wx); + wx_configure_isb(wx); +} +EXPORT_SYMBOL(wx_configure); + /** * wx_disable_pcie_master - Disable PCI-express master access - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Disables PCI-Express master access and verifies there are no pending * requests. **/ -int wx_disable_pcie_master(struct wx_hw *wxhw) +int wx_disable_pcie_master(struct wx *wx) { int status = 0; u32 val; /* Always set this bit to ensure any future transactions are blocked */ - pci_clear_master(wxhw->pdev); + pci_clear_master(wx->pdev); /* Exit if master requests are blocked */ - if (!(rd32(wxhw, WX_PX_TRANSACTION_PENDING))) + if (!(rd32(wx, WX_PX_TRANSACTION_PENDING))) return 0; /* Poll for master request bit to clear */ status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT, - false, wxhw, WX_PX_TRANSACTION_PENDING); + false, wx, WX_PX_TRANSACTION_PENDING); if (status < 0) - wx_err(wxhw, "PCIe transaction pending bit did not clear.\n"); + wx_err(wx, "PCIe transaction pending bit did not clear.\n"); return status; } @@ -781,106 +1548,106 @@ EXPORT_SYMBOL(wx_disable_pcie_master); /** * wx_stop_adapter - Generic stop Tx/Rx units - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Sets the adapter_stopped flag within wx_hw struct. Clears interrupts, * disables transmit and receive units. The adapter_stopped flag is used by * the shared code and drivers to determine if the adapter is in a stopped * state and should not touch the hardware. 
**/ -int wx_stop_adapter(struct wx_hw *wxhw) +int wx_stop_adapter(struct wx *wx) { u16 i; /* Set the adapter_stopped flag so other driver functions stop touching * the hardware */ - wxhw->adapter_stopped = true; + wx->adapter_stopped = true; /* Disable the receive unit */ - wx_disable_rx(wxhw); + wx_disable_rx(wx); /* Set interrupt mask to stop interrupts from being generated */ - wx_intr_disable(wxhw, WX_INTR_ALL); + wx_intr_disable(wx, WX_INTR_ALL); /* Clear any pending interrupts, flush previous writes */ - wr32(wxhw, WX_PX_MISC_IC, 0xffffffff); - wr32(wxhw, WX_BME_CTL, 0x3); + wr32(wx, WX_PX_MISC_IC, 0xffffffff); + wr32(wx, WX_BME_CTL, 0x3); /* Disable the transmit unit. Each queue must be disabled. */ - for (i = 0; i < wxhw->mac.max_tx_queues; i++) { - wr32m(wxhw, WX_PX_TR_CFG(i), + for (i = 0; i < wx->mac.max_tx_queues; i++) { + wr32m(wx, WX_PX_TR_CFG(i), WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE, WX_PX_TR_CFG_SWFLSH); } /* Disable the receive unit by stopping each queue */ - for (i = 0; i < wxhw->mac.max_rx_queues; i++) { - wr32m(wxhw, WX_PX_RR_CFG(i), + for (i = 0; i < wx->mac.max_rx_queues; i++) { + wr32m(wx, WX_PX_RR_CFG(i), WX_PX_RR_CFG_RR_EN, 0); } /* flush all queues disables */ - WX_WRITE_FLUSH(wxhw); + WX_WRITE_FLUSH(wx); /* Prevent the PCI-E bus from hanging by disabling PCI-E master * access and verify no pending requests */ - return wx_disable_pcie_master(wxhw); + return wx_disable_pcie_master(wx); } EXPORT_SYMBOL(wx_stop_adapter); -void wx_reset_misc(struct wx_hw *wxhw) +void wx_reset_misc(struct wx *wx) { int i; /* receive packets that size > 2048 */ - wr32m(wxhw, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE); + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE); /* clear counters on read */ - wr32m(wxhw, WX_MMC_CONTROL, + wr32m(wx, WX_MMC_CONTROL, WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD); - wr32m(wxhw, WX_MAC_RX_FLOW_CTRL, + wr32m(wx, WX_MAC_RX_FLOW_CTRL, WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE); - wr32(wxhw, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); + wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); - wr32m(wxhw, WX_MIS_RST_ST, + wr32m(wx, WX_MIS_RST_ST, WX_MIS_RST_ST_RST_INIT, 0x1E00); /* errata 4: initialize mng flex tbl and wakeup flex tbl*/ - wr32(wxhw, WX_PSR_MNG_FLEX_SEL, 0); + wr32(wx, WX_PSR_MNG_FLEX_SEL, 0); for (i = 0; i < 16; i++) { - wr32(wxhw, WX_PSR_MNG_FLEX_DW_L(i), 0); - wr32(wxhw, WX_PSR_MNG_FLEX_DW_H(i), 0); - wr32(wxhw, WX_PSR_MNG_FLEX_MSK(i), 0); + wr32(wx, WX_PSR_MNG_FLEX_DW_L(i), 0); + wr32(wx, WX_PSR_MNG_FLEX_DW_H(i), 0); + wr32(wx, WX_PSR_MNG_FLEX_MSK(i), 0); } - wr32(wxhw, WX_PSR_LAN_FLEX_SEL, 0); + wr32(wx, WX_PSR_LAN_FLEX_SEL, 0); for (i = 0; i < 16; i++) { - wr32(wxhw, WX_PSR_LAN_FLEX_DW_L(i), 0); - wr32(wxhw, WX_PSR_LAN_FLEX_DW_H(i), 0); - wr32(wxhw, WX_PSR_LAN_FLEX_MSK(i), 0); + wr32(wx, WX_PSR_LAN_FLEX_DW_L(i), 0); + wr32(wx, WX_PSR_LAN_FLEX_DW_H(i), 0); + wr32(wx, WX_PSR_LAN_FLEX_MSK(i), 0); } /* set pause frame dst mac addr */ - wr32(wxhw, WX_RDB_PFCMACDAL, 0xC2000001); - wr32(wxhw, WX_RDB_PFCMACDAH, 0x0180); + wr32(wx, WX_RDB_PFCMACDAL, 0xC2000001); + wr32(wx, WX_RDB_PFCMACDAH, 0x0180); } EXPORT_SYMBOL(wx_reset_misc); /** * wx_get_pcie_msix_counts - Gets MSI-X vector count - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @msix_count: number of MSI interrupts that can be obtained * @max_msix_count: number of MSI interrupts that mac need * * Read PCIe configuration space, and get the MSI-X vector count from * the capabilities table. 
**/ -int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_count) +int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count) { - struct pci_dev *pdev = wxhw->pdev; + struct pci_dev *pdev = wx->pdev; struct device *dev = &pdev->dev; int pos; @@ -904,31 +1671,39 @@ int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_co } EXPORT_SYMBOL(wx_get_pcie_msix_counts); -int wx_sw_init(struct wx_hw *wxhw) +int wx_sw_init(struct wx *wx) { - struct pci_dev *pdev = wxhw->pdev; + struct pci_dev *pdev = wx->pdev; u32 ssid = 0; int err = 0; - wxhw->vendor_id = pdev->vendor; - wxhw->device_id = pdev->device; - wxhw->revision_id = pdev->revision; - wxhw->oem_svid = pdev->subsystem_vendor; - wxhw->oem_ssid = pdev->subsystem_device; - wxhw->bus.device = PCI_SLOT(pdev->devfn); - wxhw->bus.func = PCI_FUNC(pdev->devfn); - - if (wxhw->oem_svid == PCI_VENDOR_ID_WANGXUN) { - wxhw->subsystem_vendor_id = pdev->subsystem_vendor; - wxhw->subsystem_device_id = pdev->subsystem_device; + wx->vendor_id = pdev->vendor; + wx->device_id = pdev->device; + wx->revision_id = pdev->revision; + wx->oem_svid = pdev->subsystem_vendor; + wx->oem_ssid = pdev->subsystem_device; + wx->bus.device = PCI_SLOT(pdev->devfn); + wx->bus.func = PCI_FUNC(pdev->devfn); + + if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) { + wx->subsystem_vendor_id = pdev->subsystem_vendor; + wx->subsystem_device_id = pdev->subsystem_device; } else { - err = wx_flash_read_dword(wxhw, 0xfffdc, &ssid); + err = wx_flash_read_dword(wx, 0xfffdc, &ssid); if (!err) - wxhw->subsystem_device_id = swab16((u16)ssid); + wx->subsystem_device_id = swab16((u16)ssid); return err; } + wx->mac_table = kcalloc(wx->mac.num_rar_entries, + sizeof(struct wx_mac_addr), + GFP_KERNEL); + if (!wx->mac_table) { + wx_err(wx, "mac_table allocation failed\n"); + return -ENOMEM; + } + return 0; } EXPORT_SYMBOL(wx_sw_init); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index a0652f5e9939..44dfd6ea442a 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -4,25 +4,31 @@ #ifndef _WX_HW_H_ #define _WX_HW_H_ -int wx_check_flash_load(struct wx_hw *hw, u32 check_bit); -void wx_control_hw(struct wx_hw *wxhw, bool drv); -int wx_mng_present(struct wx_hw *wxhw); -int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, +void wx_intr_enable(struct wx *wx, u64 qmask); +void wx_irq_disable(struct wx *wx); +int wx_check_flash_load(struct wx *wx, u32 check_bit); +void wx_control_hw(struct wx *wx, bool drv); +int wx_mng_present(struct wx *wx); +int wx_host_interface_command(struct wx *wx, u32 *buffer, u32 length, u32 timeout, bool return_data); -int wx_read_ee_hostif(struct wx_hw *wxhw, u16 offset, u16 *data); -int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, +int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data); +int wx_read_ee_hostif_buffer(struct wx *wx, u16 offset, u16 words, u16 *data); -int wx_reset_hostif(struct wx_hw *wxhw); -void wx_init_eeprom_params(struct wx_hw *wxhw); -void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr); -int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, u32 enable_addr); -int wx_clear_rar(struct wx_hw *wxhw, u32 index); -void wx_init_rx_addrs(struct wx_hw *wxhw); -void wx_disable_rx(struct wx_hw *wxhw); -int wx_disable_pcie_master(struct wx_hw *wxhw); -int wx_stop_adapter(struct wx_hw *wxhw); -void wx_reset_misc(struct wx_hw *wxhw); -int wx_get_pcie_msix_counts(struct wx_hw 
*wxhw, u16 *msix_count, u16 max_msix_count); -int wx_sw_init(struct wx_hw *wxhw); +int wx_reset_hostif(struct wx *wx); +void wx_init_eeprom_params(struct wx *wx); +void wx_get_mac_addr(struct wx *wx, u8 *mac_addr); +void wx_init_rx_addrs(struct wx *wx); +void wx_mac_set_default_filter(struct wx *wx, u8 *addr); +void wx_flush_sw_mac_table(struct wx *wx); +int wx_set_mac(struct net_device *netdev, void *p); +void wx_disable_rx(struct wx *wx); +void wx_set_rx_mode(struct net_device *netdev); +void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring); +void wx_configure(struct wx *wx); +int wx_disable_pcie_master(struct wx *wx); +int wx_stop_adapter(struct wx *wx); +void wx_reset_misc(struct wx *wx); +int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); +int wx_sw_init(struct wx *wx); #endif /* _WX_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c new file mode 100644 index 000000000000..eb89a274083e --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -0,0 +1,2004 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/etherdevice.h> +#include <net/page_pool.h> +#include <linux/iopoll.h> +#include <linux/pci.h> + +#include "wx_type.h" +#include "wx_lib.h" +#include "wx_hw.h" + +/* wx_test_staterr - tests bits in Rx descriptor status and error fields */ +static __le32 wx_test_staterr(union wx_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * wx_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void wx_reuse_rx_page(struct wx_ring *rx_ring, + struct wx_rx_buffer *old_buff) +{ + u16 nta = rx_ring->next_to_alloc; + struct wx_rx_buffer *new_buff; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->page = old_buff->page; + new_buff->page_dma = old_buff->page_dma; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static void wx_dma_sync_frag(struct wx_ring *rx_ring, + struct wx_rx_buffer *rx_buffer) +{ + struct sk_buff *skb = rx_buffer->skb; + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + WX_CB(skb)->dma, + skb_frag_off(frag), + skb_frag_size(frag), + DMA_FROM_DEVICE); + + /* If the page was released, just unmap it. 
*/ + if (unlikely(WX_CB(skb)->page_released)) + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); +} + +static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring, + union wx_rx_desc *rx_desc, + struct sk_buff **skb, + int *rx_buffer_pgcnt) +{ + struct wx_rx_buffer *rx_buffer; + unsigned int size; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + size = le16_to_cpu(rx_desc->wb.upper.length); + +#if (PAGE_SIZE < 8192) + *rx_buffer_pgcnt = page_count(rx_buffer->page); +#else + *rx_buffer_pgcnt = 0; +#endif + + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; + + /* Delay unmapping of the first packet. It carries the header + * information, HW may still access the header after the writeback. + * Only unmap it when EOP is reached + */ + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) { + if (!*skb) + goto skip_sync; + } else { + if (*skb) + wx_dma_sync_frag(rx_ring, rx_buffer); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); +skip_sync: + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void wx_put_rx_buffer(struct wx_ring *rx_ring, + struct wx_rx_buffer *rx_buffer, + struct sk_buff *skb, + int rx_buffer_pgcnt) +{ + if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { + /* hand second half of page back to the ring */ + wx_reuse_rx_page(rx_ring, rx_buffer); + } else { + if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) + /* the page has been released from the ring */ + WX_CB(skb)->page_released = true; + else + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + rx_buffer->skb = NULL; +} + +static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, + struct wx_rx_buffer *rx_buffer, + union wx_rx_desc *rx_desc) +{ + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = WX_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + struct sk_buff *skb = rx_buffer->skb; + + if (!skb) { + void *page_addr = page_address(rx_buffer->page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256); + if (unlikely(!skb)) + return NULL; + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + if (size <= WX_RXBUFFER_256) { + memcpy(__skb_put(skb, size), page_addr, + ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; + + return skb; + } + + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) + WX_CB(skb)->dma = rx_buffer->dma; + + skb_add_rx_frag(skb, 0, rx_buffer->page, + rx_buffer->page_offset, + size, truesize); + goto out; + + } else { + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + } + +out: +#if (PAGE_SIZE < 8192) + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + +static bool wx_alloc_mapped_page(struct wx_ring 
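The receive path above uses a copy-break: frames of at most WX_RXBUFFER_256 bytes are copied into the skb's linear area and the page reference is handed back (pagecnt_bias++), while larger frames attach the page as a fragment and the buffer offset is flipped to the other half of the page. A short standalone sketch of that offset flip, assuming 4 KiB pages and the 2 KiB WX_RX_BUFSZ case:

#include <stdio.h>

int main(void)
{
        unsigned int truesize = 2048;   /* WX_RX_BUFSZ when PAGE_SIZE < 8192 */
        unsigned int page_offset = 0;

        for (int pkt = 0; pkt < 4; pkt++) {
                printf("packet %d uses bytes %u..%u of the page\n",
                       pkt, page_offset, page_offset + truesize - 1);
                page_offset ^= truesize;  /* 0 -> 2048 -> 0 -> ... */
        }
        return 0;
}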
*rx_ring, + struct wx_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + page = page_pool_dev_alloc_pages(rx_ring->page_pool); + WARN_ON(!page); + dma = page_pool_get_dma_addr(page); + + bi->page_dma = dma; + bi->page = page; + bi->page_offset = 0; + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * wx_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union wx_rx_desc *rx_desc; + struct wx_rx_buffer *bi; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = WX_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (!wx_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + WX_RX_BUFSZ, + DMA_FROM_DEVICE); + + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = WX_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +u16 wx_desc_unused(struct wx_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +/** + * wx_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool wx_is_non_eop(struct wx_ring *rx_ring, + union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
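The free-descriptor count used throughout the ring code is simple modular arithmetic: one slot is always left empty so next_to_use can never catch up with next_to_clean. A standalone check of the formula from wx_desc_unused(), with made-up index values:

#include <stdio.h>
#include <stdint.h>

static uint16_t desc_unused(uint16_t count, uint16_t ntc, uint16_t ntu)
{
        return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
        printf("%u\n", (unsigned int)desc_unused(512, 10, 10));   /* 511: ring idle */
        printf("%u\n", (unsigned int)desc_unused(512, 0, 511));   /* 0:   ring full */
        printf("%u\n", (unsigned int)desc_unused(512, 100, 90));  /* 9:   10 in flight */
        return 0;
}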
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(WX_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))) + return false; + + rx_ring->rx_buffer_info[ntc].skb = skb; + + return true; +} + +static void wx_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned int pull_len; + unsigned char *va; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(skb->dev, va, WX_RXBUFFER_256); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * wx_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool wx_cleanup_headers(struct wx_ring *rx_ring, + union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + + /* verify that the packet does not have any known errors */ + if (!netdev || + unlikely(wx_test_staterr(rx_desc, WX_RXD_ERR_RXE) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_any(skb); + return true; + } + + /* place header in linear portion of buffer */ + if (!skb_headlen(skb)) + wx_pull_tail(skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed. 
+ **/ +static int wx_clean_rx_irq(struct wx_q_vector *q_vector, + struct wx_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = wx_desc_unused(rx_ring); + + do { + struct wx_rx_buffer *rx_buffer; + union wx_rx_desc *rx_desc; + struct sk_buff *skb; + int rx_buffer_pgcnt; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= WX_RX_BUFFER_WRITE) { + wx_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = WX_RX_DESC(rx_ring, rx_ring->next_to_clean); + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb, &rx_buffer_pgcnt); + + /* retrieve a buffer from the ring */ + skb = wx_build_skb(rx_ring, rx_buffer, rx_desc); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_buffer->pagecnt_bias++; + break; + } + + wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (wx_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (wx_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + skb_record_rx_queue(skb, rx_ring->queue_index); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + napi_gro_receive(&q_vector->napi, skb); + + /* update budget accounting */ + total_rx_packets++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +static struct netdev_queue *wx_txring_txq(const struct wx_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +/** + * wx_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + * @napi_budget: Used to determine if we are in netpoll + **/ +static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, + struct wx_ring *tx_ring, int napi_budget) +{ + unsigned int budget = q_vector->wx->tx_work_limit; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int i = tx_ring->next_to_clean; + struct wx_tx_buffer *tx_buffer; + union wx_tx_desc *tx_desc; + + if (!netif_carrier_ok(tx_ring->netdev)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = WX_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union wx_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buffer->skb, napi_budget); + + /* unmap skb header 
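The Rx clean loop above returns buffers to hardware in batches: the tail pointer is only pushed back once at least WX_RX_BUFFER_WRITE (16) descriptors have been consumed, rather than after every packet. A small sketch of that batching policy, where refill() merely stands in for wx_alloc_rx_buffers() and its tail write:

#include <stdio.h>

#define RX_BUFFER_WRITE 16

static int tail_writes;

static void refill(unsigned int n)
{
        (void)n;
        tail_writes++;          /* one MMIO tail write per batch */
}

int main(void)
{
        unsigned int cleaned = 0;

        for (int pkt = 0; pkt < 100; pkt++) {
                if (cleaned >= RX_BUFFER_WRITE) {
                        refill(cleaned);
                        cleaned = 0;
                }
                cleaned++;      /* one descriptor consumed per packet */
        }
        printf("packets=100 tail_writes=%d\n", tail_writes); /* 6, not 100 */
        return 0;
}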
data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = WX_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = WX_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + netdev_tx_completed_queue(wx_txring_txq(tx_ring), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + netif_running(tx_ring->netdev)) + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + } + + return !!budget; +} + +/** + * wx_poll - NAPI polling RX/TX cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. 
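The stop/wake logic in the Tx clean path above is a hysteresis: transmit is stopped when fewer than DESC_NEEDED descriptors remain free and only woken once twice that many are free, so the queue does not bounce on every completed packet. In numbers, assuming MAX_SKB_FRAGS is 17 (a common value, used here only for illustration):

#include <stdio.h>

#define MAX_SKB_FRAGS_EXAMPLE   17
#define DESC_NEEDED             (MAX_SKB_FRAGS_EXAMPLE + 4)
#define TX_WAKE_THRESHOLD       (DESC_NEEDED * 2)

int main(void)
{
        printf("stop xmit when fewer than %d descriptors are free\n", DESC_NEEDED);
        printf("wake the queue once at least %d are free\n", TX_WAKE_THRESHOLD);
        return 0;
}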
+ **/ +static int wx_poll(struct napi_struct *napi, int budget) +{ + struct wx_q_vector *q_vector = container_of(napi, struct wx_q_vector, napi); + int per_ring_budget, work_done = 0; + struct wx *wx = q_vector->wx; + bool clean_complete = true; + struct wx_ring *ring; + + wx_for_each_ring(ring, q_vector->tx) { + if (!wx_clean_tx_irq(q_vector, ring, budget)) + clean_complete = false; + } + + /* Exit if we are called by netpoll */ + if (budget <= 0) + return budget; + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling + */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget / q_vector->rx.count, 1); + else + per_ring_budget = budget; + + wx_for_each_ring(ring, q_vector->rx) { + int cleaned = wx_clean_rx_irq(q_vector, ring, per_ring_budget); + + work_done += cleaned; + if (cleaned >= per_ring_budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + if (likely(napi_complete_done(napi, work_done))) { + if (netif_running(wx->netdev)) + wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx)); + } + + return min(work_done, budget - 1); +} + +static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size) +{ + if (likely(wx_desc_unused(tx_ring) >= size)) + return 0; + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* For the next check */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(wx_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + + return 0; +} + +static void wx_tx_map(struct wx_ring *tx_ring, + struct wx_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + struct wx_tx_buffer *tx_buffer; + u16 i = tx_ring->next_to_use; + unsigned int data_len, size; + union wx_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + u32 cmd_type; + + cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS; + tx_desc = WX_TX_DESC(tx_ring, i); + + tx_desc->read.olinfo_status = cpu_to_le32(skb->len << WX_TXD_PAYLEN_SHIFT); + + size = skb_headlen(skb); + data_len = skb->data_len; + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > WX_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ WX_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = WX_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += WX_MAX_DATA_PER_TXD; + size -= WX_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = WX_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ 
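The NAPI budget handling in wx_poll() above splits the per-poll budget evenly across the Rx rings on a vector, never letting a ring's share drop below 1, and keeps the vector in polling mode if any ring used its whole share. A compact sketch of that policy with invented numbers:

#include <stdio.h>
#include <stdbool.h>

static int per_ring_budget(int budget, int nr_rx_rings)
{
        if (nr_rx_rings > 1) {
                int share = budget / nr_rx_rings;
                return share > 1 ? share : 1;
        }
        return budget;
}

int main(void)
{
        int budget = 64;
        int share = per_ring_budget(budget, 3);        /* 21 */
        int cleaned[3] = { 21, 5, 0 };                 /* made-up per-ring work */
        bool keep_polling = false;

        for (int i = 0; i < 3; i++)
                if (cleaned[i] >= share)
                        keep_polling = true;           /* ring 0 used its full share */

        printf("share=%d keep_polling=%d\n", share, keep_polling);
        return 0;
}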
+ cmd_type |= size | WX_TXD_EOP | WX_TXD_RS; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount); + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + wx_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more()) + writel(i, tx_ring->tail); + + return; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + if (tx_buffer == first) + break; + if (i == 0) + i += tx_ring->count; + i--; + } + + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + tx_ring->next_to_use = i; +} + +static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, + struct wx_ring *tx_ring) +{ + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + struct wx_tx_buffer *first; + unsigned short f; + + /* need: 1 descriptor per page * PAGE_SIZE/WX_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/WX_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> + frags[f])); + + if (wx_maybe_stop_tx(tx_ring, count + 3)) + return NETDEV_TX_BUSY; + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + wx_tx_map(tx_ring, first); + + return NETDEV_TX_OK; +} + +netdev_tx_t wx_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + unsigned int r_idx = skb->queue_mapping; + struct wx *wx = netdev_priv(netdev); + struct wx_ring *tx_ring; + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. 
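The descriptor budgeting in wx_xmit_frame_ring() above is worst-case arithmetic: every 16 KiB (WX_MAX_DATA_PER_TXD) of the linear area or of a fragment needs its own data descriptor, plus a fixed "+ 3" for the context descriptor and the gap that keeps tail from touching head. A standalone check with an invented frame layout:

#include <stdio.h>

#define MAX_DATA_PER_TXD (1u << 14)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int headlen = 242;                 /* linear part of the skb */
        unsigned int frags[] = { 32768, 1500 };     /* two paged fragments */
        unsigned int count = DIV_ROUND_UP(headlen, MAX_DATA_PER_TXD);

        for (unsigned int i = 0; i < 2; i++)
                count += DIV_ROUND_UP(frags[i], MAX_DATA_PER_TXD);

        printf("descriptors needed: %u (stop unless %u are free)\n",
               count, count + 3);                   /* 4 and 7 */
        return 0;
}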
+ */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + if (r_idx >= wx->num_tx_queues) + r_idx = r_idx % wx->num_tx_queues; + tx_ring = wx->tx_ring[r_idx]; + + return wx_xmit_frame_ring(skb, tx_ring); +} +EXPORT_SYMBOL(wx_xmit_frame); + +void wx_napi_enable_all(struct wx *wx) +{ + struct wx_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) { + q_vector = wx->q_vector[q_idx]; + napi_enable(&q_vector->napi); + } +} +EXPORT_SYMBOL(wx_napi_enable_all); + +void wx_napi_disable_all(struct wx *wx) +{ + struct wx_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) { + q_vector = wx->q_vector[q_idx]; + napi_disable(&q_vector->napi); + } +} +EXPORT_SYMBOL(wx_napi_disable_all); + +/** + * wx_set_rss_queues: Allocate queues for RSS + * @wx: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static void wx_set_rss_queues(struct wx *wx) +{ + wx->num_rx_queues = wx->mac.max_rx_queues; + wx->num_tx_queues = wx->mac.max_tx_queues; +} + +static void wx_set_num_queues(struct wx *wx) +{ + /* Start with base case */ + wx->num_rx_queues = 1; + wx->num_tx_queues = 1; + wx->queues_per_pool = 1; + + wx_set_rss_queues(wx); +} + +/** + * wx_acquire_msix_vectors - acquire MSI-X vectors + * @wx: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. + */ +static int wx_acquire_msix_vectors(struct wx *wx) +{ + struct irq_affinity affd = {0, }; + int nvecs, i; + + nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors); + + wx->msix_entries = kcalloc(nvecs, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!wx->msix_entries) + return -ENOMEM; + + nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs, + nvecs, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, + &affd); + if (nvecs < 0) { + wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs); + kfree(wx->msix_entries); + wx->msix_entries = NULL; + return nvecs; + } + + for (i = 0; i < nvecs; i++) { + wx->msix_entries[i].entry = i; + wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i); + } + + /* one for msix_other */ + nvecs -= 1; + wx->num_q_vectors = nvecs; + wx->num_rx_queues = nvecs; + wx->num_tx_queues = nvecs; + + return 0; +} + +/** + * wx_set_interrupt_capability - set MSI-X or MSI if supported + * @wx: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int wx_set_interrupt_capability(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + int nvecs, ret; + + /* We will try to get MSI-X interrupts first */ + ret = wx_acquire_msix_vectors(wx); + if (ret == 0 || (ret == -ENOMEM)) + return ret; + + wx->num_rx_queues = 1; + wx->num_tx_queues = 1; + wx->num_q_vectors = 1; + + /* minmum one for queue, one for misc*/ + nvecs = 1; + nvecs = pci_alloc_irq_vectors(pdev, nvecs, + nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY); + if (nvecs == 1) { + if (pdev->msi_enabled) + wx_err(wx, "Fallback to MSI.\n"); + else + wx_err(wx, "Fallback to LEGACY.\n"); + } else { + wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. 
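The vector budgeting in wx_acquire_msix_vectors() above asks for one MSI-X vector per online CPU (capped by the device limit), then reserves one of them for the miscellaneous "other" interrupt; the remainder drive the queue vectors and set the Rx/Tx queue counts. A sketch of the arithmetic, with the CPU and device numbers invented for the example:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int online_cpus = 8;
        unsigned int max_msix_vectors = 64;          /* device capability */
        unsigned int nvecs = min_u(online_cpus, max_msix_vectors);
        unsigned int q_vectors = nvecs - 1;          /* one kept for misc */

        printf("MSI-X vectors: %u, queue vectors: %u, rx/tx queues: %u\n",
               nvecs, q_vectors, q_vectors);
        return 0;
}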
Error: %d\n", nvecs); + return nvecs; + } + + pdev->irq = pci_irq_vector(pdev, 0); + + return 0; +} + +/** + * wx_cache_ring_rss - Descriptor ring to register mapping for RSS + * @wx: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. + * + **/ +static void wx_cache_ring_rss(struct wx *wx) +{ + u16 i; + + for (i = 0; i < wx->num_rx_queues; i++) + wx->rx_ring[i]->reg_idx = i; + + for (i = 0; i < wx->num_tx_queues; i++) + wx->tx_ring[i]->reg_idx = i; +} + +static void wx_add_ring(struct wx_ring *ring, struct wx_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * wx_alloc_q_vector - Allocate memory for a single interrupt vector + * @wx: board private structure to initialize + * @v_count: q_vectors allocated on wx, used for ring interleaving + * @v_idx: index of vector in wx struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + **/ +static int wx_alloc_q_vector(struct wx *wx, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct wx_q_vector *q_vector; + int ring_count, default_itr; + struct wx_ring *ring; + + /* note this will allocate space for the ring structure as well! */ + ring_count = txr_count + rxr_count; + + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(wx->netdev, &q_vector->napi, + wx_poll); + + /* tie q_vector and wx together */ + wx->q_vector[v_idx] = q_vector; + q_vector->wx = wx; + q_vector->v_idx = v_idx; + if (cpu_online(v_idx)) + q_vector->numa_node = cpu_to_node(v_idx); + + /* initialize pointer to rings */ + ring = q_vector->ring; + + if (wx->mac.type == wx_mac_sp) + default_itr = WX_12K_ITR; + else + default_itr = WX_7K_ITR; + /* initialize ITR */ + if (txr_count && !rxr_count) + /* tx only vector */ + q_vector->itr = wx->tx_itr_setting ? + default_itr : wx->tx_itr_setting; + else + /* rx or rx/tx vector */ + q_vector->itr = wx->rx_itr_setting ? 
+ default_itr : wx->rx_itr_setting; + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = &wx->pdev->dev; + ring->netdev = wx->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + wx_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = wx->tx_ring_count; + + ring->queue_index = txr_idx; + + /* assign ring to wx */ + wx->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = &wx->pdev->dev; + ring->netdev = wx->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + wx_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = wx->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to wx */ + wx->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * wx_free_q_vector - Free memory allocated for specific interrupt vector + * @wx: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void wx_free_q_vector(struct wx *wx, int v_idx) +{ + struct wx_q_vector *q_vector = wx->q_vector[v_idx]; + struct wx_ring *ring; + + wx_for_each_ring(ring, q_vector->tx) + wx->tx_ring[ring->queue_index] = NULL; + + wx_for_each_ring(ring, q_vector->rx) + wx->rx_ring[ring->queue_index] = NULL; + + wx->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + kfree_rcu(q_vector, rcu); +} + +/** + * wx_alloc_q_vectors - Allocate memory for interrupt vectors + * @wx: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int wx_alloc_q_vectors(struct wx *wx) +{ + unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; + unsigned int rxr_remaining = wx->num_rx_queues; + unsigned int txr_remaining = wx->num_tx_queues; + unsigned int q_vectors = wx->num_q_vectors; + int rqpv, tqpv; + int err; + + for (; v_idx < q_vectors; v_idx++) { + rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + err = wx_alloc_q_vector(wx, q_vectors, v_idx, + tqpv, txr_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + wx->num_tx_queues = 0; + wx->num_rx_queues = 0; + wx->num_q_vectors = 0; + + while (v_idx--) + wx_free_q_vector(wx, v_idx); + + return -ENOMEM; +} + +/** + * wx_free_q_vectors - Free memory allocated for interrupt vectors + * @wx: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
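The ring distribution in wx_alloc_q_vectors() above gives each vector DIV_ROUND_UP(remaining, vectors_left) rings, which spreads e.g. 10 Rx rings over 4 vectors as 3/3/2/2. A standalone walk-through with illustrative counts:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int q_vectors = 4, rxr_remaining = 10;

        for (unsigned int v_idx = 0; v_idx < q_vectors; v_idx++) {
                unsigned int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);

                printf("vector %u handles %u rx ring(s)\n", v_idx, rqpv);
                rxr_remaining -= rqpv;
        }
        return 0;
}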
+ **/ +static void wx_free_q_vectors(struct wx *wx) +{ + int v_idx = wx->num_q_vectors; + + wx->num_tx_queues = 0; + wx->num_rx_queues = 0; + wx->num_q_vectors = 0; + + while (v_idx--) + wx_free_q_vector(wx, v_idx); +} + +void wx_reset_interrupt_capability(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + + if (!pdev->msi_enabled && !pdev->msix_enabled) + return; + + pci_free_irq_vectors(wx->pdev); + if (pdev->msix_enabled) { + kfree(wx->msix_entries); + wx->msix_entries = NULL; + } +} +EXPORT_SYMBOL(wx_reset_interrupt_capability); + +/** + * wx_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @wx: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void wx_clear_interrupt_scheme(struct wx *wx) +{ + wx_free_q_vectors(wx); + wx_reset_interrupt_capability(wx); +} +EXPORT_SYMBOL(wx_clear_interrupt_scheme); + +int wx_init_interrupt_scheme(struct wx *wx) +{ + int ret; + + /* Number of supported queues */ + wx_set_num_queues(wx); + + /* Set interrupt mode */ + ret = wx_set_interrupt_capability(wx); + if (ret) { + wx_err(wx, "Allocate irq vectors for failed.\n"); + return ret; + } + + /* Allocate memory for queues */ + ret = wx_alloc_q_vectors(wx); + if (ret) { + wx_err(wx, "Unable to allocate memory for queue vectors.\n"); + wx_reset_interrupt_capability(wx); + return ret; + } + + wx_cache_ring_rss(wx); + + return 0; +} +EXPORT_SYMBOL(wx_init_interrupt_scheme); + +irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data) +{ + struct wx_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL(wx_msix_clean_rings); + +void wx_free_irq(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + int vector; + + if (!(pdev->msix_enabled)) { + free_irq(pdev->irq, wx); + return; + } + + for (vector = 0; vector < wx->num_q_vectors; vector++) { + struct wx_q_vector *q_vector = wx->q_vector[vector]; + struct msix_entry *entry = &wx->msix_entries[vector]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + + free_irq(entry->vector, q_vector); + } + + free_irq(wx->msix_entries[vector].vector, wx); +} +EXPORT_SYMBOL(wx_free_irq); + +/** + * wx_setup_isb_resources - allocate interrupt status resources + * @wx: board private structure + * + * Return 0 on success, negative on failure + **/ +int wx_setup_isb_resources(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + + wx->isb_mem = dma_alloc_coherent(&pdev->dev, + sizeof(u32) * 4, + &wx->isb_dma, + GFP_KERNEL); + if (!wx->isb_mem) { + wx_err(wx, "Alloc isb_mem failed\n"); + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL(wx_setup_isb_resources); + +/** + * wx_free_isb_resources - allocate all queues Rx resources + * @wx: board private structure + * + * Return 0 on success, negative on failure + **/ +void wx_free_isb_resources(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + + dma_free_coherent(&pdev->dev, sizeof(u32) * 4, + wx->isb_mem, wx->isb_dma); + wx->isb_mem = NULL; +} +EXPORT_SYMBOL(wx_free_isb_resources); + +u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx) +{ + u32 cur_tag = 0; + + cur_tag = wx->isb_mem[WX_ISB_HEADER]; + wx->isb_tag[idx] = cur_tag; + + return (__force u32)cpu_to_le32(wx->isb_mem[idx]); +} +EXPORT_SYMBOL(wx_misc_isb); + +/** + * wx_set_ivar - set 
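The interrupt status block allocated above is simply four 32-bit words in DMA-coherent memory, indexed by enum wx_isb_idx. A standalone view of that layout (offsets only, no real DMA here):

#include <stdio.h>
#include <stdint.h>

enum isb_idx { ISB_HEADER, ISB_MISC, ISB_VEC0, ISB_VEC1, ISB_MAX };

int main(void)
{
        uint32_t isb_mem[ISB_MAX] = { 0 };   /* stands in for wx->isb_mem */

        printf("ISB size: %zu bytes\n", sizeof(isb_mem));          /* 16 */
        printf("misc status at byte offset %zu\n",
               ISB_MISC * sizeof(uint32_t));                       /* 4 */
        printf("vector 0 status at byte offset %zu\n",
               ISB_VEC0 * sizeof(uint32_t));                       /* 8 */
        return isb_mem[ISB_HEADER] ? 1 : 0;
}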
the IVAR registers, mapping interrupt causes to vectors + * @wx: pointer to wx struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + **/ +static void wx_set_ivar(struct wx *wx, s8 direction, + u16 queue, u16 msix_vector) +{ + u32 ivar, index; + + if (direction == -1) { + /* other causes */ + msix_vector |= WX_PX_IVAR_ALLOC_VAL; + index = 0; + ivar = rd32(wx, WX_PX_MISC_IVAR); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(wx, WX_PX_MISC_IVAR, ivar); + } else { + /* tx or rx causes */ + msix_vector |= WX_PX_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(wx, WX_PX_IVAR(queue >> 1), ivar); + } +} + +/** + * wx_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. + */ +static void wx_write_eitr(struct wx_q_vector *q_vector) +{ + struct wx *wx = q_vector->wx; + int v_idx = q_vector->v_idx; + u32 itr_reg; + + if (wx->mac.type == wx_mac_sp) + itr_reg = q_vector->itr & WX_SP_MAX_EITR; + else + itr_reg = q_vector->itr & WX_EM_MAX_EITR; + + itr_reg |= WX_PX_ITR_CNT_WDIS; + + wr32(wx, WX_PX_ITR(v_idx), itr_reg); +} + +/** + * wx_configure_vectors - Configure vectors for hardware + * @wx: board private structure + * + * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY + * interrupts. + **/ +void wx_configure_vectors(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + u32 eitrsel = 0; + u16 v_idx; + + if (pdev->msix_enabled) { + /* Populate MSIX to EITR Select */ + wr32(wx, WX_PX_ITRSEL, eitrsel); + /* use EIAM to auto-mask when MSI-X interrupt is asserted + * this saves a register write for every interrupt + */ + wr32(wx, WX_PX_GPIE, WX_PX_GPIE_MODEL); + } else { + /* legacy interrupts, use EIAM to auto-mask when reading EICR, + * specifically only auto mask tx and rx interrupts. + */ + wr32(wx, WX_PX_GPIE, 0); + } + + /* Populate the IVAR table and set the ITR values to the + * corresponding register. + */ + for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) { + struct wx_q_vector *q_vector = wx->q_vector[v_idx]; + struct wx_ring *ring; + + wx_for_each_ring(ring, q_vector->rx) + wx_set_ivar(wx, 0, ring->reg_idx, v_idx); + + wx_for_each_ring(ring, q_vector->tx) + wx_set_ivar(wx, 1, ring->reg_idx, v_idx); + + wx_write_eitr(q_vector); + } + + wx_set_ivar(wx, -1, 0, v_idx); + if (pdev->msix_enabled) + wr32(wx, WX_PX_ITR(v_idx), 1950); +} +EXPORT_SYMBOL(wx_configure_vectors); + +/** + * wx_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void wx_clean_rx_ring(struct wx_ring *rx_ring) +{ + struct wx_rx_buffer *rx_buffer; + u16 i = rx_ring->next_to_clean; + + rx_buffer = &rx_ring->rx_buffer_info[i]; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + + if (WX_CB(skb)->page_released) + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); + + dev_kfree_skb(skb); + } + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
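The bit-slot arithmetic in wx_set_ivar() above packs four 8-bit entries into each 32-bit IVAR register, two queues times (rx, tx), so queue N lands in register N/2 at bit offset 16*(N&1) + 8*direction. A standalone check with example queue numbers:

#include <stdio.h>

static void ivar_slot(unsigned int queue, unsigned int direction /* 0 rx, 1 tx */)
{
        unsigned int reg = queue >> 1;
        unsigned int shift = (16 * (queue & 1)) + (8 * direction);

        printf("queue %u %s -> IVAR[%u], bits %u..%u\n",
               queue, direction ? "tx" : "rx", reg, shift, shift + 7);
}

int main(void)
{
        ivar_slot(0, 0);   /* IVAR[0], bits 0..7   */
        ivar_slot(0, 1);   /* IVAR[0], bits 8..15  */
        ivar_slot(1, 0);   /* IVAR[0], bits 16..23 */
        ivar_slot(3, 1);   /* IVAR[1], bits 24..31 */
        return 0;
}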
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + WX_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * wx_clean_all_rx_rings - Free Rx Buffers for all queues + * @wx: board private structure + **/ +void wx_clean_all_rx_rings(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_rx_queues; i++) + wx_clean_rx_ring(wx->rx_ring[i]); +} +EXPORT_SYMBOL(wx_clean_all_rx_rings); + +/** + * wx_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void wx_free_rx_resources(struct wx_ring *rx_ring) +{ + wx_clean_rx_ring(rx_ring); + kvfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; + + if (rx_ring->page_pool) { + page_pool_destroy(rx_ring->page_pool); + rx_ring->page_pool = NULL; + } +} + +/** + * wx_free_all_rx_resources - Free Rx Resources for All Queues + * @wx: pointer to hardware structure + * + * Free all receive software resources + **/ +static void wx_free_all_rx_resources(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_rx_queues; i++) + wx_free_rx_resources(wx->rx_ring[i]); +} + +/** + * wx_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void wx_clean_tx_ring(struct wx_ring *tx_ring) +{ + struct wx_tx_buffer *tx_buffer; + u16 i = tx_ring->next_to_clean; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + + while (i != tx_ring->next_to_use) { + union wx_tx_desc *eop_desc, *tx_desc; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = WX_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = WX_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + netdev_tx_reset_queue(wx_txring_txq(tx_ring)); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * wx_clean_all_tx_rings - Free Tx Buffers for all queues + * @wx: board private structure + **/ +void wx_clean_all_tx_rings(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_tx_queues; i++) + wx_clean_tx_ring(wx->tx_ring[i]); +} +EXPORT_SYMBOL(wx_clean_all_tx_rings); + +/** + * wx_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor 
ring for a specific queue + * + * Free all transmit software resources + **/ +static void wx_free_tx_resources(struct wx_ring *tx_ring) +{ + wx_clean_tx_ring(tx_ring); + kvfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * wx_free_all_tx_resources - Free Tx Resources for All Queues + * @wx: pointer to hardware structure + * + * Free all transmit software resources + **/ +static void wx_free_all_tx_resources(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_tx_queues; i++) + wx_free_tx_resources(wx->tx_ring[i]); +} + +void wx_free_resources(struct wx *wx) +{ + wx_free_isb_resources(wx); + wx_free_all_rx_resources(wx); + wx_free_all_tx_resources(wx); +} +EXPORT_SYMBOL(wx_free_resources); + +static int wx_alloc_page_pool(struct wx_ring *rx_ring) +{ + int ret = 0; + + struct page_pool_params pp_params = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .order = 0, + .pool_size = rx_ring->size, + .nid = dev_to_node(rx_ring->dev), + .dev = rx_ring->dev, + .dma_dir = DMA_FROM_DEVICE, + .offset = 0, + .max_len = PAGE_SIZE, + }; + + rx_ring->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rx_ring->page_pool)) { + ret = PTR_ERR(rx_ring->page_pool); + rx_ring->page_pool = NULL; + } + + return ret; +} + +/** + * wx_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int wx_setup_rx_resources(struct wx_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size, ret; + + size = sizeof(struct wx_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) { + set_dev_node(dev, orig_node); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + } + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + ret = wx_alloc_page_pool(rx_ring); + if (ret < 0) { + dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret); + goto err; + } + + return 0; +err: + kvfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * wx_setup_all_rx_resources - allocate all queues Rx resources + * @wx: pointer to hardware structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int wx_setup_all_rx_resources(struct wx *wx) +{ + int i, err = 0; + + for (i = 0; i < wx->num_rx_queues; i++) { + err = wx_setup_rx_resources(wx->rx_ring[i]); + if (!err) + continue; + + wx_err(wx, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + wx_free_rx_resources(wx->rx_ring[i]); + return err; +} + +/** + * wx_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int wx_setup_tx_resources(struct wx_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size; + + size = sizeof(struct wx_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + + tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + set_dev_node(dev, orig_node); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + } + + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + kvfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * wx_setup_all_tx_resources - allocate all queues Tx resources + * @wx: pointer to private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int wx_setup_all_tx_resources(struct wx *wx) +{ + int i, err = 0; + + for (i = 0; i < wx->num_tx_queues; i++) { + err = wx_setup_tx_resources(wx->tx_ring[i]); + if (!err) + continue; + + wx_err(wx, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + wx_free_tx_resources(wx->tx_ring[i]); + return err; +} + +int wx_setup_resources(struct wx *wx) +{ + int err; + + /* allocate transmit descriptors */ + err = wx_setup_all_tx_resources(wx); + if (err) + return err; + + /* allocate receive descriptors */ + err = wx_setup_all_rx_resources(wx); + if (err) + goto err_free_tx; + + err = wx_setup_isb_resources(wx); + if (err) + goto err_free_rx; + + return 0; + +err_free_rx: + wx_free_all_rx_resources(wx); +err_free_tx: + wx_free_all_tx_resources(wx); + + return err; +} +EXPORT_SYMBOL(wx_setup_resources); + +/** + * wx_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + */ +void wx_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct wx *wx = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < wx->num_rx_queues; i++) { + struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < wx->num_tx_queues; i++) { + struct wx_ring *ring = READ_ONCE(wx->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, + start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + + rcu_read_unlock(); +} +EXPORT_SYMBOL(wx_get_stats64); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h new file mode 100644 index 000000000000..50ee41f1fa10 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
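The statistics read in wx_get_stats64() above follows the u64_stats_fetch_begin()/retry() discipline: snapshot a sequence count, copy the counters, and retry if a writer bumped the count in between. A simplified single-threaded model of that pattern (it always succeeds on the first pass here; the toy_* names are illustrative):

#include <stdio.h>
#include <stdint.h>

struct toy_stats {
        unsigned int seq;        /* even: stable, odd: write in progress */
        uint64_t packets, bytes;
};

static void toy_read(const struct toy_stats *s, uint64_t *p, uint64_t *b)
{
        unsigned int start;

        do {
                start = s->seq;                        /* fetch_begin */
                *p = s->packets;
                *b = s->bytes;
        } while (start != s->seq || (start & 1));      /* fetch_retry */
}

int main(void)
{
        struct toy_stats s = { .seq = 2, .packets = 10, .bytes = 15140 };
        uint64_t p, b;

        toy_read(&s, &p, &b);
        printf("packets=%llu bytes=%llu\n",
               (unsigned long long)p, (unsigned long long)b);
        return 0;
}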
+ */ + +#ifndef _WX_LIB_H_ +#define _WX_LIB_H_ + +void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count); +u16 wx_desc_unused(struct wx_ring *ring); +netdev_tx_t wx_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +void wx_napi_enable_all(struct wx *wx); +void wx_napi_disable_all(struct wx *wx); +void wx_reset_interrupt_capability(struct wx *wx); +void wx_clear_interrupt_scheme(struct wx *wx); +int wx_init_interrupt_scheme(struct wx *wx); +irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data); +void wx_free_irq(struct wx *wx); +int wx_setup_isb_resources(struct wx *wx); +void wx_free_isb_resources(struct wx *wx); +u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx); +void wx_configure_vectors(struct wx *wx); +void wx_clean_all_rx_rings(struct wx *wx); +void wx_clean_all_tx_rings(struct wx *wx); +void wx_free_resources(struct wx *wx); +int wx_setup_resources(struct wx *wx); +void wx_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats); + +#endif /* _NGBE_LIB_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 1cbeef8230bf..77d8d7f1707e 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -4,6 +4,9 @@ #ifndef _WX_TYPE_H_ #define _WX_TYPE_H_ +#include <linux/bitfield.h> +#include <linux/netdevice.h> + /* Vendor ID */ #ifndef PCI_VENDOR_ID_WANGXUN #define PCI_VENDOR_ID_WANGXUN 0x8088 @@ -36,12 +39,11 @@ #define WX_SPI_CMD 0x10104 #define WX_SPI_CMD_READ_DWORD 0x1 #define WX_SPI_CLK_DIV 0x3 -#define WX_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28) -#define WX_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25) -#define WX_SPI_CMD_ADDR(_v) (((_v) & 0xFFFFFF)) +#define WX_SPI_CMD_CMD(_v) FIELD_PREP(GENMASK(30, 28), _v) +#define WX_SPI_CMD_CLK(_v) FIELD_PREP(GENMASK(27, 25), _v) +#define WX_SPI_CMD_ADDR(_v) FIELD_PREP(GENMASK(23, 0), _v) #define WX_SPI_DATA 0x10108 #define WX_SPI_DATA_BYPASS BIT(31) -#define WX_SPI_DATA_STATUS(_v) (((_v) & 0xFF) << 16) #define WX_SPI_DATA_OP_DONE BIT(0) #define WX_SPI_STATUS 0x1010C #define WX_SPI_STATUS_OPDONE BIT(0) @@ -64,21 +66,50 @@ /* port cfg Registers */ #define WX_CFG_PORT_CTL 0x14400 #define WX_CFG_PORT_CTL_DRV_LOAD BIT(3) +#define WX_CFG_PORT_CTL_QINQ BIT(2) +#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/ +#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) + +/* GPIO Registers */ +#define WX_GPIO_DR 0x14800 +#define WX_GPIO_DR_0 BIT(0) /* SDP0 Data Value */ +#define WX_GPIO_DR_1 BIT(1) /* SDP1 Data Value */ +#define WX_GPIO_DDR 0x14804 +#define WX_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */ +#define WX_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */ +#define WX_GPIO_CTL 0x14808 +#define WX_GPIO_INTEN 0x14830 +#define WX_GPIO_INTEN_0 BIT(0) +#define WX_GPIO_INTEN_1 BIT(1) +#define WX_GPIO_INTMASK 0x14834 +#define WX_GPIO_INTTYPE_LEVEL 0x14838 +#define WX_GPIO_POLARITY 0x1483C +#define WX_GPIO_EOI 0x1484C /*********************** Transmit DMA registers **************************/ /* transmit global control */ #define WX_TDM_CTL 0x18000 /* TDM CTL BIT */ #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ +#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) /***************************** RDB registers *********************************/ /* receive packet buffer */ #define WX_RDB_PB_CTL 0x19000 #define WX_RDB_PB_CTL_RXEN BIT(31) /* Enable Receiver */ #define WX_RDB_PB_CTL_DISABLED BIT(0) +#define WX_RDB_PB_SZ(_i) (0x19020 + ((_i) * 4)) +#define WX_RDB_PB_SZ_SHIFT 10 /* statistic */ #define 
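The wx_type.h hunks above replace open-coded shift/mask macros such as (((_v) & 0x7) << 28) with FIELD_PREP(GENMASK(30, 28), _v). A quick userspace check that the two encodings agree for all in-range values; GENMASK and FIELD_PREP are re-derived locally in simplified form so the sketch compiles outside the kernel:

#include <stdio.h>
#include <stdint.h>

#define GENMASK_U32(h, l)  ((~0u >> (31 - (h))) & (~0u << (l)))
#define FIELD_PREP_U32(mask, val) \
        (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define OLD_SPI_CMD_CMD(v)  (((v) & 0x7u) << 28)
#define NEW_SPI_CMD_CMD(v)  FIELD_PREP_U32(GENMASK_U32(30, 28), v)

int main(void)
{
        for (uint32_t v = 0; v < 8; v++)
                if (OLD_SPI_CMD_CMD(v) != NEW_SPI_CMD_CMD(v))
                        printf("mismatch at %u\n", v);
        printf("old and new encodings match for all 3-bit values\n");
        return 0;
}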
WX_RDB_PFCMACDAL 0x19210 #define WX_RDB_PFCMACDAH 0x19214 +/* ring assignment */ +#define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) +#define WX_RDB_PL_CFG_L4HDR BIT(1) +#define WX_RDB_PL_CFG_L3HDR BIT(2) +#define WX_RDB_PL_CFG_L2HDR BIT(3) +#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4) +#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5) /******************************* PSR Registers *******************************/ /* psr control */ @@ -96,10 +127,24 @@ #define WX_PSR_CTL_MO_SHIFT 5 #define WX_PSR_CTL_MO (0x3 << WX_PSR_CTL_MO_SHIFT) #define WX_PSR_CTL_TPE BIT(4) +#define WX_PSR_MAX_SZ 0x15020 +#define WX_PSR_VLAN_CTL 0x15088 +#define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */ +#define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */ /* mcasst/ucast overflow tbl */ #define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4)) #define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4)) +/* VM L2 contorl */ +#define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4)) +#define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */ +#define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept nomatched vlan */ +#define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */ +#define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */ +#define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */ +#define WX_PSR_VM_L2CTL_BAM BIT(11) /* accept broadcast packets */ +#define WX_PSR_VM_L2CTL_MPE BIT(12) /* multicast promiscuous */ + /* Management */ #define WX_PSR_MNG_FLEX_SEL 0x1582C #define WX_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16)) @@ -113,14 +158,35 @@ /* mac switcher */ #define WX_PSR_MAC_SWC_AD_L 0x16200 #define WX_PSR_MAC_SWC_AD_H 0x16204 -#define WX_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF)) -#define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) << 30) +#define WX_PSR_MAC_SWC_AD_H_AD(v) FIELD_PREP(U16_MAX, v) +#define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) FIELD_PREP(BIT(30), v) #define WX_PSR_MAC_SWC_AD_H_AV BIT(31) #define WX_PSR_MAC_SWC_VM_L 0x16208 #define WX_PSR_MAC_SWC_VM_H 0x1620C #define WX_PSR_MAC_SWC_IDX 0x16210 #define WX_CLEAR_VMDQ_ALL 0xFFFFFFFFU +/********************************* RSEC **************************************/ +/* general rsec */ +#define WX_RSC_CTL 0x17000 +#define WX_RSC_CTL_SAVE_MAC_ERR BIT(6) +#define WX_RSC_CTL_CRC_STRIP BIT(2) +#define WX_RSC_CTL_RX_DIS BIT(1) +#define WX_RSC_ST 0x17004 +#define WX_RSC_ST_RSEC_RDY BIT(0) + +/****************************** TDB ******************************************/ +#define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4)) +#define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ + +/****************************** TSEC *****************************************/ +/* Security Control Registers */ +#define WX_TSC_CTL 0x1D000 +#define WX_TSC_CTL_TX_DIS BIT(1) +#define WX_TSC_CTL_TSEC_DIS BIT(0) +#define WX_TSC_BUF_AE 0x1D00C +#define WX_TSC_BUF_AE_THR GENMASK(9, 0) + /************************************** MNG ********************************/ #define WX_MNG_SWFW_SYNC 0x1E008 #define WX_MNG_SWFW_SYNC_SW_MB BIT(2) @@ -133,11 +199,15 @@ /************************************* ETH MAC *****************************/ #define WX_MAC_TX_CFG 0x11000 #define WX_MAC_TX_CFG_TE BIT(0) +#define WX_MAC_TX_CFG_SPEED_MASK GENMASK(30, 29) +#define WX_MAC_TX_CFG_SPEED_10G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 0) +#define WX_MAC_TX_CFG_SPEED_1G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 3) #define WX_MAC_RX_CFG 0x11004 #define WX_MAC_RX_CFG_RE BIT(0) #define WX_MAC_RX_CFG_JE BIT(8) #define WX_MAC_PKT_FLT 0x11008 #define WX_MAC_PKT_FLT_PR BIT(0) /* promiscuous mode */ +#define WX_MAC_WDG_TIMEOUT 0x1100C #define 
WX_MAC_RX_FLOW_CTRL 0x11090 #define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */ #define WX_MMC_CONTROL 0x11800 @@ -147,10 +217,34 @@ /* Interrupt Registers */ #define WX_BME_CTL 0x12020 #define WX_PX_MISC_IC 0x100 +#define WX_PX_MISC_ICS 0x104 +#define WX_PX_MISC_IEN 0x108 +#define WX_PX_INTA 0x110 +#define WX_PX_GPIE 0x118 +#define WX_PX_GPIE_MODEL BIT(0) +#define WX_PX_IC 0x120 #define WX_PX_IMS(_i) (0x140 + (_i) * 4) +#define WX_PX_IMC(_i) (0x150 + (_i) * 4) +#define WX_PX_ISB_ADDR_L 0x160 +#define WX_PX_ISB_ADDR_H 0x164 #define WX_PX_TRANSACTION_PENDING 0x168 +#define WX_PX_ITRSEL 0x180 +#define WX_PX_ITR(_i) (0x200 + (_i) * 4) +#define WX_PX_ITR_CNT_WDIS BIT(31) +#define WX_PX_MISC_IVAR 0x4FC +#define WX_PX_IVAR(_i) (0x500 + (_i) * 4) + +#define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ +#define WX_7K_ITR 595 +#define WX_12K_ITR 336 +#define WX_SP_MAX_EITR 0x00000FF8U +#define WX_EM_MAX_EITR 0x00007FFCU /* transmit DMA Registers */ +#define WX_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40)) +#define WX_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40)) +#define WX_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) +#define WX_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) #define WX_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) /* Transmit Config masks */ #define WX_PX_TR_CFG_ENABLE BIT(0) /* Ena specific Tx Queue */ @@ -160,8 +254,22 @@ #define WX_PX_TR_CFG_THRE_SHIFT 8 /* Receive DMA Registers */ +#define WX_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40)) +#define WX_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40)) +#define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) +#define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) #define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) /* PX_RR_CFG bit definitions */ +#define WX_PX_RR_CFG_SPLIT_MODE BIT(26) +#define WX_PX_RR_CFG_RR_THER_SHIFT 16 +#define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12) +#define WX_PX_RR_CFG_RR_BUF_SZ GENMASK(11, 8) +#define WX_PX_RR_CFG_BHDRSIZE_SHIFT 6 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 12) + * = (<< 6) + */ +#define WX_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */ +#define WX_PX_RR_CFG_RR_SIZE_SHIFT 1 #define WX_PX_RR_CFG_RR_EN BIT(0) /* Number of 80 microseconds we wait for PCI Express master disable */ @@ -185,6 +293,50 @@ #define WX_SW_REGION_PTR 0x1C +#define WX_MAC_STATE_DEFAULT 0x1 +#define WX_MAC_STATE_MODIFIED 0x2 +#define WX_MAC_STATE_IN_USE 0x4 + +#define WX_MAX_RXD 8192 +#define WX_MAX_TXD 8192 + +/* Supported Rx Buffer Sizes */ +#define WX_RXBUFFER_256 256 /* Used for skb receive header */ +#define WX_RXBUFFER_2K 2048 +#define WX_MAX_RXBUFFER 16384 /* largest size for single descriptor */ + +#if MAX_SKB_FRAGS < 8 +#define WX_RX_BUFSZ ALIGN(WX_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024) +#else +#define WX_RX_BUFSZ WX_RXBUFFER_2K +#endif + +#define WX_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define WX_MAX_DATA_PER_TXD BIT(14) +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* Ether Types */ +#define WX_ETH_P_CNM 0x22E7 + +#define WX_CFG_PORT_ST 0x14404 + +/******************* Receive Descriptor bit definitions **********************/ +#define WX_RXD_STAT_DD BIT(0) /* Done */ +#define WX_RXD_STAT_EOP BIT(1) /* End of Packet */ + +#define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */ + +/*********************** Transmit Descriptor Config Masks ****************/ +#define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */ +#define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */ +#define WX_TXD_PAYLEN_SHIFT 13 /* Desc PAYLEN shift */ +#define 
+#define WX_TXD_EOP           BIT(24) /* End of Packet */
+#define WX_TXD_IFCS          BIT(25) /* Insert FCS */
+#define WX_TXD_RS            BIT(27) /* Report Status */
+
 /* Host Interface Command Structures */
 struct wx_hic_hdr {
         u8 cmd;
@@ -249,14 +401,23 @@ enum wx_mac_type {
         wx_mac_em
 };
 
+enum em_mac_type {
+        em_mac_type_unknown = 0,
+        em_mac_type_mdi,
+        em_mac_type_rgmii
+};
+
 struct wx_mac_info {
         enum wx_mac_type type;
         bool set_lben;
         u8 addr[ETH_ALEN];
         u8 perm_addr[ETH_ALEN];
+        u32 mta_shadow[128];
         s32 mc_filter_type;
         u32 mcft_size;
         u32 num_rar_entries;
+        u32 rx_pb_size;
+        u32 tx_pb_size;
         u32 max_tx_queues;
         u32 max_rx_queues;
 
@@ -284,19 +445,183 @@ struct wx_addr_filter_info {
         bool user_set_promisc;
 };
 
+struct wx_mac_addr {
+        u8 addr[ETH_ALEN];
+        u16 state; /* bitmask */
+        u64 pools;
+};
+
 enum wx_reset_type {
         WX_LAN_RESET = 0,
         WX_SW_RESET,
         WX_GLOBAL_RESET
 };
 
-struct wx_hw {
+struct wx_cb {
+        dma_addr_t dma;
+        u16     append_cnt;      /* number of skb's appended */
+        bool    page_released;
+        bool    dma_released;
+};
+
+#define WX_CB(skb) ((struct wx_cb *)(skb)->cb)
+
+/* Transmit Descriptor */
+union wx_tx_desc {
+        struct {
+                __le64 buffer_addr; /* Address of descriptor's data buf */
+                __le32 cmd_type_len;
+                __le32 olinfo_status;
+        } read;
+        struct {
+                __le64 rsvd; /* Reserved */
+                __le32 nxtseq_seed;
+                __le32 status;
+        } wb;
+};
+
+/* Receive Descriptor */
+union wx_rx_desc {
+        struct {
+                __le64 pkt_addr; /* Packet buffer address */
+                __le64 hdr_addr; /* Header buffer address */
+        } read;
+        struct {
+                struct {
+                        union {
+                                __le32 data;
+                                struct {
+                                        __le16 pkt_info; /* RSS, Pkt type */
+                                        __le16 hdr_info; /* Splithdr, hdrlen */
+                                } hs_rss;
+                        } lo_dword;
+                        union {
+                                __le32 rss; /* RSS Hash */
+                                struct {
+                                        __le16 ip_id; /* IP id */
+                                        __le16 csum; /* Packet Checksum */
+                                } csum_ip;
+                        } hi_dword;
+                } lower;
+                struct {
+                        __le32 status_error; /* ext status/error */
+                        __le16 length; /* Packet length */
+                        __le16 vlan; /* VLAN tag */
+                } upper;
+        } wb;  /* writeback */
+};
+
+#define WX_RX_DESC(R, i)     \
+        (&(((union wx_rx_desc *)((R)->desc))[i]))
+#define WX_TX_DESC(R, i)     \
+        (&(((union wx_tx_desc *)((R)->desc))[i]))
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct wx_tx_buffer {
+        union wx_tx_desc *next_to_watch;
+        struct sk_buff *skb;
+        unsigned int bytecount;
+        unsigned short gso_segs;
+        DEFINE_DMA_UNMAP_ADDR(dma);
+        DEFINE_DMA_UNMAP_LEN(len);
+};
+
+struct wx_rx_buffer {
+        struct sk_buff *skb;
+        dma_addr_t dma;
+        dma_addr_t page_dma;
+        struct page *page;
+        unsigned int page_offset;
+        u16 pagecnt_bias;
+};
+
+struct wx_queue_stats {
+        u64 packets;
+        u64 bytes;
+};
+
+/* iterator for handling rings in ring container */
+#define wx_for_each_ring(posm, headm) \
+        for (posm = (headm).ring; posm; posm = posm->next)
+
+struct wx_ring_container {
+        struct wx_ring *ring;           /* pointer to linked list of rings */
+        unsigned int total_bytes;       /* total bytes processed this int */
+        unsigned int total_packets;     /* total packets processed this int */
+        u8 count;                       /* total number of rings in vector */
+        u8 itr;                         /* current ITR setting for ring */
+};
+
+struct wx_ring {
+        struct wx_ring *next;           /* pointer to next ring in q_vector */
+        struct wx_q_vector *q_vector;   /* backpointer to host q_vector */
+        struct net_device *netdev;      /* netdev ring belongs to */
+        struct device *dev;             /* device for DMA mapping */
+        struct page_pool *page_pool;
+        void *desc;                     /* descriptor ring memory */
+        union {
+                struct wx_tx_buffer *tx_buffer_info;
+                struct wx_rx_buffer *rx_buffer_info;
+        };
+        u8 __iomem *tail;
+        dma_addr_t dma;                 /* phys. address of descriptor ring */
+        unsigned int size;              /* length in bytes */
+
+        u16 count;                      /* amount of descriptors */
+
+        u8 queue_index;                 /* needed for multiqueue queue management */
+        u8 reg_idx;                     /* holds the special value that gets
+                                         * the hardware register offset
+                                         * associated with this ring, which is
+                                         * different for DCB and RSS modes
+                                         */
+        u16 next_to_use;
+        u16 next_to_clean;
+        u16 next_to_alloc;
+
+        struct wx_queue_stats stats;
+        struct u64_stats_sync syncp;
+} ____cacheline_internodealigned_in_smp;
+
+struct wx_q_vector {
+        struct wx *wx;
+        int cpu;        /* CPU for DCA */
+        int numa_node;
+        u16 v_idx;      /* index of q_vector within array, also used for
+                         * finding the bit in EICR and friends that
+                         * represents the vector for this ring
+                         */
+        u16 itr;        /* Interrupt throttle rate written to EITR */
+        struct wx_ring_container rx, tx;
+        struct napi_struct napi;
+        struct rcu_head rcu;    /* to avoid race with update stats on free */
+
+        char name[IFNAMSIZ + 17];
+
+        /* for dynamic allocation of rings associated with this q_vector */
+        struct wx_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
+enum wx_isb_idx {
+        WX_ISB_HEADER,
+        WX_ISB_MISC,
+        WX_ISB_VEC0,
+        WX_ISB_VEC1,
+        WX_ISB_MAX
+};
+
+struct wx {
         u8 __iomem *hw_addr;
         struct pci_dev *pdev;
+        struct net_device *netdev;
         struct wx_bus_info bus;
         struct wx_mac_info mac;
+        enum em_mac_type mac_type;
         struct wx_eeprom_info eeprom;
         struct wx_addr_filter_info addr_ctrl;
+        struct wx_mac_addr *mac_table;
         u16 device_id;
         u16 vendor_id;
         u16 subsystem_device_id;
@@ -304,11 +629,63 @@ struct wx_hw {
         u8 revision_id;
         u16 oem_ssid;
         u16 oem_svid;
+        u16 msg_enable;
         bool adapter_stopped;
+        u16 tpid[8];
+        char eeprom_id[32];
+        char *driver_name;
         enum wx_reset_type reset_type;
+
+        /* PHY stuff */
+        unsigned int link;
+        int speed;
+        int duplex;
+        struct phy_device *phydev;
+
+        bool wol_enabled;
+        bool ncsi_enabled;
+        bool gpio_ctrl;
+
+        /* Tx fast path data */
+        int num_tx_queues;
+        u16 tx_itr_setting;
+        u16 tx_work_limit;
+
+        /* Rx fast path data */
+        int num_rx_queues;
+        u16 rx_itr_setting;
+        u16 rx_work_limit;
+
+        int num_q_vectors;      /* current number of q_vectors for device */
+        int max_q_vectors;      /* upper limit of q_vectors for device */
+
+        u32 tx_ring_count;
+        u32 rx_ring_count;
+
+        struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp;
+        struct wx_ring *rx_ring[64];
+        struct wx_q_vector *q_vector[64];
+
+        unsigned int queues_per_pool;
+        struct msix_entry *msix_entries;
+
+        /* misc interrupt status block */
+        dma_addr_t isb_dma;
+        u32 *isb_mem;
+        u32 isb_tag[WX_ISB_MAX];
+
+#define WX_MAX_RETA_ENTRIES 128
+        u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES];
+
+#define WX_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
+        u32 *rss_key;
+        u32 wol;
+
+        u16 bd_number;
 };
 
 #define WX_INTR_ALL (~0ULL)
+#define WX_INTR_Q(i) BIT(i)
 
 /* register operations */
 #define wr32(a, reg, value)     writel((value), ((a)->hw_addr + (reg)))
@@ -319,23 +696,23 @@ struct wx_hw {
         wr32((a), (reg) + ((off) << 2), (val))
 
 static inline u32
-rd32m(struct wx_hw *wxhw, u32 reg, u32 mask)
+rd32m(struct wx *wx, u32 reg, u32 mask)
 {
         u32 val;
 
-        val = rd32(wxhw, reg);
+        val = rd32(wx, reg);
         return val & mask;
 }
 
 static inline void
-wr32m(struct wx_hw *wxhw, u32 reg, u32 mask, u32 field)
+wr32m(struct wx *wx, u32 reg, u32 mask, u32 field)
 {
         u32 val;
 
-        val = rd32(wxhw, reg);
+        val = rd32(wx, reg);
         val = ((val & ~mask) | (field & mask));
-        wr32(wxhw, reg, val);
+        wr32(wx, reg, val);
 }
 
 /* On some domestic CPU platforms, sometimes IO is not synchronized with
@@ -343,10 +720,10 @@ wr32m(struct wx_hw *wxhw, u32 reg, u32 mask, u32 field)
  */
 #define WX_WRITE_FLUSH(H) rd32(H, WX_MIS_PWR)
 
-#define wx_err(wxhw, fmt, arg...) \
-        dev_err(&(wxhw)->pdev->dev, fmt, ##arg)
+#define wx_err(wx, fmt, arg...) \
+        dev_err(&(wx)->pdev->dev, fmt, ##arg)
 
-#define wx_dbg(wxhw, fmt, arg...) \
-        dev_dbg(&(wxhw)->pdev->dev, fmt, ##arg)
+#define wx_dbg(wx, fmt, arg...) \
+        dev_dbg(&(wx)->pdev->dev, fmt, ##arg)
 
 #endif /* _WX_TYPE_H_ */
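
A few illustrative notes on the definitions added above; the helpers below are review sketches, not code from this patch.

The MAC-switcher helpers are converted from open-coded masks and shifts to FIELD_PREP() (from <linux/bitfield.h>). A minimal sketch of how a caller might assemble the WX_PSR_MAC_SWC_AD_H value; example_mac_swc_ad_h() is hypothetical, and the meaning of ADTYPE value 0 is an assumption here:

/* Hypothetical: build the MAC switcher AD_H word. Bits 15:0 carry the upper
 * two bytes of the MAC address, bit 30 the address type, bit 31 "valid".
 */
static u32 example_mac_swc_ad_h(const u8 *addr)
{
        u32 ad_h;

        ad_h = WX_PSR_MAC_SWC_AD_H_AD(addr[0] << 8 | addr[1]);
        ad_h |= WX_PSR_MAC_SWC_AD_H_ADTYPE(0);  /* assumed: 0 = unicast entry */
        ad_h |= WX_PSR_MAC_SWC_AD_H_AV;         /* mark the entry valid */

        return ad_h;
}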
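
WX_MAX_DATA_PER_TXD and TXD_USE_COUNT() bound how many data descriptors one frame can consume: one descriptor per 16 KiB of contiguous data. A sketch of the usual worst-case accounting, modeled on similar ring drivers (example_txd_count() is illustrative only); a transmit path would typically stop the queue once fewer than DESC_NEEDED descriptors remain free:

/* Illustrative only: descriptors needed for a frame, one per
 * WX_MAX_DATA_PER_TXD chunk of the linear head and of each fragment.
 */
static u16 example_txd_count(const struct sk_buff *skb)
{
        u16 count = TXD_USE_COUNT(skb_headlen(skb));
        unsigned int f;

        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));

        return count;
}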
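
The union wx_rx_desc writeback layout, the WX_RX_DESC() accessor and the WX_RXD_STAT_* bits are what an Rx clean-up loop keys off: DD means the hardware has written the descriptor back, EOP marks the last descriptor of a frame. A minimal, hypothetical check (not the driver's actual receive path):

/* Hypothetical sketch of the descriptor-done test. */
static bool example_rx_frame_ready(struct wx_ring *rx_ring, u16 ntc)
{
        union wx_rx_desc *rx_desc = WX_RX_DESC(rx_ring, ntc);
        u32 status = le32_to_cpu(rx_desc->wb.upper.status_error);

        if (!(status & WX_RXD_STAT_DD))
                return false;                   /* still owned by hardware */

        return !!(status & WX_RXD_STAT_EOP);    /* frame is complete */
}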
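
rd32m()/wr32m() now operate on the merged struct wx. wr32m() is a read-modify-write that touches only the bits in the mask; a sketch of the calling convention (the actual driver code is more involved, this only shows how the helper is meant to be used):

/* Sketch: set WX_MAC_RX_CFG_RE without disturbing the other RX_CFG bits. */
static void example_enable_mac_rx(struct wx *wx)
{
        wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
        WX_WRITE_FLUSH(wx);     /* read back to post the write */
}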